def CAM16_to_XYZ(CAM16_specification,
                 XYZ_w,
                 L_A,
                 Y_b,
                 surround=CAM16_VIEWING_CONDITIONS['Average'],
                 discount_illuminant=False):
    """
    Converts from *CAM16* specification to *CIE XYZ* tristimulus values.

    This is the *inverse* implementation.

    Parameters
    ----------
    CAM16_specification : CAM16_Specification
        *CAM16* colour appearance model specification. Correlate of
        *Lightness* :math:`J`, correlate of *chroma* :math:`C` or correlate
        of *colourfulness* :math:`M` and *hue* angle :math:`h` in degrees
        must be specified, e.g. :math:`JCh` or :math:`JMh`.
    XYZ_w : array_like
        *CIE XYZ* tristimulus values of reference white.
    L_A : numeric or array_like
        Adapting field *luminance* :math:`L_A` in :math:`cd/m^2` (often taken
        to be 20% of the luminance of a white object in the scene).
    Y_b : numeric or array_like
        Relative luminance of background :math:`Y_b` in :math:`cd/m^2`.
    surround : CAM16_InductionFactors, optional
        Surround viewing conditions.
    discount_illuminant : bool, optional
        Discount the illuminant.

    Returns
    -------
    XYZ : ndarray
        *CIE XYZ* tristimulus values.

    Raises
    ------
    ValueError
        If neither *C* nor *M* correlates have been defined in the
        ``CAM16_specification`` argument.

    Notes
    -----

    +---------------------------+-----------------------+---------------+
    | **Domain**                | **Scale - Reference** | **Scale - 1** |
    +===========================+=======================+===============+
    | ``CAM16_specification.J`` | [0, 100]              | [0, 1]        |
    +---------------------------+-----------------------+---------------+
    | ``CAM16_specification.C`` | [0, 100]              | [0, 1]        |
    +---------------------------+-----------------------+---------------+
    | ``CAM16_specification.h`` | [0, 360]              | [0, 1]        |
    +---------------------------+-----------------------+---------------+
    | ``CAM16_specification.s`` | [0, 100]              | [0, 1]        |
    +---------------------------+-----------------------+---------------+
    | ``CAM16_specification.Q`` | [0, 100]              | [0, 1]        |
    +---------------------------+-----------------------+---------------+
    | ``CAM16_specification.M`` | [0, 100]              | [0, 1]        |
    +---------------------------+-----------------------+---------------+
    | ``CAM16_specification.H`` | [0, 360]              | [0, 1]        |
    +---------------------------+-----------------------+---------------+
    | ``XYZ_w``                 | [0, 100]              | [0, 1]        |
    +---------------------------+-----------------------+---------------+

    +---------------------------+-----------------------+---------------+
    | **Range**                 | **Scale - Reference** | **Scale - 1** |
    +===========================+=======================+===============+
    | ``XYZ``                   | [0, 100]              | [0, 1]        |
    +---------------------------+-----------------------+---------------+

    -   ``CAM16_specification`` can also be passed as a compatible argument
        to the :func:`colour.utilities.as_namedtuple` definition.

    References
    ----------
    :cite:`Li2017`

    Examples
    --------
    >>> specification = CAM16_Specification(J=41.731207905126638,
    ...                                     C=0.103355738709070,
    ...                                     h=217.067959767393010)
    >>> XYZ_w = np.array([95.05, 100.00, 108.88])
    >>> L_A = 318.31
    >>> Y_b = 20.0
    >>> CAM16_to_XYZ(specification, XYZ_w, L_A, Y_b)  # doctest: +ELLIPSIS
    array([ 19.01...,  20...    ,  21.78...])
    """

    J, C, h, _s, _Q, M, _H, _HC = as_namedtuple(CAM16_specification,
                                                CAM16_Specification)

    J = to_domain_100(J)
    C = to_domain_100(C) if C is not None else C
    h = to_domain_degrees(h)
    M = to_domain_100(M) if M is not None else M
    L_A = as_float_array(L_A)
    XYZ_w = to_domain_100(XYZ_w)
    _X_w, Y_w, _Z_w = tsplit(XYZ_w)

    # Step 0
    # Converting *CIE XYZ* tristimulus values to sharpened *RGB* values.
    RGB_w = dot_vector(M_16, XYZ_w)

    # Computing degree of adaptation :math:`D`.
    D = (np.clip(degree_of_adaptation(surround.F, L_A), 0, 1)
         if not discount_illuminant else np.ones(L_A.shape))

    n, F_L, N_bb, N_cb, z = tsplit(
        viewing_condition_dependent_parameters(Y_b, Y_w, L_A))

    D_RGB = (D[..., np.newaxis] * Y_w[..., np.newaxis] / RGB_w + 1 -
             D[..., np.newaxis])
    RGB_wc = D_RGB * RGB_w

    # Applying forward post-adaptation non linear response compression.
    RGB_aw = post_adaptation_non_linear_response_compression_forward(
        RGB_wc, F_L)

    # Computing achromatic responses for the whitepoint.
    A_w = achromatic_response_forward(RGB_aw, N_bb)

    # Step 1
    if C is None and M is not None:
        C = M / spow(F_L, 0.25)
    elif C is None:
        raise ValueError('Either "C" or "M" correlate must be defined in '
                         'the "CAM16_specification" argument!')

    # Step 2
    # Computing temporary magnitude quantity :math:`t`.
    t = temporary_magnitude_quantity_inverse(C, J, n)

    # Computing eccentricity factor *e_t*.
    e_t = eccentricity_factor(h)

    # Computing achromatic response :math:`A` for the stimulus.
    A = achromatic_response_inverse(A_w, J, surround.c, z)

    # Computing *P_1* to *P_3*.
    P_n = P(surround.N_c, N_cb, e_t, t, A, N_bb)
    _P_1, P_2, _P_3 = tsplit(P_n)

    # Step 3
    # Computing opponent colour dimensions :math:`a` and :math:`b`.
    a, b = tsplit(opponent_colour_dimensions_inverse(P_n, h))

    # Step 4
    # Computing post-adaptation non linear response compression matrix.
    RGB_a = post_adaptation_non_linear_response_compression_matrix(P_2, a, b)

    # Step 5
    # Applying inverse post-adaptation non linear response compression.
    RGB_c = post_adaptation_non_linear_response_compression_inverse(
        RGB_a, F_L)

    # Step 6
    RGB = RGB_c / D_RGB

    # Step 7
    XYZ = dot_vector(M_16_INVERSE, RGB)

    return from_range_100(XYZ)
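

# The following round-trip sketch is an editorial addition, not part of the
# original module: it assumes the public API of the "colour" package with the
# 0.3.x naming shown above, where the forward ``XYZ_to_CAM16`` definition
# returns a full ``CAM16_Specification`` that the inverse definition converts
# back to the input tristimulus values within numerical precision.
if __name__ == '__main__':
    import numpy as np

    from colour.appearance import CAM16_to_XYZ, XYZ_to_CAM16

    XYZ = np.array([19.01, 20.00, 21.78])
    XYZ_w = np.array([95.05, 100.00, 108.88])
    L_A, Y_b = 318.31, 20.0

    # Forward model: *CIE XYZ* tristimulus values to *CAM16* correlates.
    specification = XYZ_to_CAM16(XYZ, XYZ_w, L_A, Y_b)

    # Inverse model: *CAM16* correlates back to *CIE XYZ*, recovering ``XYZ``.
    print(CAM16_to_XYZ(specification, XYZ_w, L_A, Y_b))

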
def CAM16_to_XYZ(
    specification: CAM_Specification_CAM16,
    XYZ_w: ArrayLike,
    L_A: FloatingOrArrayLike,
    Y_b: FloatingOrArrayLike,
    surround: Union[
        InductionFactors_CIECAM02, InductionFactors_CAM16
    ] = VIEWING_CONDITIONS_CAM16["Average"],
    discount_illuminant: Boolean = False,
) -> NDArray:
    """
    Convert from *CAM16* specification to *CIE XYZ* tristimulus values.

    Parameters
    ----------
    specification : CAM_Specification_CAM16
        *CAM16* colour appearance model specification. Correlate of
        *Lightness* :math:`J`, correlate of *chroma* :math:`C` or correlate
        of *colourfulness* :math:`M` and *hue* angle :math:`h` in degrees
        must be specified, e.g. :math:`JCh` or :math:`JMh`.
    XYZ_w
        *CIE XYZ* tristimulus values of reference white.
    L_A
        Adapting field *luminance* :math:`L_A` in :math:`cd/m^2` (often taken
        to be 20% of the luminance of a white object in the scene).
    Y_b
        Luminous factor of background :math:`Y_b` such that
        :math:`Y_b = 100 x L_b / L_w` where :math:`L_w` is the luminance of
        the light source and :math:`L_b` is the luminance of the background.
        For viewing images, :math:`Y_b` can be the average :math:`Y` value
        for the pixels in the entire image, or frequently, a :math:`Y` value
        of 20, approximating an :math:`L^*` of 50, is used.
    surround
        Surround viewing conditions.
    discount_illuminant
        Discount the illuminant.

    Returns
    -------
    :class:`numpy.ndarray`
        *CIE XYZ* tristimulus values.

    Raises
    ------
    ValueError
        If neither *C* nor *M* correlates have been defined in the
        ``CAM_Specification_CAM16`` argument.

    Notes
    -----
    +-------------------------------+-----------------------+---------------+
    | **Domain**                    | **Scale - Reference** | **Scale - 1** |
    +===============================+=======================+===============+
    | ``CAM_Specification_CAM16.J`` | [0, 100]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CAM16.C`` | [0, 100]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CAM16.h`` | [0, 360]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CAM16.s`` | [0, 100]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CAM16.Q`` | [0, 100]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CAM16.M`` | [0, 100]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CAM16.H`` | [0, 360]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``XYZ_w``                     | [0, 100]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+

    +-----------+-----------------------+---------------+
    | **Range** | **Scale - Reference** | **Scale - 1** |
    +===========+=======================+===============+
    | ``XYZ``   | [0, 100]              | [0, 1]        |
    +-----------+-----------------------+---------------+

    References
    ----------
    :cite:`Li2017`

    Examples
    --------
    >>> specification = CAM_Specification_CAM16(J=41.731207905126638,
    ...                                         C=0.103355738709070,
    ...                                         h=217.067959767393010)
    >>> XYZ_w = np.array([95.05, 100.00, 108.88])
    >>> L_A = 318.31
    >>> Y_b = 20.0
    >>> CAM16_to_XYZ(specification, XYZ_w, L_A, Y_b)  # doctest: +ELLIPSIS
    array([ 19.01...,  20...    ,  21.78...])
    """

    J, C, h, _s, _Q, M, _H, _HC = astuple(specification)

    J = to_domain_100(J)
    C = to_domain_100(C)
    h = to_domain_degrees(h)
    M = to_domain_100(M)
    L_A = as_float_array(L_A)
    XYZ_w = to_domain_100(XYZ_w)
    _X_w, Y_w, _Z_w = tsplit(XYZ_w)

    # Step 0
    # Converting *CIE XYZ* tristimulus values to sharpened *RGB* values.
    RGB_w = vector_dot(MATRIX_16, XYZ_w)

    # Computing degree of adaptation :math:`D`.
    D = (
        np.clip(degree_of_adaptation(surround.F, L_A), 0, 1)
        if not discount_illuminant
        else ones(L_A.shape)
    )

    n, F_L, N_bb, N_cb, z = viewing_condition_dependent_parameters(
        Y_b, Y_w, L_A
    )

    D_RGB = (
        D[..., np.newaxis] * Y_w[..., np.newaxis] / RGB_w
        + 1
        - D[..., np.newaxis]
    )
    RGB_wc = D_RGB * RGB_w

    # Applying forward post-adaptation non-linear response compression.
    RGB_aw = post_adaptation_non_linear_response_compression_forward(
        RGB_wc, F_L
    )

    # Computing achromatic responses for the whitepoint.
    A_w = achromatic_response_forward(RGB_aw, N_bb)

    # Step 1
    if has_only_nan(C) and not has_only_nan(M):
        C = M / spow(F_L, 0.25)
    elif has_only_nan(C):
        raise ValueError(
            'Either "C" or "M" correlate must be defined in '
            'the "CAM_Specification_CAM16" argument!'
        )

    # Step 2
    # Computing temporary magnitude quantity :math:`t`.
    t = temporary_magnitude_quantity_inverse(C, J, n)

    # Computing eccentricity factor *e_t*.
    e_t = eccentricity_factor(h)

    # Computing achromatic response :math:`A` for the stimulus.
    A = achromatic_response_inverse(A_w, J, surround.c, z)

    # Computing *P_1* to *P_3*.
    P_n = P(surround.N_c, N_cb, e_t, t, A, N_bb)
    _P_1, P_2, _P_3 = tsplit(P_n)

    # Step 3
    # Computing opponent colour dimensions :math:`a` and :math:`b`.
    a, b = tsplit(opponent_colour_dimensions_inverse(P_n, h))

    # Step 4
    # Applying post-adaptation non-linear response compression matrix.
    RGB_a = matrix_post_adaptation_non_linear_response_compression(P_2, a, b)

    # Step 5
    # Applying inverse post-adaptation non-linear response compression.
    RGB_c = post_adaptation_non_linear_response_compression_inverse(RGB_a, F_L)

    # Step 6
    RGB = RGB_c / D_RGB

    # Step 7
    XYZ = vector_dot(MATRIX_INVERSE_16, RGB)

    return from_range_100(XYZ)
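

# The following usage sketch is an editorial addition, not part of the
# original module: it assumes the public API of the "colour" package with the
# 0.4.x naming shown above. Leaving ``C`` undefined in the
# ``CAM_Specification_CAM16`` instance exercises *Step 1* of the inverse
# model, where chroma is recovered from colourfulness as
# ``C = M / spow(F_L, 0.25)``.
if __name__ == "__main__":
    import numpy as np

    from colour.appearance import (
        CAM16_to_XYZ,
        CAM_Specification_CAM16,
        XYZ_to_CAM16,
    )

    XYZ_w = np.array([95.05, 100.00, 108.88])
    L_A, Y_b = 318.31, 20.0

    # Run the forward model once to obtain reference correlates, then keep
    # only the *JMh* subset so that ``C`` stays undefined.
    full = XYZ_to_CAM16(np.array([19.01, 20.00, 21.78]), XYZ_w, L_A, Y_b)
    JMh = CAM_Specification_CAM16(J=full.J, M=full.M, h=full.h)

    # The inverse model recovers the original tristimulus values from *JMh*.
    print(CAM16_to_XYZ(JMh, XYZ_w, L_A, Y_b))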