def test_has_only_nan(self):
    """Test :func:`colour.utilities.array.has_only_nan` definition."""

    self.assertTrue(has_only_nan(None))
    self.assertTrue(has_only_nan([None, None]))
    self.assertFalse(has_only_nan([True, None]))
    self.assertFalse(has_only_nan([0.1, np.nan, 0.3]))
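
# Illustrative module-level sketch (not the library's actual implementation):
# the assertions above hold if "has_only_nan" coerces its argument to a float
# array, where ``None`` becomes NaN and ``True`` becomes 1.0, and then checks
# that every element is NaN.
def _sketch_has_only_nan(a) -> bool:
    """Return whether ``a`` contains only NaN values once coerced to floats."""

    return bool(np.all(np.isnan(np.asarray(a, dtype=float))))
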
def CIECAM02_to_XYZ(
    specification: CAM_Specification_CIECAM02,
    XYZ_w: ArrayLike,
    L_A: FloatingOrArrayLike,
    Y_b: FloatingOrArrayLike,
    surround: InductionFactors_CIECAM02 = VIEWING_CONDITIONS_CIECAM02[
        "Average"],
    discount_illuminant: Boolean = False,
) -> NDArray:
    """
    Convert from *CIECAM02* specification to *CIE XYZ* tristimulus values.

    Parameters
    ----------
    specification
        *CIECAM02* colour appearance model specification. Correlate of
        *Lightness* :math:`J`, correlate of *chroma* :math:`C` or correlate
        of *colourfulness* :math:`M` and *hue* angle :math:`h` in degrees
        must be specified, e.g. :math:`JCh` or :math:`JMh`.
    XYZ_w
        *CIE XYZ* tristimulus values of reference white.
    L_A
        Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`, (often
        taken to be 20% of the luminance of a white object in the scene).
    Y_b
        Luminous factor of background :math:`Y_b` such as
        :math:`Y_b = 100 x L_b / L_w` where :math:`L_w` is the luminance of
        the light source and :math:`L_b` is the luminance of the background.
        For viewing images, :math:`Y_b` can be the average :math:`Y` value
        for the pixels in the entire image, or frequently, a :math:`Y` value
        of 20, approximating an :math:`L^*` of 50, is used.
    surround
        Surround viewing conditions.
    discount_illuminant
        Discount the illuminant.

    Returns
    -------
    :class:`numpy.ndarray`
        *CIE XYZ* tristimulus values.

    Raises
    ------
    ValueError
        If neither the *C* nor *M* correlate has been defined in the
        ``CAM_Specification_CIECAM02`` argument.

    Notes
    -----
    +----------------------------------+-----------------------+---------------+
    | **Domain**                       | **Scale - Reference** | **Scale - 1** |
    +==================================+=======================+===============+
    | ``CAM_Specification_CIECAM02.J`` | [0, 100]              | [0, 1]        |
    +----------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CIECAM02.C`` | [0, 100]              | [0, 1]        |
    +----------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CIECAM02.h`` | [0, 360]              | [0, 1]        |
    +----------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CIECAM02.s`` | [0, 100]              | [0, 1]        |
    +----------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CIECAM02.Q`` | [0, 100]              | [0, 1]        |
    +----------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CIECAM02.M`` | [0, 100]              | [0, 1]        |
    +----------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CIECAM02.H`` | [0, 360]              | [0, 1]        |
    +----------------------------------+-----------------------+---------------+
    | ``XYZ_w``                        | [0, 100]              | [0, 1]        |
    +----------------------------------+-----------------------+---------------+

    +-----------+-----------------------+---------------+
    | **Range** | **Scale - Reference** | **Scale - 1** |
    +===========+=======================+===============+
    | ``XYZ``   | [0, 100]              | [0, 1]        |
    +-----------+-----------------------+---------------+

    References
    ----------
    :cite:`Fairchild2004c`, :cite:`Luo2013`, :cite:`Moroneya`,
    :cite:`Wikipedia2007a`

    Examples
    --------
    >>> specification = CAM_Specification_CIECAM02(J=41.731091132513917,
    ...                                            C=0.104707757171031,
    ...                                            h=219.048432658311780)
    >>> XYZ_w = np.array([95.05, 100.00, 108.88])
    >>> L_A = 318.31
    >>> Y_b = 20.0
    >>> CIECAM02_to_XYZ(specification, XYZ_w, L_A, Y_b)  # doctest: +ELLIPSIS
    array([ 19.01...,  20...,  21.78...])
    """

    J, C, h, _s, _Q, M, _H, _HC = astuple(specification)

    J = to_domain_100(J)
    C = to_domain_100(C)
    h = to_domain_degrees(h)
    M = to_domain_100(M)
    L_A = as_float_array(L_A)
    XYZ_w = to_domain_100(XYZ_w)
    _X_w, Y_w, _Z_w = tsplit(XYZ_w)

    n, F_L, N_bb, N_cb, z = viewing_condition_dependent_parameters(
        Y_b, Y_w, L_A)

    if has_only_nan(C) and not has_only_nan(M):
        C = M / spow(F_L, 0.25)
    elif has_only_nan(C):
        raise ValueError('Either "C" or "M" correlate must be defined in '
                         'the "CAM_Specification_CIECAM02" argument!')

    # Converting *CIE XYZ* tristimulus values to *CMCCAT2000* transform
    # sharpened *RGB* values.
    RGB_w = vector_dot(CAT_CAT02, XYZ_w)

    # Computing degree of adaptation :math:`D`.
    D = (degree_of_adaptation(surround.F, L_A)
         if not discount_illuminant else ones(L_A.shape))

    # Computing full chromatic adaptation.
    RGB_wc = full_chromatic_adaptation_forward(RGB_w, RGB_w, Y_w, D)

    # Converting to *Hunt-Pointer-Estevez* colourspace.
    RGB_pw = RGB_to_rgb(RGB_wc)

    # Applying post-adaptation non-linear response compression.
    RGB_aw = post_adaptation_non_linear_response_compression_forward(
        RGB_pw, F_L)

    # Computing achromatic response for the whitepoint.
    A_w = achromatic_response_forward(RGB_aw, N_bb)

    # Computing temporary magnitude quantity :math:`t`.
    t = temporary_magnitude_quantity_inverse(C, J, n)

    # Computing eccentricity factor *e_t*.
    e_t = eccentricity_factor(h)

    # Computing achromatic response :math:`A` for the stimulus.
    A = achromatic_response_inverse(A_w, J, surround.c, z)

    # Computing *P_1* to *P_3*.
    P_n = P(surround.N_c, N_cb, e_t, t, A, N_bb)
    _P_1, P_2, _P_3 = tsplit(P_n)

    # Computing opponent colour dimensions :math:`a` and :math:`b`.
    a, b = tsplit(opponent_colour_dimensions_inverse(P_n, h))

    # Applying post-adaptation non-linear response compression matrix.
    RGB_a = matrix_post_adaptation_non_linear_response_compression(P_2, a, b)

    # Applying inverse post-adaptation non-linear response compression.
    RGB_p = post_adaptation_non_linear_response_compression_inverse(
        RGB_a, F_L)

    # Converting to *Hunt-Pointer-Estevez* colourspace.
    RGB_c = rgb_to_RGB(RGB_p)

    # Applying inverse full chromatic adaptation.
    RGB = full_chromatic_adaptation_inverse(RGB_c, RGB_w, Y_w, D)

    # Converting *CMCCAT2000* transform sharpened *RGB* values to *CIE XYZ*
    # tristimulus values.
    XYZ = vector_dot(CAT_INVERSE_CAT02, RGB)

    return from_range_100(XYZ)
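
# Illustrative round-trip sketch (hypothetical helper, not part of the
# reference implementation): a specification carrying only :math:`J`,
# :math:`M` and :math:`h` exercises the ``C = M / spow(F_L, 0.25)`` branch of
# "CIECAM02_to_XYZ" above. This assumes the forward "XYZ_to_CIECAM02"
# definition from this module is in scope.
def _sketch_CIECAM02_JMh_round_trip() -> bool:
    """Round-trip *CIE XYZ* values through a *JMh* *CIECAM02* specification."""

    XYZ = np.array([19.01, 20.00, 21.78])
    XYZ_w = np.array([95.05, 100.00, 108.88])
    L_A, Y_b = 318.31, 20.0

    # Forward model yields every correlate, including colourfulness "M".
    forward = XYZ_to_CIECAM02(XYZ, XYZ_w, L_A, Y_b)

    # Rebuilding a specification without "C" so that "M" is used instead.
    JMh = CAM_Specification_CIECAM02(J=forward.J, M=forward.M, h=forward.h)

    return np.allclose(CIECAM02_to_XYZ(JMh, XYZ_w, L_A, Y_b), XYZ)
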
def CAM16_to_XYZ(
    specification: CAM_Specification_CAM16,
    XYZ_w: ArrayLike,
    L_A: FloatingOrArrayLike,
    Y_b: FloatingOrArrayLike,
    surround: Union[
        InductionFactors_CIECAM02,
        InductionFactors_CAM16] = VIEWING_CONDITIONS_CAM16["Average"],
    discount_illuminant: Boolean = False,
) -> NDArray:
    """
    Convert from *CAM16* specification to *CIE XYZ* tristimulus values.

    Parameters
    ----------
    specification
        *CAM16* colour appearance model specification. Correlate of
        *Lightness* :math:`J`, correlate of *chroma* :math:`C` or correlate
        of *colourfulness* :math:`M` and *hue* angle :math:`h` in degrees
        must be specified, e.g. :math:`JCh` or :math:`JMh`.
    XYZ_w
        *CIE XYZ* tristimulus values of reference white.
    L_A
        Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`, (often
        taken to be 20% of the luminance of a white object in the scene).
    Y_b
        Luminous factor of background :math:`Y_b` such as
        :math:`Y_b = 100 x L_b / L_w` where :math:`L_w` is the luminance of
        the light source and :math:`L_b` is the luminance of the background.
        For viewing images, :math:`Y_b` can be the average :math:`Y` value
        for the pixels in the entire image, or frequently, a :math:`Y` value
        of 20, approximating an :math:`L^*` of 50, is used.
    surround
        Surround viewing conditions.
    discount_illuminant
        Discount the illuminant.

    Returns
    -------
    :class:`numpy.ndarray`
        *CIE XYZ* tristimulus values.

    Raises
    ------
    ValueError
        If neither the *C* nor *M* correlate has been defined in the
        ``CAM_Specification_CAM16`` argument.

    Notes
    -----
    +-------------------------------+-----------------------+---------------+
    | **Domain**                    | **Scale - Reference** | **Scale - 1** |
    +===============================+=======================+===============+
    | ``CAM_Specification_CAM16.J`` | [0, 100]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CAM16.C`` | [0, 100]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CAM16.h`` | [0, 360]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CAM16.s`` | [0, 100]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CAM16.Q`` | [0, 100]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CAM16.M`` | [0, 100]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CAM16.H`` | [0, 360]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``XYZ_w``                     | [0, 100]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+

    +-----------+-----------------------+---------------+
    | **Range** | **Scale - Reference** | **Scale - 1** |
    +===========+=======================+===============+
    | ``XYZ``   | [0, 100]              | [0, 1]        |
    +-----------+-----------------------+---------------+

    References
    ----------
    :cite:`Li2017`

    Examples
    --------
    >>> specification = CAM_Specification_CAM16(J=41.731207905126638,
    ...                                         C=0.103355738709070,
    ...                                         h=217.067959767393010)
    >>> XYZ_w = np.array([95.05, 100.00, 108.88])
    >>> L_A = 318.31
    >>> Y_b = 20.0
    >>> CAM16_to_XYZ(specification, XYZ_w, L_A, Y_b)  # doctest: +ELLIPSIS
    array([ 19.01...,  20...,  21.78...])
    """

    J, C, h, _s, _Q, M, _H, _HC = astuple(specification)

    J = to_domain_100(J)
    C = to_domain_100(C)
    h = to_domain_degrees(h)
    M = to_domain_100(M)
    L_A = as_float_array(L_A)
    XYZ_w = to_domain_100(XYZ_w)
    _X_w, Y_w, _Z_w = tsplit(XYZ_w)

    # Step 0
    # Converting *CIE XYZ* tristimulus values to sharpened *RGB* values.
    RGB_w = vector_dot(MATRIX_16, XYZ_w)

    # Computing degree of adaptation :math:`D`.
    D = (np.clip(degree_of_adaptation(surround.F, L_A), 0, 1)
         if not discount_illuminant else ones(L_A.shape))

    n, F_L, N_bb, N_cb, z = viewing_condition_dependent_parameters(
        Y_b, Y_w, L_A)

    D_RGB = (D[..., np.newaxis] * Y_w[..., np.newaxis] / RGB_w + 1 -
             D[..., np.newaxis])
    RGB_wc = D_RGB * RGB_w

    # Applying forward post-adaptation non-linear response compression.
    RGB_aw = post_adaptation_non_linear_response_compression_forward(
        RGB_wc, F_L)

    # Computing achromatic responses for the whitepoint.
    A_w = achromatic_response_forward(RGB_aw, N_bb)

    # Step 1
    if has_only_nan(C) and not has_only_nan(M):
        C = M / spow(F_L, 0.25)
    elif has_only_nan(C):
        raise ValueError('Either "C" or "M" correlate must be defined in '
                         'the "CAM_Specification_CAM16" argument!')

    # Step 2
    # Computing temporary magnitude quantity :math:`t`.
    t = temporary_magnitude_quantity_inverse(C, J, n)

    # Computing eccentricity factor *e_t*.
    e_t = eccentricity_factor(h)

    # Computing achromatic response :math:`A` for the stimulus.
    A = achromatic_response_inverse(A_w, J, surround.c, z)

    # Computing *P_1* to *P_3*.
    P_n = P(surround.N_c, N_cb, e_t, t, A, N_bb)
    _P_1, P_2, _P_3 = tsplit(P_n)

    # Step 3
    # Computing opponent colour dimensions :math:`a` and :math:`b`.
    a, b = tsplit(opponent_colour_dimensions_inverse(P_n, h))

    # Step 4
    # Applying post-adaptation non-linear response compression matrix.
    RGB_a = matrix_post_adaptation_non_linear_response_compression(P_2, a, b)

    # Step 5
    # Applying inverse post-adaptation non-linear response compression.
    RGB_c = post_adaptation_non_linear_response_compression_inverse(
        RGB_a, F_L)

    # Step 6
    RGB = RGB_c / D_RGB

    # Step 7
    XYZ = vector_dot(MATRIX_INVERSE_16, RGB)

    return from_range_100(XYZ)
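
# Illustrative usage sketch (hypothetical helper, not part of the reference
# implementation): "CAM16_to_XYZ" accepts either *CIECAM02* or *CAM16*
# induction factors per its ``surround`` annotation; this assumes the forward
# "XYZ_to_CAM16" definition from this module is in scope and simply checks
# that the inverse model recovers the input under "Dim" viewing conditions.
def _sketch_CAM16_round_trip() -> bool:
    """Round-trip *CIE XYZ* values through the *CAM16* forward/inverse pair."""

    XYZ = np.array([19.01, 20.00, 21.78])
    XYZ_w = np.array([95.05, 100.00, 108.88])
    L_A, Y_b = 318.31, 20.0
    surround = VIEWING_CONDITIONS_CAM16["Dim"]

    specification = XYZ_to_CAM16(XYZ, XYZ_w, L_A, Y_b, surround)

    return np.allclose(
        CAM16_to_XYZ(specification, XYZ_w, L_A, Y_b, surround), XYZ)
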
def ZCAM_to_XYZ(
    specification: CAM_Specification_ZCAM,
    XYZ_w: ArrayLike,
    L_A: FloatingOrArrayLike,
    Y_b: FloatingOrArrayLike,
    surround: InductionFactors_ZCAM = VIEWING_CONDITIONS_ZCAM["Average"],
    discount_illuminant: Boolean = False,
) -> NDArray:
    """
    Convert from *ZCAM* specification to *CIE XYZ* tristimulus values.

    Parameters
    ----------
    specification
        *ZCAM* colour appearance model specification. Correlate of
        *Lightness* :math:`J`, correlate of *chroma* :math:`C` or correlate
        of *colourfulness* :math:`M` and *hue* angle :math:`h` in degrees
        must be specified, e.g. :math:`JCh` or :math:`JMh`.
    XYZ_w
        Absolute *CIE XYZ* tristimulus values of the white under reference
        illuminant.
    L_A
        Test adapting field *luminance* :math:`L_A` in :math:`cd/m^2` such
        as :math:`L_A = L_w * Y_b / 100` (where :math:`L_w` is luminance of
        the reference white and :math:`Y_b` is the background luminance
        factor).
    Y_b
        Luminous factor of background :math:`Y_b` such as
        :math:`Y_b = 100 x L_b / L_w` where :math:`L_w` is the luminance of
        the light source and :math:`L_b` is the luminance of the background.
        For viewing images, :math:`Y_b` can be the average :math:`Y` value
        for the pixels in the entire image, or frequently, a :math:`Y` value
        of 20, approximating an :math:`L^*` of 50, is used.
    surround
        Surround viewing conditions induction factors.
    discount_illuminant
        Truth value indicating if the illuminant should be discounted.

    Returns
    -------
    :class:`numpy.ndarray`
        *CIE XYZ* tristimulus values.

    Raises
    ------
    ValueError
        If neither the *C* nor *M* correlate has been defined in the
        ``CAM_Specification_ZCAM`` argument.

    Warnings
    --------
    The underlying *SMPTE ST 2084:2014* transfer function is an absolute
    transfer function.

    Notes
    -----
    -   *Safdar, Hardeberg and Luo (2021)* does not specify how the chromatic
        adaptation to *CIE Standard Illuminant D65* in *Step 0* should be
        performed. A one-step *Von Kries* chromatic adaptation transform is
        not symmetrical or transitive when a degree of adaptation is
        involved. *Safdar, Hardeberg and Luo (2018)* uses the
        *Zhai and Luo (2018)* two-step chromatic adaptation transform, thus
        it seems sensible to adopt this transform for the *ZCAM* colour
        appearance model until more information is available. It is worth
        noting that a one-step *Von Kries* chromatic adaptation transform
        with support for degree of adaptation produces values closer to the
        supplemental document compared to the *Zhai and Luo (2018)* two-step
        chromatic adaptation transform, but then the *ZCAM* colour appearance
        model does not round-trip properly.
    -   *Step 4* of the inverse model uses a rounded exponent of 1.3514,
        preventing the model from round-tripping properly. Given that this
        implementation takes some liberties with respect to the chromatic
        adaptation transform to use, it was deemed appropriate to use an
        exponent value that enables the *ZCAM* colour appearance model to
        round-trip.
    -   The underlying *SMPTE ST 2084:2014* transfer function is an absolute
        transfer function, thus the domain and range values for the
        *Reference* and *1* scales are only indicative that the data is not
        affected by scale transformations.

    +-------------------------------+-----------------------+---------------+
    | **Domain**                    | **Scale - Reference** | **Scale - 1** |
    +===============================+=======================+===============+
    | ``CAM_Specification_ZCAM.J``  | [UN]                  | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.C``  | [UN]                  | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.h``  | [0, 360]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.s``  | [UN]                  | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.Q``  | [UN]                  | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.M``  | [UN]                  | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.H``  | [0, 400]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.HC`` | [UN]                  | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.V``  | [UN]                  | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.K``  | [UN]                  | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.W``  | [UN]                  | [0, 1]        |
    +-------------------------------+-----------------------+---------------+

    +-----------+-----------------------+---------------+
    | **Range** | **Scale - Reference** | **Scale - 1** |
    +===========+=======================+===============+
    | ``XYZ``   | [UN]                  | [UN]          |
    +-----------+-----------------------+---------------+

    References
    ----------
    :cite:`Safdar2018`, :cite:`Safdar2021`, :cite:`Zhai2018`

    Examples
    --------
    >>> specification = CAM_Specification_ZCAM(J=92.250443780723629,
    ...                                        C=3.0216926733329013,
    ...                                        h=196.32457375575581)
    >>> XYZ_w = np.array([256, 264, 202])
    >>> L_A = 264
    >>> Y_b = 100
    >>> surround = VIEWING_CONDITIONS_ZCAM['Average']
    >>> ZCAM_to_XYZ(specification, XYZ_w, L_A, Y_b, surround)
    ... # doctest: +ELLIPSIS
    array([ 185.,  206.,  163.])
    """

    J_z, C_z, h_z, _S_z, _Q_z, M_z, _H, _H_Z, _V_z, _K_z, _W_z = astuple(
        specification)

    J_z = to_domain_1(J_z)
    C_z = to_domain_1(C_z)
    h_z = to_domain_degrees(h_z)
    M_z = to_domain_1(M_z)
    XYZ_w = to_domain_1(XYZ_w)
    _X_w, Y_w, _Z_w = tsplit(XYZ_w)
    L_A = as_float_array(L_A)
    Y_b = as_float_array(Y_b)

    F_s, F, c, N_c = surround

    # Step 0 (Forward) - Chromatic adaptation from reference illuminant to
    # "CIE Standard Illuminant D65" illuminant using "CAT02".
    # Computing degree of adaptation :math:`D`.
    D = (degree_of_adaptation(surround.F, L_A)
         if not discount_illuminant else ones(L_A.shape))

    # Step 1 (Forward) - Computing factors related to viewing conditions and
    # independent of the test stimulus.
    # Background factor :math:`F_b`.
    F_b = np.sqrt(Y_b / Y_w)
    # Luminance level adaptation factor :math:`F_L`.
    F_L = 0.171 * spow(L_A, 1 / 3) * (1 - np.exp(-48 / 9 * L_A))

    # Step 2 (Forward) - Computing achromatic response (:math:`I_{z,w}`),
    # redness-greenness (:math:`a_{z,w}`), and yellowness-blueness
    # (:math:`b_{z,w}`).
    with domain_range_scale("ignore"):
        I_z_w, _A_z_w, B_z_w = tsplit(
            XYZ_to_Izazbz(XYZ_w, method="Safdar 2021"))

    # Step 1 (Inverse) - Computing achromatic response (:math:`I_z`).
    Q_z_p = (1.6 * F_s) / F_b**0.12
    Q_z_m = F_s**2.2 * F_b**0.5 * spow(F_L, 0.2)
    Q_z_w = 2700 * spow(I_z_w, Q_z_p) * Q_z_m

    I_z_p = (F_b**0.12) / (1.6 * F_s)
    I_z_d = 2700 * 100 * Q_z_m

    I_z = spow((J_z * Q_z_w) / I_z_d, I_z_p)

    # Step 2 (Inverse) - Computing chroma :math:`C_z`.
    if has_only_nan(M_z) and not has_only_nan(C_z):
        M_z = (C_z * Q_z_w) / 100
    elif has_only_nan(M_z):
        raise ValueError('Either "C" or "M" correlate must be defined in '
                         'the "CAM_Specification_ZCAM" argument!')

    # Step 3 (Inverse) - Computing hue angle :math:`h_z`.
    # :math:`h_z` is currently required as an input.

    # Computing eccentricity factor :math:`e_z`.
    e_z = 1.015 + np.cos(np.radians(89.038 + h_z % 360))
    h_z_r = np.radians(h_z)

    # Step 4 (Inverse) - Computing redness-greenness (:math:`a_z`), and
    # yellowness-blueness (:math:`b_z`).
    # C_z_p_e = 1.3514
    C_z_p_e = 50 / 37
    C_z_p = spow(
        (M_z * spow(I_z_w, 0.78) * F_b**0.1) /
        (100 * e_z**0.068 * spow(F_L, 0.2)),
        C_z_p_e,
    )
    a_z = C_z_p * np.cos(h_z_r)
    b_z = C_z_p * np.sin(h_z_r)

    # Step 5 (Inverse) - Computing tristimulus values :math:`XYZ_{D65}`.
    with domain_range_scale("ignore"):
        XYZ_D65 = Izazbz_to_XYZ(tstack([I_z, a_z, b_z]), method="Safdar 2021")

    XYZ = chromatic_adaptation_Zhai2018(
        XYZ_D65, TVS_D65, XYZ_w, D, D, transform="CAT02")

    return from_range_1(XYZ)
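
# Illustrative round-trip sketch (hypothetical helper, not part of the
# reference implementation): as the warning above notes, *ZCAM* operates on
# absolute tristimulus values because of the *SMPTE ST 2084:2014* transfer
# function, so this sketch uses the absolute values from the docstring
# example and assumes the forward "XYZ_to_ZCAM" definition from this module
# is in scope. A loose tolerance acknowledges the round-trip caveats
# discussed in the Notes section.
def _sketch_ZCAM_round_trip() -> bool:
    """Round-trip absolute *CIE XYZ* values through the *ZCAM* model."""

    XYZ = np.array([185.0, 206.0, 163.0])
    XYZ_w = np.array([256.0, 264.0, 202.0])
    L_A, Y_b = 264.0, 100.0
    surround = VIEWING_CONDITIONS_ZCAM["Average"]

    specification = XYZ_to_ZCAM(XYZ, XYZ_w, L_A, Y_b, surround)

    return np.allclose(
        ZCAM_to_XYZ(specification, XYZ_w, L_A, Y_b, surround), XYZ, atol=0.5)
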
def Kim2009_to_XYZ(
    specification: CAM_Specification_Kim2009,
    XYZ_w: ArrayLike,
    L_A: FloatingOrArrayLike,
    media: MediaParameters_Kim2009 = MEDIA_PARAMETERS_KIM2009["CRT Displays"],
    surround: InductionFactors_Kim2009 = VIEWING_CONDITIONS_KIM2009[
        "Average"],
    discount_illuminant: Boolean = False,
    n_c: Floating = 0.57,
) -> NDArray:
    """
    Convert from *Kim, Weyrich and Kautz (2009)* specification to *CIE XYZ*
    tristimulus values.

    Parameters
    ----------
    specification
        *Kim, Weyrich and Kautz (2009)* colour appearance model
        specification. Correlate of *Lightness* :math:`J`, correlate of
        *chroma* :math:`C` or correlate of *colourfulness* :math:`M` and
        *hue* angle :math:`h` in degrees must be specified, e.g. :math:`JCh`
        or :math:`JMh`.
    XYZ_w
        *CIE XYZ* tristimulus values of reference white.
    L_A
        Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`, (often
        taken to be 20% of the luminance of a white object in the scene).
    media
        Media parameters.
    surround
        Surround viewing conditions induction factors.
    discount_illuminant
        Discount the illuminant.
    n_c
        Cone response sigmoidal curve modulating factor :math:`n_c`.

    Returns
    -------
    :class:`numpy.ndarray`
        *CIE XYZ* tristimulus values.

    Raises
    ------
    ValueError
        If neither the *C* nor *M* correlate has been defined in the
        ``CAM_Specification_Kim2009`` argument.

    Notes
    -----
    +---------------------------------+-----------------------+---------------+
    | **Domain**                      | **Scale - Reference** | **Scale - 1** |
    +=================================+=======================+===============+
    | ``CAM_Specification_Kim2009.J`` | [0, 100]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Kim2009.C`` | [0, 100]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Kim2009.h`` | [0, 360]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Kim2009.s`` | [0, 100]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Kim2009.Q`` | [0, 100]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Kim2009.M`` | [0, 100]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Kim2009.H`` | [0, 360]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+
    | ``XYZ_w``                       | [0, 100]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+

    +-----------+-----------------------+---------------+
    | **Range** | **Scale - Reference** | **Scale - 1** |
    +===========+=======================+===============+
    | ``XYZ``   | [0, 100]              | [0, 1]        |
    +-----------+-----------------------+---------------+

    References
    ----------
    :cite:`Kim2009`

    Examples
    --------
    >>> specification = CAM_Specification_Kim2009(J=28.861908975839647,
    ...                                           C=0.5592455924373706,
    ...                                           h=219.04806677662953)
    >>> XYZ_w = np.array([95.05, 100.00, 108.88])
    >>> L_A = 318.31
    >>> media = MEDIA_PARAMETERS_KIM2009['CRT Displays']
    >>> surround = VIEWING_CONDITIONS_KIM2009['Average']
    >>> Kim2009_to_XYZ(specification, XYZ_w, L_A, media, surround)
    ... # doctest: +ELLIPSIS
    array([ 19.0099995...,  19.9999999...,  21.7800000...])
    """

    J, C, h, _s, _Q, M, _H, _HC = astuple(specification)

    J = to_domain_100(J)
    C = to_domain_100(C)
    h = to_domain_degrees(h)
    M = to_domain_100(M)
    L_A = as_float_array(L_A)
    XYZ_w = to_domain_100(XYZ_w)
    _X_w, Y_w, _Z_w = tsplit(XYZ_w)

    # Converting *CIE XYZ* tristimulus values to *CMCCAT2000* transform
    # sharpened *RGB* values.
    RGB_w = vector_dot(CAT_CAT02, XYZ_w)

    # Computing degree of adaptation :math:`D`.
    D = (
        degree_of_adaptation(surround.F, L_A)
        if not discount_illuminant
        else ones(L_A.shape)
    )

    # Computing full chromatic adaptation.
    XYZ_wc = full_chromatic_adaptation_forward(RGB_w, RGB_w, Y_w, D)

    # Converting to *Hunt-Pointer-Estevez* colourspace.
    LMS_w = RGB_to_rgb(XYZ_wc)

    # n_q = 0.1308
    # J = Q / spow(Y_w, n_q)
    if has_only_nan(C) and not has_only_nan(M):
        a_m, b_m = 0.11, 0.61
        C = M / (a_m * np.log10(Y_w) + b_m)
    elif has_only_nan(C):
        raise ValueError(
            'Either "C" or "M" correlate must be defined in '
            'the "CAM_Specification_Kim2009" argument!'
        )

    # Cones absolute response.
    LMS_w_n_c = spow(LMS_w, n_c)
    L_A_n_c = spow(L_A, n_c)
    LMS_wp = LMS_w_n_c / (LMS_w_n_c + L_A_n_c)

    # Achromatic signal :math:`A_w`.
    v_A = np.array([40, 20, 1])
    A_w = np.sum(v_A * LMS_wp, axis=-1) / 61

    # Perceived *Lightness* :math:`J_p`.
    J_p = (J / 100 - 1) / media.E + 1

    # Achromatic signal :math:`A`.
    a_j, b_j, n_j, o_j = 0.89, 0.24, 3.65, 0.65
    J_p_n_j = spow(J_p, n_j)
    A = A_w * ((a_j * J_p_n_j) / (J_p_n_j + spow(o_j, n_j)) + b_j)

    # Opponent signals :math:`a` and :math:`b`.
    a_k, n_k = 456.5, 0.62
    C_a_k_n_k = spow(C / a_k, 1 / n_k)
    hr = np.radians(h)
    a, b = np.cos(hr) * C_a_k_n_k, np.sin(hr) * C_a_k_n_k

    # Cones absolute response.
    M = np.array(
        [
            [1.0000, 0.3215, 0.2053],
            [1.0000, -0.6351, -0.1860],
            [1.0000, -0.1568, -4.4904],
        ]
    )
    LMS_p = vector_dot(M, tstack([A, a, b]))
    LMS = spow((-spow(L_A, n_c) * LMS_p) / (LMS_p - 1), 1 / n_c)

    # Converting to *Hunt-Pointer-Estevez* colourspace.
    RGB_c = rgb_to_RGB(LMS)

    # Applying inverse full chromatic adaptation.
    RGB = full_chromatic_adaptation_inverse(RGB_c, RGB_w, Y_w, D)

    XYZ = vector_dot(CAT_INVERSE_CAT02, RGB)

    return from_range_100(XYZ)
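
# Illustrative sketch (hypothetical helper, not part of the reference
# implementation): a *JMh* specification exercises the
# ``C = M / (a_m * np.log10(Y_w) + b_m)`` branch of "Kim2009_to_XYZ" above,
# assuming the forward "XYZ_to_Kim2009" definition from this module is in
# scope.
def _sketch_Kim2009_JMh_round_trip() -> bool:
    """Round-trip *CIE XYZ* values through a *JMh* *Kim2009* specification."""

    XYZ = np.array([19.01, 20.00, 21.78])
    XYZ_w = np.array([95.05, 100.00, 108.88])
    L_A = 318.31
    media = MEDIA_PARAMETERS_KIM2009["CRT Displays"]
    surround = VIEWING_CONDITIONS_KIM2009["Average"]

    forward = XYZ_to_Kim2009(XYZ, XYZ_w, L_A, media, surround)

    # Rebuilding a specification without "C" so that "M" is used instead.
    JMh = CAM_Specification_Kim2009(J=forward.J, M=forward.M, h=forward.h)

    return np.allclose(Kim2009_to_XYZ(JMh, XYZ_w, L_A, media, surround), XYZ)
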