def chromatic_adaptation_matrix_VonKries(XYZ_w, XYZ_wr, transform='CAT02'):
    """
    Computes the *chromatic adaptation* matrix from test viewing conditions
    to reference viewing conditions.

    Parameters
    ----------
    XYZ_w : array_like
        Test viewing condition *CIE XYZ* tristimulus values of whitepoint.
    XYZ_wr : array_like
        Reference viewing condition *CIE XYZ* tristimulus values of
        whitepoint.
    transform : unicode, optional
        **{'CAT02', 'XYZ Scaling', 'Von Kries', 'Bradford', 'Sharp',
        'Fairchild', 'CMCCAT97', 'CMCCAT2000', 'CAT02_BRILL_CAT', 'Bianco',
        'Bianco PC'}**,
        Chromatic adaptation transform.

    Returns
    -------
    ndarray
        Chromatic adaptation matrix :math:`M_{cat}`.

    Raises
    ------
    KeyError
        If chromatic adaptation method is not defined.
    """

    M = CHROMATIC_ADAPTATION_TRANSFORMS.get(transform)

    if M is None:
        raise KeyError(
            '"{0}" chromatic adaptation transform is not defined! Supported '
            'methods: "{1}".'.format(transform,
                                     CHROMATIC_ADAPTATION_TRANSFORMS.keys()))

    # Sharpened cone responses of the test and reference whitepoints.
    rgb_w = np.einsum('...i,...ij->...j', XYZ_w, np.transpose(M))
    rgb_wr = np.einsum('...i,...ij->...j', XYZ_wr, np.transpose(M))

    # Von Kries diagonal scaling in the sharpened cone response space.
    D = rgb_wr / rgb_w
    D = row_as_diagonal(D)

    M_CAT = dot_matrix(np.linalg.inv(M), D)
    M_CAT = dot_matrix(M_CAT, M)

    return M_CAT
def RGB_to_RGB(RGB,
               input_colourspace,
               output_colourspace,
               chromatic_adaptation_transform='CAT02'):
    """
    Converts given *RGB* colourspace array from the input *RGB* colourspace
    to the output *RGB* colourspace using the given *chromatic adaptation*
    method.

    Parameters
    ----------
    RGB : array_like
        *RGB* colourspace array.
    input_colourspace : RGB_Colourspace
        *RGB* input colourspace.
    output_colourspace : RGB_Colourspace
        *RGB* output colourspace.
    chromatic_adaptation_transform : unicode, optional
        **{'CAT02', 'XYZ Scaling', 'Von Kries', 'Bradford', 'Sharp',
        'Fairchild', 'CMCCAT97', 'CMCCAT2000', 'CAT02_BRILL_CAT', 'Bianco',
        'Bianco PC'}**,
        *Chromatic adaptation* transform.

    Returns
    -------
    ndarray
        *RGB* colourspace array.

    Notes
    -----
    -   Input / output *RGB* colourspace arrays are in domain / range [0, 1].
    -   Input / output *RGB* colourspace arrays are assumed to be
        representing linear light values.

    Examples
    --------
    >>> from colour import sRGB_COLOURSPACE, PROPHOTO_RGB_COLOURSPACE
    >>> RGB = np.array([0.01103742, 0.12734226, 0.11632971])
    >>> RGB_to_RGB(
    ...     RGB,
    ...     sRGB_COLOURSPACE,
    ...     PROPHOTO_RGB_COLOURSPACE)  # doctest: +ELLIPSIS
    array([ 0.0643538...,  0.1157289...,  0.1158038...])
    """

    # Chromatic adaptation between the two colourspace whitepoints.
    M_CAT = chromatic_adaptation_matrix_VonKries(
        xy_to_XYZ(input_colourspace.whitepoint),
        xy_to_XYZ(output_colourspace.whitepoint),
        chromatic_adaptation_transform)

    # Composite conversion matrix: input RGB -> XYZ -> adapted XYZ -> output
    # RGB.
    M_conversion = dot_matrix(M_CAT, input_colourspace.RGB_to_XYZ_matrix)
    M_conversion = dot_matrix(output_colourspace.XYZ_to_RGB_matrix,
                              M_conversion)

    return dot_vector(M_conversion, RGB)
def RGB_to_RGB(RGB,
               input_colourspace,
               output_colourspace,
               chromatic_adaptation_transform='CAT02'):
    """
    Converts from given input *RGB* colourspace to output *RGB* colourspace
    using given *chromatic adaptation* method.

    Parameters
    ----------
    RGB : array_like
        *RGB* colourspace array.
    input_colourspace : RGB_Colourspace
        *RGB* input colourspace.
    output_colourspace : RGB_Colourspace
        *RGB* output colourspace.
    chromatic_adaptation_transform : unicode, optional
        **{'CAT02', 'XYZ Scaling', 'Von Kries', 'Bradford', 'Sharp',
        'Fairchild', 'CMCCAT97', 'CMCCAT2000', 'CAT02_BRILL_CAT', 'Bianco',
        'Bianco PC'}**,
        *Chromatic adaptation* transform.

    Returns
    -------
    ndarray
        *RGB* colourspace array.

    Notes
    -----
    -   *RGB* colourspace arrays are in domain [0, 1].

    Examples
    --------
    >>> from colour import sRGB_COLOURSPACE, PROPHOTO_RGB_COLOURSPACE
    >>> RGB = np.array([0.01103604, 0.12734466, 0.11631037])
    >>> RGB_to_RGB(
    ...     RGB,
    ...     sRGB_COLOURSPACE,
    ...     PROPHOTO_RGB_COLOURSPACE)  # doctest: +ELLIPSIS
    array([ 0.0643338...,  0.1157362...,  0.1157614...])
    """

    # Chromatic adaptation matrix between the two whitepoints.
    cat = chromatic_adaptation_matrix_VonKries(
        xy_to_XYZ(input_colourspace.whitepoint),
        xy_to_XYZ(output_colourspace.whitepoint),
        chromatic_adaptation_transform)

    # Composite matrix: input RGB -> XYZ -> adapted XYZ -> output RGB.
    M = dot_matrix(cat, input_colourspace.RGB_to_XYZ_matrix)
    M = dot_matrix(output_colourspace.XYZ_to_RGB_matrix, M)

    RGB = dot_vector(M, RGB)

    return RGB
def RGB_to_RGB_matrix(input_colourspace,
                      output_colourspace,
                      chromatic_adaptation_transform='CAT02'):
    """
    Computes the matrix :math:`M` converting from the given input *RGB*
    colourspace to the output *RGB* colourspace using the given *chromatic
    adaptation* method.

    Parameters
    ----------
    input_colourspace : RGB_Colourspace
        *RGB* input colourspace.
    output_colourspace : RGB_Colourspace
        *RGB* output colourspace.
    chromatic_adaptation_transform : unicode, optional
        **{'CAT02', 'XYZ Scaling', 'Von Kries', 'Bradford', 'Sharp',
        'Fairchild', 'CMCCAT97', 'CMCCAT2000', 'CAT02_BRILL_CAT', 'Bianco',
        'Bianco PC', None}**,
        *Chromatic adaptation* transform, if *None* no chromatic adaptation
        is performed.

    Returns
    -------
    ndarray
        Conversion matrix :math:`M`.

    Examples
    --------
    >>> from colour.models import sRGB_COLOURSPACE, PROPHOTO_RGB_COLOURSPACE
    >>> RGB_to_RGB_matrix(sRGB_COLOURSPACE, PROPHOTO_RGB_COLOURSPACE)
    ... # doctest: +ELLIPSIS
    array([[ 0.5288241...,  0.3340609...,  0.1373616...],
           [ 0.0975294...,  0.8790074...,  0.0233981...],
           [ 0.0163599...,  0.1066124...,  0.8772485...]])
    """

    # Start from the plain input RGB -> XYZ matrix; a chromatic adaptation
    # step is only inserted when a transform is requested.
    matrix = input_colourspace.RGB_to_XYZ_matrix

    if chromatic_adaptation_transform is not None:
        M_CAT = chromatic_adaptation_matrix_VonKries(
            xy_to_XYZ(input_colourspace.whitepoint),
            xy_to_XYZ(output_colourspace.whitepoint),
            chromatic_adaptation_transform)

        matrix = dot_matrix(M_CAT, input_colourspace.RGB_to_XYZ_matrix)

    return dot_matrix(output_colourspace.XYZ_to_RGB_matrix, matrix)
def test_dot_matrix(self):
    """
    Tests :func:`colour.utilities.array.dot_matrix` definition.
    """

    a = np.array([
        [0.7328, 0.4296, -0.1624],
        [-0.7036, 1.6975, 0.0061],
        [0.0030, 0.0136, 0.9834],
    ])
    a = np.reshape(np.tile(a, (6, 1)), (6, 3, 3))
    b = a

    # Every stacked pair is identical, so the expected result is six copies
    # of the single 3x3 product.
    expected = np.tile(
        np.array([
            [0.23424208, 1.04184824, -0.27609032],
            [-1.70994078, 2.57932265, 0.13061813],
            [-0.00442036, 0.03774904, 0.96667132],
        ]), (6, 1, 1))

    np.testing.assert_almost_equal(dot_matrix(a, b), expected, decimal=7)
def rgb_to_RGB(rgb):
    """
    Converts given *Hunt-Pointer-Estevez* :math:`\\rho\\gamma\\beta`
    colourspace array to *RGB* array.

    Parameters
    ----------
    rgb : array_like
        *Hunt-Pointer-Estevez* :math:`\\rho\\gamma\\beta` colourspace array.

    Returns
    -------
    ndarray
        *RGB* array.

    Examples
    --------
    >>> rgb = np.array([19.99693975, 20.00186123, 20.01350530])
    >>> rgb_to_RGB(rgb)  # doctest: +ELLIPSIS
    array([ 19.9937078...,  20.0039363...,  20.0132638...])
    """

    # HPE cone responses -> XYZ -> CAT02 sharpened RGB in a single matrix
    # application.
    RGB = dot_vector(dot_matrix(CAT02_CAT, HPE_TO_XYZ_MATRIX), rgb)

    return RGB
def RGB_to_rgb(RGB):
    """
    Converts given *RGB* array to *Hunt-Pointer-Estevez*
    :math:`\\rho\\gamma\\beta` colourspace.

    Parameters
    ----------
    RGB : array_like
        *RGB* array.

    Returns
    -------
    ndarray
        *Hunt-Pointer-Estevez* :math:`\\rho\\gamma\\beta` colourspace array.

    Examples
    --------
    >>> RGB = np.array([19.99370783, 20.00393634, 20.01326387])
    >>> RGB_to_rgb(RGB)  # doctest: +ELLIPSIS
    array([ 19.9969397...,  20.0018612...,  20.0135053...])
    """

    # CAT02 sharpened RGB -> XYZ -> HPE cone responses in a single matrix
    # application.
    rgb = dot_vector(dot_matrix(XYZ_TO_HPE_MATRIX, CAT02_INVERSE_CAT), RGB)

    return rgb
def anomalous_trichromacy_matrix_Machado2009(cmfs, primaries, d_LMS):
    """
    Computes the *Machado et al. (2009)* *CVD* matrix for given *LMS* cone
    fundamentals colour matching functions and display primaries tri-spectral
    distributions with given :math:`\\Delta_{LMS}` shift amount in nanometers
    to simulate anomalous trichromacy.

    Parameters
    ----------
    cmfs : LMS_ConeFundamentals
        *LMS* cone fundamentals colour matching functions.
    primaries : RGB_DisplayPrimaries
        *RGB* display primaries tri-spectral distributions.
    d_LMS : array_like
        :math:`\\Delta_{LMS}` shift amount in nanometers.

    Notes
    -----
    -   Input *LMS* cone fundamentals colour matching functions interval is
        expected to be 1 nanometer, incompatible input will be interpolated
        at 1 nanometer interval.
    -   Input :math:`\\Delta_{LMS}` shift amount is in domain [0, 20].

    Returns
    -------
    ndarray
        Anomalous trichromacy matrix.

    References
    ----------
    :cite:`Colblindorb`, :cite:`Colblindora`, :cite:`Colblindorc`,
    :cite:`Machado2009`

    Examples
    --------
    >>> from colour import DISPLAYS_RGB_PRIMARIES, LMS_CMFS
    >>> cmfs = LMS_CMFS['Stockman & Sharpe 2 Degree Cone Fundamentals']
    >>> d_LMS = np.array([15, 0, 0])
    >>> primaries = DISPLAYS_RGB_PRIMARIES['Apple Studio Display']
    >>> anomalous_trichromacy_matrix_Machado2009(cmfs, primaries, d_LMS)
    ... # doctest: +ELLIPSIS
    array([[-0.2777465...,  2.6515008..., -1.3737543...],
           [ 0.2718936...,  0.2004786...,  0.5276276...],
           [ 0.0064404...,  0.2592157...,  0.7343437...]])
    """

    # The model requires 1 nanometer sampling; resample on a copy if needed.
    if cmfs.shape.interval != 1:
        cmfs = cmfs.copy().interpolate(SpectralShape(interval=1))

    # WSYBRG matrix for the normal observer.
    M_normal = RGB_to_WSYBRG_matrix(cmfs, primaries)

    # WSYBRG matrix for the shifted (anomalous) cone fundamentals.
    shifted_cmfs = anomalous_trichromacy_cmfs_Machado2009(cmfs, d_LMS)
    M_anomalous = RGB_to_WSYBRG_matrix(shifted_cmfs, primaries)

    return dot_matrix(np.linalg.inv(M_normal), M_anomalous)
def camera_space_to_RGB(RGB, M_XYZ_to_camera_space, RGB_to_XYZ_matrix):
    """
    Converts given *RGB* array from *camera space* to given *RGB*
    colourspace.

    Parameters
    ----------
    RGB : array_like
        Camera space *RGB* colourspace array.
    M_XYZ_to_camera_space : array_like
        Matrix converting from *CIE XYZ* tristimulus values to *camera
        space*.
    RGB_to_XYZ_matrix : array_like
        Matrix converting from *RGB* colourspace to *CIE XYZ* tristimulus
        values.

    Returns
    -------
    ndarray
        *RGB* colourspace array.

    Examples
    --------
    >>> RGB = np.array([0.80660, 0.81638, 0.65885])
    >>> M_XYZ_to_camera_space = np.array([
    ...     [0.47160000, 0.06030000, -0.08300000],
    ...     [-0.77980000, 1.54740000, 0.24800000],
    ...     [-0.14960000, 0.19370000, 0.66510000]])
    >>> RGB_to_XYZ_matrix = np.array([
    ...     [0.41238656, 0.35759149, 0.18045049],
    ...     [0.21263682, 0.71518298, 0.07218020],
    ...     [0.01933062, 0.11919716, 0.95037259]])
    >>> camera_space_to_RGB(
    ...     RGB,
    ...     M_XYZ_to_camera_space,
    ...     RGB_to_XYZ_matrix)  # doctest: +ELLIPSIS
    array([ 0.7564180...,  0.8683192...,  0.6044589...])
    """

    # Composite RGB colourspace -> camera space matrix.
    M_RGB_camera = dot_matrix(M_XYZ_to_camera_space, RGB_to_XYZ_matrix)

    # Normalise each row so that white is preserved through the conversion.
    M_RGB_camera /= np.transpose(np.sum(M_RGB_camera, axis=1)[np.newaxis])

    # Invert to go from camera space back to the RGB colourspace.
    RGB_f = dot_vector(np.linalg.inv(M_RGB_camera), RGB)

    return RGB_f
def XYZ_to_RLAB(XYZ,
                XYZ_n,
                Y_n,
                sigma=RLAB_VIEWING_CONDITIONS.get('Average'),
                D=RLAB_D_FACTOR.get('Hard Copy Images')):
    """
    Computes the RLAB model color appearance correlates.

    Parameters
    ----------
    XYZ : array_like
        *CIE XYZ* tristimulus values of test sample / stimulus in domain
        [0, 100].
    XYZ_n : array_like
        *CIE XYZ* tristimulus values of reference white in domain [0, 100].
    Y_n : numeric or array_like
        Absolute adapting luminance in :math:`cd/m^2`.
    sigma : numeric or array_like, optional
        Relative luminance of the surround, see
        :attr:`RLAB_VIEWING_CONDITIONS` for reference.
    D : numeric or array_like, optional
        *Discounting-the-Illuminant* factor in domain [0, 1].

    Returns
    -------
    RLAB_Specification
        RLAB colour appearance model specification.

    Warning
    -------
    The input domain of that definition is non standard!

    Notes
    -----
    -   Input *CIE XYZ* tristimulus values are in domain [0, 100].
    -   Input *CIE XYZ_n* tristimulus values are in domain [0, 100].

    Examples
    --------
    >>> XYZ = np.array([19.01, 20.00, 21.78])
    >>> XYZ_n = np.array([109.85, 100, 35.58])
    >>> Y_n = 31.83
    >>> sigma = RLAB_VIEWING_CONDITIONS['Average']
    >>> D = RLAB_D_FACTOR['Hard Copy Images']
    >>> XYZ_to_RLAB(XYZ, XYZ_n, Y_n, sigma, D)  # doctest: +ELLIPSIS
    RLAB_Specification(J=49.8347069..., C=54.8700585..., h=286.4860208..., \
s=1.1010410..., HC=None, a=15.5711021..., b=-52.6142956...)
    """

    Y_n = np.asarray(Y_n)
    D = np.asarray(D)
    sigma = np.asarray(sigma)

    # Conversion of the reference white to Hunt-Pointer-Estevez cone
    # responses.
    LMS_n = XYZ_to_rgb(XYZ_n)

    # Building the adaptation :math:`A` matrix.
    LMS_l_E = (3 * LMS_n) / (LMS_n[0] + LMS_n[1] + LMS_n[2])
    Y_n_cbrt = Y_n[..., np.newaxis] ** (1 / 3)
    LMS_p_L = ((1 + Y_n_cbrt + LMS_l_E) /
               (1 + Y_n_cbrt + (1 / LMS_l_E)))
    LMS_a_L = (LMS_p_L + D[..., np.newaxis] * (1 - LMS_p_L)) / LMS_n

    A = row_as_diagonal(LMS_a_L)
    M = dot_matrix(dot_matrix(R_MATRIX, A), XYZ_TO_HPE_MATRIX)
    X_ref, Y_ref, Z_ref = tsplit(dot_vector(M, XYZ))

    # Correlate of *Lightness* :math:`L^R`.
    LR = 100 * (Y_ref ** sigma)

    # Opponent colour dimensions :math:`a^R` and :math:`b^R`.
    aR = 430 * ((X_ref ** sigma) - (Y_ref ** sigma))
    bR = 170 * ((Y_ref ** sigma) - (Z_ref ** sigma))

    # *Hue* angle :math:`h^R`.
    hR = np.degrees(np.arctan2(bR, aR)) % 360
    # TODO: Implement hue composition computation.

    # Correlate of *chroma* :math:`C^R`.
    CR = np.sqrt((aR ** 2) + (bR ** 2))

    # Correlate of *saturation* :math:`s^R`.
    sR = CR / LR

    return RLAB_Specification(LR, CR, hR, sR, None, aR, bR)
def chromatic_adaptation_matrix_VonKries(XYZ_w, XYZ_wr, transform='CAT02'):
    """
    Computes the *chromatic adaptation* matrix from test viewing conditions
    to reference viewing conditions.

    Parameters
    ----------
    XYZ_w : array_like
        Test viewing condition *CIE XYZ* tristimulus values of whitepoint.
    XYZ_wr : array_like
        Reference viewing condition *CIE XYZ* tristimulus values of
        whitepoint.
    transform : unicode, optional
        **{'CAT02', 'XYZ Scaling', 'Von Kries', 'Bradford', 'Sharp',
        'Fairchild', 'CMCCAT97', 'CMCCAT2000', 'CAT02_BRILL_CAT', 'Bianco',
        'Bianco PC'}**,
        Chromatic adaptation transform.

    Returns
    -------
    ndarray
        Chromatic adaptation matrix :math:`M_{cat}`.

    Raises
    ------
    KeyError
        If chromatic adaptation method is not defined.

    Notes
    -----

    +------------+-----------------------+---------------+
    | **Domain** | **Scale - Reference** | **Scale - 1** |
    +============+=======================+===============+
    | ``XYZ_w``  | [0, 1]                | [0, 1]        |
    +------------+-----------------------+---------------+
    | ``XYZ_wr`` | [0, 1]                | [0, 1]        |
    +------------+-----------------------+---------------+

    References
    ----------
    :cite:`Fairchild2013t`

    Examples
    --------
    >>> XYZ_w = np.array([0.95045593, 1.00000000, 1.08905775])
    >>> XYZ_wr = np.array([0.96429568, 1.00000000, 0.82510460])
    >>> chromatic_adaptation_matrix_VonKries(XYZ_w, XYZ_wr)
    ... # doctest: +ELLIPSIS
    array([[ 1.0425738...,  0.0308910..., -0.0528125...],
           [ 0.0221934...,  1.0018566..., -0.0210737...],
           [-0.0011648..., -0.0034205...,  0.7617890...]])

    Using Bradford method:

    >>> XYZ_w = np.array([0.95045593, 1.00000000, 1.08905775])
    >>> XYZ_wr = np.array([0.96429568, 1.00000000, 0.82510460])
    >>> method = 'Bradford'
    >>> chromatic_adaptation_matrix_VonKries(XYZ_w, XYZ_wr, method)
    ... # doctest: +ELLIPSIS
    array([[ 1.0479297...,  0.0229468..., -0.0501922...],
           [ 0.0296278...,  0.9904344..., -0.0170738...],
           [-0.0092430...,  0.0150551...,  0.7518742...]])
    """

    XYZ_w = to_domain_1(XYZ_w)
    XYZ_wr = to_domain_1(XYZ_wr)

    M = CHROMATIC_ADAPTATION_TRANSFORMS.get(transform)

    if M is None:
        raise KeyError(
            '"{0}" chromatic adaptation transform is not defined! Supported '
            'methods: "{1}".'.format(transform,
                                     CHROMATIC_ADAPTATION_TRANSFORMS.keys()))

    # Cone-like responses of both whitepoints in the transform space.
    cone_w = np.einsum('...i,...ij->...j', XYZ_w, np.transpose(M))
    cone_wr = np.einsum('...i,...ij->...j', XYZ_wr, np.transpose(M))

    # Diagonal Von Kries scaling followed by the round-trip through the
    # transform space.
    scaling = row_as_diagonal(cone_wr / cone_w)

    M_CAT = dot_matrix(np.linalg.inv(M), scaling)
    M_CAT = dot_matrix(M_CAT, M)

    return M_CAT
def XYZ_to_RLAB(XYZ,
                XYZ_n,
                Y_n,
                sigma=RLAB_VIEWING_CONDITIONS.get('Average'),
                D=RLAB_D_FACTOR.get('Hard Copy Images')):
    """
    Computes the RLAB model color appearance correlates.

    Parameters
    ----------
    XYZ : array_like
        *CIE XYZ* tristimulus values of test sample / stimulus in domain
        [0, 100].
    XYZ_n : array_like
        *CIE XYZ* tristimulus values of reference white in domain [0, 100].
    Y_n : numeric or array_like
        Absolute adapting luminance in :math:`cd/m^2`.
    sigma : numeric or array_like, optional
        Relative luminance of the surround, see
        :attr:`RLAB_VIEWING_CONDITIONS` for reference.
    D : numeric or array_like, optional
        *Discounting-the-Illuminant* factor in domain [0, 1].

    Returns
    -------
    RLAB_Specification
        RLAB colour appearance model specification.

    Warning
    -------
    The input domain of that definition is non standard!

    Notes
    -----
    -   Input *CIE XYZ* tristimulus values are in domain [0, 100].
    -   Input *CIE XYZ_n* tristimulus values are in domain [0, 100].

    Examples
    --------
    >>> XYZ = np.array([19.01, 20.00, 21.78])
    >>> XYZ_n = np.array([109.85, 100, 35.58])
    >>> Y_n = 31.83
    >>> sigma = RLAB_VIEWING_CONDITIONS['Average']
    >>> D = RLAB_D_FACTOR['Hard Copy Images']
    >>> XYZ_to_RLAB(XYZ, XYZ_n, Y_n, sigma, D)  # doctest: +ELLIPSIS
    RLAB_Specification(J=49.8347069..., C=54.8700585..., h=286.4860208..., \
s=1.1010410..., HC=None, a=15.5711021..., b=-52.6142956...)
    """

    Y_n = np.asarray(Y_n)
    D = np.asarray(D)
    sigma = np.asarray(sigma)

    # Reference white expressed as Hunt-Pointer-Estevez cone responses.
    LMS_n = XYZ_to_rgb(XYZ_n)

    # Adaptation stage producing the :math:`A` matrix.
    LMS_l_E = (3 * LMS_n) / (LMS_n[0] + LMS_n[1] + LMS_n[2])
    luminance_cbrt = Y_n[..., np.newaxis] ** (1 / 3)
    LMS_p_L = ((1 + luminance_cbrt + LMS_l_E) /
               (1 + luminance_cbrt + (1 / LMS_l_E)))
    LMS_a_L = (LMS_p_L + D[..., np.newaxis] * (1 - LMS_p_L)) / LMS_n

    A_matrix = row_as_diagonal(LMS_a_L)
    M = dot_matrix(dot_matrix(R_MATRIX, A_matrix), XYZ_TO_HPE_MATRIX)
    XYZ_ref = dot_vector(M, XYZ)
    X_ref, Y_ref, Z_ref = tsplit(XYZ_ref)

    # Correlate of *Lightness* :math:`L^R`.
    LR = 100 * (Y_ref ** sigma)

    # Opponent colour dimensions :math:`a^R` and :math:`b^R`.
    aR = 430 * ((X_ref ** sigma) - (Y_ref ** sigma))
    bR = 170 * ((Y_ref ** sigma) - (Z_ref ** sigma))

    # *Hue* angle :math:`h^R`.
    hR = np.degrees(np.arctan2(bR, aR)) % 360
    # TODO: Implement hue composition computation.

    # Correlate of *chroma* :math:`C^R`.
    CR = np.sqrt((aR ** 2) + (bR ** 2))

    # Correlate of *saturation* :math:`s^R`.
    sR = CR / LR

    return RLAB_Specification(LR, CR, hR, sR, None, aR, bR)
def XYZ_to_RLAB(XYZ,
                XYZ_n,
                Y_n,
                sigma=RLAB_VIEWING_CONDITIONS['Average'],
                D=RLAB_D_FACTOR['Hard Copy Images']):
    """
    Computes the *RLAB* model color appearance correlates.

    Parameters
    ----------
    XYZ : array_like
        *CIE XYZ* tristimulus values of test sample / stimulus.
    XYZ_n : array_like
        *CIE XYZ* tristimulus values of reference white.
    Y_n : numeric or array_like
        Absolute adapting luminance in :math:`cd/m^2`.
    sigma : numeric or array_like, optional
        Relative luminance of the surround, see
        :attr:`colour.RLAB_VIEWING_CONDITIONS` for reference.
    D : numeric or array_like, optional
        *Discounting-the-Illuminant* factor normalised to domain [0, 1].

    Returns
    -------
    RLAB_Specification
        *RLAB* colour appearance model specification.

    Notes
    -----

    +--------------------------+-----------------------+---------------+
    | **Domain**               | **Scale - Reference** | **Scale - 1** |
    +==========================+=======================+===============+
    | ``XYZ``                  | [0, 100]              | [0, 1]        |
    +--------------------------+-----------------------+---------------+
    | ``XYZ_n``                | [0, 100]              | [0, 1]        |
    +--------------------------+-----------------------+---------------+

    +--------------------------+-----------------------+---------------+
    | **Range**                | **Scale - Reference** | **Scale - 1** |
    +==========================+=======================+===============+
    | ``RLAB_Specification.h`` | [0, 360]              | [0, 1]        |
    +--------------------------+-----------------------+---------------+

    References
    ----------
    :cite:`Fairchild1996a`, :cite:`Fairchild2013w`

    Examples
    --------
    >>> XYZ = np.array([19.01, 20.00, 21.78])
    >>> XYZ_n = np.array([109.85, 100, 35.58])
    >>> Y_n = 31.83
    >>> sigma = RLAB_VIEWING_CONDITIONS['Average']
    >>> D = RLAB_D_FACTOR['Hard Copy Images']
    >>> XYZ_to_RLAB(XYZ, XYZ_n, Y_n, sigma, D)  # doctest: +ELLIPSIS
    RLAB_Specification(J=49.8347069..., C=54.8700585..., h=286.4860208..., \
s=1.1010410..., HC=None, a=15.5711021..., b=-52.6142956...)
    """

    XYZ = to_domain_100(XYZ)
    XYZ_n = to_domain_100(XYZ_n)
    Y_n = as_float_array(Y_n)
    D = as_float_array(D)
    sigma = as_float_array(sigma)

    # Reference white as Hunt-Pointer-Estevez cone responses.
    LMS_n = XYZ_to_rgb(XYZ_n)

    # Adaptation stage producing the :math:`A` matrix.
    LMS_l_E = (3 * LMS_n) / (LMS_n[0] + LMS_n[1] + LMS_n[2])
    luminance_root = spow(Y_n[..., np.newaxis], 1 / 3)
    LMS_p_L = ((1 + luminance_root + LMS_l_E) /
               (1 + luminance_root + (1 / LMS_l_E)))
    LMS_a_L = (LMS_p_L + D[..., np.newaxis] * (1 - LMS_p_L)) / LMS_n

    M = dot_matrix(
        dot_matrix(R_MATRIX, row_as_diagonal(LMS_a_L)), XYZ_TO_HPE_MATRIX)
    X_ref, Y_ref, Z_ref = tsplit(dot_vector(M, XYZ))

    # Correlate of *Lightness* :math:`L^R`.
    LR = 100 * spow(Y_ref, sigma)

    # Opponent colour dimensions :math:`a^R` and :math:`b^R`.
    aR = 430 * (spow(X_ref, sigma) - spow(Y_ref, sigma))
    bR = 170 * (spow(Y_ref, sigma) - spow(Z_ref, sigma))

    # *Hue* angle :math:`h^R`.
    hR = np.degrees(np.arctan2(bR, aR)) % 360
    # TODO: Implement hue composition computation.

    # Correlate of *chroma* :math:`C^R`.
    CR = np.hypot(aR, bR)

    # Correlate of *saturation* :math:`s^R`.
    sR = CR / LR

    return RLAB_Specification(LR, CR, from_range_degrees(hR), sR, None, aR,
                              bR)
def chromatic_adaptation_matrix_VonKries(XYZ_w, XYZ_wr, transform='CAT02'):
    """
    Computes the *chromatic adaptation* matrix from test viewing conditions
    to reference viewing conditions.

    Parameters
    ----------
    XYZ_w : array_like
        Test viewing condition *CIE XYZ* tristimulus values of whitepoint.
    XYZ_wr : array_like
        Reference viewing condition *CIE XYZ* tristimulus values of
        whitepoint.
    transform : unicode, optional
        **{'CAT02', 'XYZ Scaling', 'Von Kries', 'Bradford', 'Sharp',
        'Fairchild', 'CMCCAT97', 'CMCCAT2000', 'CAT02_BRILL_CAT', 'Bianco',
        'Bianco PC'}**,
        Chromatic adaptation transform.

    Returns
    -------
    ndarray
        Chromatic adaptation matrix.

    Raises
    ------
    KeyError
        If chromatic adaptation method is not defined.

    Examples
    --------
    >>> XYZ_w = np.array([1.09846607, 1.00000000, 0.35582280])
    >>> XYZ_wr = np.array([0.95042855, 1.00000000, 1.08890037])
    >>> chromatic_adaptation_matrix_VonKries(  # doctest: +ELLIPSIS
    ...     XYZ_w, XYZ_wr)
    array([[ 0.8687653..., -0.1416539...,  0.3871961...],
           [-0.1030072...,  1.0584014...,  0.1538646...],
           [ 0.0078167...,  0.0267875...,  2.9608177...]])

    Using Bradford method:

    >>> XYZ_w = np.array([1.09846607, 1.00000000, 0.35582280])
    >>> XYZ_wr = np.array([0.95042855, 1.00000000, 1.08890037])
    >>> method = 'Bradford'
    >>> chromatic_adaptation_matrix_VonKries(  # doctest: +ELLIPSIS
    ...     XYZ_w, XYZ_wr, method)
    array([[ 0.8446794..., -0.1179355...,  0.3948940...],
           [-0.1366408...,  1.1041236...,  0.1291981...],
           [ 0.0798671..., -0.1349315...,  3.1928829...]])
    """

    M = CHROMATIC_ADAPTATION_TRANSFORMS.get(transform)

    if M is None:
        raise KeyError(
            '"{0}" chromatic adaptation transform is not defined! Supported '
            'methods: "{1}".'.format(transform,
                                     CHROMATIC_ADAPTATION_TRANSFORMS.keys()))

    # Whitepoints expressed in the sharpened cone response space of the
    # chosen transform.
    cone_test = np.einsum('...i,...ij->...j', XYZ_w, np.transpose(M))
    cone_reference = np.einsum('...i,...ij->...j', XYZ_wr, np.transpose(M))

    # Diagonal Von Kries scaling between the two whitepoints.
    scaling = row_as_diagonal(cone_reference / cone_test)

    cat = dot_matrix(np.linalg.inv(M), scaling)
    cat = dot_matrix(cat, M)

    return cat
def XYZ_to_camera_space_matrix(xy, CCT_calibration_illuminant_1,
                               CCT_calibration_illuminant_2,
                               M_color_matrix_1, M_color_matrix_2,
                               M_camera_calibration_1,
                               M_camera_calibration_2, analog_balance):
    """
    Returns the *CIE XYZ* to *Camera Space* matrix for given *xy* white
    balance chromaticity coordinates.

    Parameters
    ----------
    xy : array_like
        *xy* white balance chromaticity coordinates.
    CCT_calibration_illuminant_1 : numeric
        Correlated colour temperature of *CalibrationIlluminant1*.
    CCT_calibration_illuminant_2 : numeric
        Correlated colour temperature of *CalibrationIlluminant2*.
    M_color_matrix_1 : array_like
        *ColorMatrix1* tag matrix.
    M_color_matrix_2 : array_like
        *ColorMatrix2* tag matrix.
    M_camera_calibration_1 : array_like
        *CameraCalibration1* tag matrix.
    M_camera_calibration_2 : array_like
        *CameraCalibration2* tag matrix.
    analog_balance : array_like
        *AnalogBalance* tag vector.

    Returns
    -------
    ndarray
        *CIE XYZ* to *Camera Space* matrix.

    Notes
    -----
    -   The reference illuminant is D50 as defined per
        :attr:`colour_hdri.models.dataset.dng.ADOBE_DNG_XYZ_ILLUMINANT`
        attribute.

    References
    ----------
    -   :cite:`AdobeSystems2012f`
    -   :cite:`AdobeSystems2015d`
    -   :cite:`McGuffog2012a`

    Examples
    --------
    >>> M_color_matrix_1 = np.array(
    ...     [[0.5309, -0.0229, -0.0336],
    ...      [-0.6241, 1.3265, 0.3337],
    ...      [-0.0817, 0.1215, 0.6664]])
    >>> M_color_matrix_2 = np.array(
    ...     [[0.4716, 0.0603, -0.0830],
    ...      [-0.7798, 1.5474, 0.2480],
    ...      [-0.1496, 0.1937, 0.6651]])
    >>> M_camera_calibration_1 = np.identity(3)
    >>> M_camera_calibration_2 = np.identity(3)
    >>> analog_balance = np.ones(3)
    >>> XYZ_to_camera_space_matrix(  # doctest: +ELLIPSIS
    ...     np.array([0.34510414, 0.35162252]),
    ...     2850,
    ...     6500,
    ...     M_color_matrix_1,
    ...     M_color_matrix_2,
    ...     M_camera_calibration_1,
    ...     M_camera_calibration_2,
    ...     analog_balance)
    array([[ 0.4854908...,  0.0408106..., -0.0714282...],
           [-0.7433278...,  1.4956549...,  0.2680749...],
           [-0.1336946...,  0.1767874...,  0.6654045...]])
    """

    # Correlated colour temperature of the white balance chromaticity
    # coordinates, used to weight the two calibration illuminants.
    uv = UCS_to_uv(XYZ_to_UCS(xy_to_XYZ(xy)))
    CCT, _D_uv = uv_to_CCT_Robertson1968(uv)

    M_AB = np.diagflat(analog_balance)

    # Colour matrix: interpolate between both tags unless one of them is an
    # identity placeholder, in which case the other is taken verbatim.
    if not (is_identity(M_color_matrix_1) or is_identity(M_color_matrix_2)):
        M_CM = interpolated_matrix(CCT, CCT_calibration_illuminant_1,
                                   CCT_calibration_illuminant_2,
                                   M_color_matrix_1, M_color_matrix_2)
    elif is_identity(M_color_matrix_2):
        M_CM = M_color_matrix_1
    else:
        M_CM = M_color_matrix_2

    M_CC = interpolated_matrix(CCT, CCT_calibration_illuminant_1,
                               CCT_calibration_illuminant_2,
                               M_camera_calibration_1,
                               M_camera_calibration_2)

    return dot_matrix(dot_matrix(M_AB, M_CC), M_CM)
def camera_space_to_XYZ_matrix(xy, CCT_calibration_illuminant_1, CCT_calibration_illuminant_2, M_color_matrix_1, M_color_matrix_2, M_camera_calibration_1, M_camera_calibration_2, analog_balance, M_forward_matrix_1, M_forward_matrix_2, chromatic_adaptation_transform='Bradford'): """ Returns the *Camera Space* to *CIE XYZ* matrix for given *xy* white balance chromaticity coordinates. Parameters ---------- xy : array_like *xy* white balance chromaticity coordinates. CCT_calibration_illuminant_1 : numeric Correlated colour temperature of *CalibrationIlluminant1*. CCT_calibration_illuminant_2 : numeric Correlated colour temperature of *CalibrationIlluminant2*. M_color_matrix_1 : array_like *ColorMatrix1* tag matrix. M_color_matrix_2 : array_like *ColorMatrix2* tag matrix. M_camera_calibration_1 : array_like *CameraCalibration1* tag matrix. M_camera_calibration_2 : array_like *CameraCalibration2* tag matrix. analog_balance : array_like *AnalogBalance* tag vector. M_forward_matrix_1 : array_like *ForwardMatrix1* tag matrix. M_forward_matrix_2 : array_like *ForwardMatrix2* tag matrix. chromatic_adaptation_transform : unicode, optional **{'CAT02', 'XYZ Scaling', 'Von Kries', 'Bradford', 'Sharp', 'Fairchild', 'CMCCAT97', 'CMCCAT2000', 'CAT02_BRILL_CAT', 'Bianco', 'Bianco PC'}**, Chromatic adaptation transform. Returns ------- ndarray *Camera Space* to *CIE XYZ* matrix. Notes ----- - The reference illuminant is D50 as defined per :attr:`colour_hdri.models.dataset.dng.ADOBE_DNG_XYZ_ILLUMINANT` attribute. References ---------- - :cite:`AdobeSystems2012f` - :cite:`AdobeSystems2012g` - :cite:`AdobeSystems2015d` - :cite:`McGuffog2012a` Examples -------- >>> M_color_matrix_1 = np.array( ... [[0.5309, -0.0229, -0.0336], ... [-0.6241, 1.3265, 0.3337], ... [-0.0817, 0.1215, 0.6664]]) >>> M_color_matrix_2 = np.array( ... [[0.4716, 0.0603, -0.0830], ... [-0.7798, 1.5474, 0.2480], ... 
[-0.1496, 0.1937, 0.6651]]) >>> M_camera_calibration_1 = np.identity(3) >>> M_camera_calibration_2 = np.identity(3) >>> analog_balance = np.ones(3) >>> M_forward_matrix_1 = np.array( ... [[0.8924, -0.1041, 0.1760], ... [0.4351, 0.6621, -0.0972], ... [0.0505, -0.1562, 0.9308]]) >>> M_forward_matrix_2 = np.array( ... [[0.8924, -0.1041, 0.1760], ... [0.4351, 0.6621, -0.0972], ... [0.0505, -0.1562, 0.9308]]) >>> camera_space_to_XYZ_matrix( # doctest: +ELLIPSIS ... np.array([0.32816244, 0.34698169]), ... 2850, ... 6500, ... M_color_matrix_1, ... M_color_matrix_2, ... M_camera_calibration_1, ... M_camera_calibration_2, ... analog_balance, ... M_forward_matrix_1, ... M_forward_matrix_2) array([[ 2.1604087..., -0.1041... , 0.2722498...], [ 1.0533324..., 0.6621... , -0.1503561...], [ 0.1222553..., -0.1562... , 1.4398304...]]) """ # *ForwardMatrix1* and *ForwardMatrix2* are not included in the camera # profile. if is_identity(M_forward_matrix_1) and is_identity(M_forward_matrix_2): M_camera_to_XYZ = np.linalg.inv( XYZ_to_camera_space_matrix(xy, CCT_calibration_illuminant_1, CCT_calibration_illuminant_2, M_color_matrix_1, M_color_matrix_2, M_camera_calibration_1, M_camera_calibration_2, analog_balance)) M_CAT = chromatic_adaptation_matrix_VonKries( xy_to_XYZ(xy), xy_to_XYZ(ADOBE_DNG_XYZ_ILLUMINANT), chromatic_adaptation_transform) M_camera_space_to_XYZ = dot_matrix(M_CAT, M_camera_to_XYZ) else: uv = UCS_to_uv(XYZ_to_UCS(xy_to_XYZ(xy))) CCT, _D_uv = uv_to_CCT_Robertson1968(uv) M_CC = interpolated_matrix(CCT, CCT_calibration_illuminant_1, CCT_calibration_illuminant_2, M_camera_calibration_1, M_camera_calibration_2) # The reference implementation :cite:`AdobeSystems2015d` diverges from # the white-paper :cite:`AdobeSystems2012f`: # The reference implementation directly computes the camera neutral by # multiplying directly the interpolated colour matrix :math:`CM` with # the tristimulus values of the *xy* white balance chromaticity # coordinates. 
# The current implementation is based on the white-paper so that the # interpolated camera calibration matrix :math:`CC` and the # analog balance matrix :math:`AB` are accounted for. camera_neutral = xy_to_camera_neutral( xy, CCT_calibration_illuminant_1, CCT_calibration_illuminant_2, M_color_matrix_1, M_color_matrix_2, M_camera_calibration_1, M_camera_calibration_2, analog_balance) M_AB = np.diagflat(analog_balance) M_reference_neutral = dot_vector(np.linalg.inv(dot_matrix(M_AB, M_CC)), camera_neutral) M_D = np.linalg.inv(np.diagflat(M_reference_neutral)) M_FM = interpolated_matrix(CCT, CCT_calibration_illuminant_1, CCT_calibration_illuminant_2, M_forward_matrix_1, M_forward_matrix_2) M_camera_space_to_XYZ = dot_matrix( dot_matrix(M_FM, M_D), np.linalg.inv(dot_matrix(M_AB, M_CC))) return M_camera_space_to_XYZ