Example #1
    def test_as_namedtuple(self):
        """
        Tests :func:`colour.utilities.array.as_namedtuple` definition.
        """

        NamedTuple = namedtuple('NamedTuple', 'a b c')

        a_a = np.ones(3)
        a_b = np.ones(3) + 1
        a_c = np.ones(3) + 2

        named_tuple = NamedTuple(a_a, a_b, a_c)

        self.assertEqual(named_tuple, as_namedtuple(named_tuple, NamedTuple))

        self.assertEqual(
            named_tuple,
            as_namedtuple({
                'a': a_a,
                'b': a_b,
                'c': a_c
            }, NamedTuple))

        self.assertEqual(named_tuple, as_namedtuple([a_a, a_b, a_c],
                                                    NamedTuple))

        a_r = np.array(
            [tuple(a) for a in np.transpose((a_a, a_b, a_c)).tolist()],
            dtype=[(str('a'), str('f8')),
                   (str('b'), str('f8')),
                   (str('c'), str('f8'))])  # yapf: disable
        np.testing.assert_array_equal(
            np.array(named_tuple), np.array(as_namedtuple(a_r, NamedTuple)))
Example #2
    def test_as_namedtuple(self):
        """
        Tests :func:`colour.utilities.array.as_namedtuple` definition.
        """

        NamedTuple = namedtuple('NamedTuple', 'a b c')

        a_a = np.ones(3)
        a_b = np.ones(3) + 1
        a_c = np.ones(3) + 2

        named_tuple = NamedTuple(a_a, a_b, a_c)

        self.assertEqual(named_tuple, as_namedtuple(named_tuple, NamedTuple))

        self.assertEqual(
            named_tuple,
            as_namedtuple({
                'a': a_a,
                'b': a_b,
                'c': a_c
            }, NamedTuple))

        self.assertEqual(named_tuple, as_namedtuple([a_a, a_b, a_c],
                                                    NamedTuple))

        a_r = np.array(
            [tuple(a) for a in np.transpose((a_a, a_b, a_c)).tolist()],
            dtype=[(str('a'), str('f8')),
                   (str('b'), str('f8')),
                   (str('c'), str('f8'))])  # yapf: disable
        np.testing.assert_array_equal(np.array(named_tuple),
                                      np.array(as_namedtuple(a_r, NamedTuple)))
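The two tests above exercise every conversion path accepted by as_namedtuple: an existing named tuple, a mapping, a sequence and a structured array. A minimal standalone sketch of the first three paths, assuming as_namedtuple is importable from colour.utilities as the docstrings in the later examples reference it:

import numpy as np
from collections import namedtuple

from colour.utilities import as_namedtuple  # assumed import path

Spec = namedtuple('Spec', 'a b c')

a = np.ones(3)
b = np.ones(3) + 1
c = np.ones(3) + 2

# A mapping, a sequence and an existing named tuple all convert to ``Spec``.
print(as_namedtuple({'a': a, 'b': b, 'c': c}, Spec))
print(as_namedtuple([a, b, c], Spec))
print(as_namedtuple(Spec(a, b, c), Spec))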
Example #3
    def _XYZ_from_data(self, data, correlates):
        """
        Returns the *CIE XYZ* tristimulus values from given *CAM16* colour
        appearance model input data.

        Parameters
        ----------
        data : dict-like
            Fixture data.
        correlates : array_like
            Correlates used to build the input *CAM16* colour appearance
            model specification.

        Returns
        -------
        array_like
            *CIE XYZ* tristimulus values.
        """

        XYZ_w = tstack([data['X_w'], data['Y_w'], data['Z_w']])

        i, j, k = correlates
        CAM16_specification = as_namedtuple(
            {
                i: data[i],
                j: data[j],
                k: data[k]
            }, CAM16_Specification)

        XYZ = CAM16_to_XYZ(
            CAM16_specification, XYZ_w, data['L_A'], data['Y_b'],
            CAM16_InductionFactors(data['F'], data['c'], data['N_c']))

        return XYZ
Example #4
    def _XYZ_from_data(self, data, correlates):
        """
        Returns the *CIE XYZ* tristimulus values from given *CAM16* colour
        appearance model input data.

        Parameters
        ----------
        data : dict-like
            Fixture data.
        correlates : array_like
            Correlates used to build the input *CAM16* colour appearance
            model specification.

        Returns
        -------
        array_like
            *CIE XYZ* tristimulus values.
        """

        XYZ_w = tstack([data['X_w'], data['Y_w'], data['Z_w']])

        i, j, k = correlates
        CAM16_specification = as_namedtuple({
            i: data[i],
            j: data[j],
            k: data[k]
        }, CAM16_Specification)

        XYZ = CAM16_to_XYZ(
            CAM16_specification, XYZ_w, data['L_A'], data['Y_b'],
            CAM16_InductionFactors(data['F'], data['c'], data['N_c']))

        return XYZ
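The helper above only forwards the three chosen correlates into a CAM16_Specification before inverting the model. A hedged sketch of that selection step on its own, using a hypothetical fixture dictionary (the data values and the ('J', 'C', 'h') triple are made up for illustration; CAM16_Specification and as_namedtuple are assumed importable from colour.appearance and colour.utilities):

from colour.appearance import CAM16_Specification  # assumed import path
from colour.utilities import as_namedtuple

# Hypothetical fixture row; only the selected correlates are used.
data = {'J': 41.73, 'C': 0.10, 'h': 217.07, 'M': 0.11}

i, j, k = ('J', 'C', 'h')
specification = as_namedtuple({i: data[i], j: data[j], k: data[k]},
                              CAM16_Specification)
print(specification)  # Fields that were not selected are expected to be None.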
Example #5
def CIECAM02_to_XYZ(CIECAM02_specification,
                    XYZ_w,
                    L_A,
                    Y_b,
                    surround=CIECAM02_VIEWING_CONDITIONS['Average'],
                    discount_illuminant=False):
    """
    Converts *CIECAM02* specification to *CIE XYZ* tristimulus values.

    This is the *reverse* implementation.

    Parameters
    ----------
    CIECAM02_specification : CIECAM02_Specification
        *CIECAM02* colour appearance model specification. Correlate of
        *Lightness* :math:`J`, correlate of *chroma* :math:`C` or correlate of
        *colourfulness* :math:`M` and *hue* angle :math:`h` in degrees must be
        specified, e.g. :math:`JCh` or :math:`JMh`.
    XYZ_w : array_like
        *CIE XYZ* tristimulus values of reference white.
    L_A : numeric or array_like
        Adapting field *luminance* :math:`L_A` in :math:`cd/m^2` (often taken
        to be 20% of the luminance of a white object in the scene).
    Y_b : numeric or array_like
        Relative luminance of background :math:`Y_b` in :math:`cd/m^2`.
    surround : CIECAM02_InductionFactors, optional
        Surround viewing conditions.
    discount_illuminant : bool, optional
        Discount the illuminant.

    Returns
    -------
    XYZ : ndarray
        *CIE XYZ* tristimulus values.

    Raises
    ------
    ValueError
        If neither *C* nor *M* correlates have been defined in the
        ``CIECAM02_specification`` argument.

    Warning
    -------
    The output range of this definition is non-standard!

    Notes
    -----

    +------------------------------+-----------------------+---------------+
    | **Domain**                   | **Scale - Reference** | **Scale - 1** |
    +==============================+=======================+===============+
    | ``CIECAM02_specification.h`` | [0, 360]              | [0, 1]        |
    +------------------------------+-----------------------+---------------+
    | ``CIECAM02_specification.H`` | [0, 360]              | [0, 1]        |
    +------------------------------+-----------------------+---------------+
    | ``XYZ_w``                    | [0, 100]              | [0, 1]        |
    +------------------------------+-----------------------+---------------+

    +------------------------------+-----------------------+---------------+
    | **Range**                    | **Scale - Reference** | **Scale - 1** |
    +==============================+=======================+===============+
    | ``XYZ``                      | [0, 100]              | [0, 1]        |
    +------------------------------+-----------------------+---------------+

    -   ``CIECAM02_specification`` can also be passed as a compatible argument
        to :func:`colour.utilities.as_namedtuple` definition.

    References
    ----------
    :cite:`Fairchild2004c`, :cite:`Luo2013`, :cite:`Moroneya`,
    :cite:`Wikipedia2007a`

    Examples
    --------
    >>> specification = CIECAM02_Specification(J=41.731091132513917,
    ...                                        C=0.104707757171031,
    ...                                        h=219.048432658311780)
    >>> XYZ_w = np.array([95.05, 100.00, 108.88])
    >>> L_A = 318.31
    >>> Y_b = 20.0
    >>> CIECAM02_to_XYZ(specification, XYZ_w, L_A, Y_b)  # doctest: +ELLIPSIS
    array([ 19.01...,  20...  ,  21.78...])
    """

    J, C, h, _s, _Q, M, _H, _HC = as_namedtuple(CIECAM02_specification,
                                                CIECAM02_Specification)
    L_A = as_float_array(L_A)

    h = to_domain_degrees(h)
    XYZ_w = to_domain_100(XYZ_w)
    _X_w, Y_w, _Z_w = tsplit(XYZ_w)

    n, F_L, N_bb, N_cb, z = tsplit(
        viewing_condition_dependent_parameters(Y_b, Y_w, L_A))

    if C is None and M is not None:
        C = M / spow(F_L, 0.25)
    elif C is None:
        raise ValueError('Either "C" or "M" correlate must be defined in '
                         'the "CIECAM02_specification" argument!')

    # Converting *CIE XYZ* tristimulus values to *CMCCAT2000* transform
    # sharpened *RGB* values.
    RGB_w = dot_vector(CAT02_CAT, XYZ_w)

    # Computing degree of adaptation :math:`D`.
    D = (degree_of_adaptation(surround.F, L_A)
         if not discount_illuminant else np.ones(L_A.shape))

    # Computing full chromatic adaptation.
    RGB_wc = full_chromatic_adaptation_forward(RGB_w, RGB_w, Y_w, D)

    # Converting to *Hunt-Pointer-Estevez* colourspace.
    RGB_pw = RGB_to_rgb(RGB_wc)

    # Applying post-adaptation non linear response compression.
    RGB_aw = post_adaptation_non_linear_response_compression_forward(
        RGB_pw, F_L)

    # Computing achromatic response for the whitepoint.
    A_w = achromatic_response_forward(RGB_aw, N_bb)

    # Computing temporary magnitude quantity :math:`t`.
    t = temporary_magnitude_quantity_reverse(C, J, n)

    # Computing eccentricity factor *e_t*.
    e_t = eccentricity_factor(h)

    # Computing achromatic response :math:`A` for the stimulus.
    A = achromatic_response_reverse(A_w, J, surround.c, z)

    # Computing *P_1* to *P_3*.
    P_n = P(surround.N_c, N_cb, e_t, t, A, N_bb)
    _P_1, P_2, _P_3 = tsplit(P_n)

    # Computing opponent colour dimensions :math:`a` and :math:`b`.
    a, b = tsplit(opponent_colour_dimensions_reverse(P_n, h))

    # Computing post-adaptation non linear response compression matrix.
    RGB_a = post_adaptation_non_linear_response_compression_matrix(P_2, a, b)

    # Applying reverse post-adaptation non linear response compression.
    RGB_p = post_adaptation_non_linear_response_compression_reverse(RGB_a, F_L)

    # Converting to *Hunt-Pointer-Estevez* colourspace.
    RGB_c = rgb_to_RGB(RGB_p)

    # Applying reverse full chromatic adaptation.
    RGB = full_chromatic_adaptation_reverse(RGB_c, RGB_w, Y_w, D)

    # Converting *CMCCAT2000* transform sharpened *RGB* values to *CIE XYZ*
    # tristimulus values.
    XYZ = dot_vector(CAT02_INVERSE_CAT, RGB)

    return from_range_100(XYZ)
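Because CIECAM02_specification may be any argument compatible with colour.utilities.as_namedtuple (see the note above), a plain dictionary carrying the JCh correlates also works. A small sketch, assuming CIECAM02_to_XYZ is importable from colour.appearance; the numbers are the ones from the doctest above:

import numpy as np

from colour.appearance import CIECAM02_to_XYZ  # assumed import path

XYZ_w = np.array([95.05, 100.00, 108.88])
L_A = 318.31
Y_b = 20.0

# The dictionary is converted internally through as_namedtuple().
specification = {'J': 41.731091132513917,
                 'C': 0.104707757171031,
                 'h': 219.048432658311780}
XYZ = CIECAM02_to_XYZ(specification, XYZ_w, L_A, Y_b)
print(XYZ)  # Approximately [19.01, 20.00, 21.78] per the doctest above.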
Example #6
File: cam16.py, Project: yixw/colour
def CAM16_to_XYZ(CAM16_specification,
                 XYZ_w,
                 L_A,
                 Y_b,
                 surround=CAM16_VIEWING_CONDITIONS['Average'],
                 discount_illuminant=False):
    """
    Converts from *CAM16* specification to *CIE XYZ* tristimulus values.

    This is the *inverse* implementation.

    Parameters
    ----------
    CAM16_specification : CAM16_Specification
        *CAM16* colour appearance model specification. Correlate of
        *Lightness* :math:`J`, correlate of *chroma* :math:`C` or correlate of
        *colourfulness* :math:`M` and *hue* angle :math:`h` in degrees must be
        specified, e.g. :math:`JCh` or :math:`JMh`.
    XYZ_w : array_like
        *CIE XYZ* tristimulus values of reference white.
    L_A : numeric or array_like
        Adapting field *luminance* :math:`L_A` in :math:`cd/m^2` (often taken
        to be 20% of the luminance of a white object in the scene).
    Y_b : numeric or array_like
        Relative luminance of background :math:`Y_b` in :math:`cd/m^2`.
    surround : CAM16_InductionFactors, optional
        Surround viewing conditions.
    discount_illuminant : bool, optional
        Discount the illuminant.

    Returns
    -------
    XYZ : ndarray
        *CIE XYZ* tristimulus values.

    Raises
    ------
    ValueError
        If neither *C* nor *M* correlates have been defined in the
        ``CAM16_specification`` argument.

    Notes
    -----

    +---------------------------+-----------------------+---------------+
    | **Domain**                | **Scale - Reference** | **Scale - 1** |
    +===========================+=======================+===============+
    | ``CAM16_specification.J`` | [0, 100]              | [0, 1]        |
    +---------------------------+-----------------------+---------------+
    | ``CAM16_specification.C`` | [0, 100]              | [0, 1]        |
    +---------------------------+-----------------------+---------------+
    | ``CAM16_specification.h`` | [0, 360]              | [0, 1]        |
    +---------------------------+-----------------------+---------------+
    | ``CAM16_specification.s`` | [0, 100]              | [0, 1]        |
    +---------------------------+-----------------------+---------------+
    | ``CAM16_specification.Q`` | [0, 100]              | [0, 1]        |
    +---------------------------+-----------------------+---------------+
    | ``CAM16_specification.M`` | [0, 100]              | [0, 1]        |
    +---------------------------+-----------------------+---------------+
    | ``CAM16_specification.H`` | [0, 360]              | [0, 1]        |
    +---------------------------+-----------------------+---------------+
    | ``XYZ_w``                 | [0, 100]              | [0, 1]        |
    +---------------------------+-----------------------+---------------+

    +---------------------------+-----------------------+---------------+
    | **Range**                 | **Scale - Reference** | **Scale - 1** |
    +===========================+=======================+===============+
    | ``XYZ``                   | [0, 100]              | [0, 1]        |
    +---------------------------+-----------------------+---------------+

    -   ``CAM16_specification`` can also be passed as a compatible argument
        to :func:`colour.utilities.as_namedtuple` definition.

    References
    ----------
    :cite:`Li2017`

    Examples
    --------
    >>> specification = CAM16_Specification(J=41.731207905126638,
    ...                                     C=0.103355738709070,
    ...                                     h=217.067959767393010)
    >>> XYZ_w = np.array([95.05, 100.00, 108.88])
    >>> L_A = 318.31
    >>> Y_b = 20.0
    >>> CAM16_to_XYZ(specification, XYZ_w, L_A, Y_b)  # doctest: +ELLIPSIS
    array([ 19.01...,  20...  ,  21.78...])
    """

    J, C, h, _s, _Q, M, _H, _HC = as_namedtuple(CAM16_specification,
                                                CAM16_Specification)
    J = to_domain_100(J)
    C = to_domain_100(C) if C is not None else C
    h = to_domain_degrees(h)
    M = to_domain_100(M) if M is not None else M
    L_A = as_float_array(L_A)
    XYZ_w = to_domain_100(XYZ_w)
    _X_w, Y_w, _Z_w = tsplit(XYZ_w)

    # Step 0
    # Converting *CIE XYZ* tristimulus values to sharpened *RGB* values.
    RGB_w = dot_vector(M_16, XYZ_w)

    # Computing degree of adaptation :math:`D`.
    D = (np.clip(degree_of_adaptation(surround.F, L_A), 0, 1)
         if not discount_illuminant else np.ones(L_A.shape))

    n, F_L, N_bb, N_cb, z = tsplit(
        viewing_condition_dependent_parameters(Y_b, Y_w, L_A))

    D_RGB = (D[..., np.newaxis] * Y_w[..., np.newaxis] / RGB_w + 1 -
             D[..., np.newaxis])
    RGB_wc = D_RGB * RGB_w

    # Applying forward post-adaptation non linear response compression.
    RGB_aw = post_adaptation_non_linear_response_compression_forward(
        RGB_wc, F_L)

    # Computing achromatic responses for the whitepoint.
    A_w = achromatic_response_forward(RGB_aw, N_bb)

    # Step 1
    if C is None and M is not None:
        C = M / spow(F_L, 0.25)
    elif C is None:
        raise ValueError('Either "C" or "M" correlate must be defined in '
                         'the "CAM16_specification" argument!')

    # Step 2
    # Computing temporary magnitude quantity :math:`t`.
    t = temporary_magnitude_quantity_inverse(C, J, n)

    # Computing eccentricity factor *e_t*.
    e_t = eccentricity_factor(h)

    # Computing achromatic response :math:`A` for the stimulus.
    A = achromatic_response_inverse(A_w, J, surround.c, z)

    # Computing *P_1* to *P_3*.
    P_n = P(surround.N_c, N_cb, e_t, t, A, N_bb)
    _P_1, P_2, _P_3 = tsplit(P_n)

    # Step 3
    # Computing opponent colour dimensions :math:`a` and :math:`b`.
    a, b = tsplit(opponent_colour_dimensions_inverse(P_n, h))

    # Step 4
    # Computing post-adaptation non linear response compression matrix.
    RGB_a = post_adaptation_non_linear_response_compression_matrix(P_2, a, b)

    # Step 5
    # Applying inverse post-adaptation non linear response compression.
    RGB_c = post_adaptation_non_linear_response_compression_inverse(RGB_a, F_L)

    # Step 6
    RGB = RGB_c / D_RGB

    # Step 7
    XYZ = dot_vector(M_16_INVERSE, RGB)

    return from_range_100(XYZ)
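The docstring requires either C or M alongside J and h, so a JMh specification is equally valid; internally C is recovered as M / F_L^0.25. A sketch of that path, assuming CAM16_Specification and CAM16_to_XYZ are importable from colour.appearance (the M value is purely illustrative, not taken from any reference data):

import numpy as np

from colour.appearance import CAM16_Specification, CAM16_to_XYZ

XYZ_w = np.array([95.05, 100.00, 108.88])
L_A = 318.31
Y_b = 20.0

# Passing the colourfulness correlate M instead of chroma C; the definition
# derives C from M and the luminance level adaptation factor F_L.
specification = CAM16_Specification(J=41.731207905126638,
                                    M=0.107,  # illustrative value only
                                    h=217.067959767393010)
XYZ = CAM16_to_XYZ(specification, XYZ_w, L_A, Y_b)
print(XYZ)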
Example #7
def CAM16_to_XYZ(CAM16_specification,
                 XYZ_w,
                 L_A,
                 Y_b,
                 surround=CAM16_VIEWING_CONDITIONS['Average'],
                 discount_illuminant=False):
    """
    Converts *CAM16* specification to *CIE XYZ* tristimulus values.

    This is the *reverse* implementation.

    Parameters
    ----------
    CAM16_specification : CAM16_Specification
        *CAM16* colour appearance model specification. Correlate of
        *Lightness* :math:`J`, correlate of *chroma* :math:`C` or correlate of
        *colourfulness* :math:`M` and *hue* angle :math:`h` in degrees must be
        specified, e.g. :math:`JCh` or :math:`JMh`.
    XYZ_w : array_like
        *CIE XYZ* tristimulus values of reference white.
    L_A : numeric or array_like
        Adapting field *luminance* :math:`L_A` in :math:`cd/m^2` (often taken
        to be 20% of the luminance of a white object in the scene).
    Y_b : numeric or array_like
        Relative luminance of background :math:`Y_b` in :math:`cd/m^2`.
    surround : CAM16_InductionFactors, optional
        Surround viewing conditions.
    discount_illuminant : bool, optional
        Discount the illuminant.

    Returns
    -------
    XYZ : ndarray
        *CIE XYZ* tristimulus values.

    Raises
    ------
    ValueError
        If neither *C* nor *M* correlates have been defined in the
        ``CAM16_specification`` argument.

    Notes
    -----

    +---------------------------+-----------------------+---------------+
    | **Domain**                | **Scale - Reference** | **Scale - 1** |
    +===========================+=======================+===============+
    | ``CAM16_specification.h`` | [0, 360]              | [0, 1]        |
    +---------------------------+-----------------------+---------------+
    | ``CAM16_specification.H`` | [0, 360]              | [0, 1]        |
    +---------------------------+-----------------------+---------------+
    | ``XYZ_w``                 | [0, 100]              | [0, 1]        |
    +---------------------------+-----------------------+---------------+

    +---------------------------+-----------------------+---------------+
    | **Range**                 | **Scale - Reference** | **Scale - 1** |
    +===========================+=======================+===============+
    | ``XYZ``                   | [0, 100]              | [0, 1]        |
    +---------------------------+-----------------------+---------------+

    -   ``CAM16_specification`` can also be passed as a compatible argument
        to :func:`colour.utilities.as_namedtuple` definition.

    References
    ----------
    :cite:`Li2017`

    Examples
    --------
    >>> specification = CAM16_Specification(J=41.731207905126638,
    ...                                     C=0.103355738709070,
    ...                                     h=217.067959767393010)
    >>> XYZ_w = np.array([95.05, 100.00, 108.88])
    >>> L_A = 318.31
    >>> Y_b = 20.0
    >>> CAM16_to_XYZ(specification, XYZ_w, L_A, Y_b)  # doctest: +ELLIPSIS
    array([ 19.01...,  20...  ,  21.78...])
    """

    J, C, h, _s, _Q, M, _H, _HC = as_namedtuple(CAM16_specification,
                                                CAM16_Specification)
    L_A = as_float_array(L_A)

    h = to_domain_degrees(h)
    XYZ_w = to_domain_100(XYZ_w)
    _X_w, Y_w, _Z_w = tsplit(XYZ_w)

    # Step 0
    # Converting *CIE XYZ* tristimulus values to sharpened *RGB* values.
    RGB_w = dot_vector(M_16, XYZ_w)

    # Computing degree of adaptation :math:`D`.
    D = (np.clip(degree_of_adaptation(surround.F, L_A), 0, 1)
         if not discount_illuminant else np.ones(L_A.shape))

    n, F_L, N_bb, N_cb, z = tsplit(
        viewing_condition_dependent_parameters(Y_b, Y_w, L_A))

    D_RGB = (D[..., np.newaxis] * Y_w[..., np.newaxis] / RGB_w + 1 -
             D[..., np.newaxis])
    RGB_wc = D_RGB * RGB_w

    # Applying forward post-adaptation non linear response compression.
    RGB_aw = post_adaptation_non_linear_response_compression_forward(
        RGB_wc, F_L)

    # Computing achromatic responses for the whitepoint.
    A_w = achromatic_response_forward(RGB_aw, N_bb)

    # Step 1
    if C is None and M is not None:
        C = M / spow(F_L, 0.25)
    elif C is None:
        raise ValueError('Either "C" or "M" correlate must be defined in '
                         'the "CAM16_specification" argument!')

    # Step 2
    # Computing temporary magnitude quantity :math:`t`.
    t = temporary_magnitude_quantity_reverse(C, J, n)

    # Computing eccentricity factor *e_t*.
    e_t = eccentricity_factor(h)

    # Computing achromatic response :math:`A` for the stimulus.
    A = achromatic_response_reverse(A_w, J, surround.c, z)

    # Computing *P_1* to *P_3*.
    P_n = P(surround.N_c, N_cb, e_t, t, A, N_bb)
    _P_1, P_2, _P_3 = tsplit(P_n)

    # Step 3
    # Computing opponent colour dimensions :math:`a` and :math:`b`.
    a, b = tsplit(opponent_colour_dimensions_reverse(P_n, h))

    # Step 4
    # Computing post-adaptation non linear response compression matrix.
    RGB_a = post_adaptation_non_linear_response_compression_matrix(P_2, a, b)

    # Step 5
    # Applying reverse post-adaptation non linear response compression.
    RGB_c = post_adaptation_non_linear_response_compression_reverse(RGB_a, F_L)

    # Step 6
    RGB = RGB_c / D_RGB

    # Step 7
    XYZ = dot_vector(M_16_INVERSE, RGB)

    return from_range_100(XYZ)
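In the 'Scale - 1' column of the tables above, only h, H and XYZ_w change scale in this variant, so a caller working in [0, 1] divides the hue by 360 and the white point by 100 and receives the CIE XYZ output in [0, 1]. A sketch assuming the domain_range_scale context manager is available from colour.utilities:

import numpy as np

from colour.appearance import CAM16_Specification, CAM16_to_XYZ
from colour.utilities import domain_range_scale  # assumed import path

XYZ_w = np.array([95.05, 100.00, 108.88])
L_A = 318.31
Y_b = 20.0

# Under 'Scale - 1', this variant expects h in [0, 1] and XYZ_w in [0, 1]
# and returns XYZ in [0, 1]; J and C keep their reference scale here.
with domain_range_scale('1'):
    specification = CAM16_Specification(J=41.731207905126638,
                                        C=0.103355738709070,
                                        h=217.067959767393010 / 360)
    XYZ = CAM16_to_XYZ(specification, XYZ_w / 100, L_A, Y_b)
print(XYZ)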
Example #8
File: cam16.py, Project: ajun73/Work_Code
def CAM16_to_XYZ(CAM16_specification,
                 XYZ_w,
                 L_A,
                 Y_b,
                 surround=CAM16_VIEWING_CONDITIONS['Average'],
                 discount_illuminant=False):
    """
    Converts *CAM16* specification to *CIE XYZ* tristimulus values.

    This is the *reverse* implementation.

    Parameters
    ----------
    CAM16_specification : CAM16_Specification
        *CAM16* colour appearance model specification. Correlate of
        *Lightness* :math:`J`, correlate of *chroma* :math:`C` or correlate of
        *colourfulness* :math:`M` and *hue* angle :math:`h` in degrees must be
        specified, e.g. :math:`JCh` or :math:`JMh`.
    XYZ_w : array_like
        *CIE XYZ* tristimulus values of reference white.
    L_A : numeric or array_like
        Adapting field *luminance* :math:`L_A` in :math:`cd/m^2` (often taken
        to be 20% of the luminance of a white object in the scene).
    Y_b : numeric or array_like
        Relative luminance of background :math:`Y_b` in :math:`cd/m^2`.
    surround : CAM16_InductionFactors, optional
        Surround viewing conditions.
    discount_illuminant : bool, optional
        Discount the illuminant.

    Returns
    -------
    XYZ : ndarray
        *CIE XYZ* tristimulus values.

    Raises
    ------
    ValueError
        If neither *C* nor *M* correlates have been defined in the
        ``CAM16_specification`` argument.

    Warning
    -------
    The output range of this definition is non-standard!

    Notes
    -----
    -   ``CAM16_specification`` can also be passed as a compatible argument
        to :func:`colour.utilities.as_namedtuple` definition.
    -   Input *CIE XYZ_w* tristimulus values are in domain [0, 100].
    -   Output *CIE XYZ* tristimulus values are in range [0, 100].

    References
    ----------
    -   :cite:`Li2017`

    Examples
    --------
    >>> specification = CAM16_Specification(J=41.718025051415616,
    ...                                     C=11.941344635245843,
    ...                                     h=210.38389558131118)
    >>> XYZ_w = np.array([95.05, 100.00, 108.88])
    >>> L_A = 318.31
    >>> Y_b = 20.0
    >>> CAM16_to_XYZ(specification, XYZ_w, L_A, Y_b)  # doctest: +ELLIPSIS
    array([ 19.01...,  20...  ,  21.78...])
    """

    J, C, h, _s, _Q, M, _H, _HC = as_namedtuple(CAM16_specification,
                                                CAM16_Specification)

    _X_w, Y_w, _Z_w = tsplit(XYZ_w)

    # Step 0
    # Converting *CIE XYZ* tristimulus values to sharpened *RGB* values.
    RGB_w = dot_vector(M_16, XYZ_w)

    # Computing degree of adaptation :math:`D`.
    D = (np.clip(degree_of_adaptation(surround.F, L_A), 0, 1)
         if not discount_illuminant else np.ones(np.shape(L_A)))

    n, F_L, N_bb, N_cb, z = tsplit(
        viewing_condition_dependent_parameters(Y_b, Y_w, L_A))

    D_RGB = (D[..., np.newaxis] * Y_w[..., np.newaxis] / RGB_w + 1 -
             D[..., np.newaxis])
    RGB_wc = D_RGB * RGB_w

    # Applying forward post-adaptation non linear response compression.
    RGB_aw = post_adaptation_non_linear_response_compression_forward(
        RGB_wc, F_L)

    # Computing achromatic responses for the whitepoint.
    A_w = achromatic_response_forward(RGB_aw, N_bb)

    # Step 1
    if C is None and M is not None:
        C = M / F_L**0.25
    elif C is None:
        raise ValueError('Either "C" or "M" correlate must be defined in '
                         'the "CAM16_specification" argument!')

    # Step 2
    # Computing temporary magnitude quantity :math:`t`.
    t = temporary_magnitude_quantity_reverse(C, J, n)

    # Computing eccentricity factor *e_t*.
    e_t = eccentricity_factor(h)

    # Computing achromatic response :math:`A` for the stimulus.
    A = achromatic_response_reverse(A_w, J, surround.c, z)

    # Computing *P_1* to *P_3*.
    P_n = P(surround.N_c, N_cb, e_t, t, A, N_bb)
    _P_1, P_2, _P_3 = tsplit(P_n)

    # Step 3
    # Computing opponent colour dimensions :math:`a` and :math:`b`.
    a, b = tsplit(opponent_colour_dimensions_reverse(P_n, h))

    # Step 4
    # Computing post-adaptation non linear response compression matrix.
    RGB_a = post_adaptation_non_linear_response_compression_matrix(P_2, a, b)

    # Step 5
    # Applying reverse post-adaptation non linear response compression.
    RGB_c = post_adaptation_non_linear_response_compression_reverse(RGB_a, F_L)

    # Step 6
    RGB = RGB_c / D_RGB

    # Step 7
    XYZ = dot_vector(M_16_INVERSE, RGB)

    return XYZ
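With discount_illuminant=True the degree of adaptation D is forced to one instead of being derived from the surround and L_A. A brief sketch of that call, reusing the values from the doctest above and assuming the definition is importable from colour.appearance like the other examples:

import numpy as np

from colour.appearance import CAM16_Specification, CAM16_to_XYZ

specification = CAM16_Specification(J=41.718025051415616,
                                    C=11.941344635245843,
                                    h=210.38389558131118)
XYZ_w = np.array([95.05, 100.00, 108.88])
L_A = 318.31
Y_b = 20.0

# The illuminant is fully discounted, i.e. D is one for the adaptation step.
XYZ = CAM16_to_XYZ(specification, XYZ_w, L_A, Y_b, discount_illuminant=True)
print(XYZ)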
Example #9
def CIECAM02_to_XYZ(CIECAM02_specification,
                    XYZ_w,
                    L_A,
                    Y_b,
                    surround=CIECAM02_VIEWING_CONDITIONS['Average'],
                    discount_illuminant=False):
    """
    Converts *CIECAM02* specification to *CIE XYZ* tristimulus values.

    This is the *reverse* implementation.

    Parameters
    ----------
    CIECAM02_specification : CIECAM02_Specification
        *CIECAM02* colour appearance model specification. Correlate of
        *Lightness* :math:`J`, correlate of *chroma* :math:`C` or correlate of
        *colourfulness* :math:`M` and *hue* angle :math:`h` in degrees must be
        specified, e.g. :math:`JCh` or :math:`JMh`.
    XYZ_w : array_like
        *CIE XYZ* tristimulus values of reference white.
    L_A : numeric or array_like
        Adapting field *luminance* :math:`L_A` in :math:`cd/m^2` (often taken
        to be 20% of the luminance of a white object in the scene).
    Y_b : numeric or array_like
        Relative luminance of background :math:`Y_b` in :math:`cd/m^2`.
    surround : CIECAM02_InductionFactors, optional
        Surround viewing conditions.
    discount_illuminant : bool, optional
        Discount the illuminant.

    Returns
    -------
    XYZ : ndarray
        *CIE XYZ* tristimulus values.

    Raises
    ------
    ValueError
        If neither *C* nor *M* correlates have been defined in the
        ``CIECAM02_specification`` argument.

    Warning
    -------
    The output range of this definition is non-standard!

    Notes
    -----

    +------------------------------+-----------------------+---------------+
    | **Domain**                   | **Scale - Reference** | **Scale - 1** |
    +==============================+=======================+===============+
    | ``CIECAM02_specification.h`` | [0, 360]              | [0, 1]        |
    +------------------------------+-----------------------+---------------+
    | ``CIECAM02_specification.H`` | [0, 360]              | [0, 1]        |
    +------------------------------+-----------------------+---------------+
    | ``XYZ_w``                    | [0, 100]              | [0, 1]        |
    +------------------------------+-----------------------+---------------+

    +------------------------------+-----------------------+---------------+
    | **Range**                    | **Scale - Reference** | **Scale - 1** |
    +==============================+=======================+===============+
    | ``XYZ``                      | [0, 100]              | [0, 1]        |
    +------------------------------+-----------------------+---------------+

    -   ``CIECAM02_specification`` can also be passed as a compatible argument
        to :func:`colour.utilities.as_namedtuple` definition.

    References
    ----------
    :cite:`Fairchild2004c`, :cite:`Luo2013`, :cite:`Moroneya`,
    :cite:`Wikipedia2007a`

    Examples
    --------
    >>> specification = CIECAM02_Specification(J=41.731091132513917,
    ...                                        C=0.104707757171031,
    ...                                        h=219.048432658311780)
    >>> XYZ_w = np.array([95.05, 100.00, 108.88])
    >>> L_A = 318.31
    >>> Y_b = 20.0
    >>> CIECAM02_to_XYZ(specification, XYZ_w, L_A, Y_b)  # doctest: +ELLIPSIS
    array([ 19.01...,  20...  ,  21.78...])
    """

    J, C, h, _s, _Q, M, _H, _HC = as_namedtuple(CIECAM02_specification,
                                                CIECAM02_Specification)
    L_A = as_float_array(L_A)

    h = to_domain_degrees(h)
    XYZ_w = to_domain_100(XYZ_w)
    _X_w, Y_w, _Z_w = tsplit(XYZ_w)

    n, F_L, N_bb, N_cb, z = tsplit(
        viewing_condition_dependent_parameters(Y_b, Y_w, L_A))

    if C is None and M is not None:
        C = M / spow(F_L, 0.25)
    elif C is None:
        raise ValueError('Either "C" or "M" correlate must be defined in '
                         'the "CIECAM02_specification" argument!')

    # Converting *CIE XYZ* tristimulus values to *CMCCAT2000* transform
    # sharpened *RGB* values.
    RGB_w = dot_vector(CAT02_CAT, XYZ_w)

    # Computing degree of adaptation :math:`D`.
    D = (degree_of_adaptation(surround.F, L_A)
         if not discount_illuminant else np.ones(L_A.shape))

    # Computing full chromatic adaptation.
    RGB_wc = full_chromatic_adaptation_forward(RGB_w, RGB_w, Y_w, D)

    # Converting to *Hunt-Pointer-Estevez* colourspace.
    RGB_pw = RGB_to_rgb(RGB_wc)

    # Applying post-adaptation non linear response compression.
    RGB_aw = post_adaptation_non_linear_response_compression_forward(
        RGB_pw, F_L)

    # Computing achromatic response for the whitepoint.
    A_w = achromatic_response_forward(RGB_aw, N_bb)

    # Computing temporary magnitude quantity :math:`t`.
    t = temporary_magnitude_quantity_reverse(C, J, n)

    # Computing eccentricity factor *e_t*.
    e_t = eccentricity_factor(h)

    # Computing achromatic response :math:`A` for the stimulus.
    A = achromatic_response_reverse(A_w, J, surround.c, z)

    # Computing *P_1* to *P_3*.
    P_n = P(surround.N_c, N_cb, e_t, t, A, N_bb)
    _P_1, P_2, _P_3 = tsplit(P_n)

    # Computing opponent colour dimensions :math:`a` and :math:`b`.
    a, b = tsplit(opponent_colour_dimensions_reverse(P_n, h))

    # Computing post-adaptation non linear response compression matrix.
    RGB_a = post_adaptation_non_linear_response_compression_matrix(P_2, a, b)

    # Applying reverse post-adaptation non linear response compression.
    RGB_p = post_adaptation_non_linear_response_compression_reverse(RGB_a, F_L)

    # Converting to *Hunt-Pointer-Estevez* colourspace.
    RGB_c = rgb_to_RGB(RGB_p)

    # Applying reverse full chromatic adaptation.
    RGB = full_chromatic_adaptation_reverse(RGB_c, RGB_w, Y_w, D)

    # Converting *CMCCAT2000* transform sharpened *RGB* values to *CIE XYZ*
    # tristimulus values.
    XYZ = dot_vector(CAT02_INVERSE_CAT, RGB)

    return from_range_100(XYZ)
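As the Raises section above states, omitting both C and M leaves the chroma underdetermined and the definition raises ValueError. A minimal sketch of that failure path, assuming CIECAM02_Specification and CIECAM02_to_XYZ are importable from colour.appearance:

import numpy as np

from colour.appearance import CIECAM02_Specification, CIECAM02_to_XYZ

XYZ_w = np.array([95.05, 100.00, 108.88])
L_A = 318.31
Y_b = 20.0

# Neither C nor M is given, so the chroma cannot be recovered.
specification = CIECAM02_Specification(J=41.731091132513917,
                                       h=219.048432658311780)
try:
    CIECAM02_to_XYZ(specification, XYZ_w, L_A, Y_b)
except ValueError as error:
    print(error)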