Example No. 1
def projective_transformation(a: ArrayLike, Q: ArrayLike) -> NDArray:
    """
    Transform given array :math:`a` with the projective transformation matrix
    :math:`Q`.

    Parameters
    ----------
    a
        Array :math:`a` to apply the projective transformation matrix onto.
    Q
        Projective transformation matrix :math:`Q`.

    Returns
    -------
    :class:`numpy.ndarray`
        Transformed array :math:`a`.
    """

    a = as_float_array(a)
    Q = as_float_array(Q)

    shape = list(a.shape)
    shape[-1] = shape[-1] + 1

    M = ones(tuple(shape))
    M[..., :-1] = a

    homography = np.dot(M, np.transpose(Q))
    homography[..., 0:-1] /= homography[..., -1][..., np.newaxis]

    return homography[..., 0:-1]
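A minimal usage sketch, with the definition above in scope: the identity matrix leaves points unchanged, while a homogeneous translation matrix shifts them.

import numpy as np

# Hypothetical 2D translation expressed as a 3x3 homogeneous matrix.
Q = np.array([
    [1.0, 0.0, 2.0],
    [0.0, 1.0, -1.0],
    [0.0, 0.0, 1.0],
])
a = np.array([[0.0, 0.0], [1.0, 1.0]])

print(projective_transformation(a, Q))
# [[ 2. -1.]
#  [ 3.  0.]]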
Example No. 2
    def test_derivatives(self):
        """
        Tests the gradients computed using closed-form expressions of the
        derivatives with finite difference approximations.
        """

        samples = np.linspace(-10, 10, 1000)
        ds = samples[1] - samples[0]

        # Vary one coefficient at a time, keeping the others fixed to 1.
        for coefficient_i in range(3):
            errors = np.empty_like(samples)
            derrors = np.empty_like(samples)

            for i, sample in enumerate(samples):
                coefficients = ones(3)
                coefficients[coefficient_i] = sample

                error, derror = error_function(coefficients, self._Lab_e,
                                               self._cmfs, self._sd_D65)

                errors[i] = error
                derrors[i] = derror[coefficient_i]

            staggered_derrors = (derrors[:-1] + derrors[1:]) / 2
            approximate_derrors = np.diff(errors) / ds

            # The approximated derivatives aren't too accurate, so tolerances
            # have to be rather loose.
            np.testing.assert_allclose(staggered_derrors,
                                       approximate_derrors,
                                       atol=1e-3,
                                       rtol=1e-2)
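The staggered / finite-difference comparison above can be illustrated in isolation; a self-contained sketch (no colour fixtures, a quadratic stands in for the error function):

import numpy as np

samples = np.linspace(-10, 10, 1000)
ds = samples[1] - samples[0]

errors = samples ** 2   # stand-in "error" function
derrors = 2 * samples   # its closed-form derivative

# Average adjacent analytic derivatives onto the midpoints sampled by np.diff.
staggered_derrors = (derrors[:-1] + derrors[1:]) / 2
approximate_derrors = np.diff(errors) / ds

np.testing.assert_allclose(staggered_derrors, approximate_derrors,
                           atol=1e-3, rtol=1e-2)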
Example No. 3
def P(
    N_c: FloatingOrArrayLike,
    N_cb: FloatingOrArrayLike,
    e_t: FloatingOrArrayLike,
    t: FloatingOrArrayLike,
    A: FloatingOrArrayLike,
    N_bb: FloatingOrArrayLike,
) -> NDArray:
    """
    Return the points :math:`P_1`, :math:`P_2` and :math:`P_3`.

    Parameters
    ----------
    N_c
        Surround chromatic induction factor :math:`N_{c}`.
    N_cb
        Chromatic induction factor :math:`N_{cb}`.
    e_t
        Eccentricity factor :math:`e_t`.
    t
        Temporary magnitude quantity :math:`t`.
    A
        Achromatic response :math:`A` for the stimulus.
    N_bb
        Chromatic induction factor :math:`N_{bb}`.

    Returns
    -------
    :class:`numpy.ndarray`
        Points :math:`P`.

    Examples
    --------
    >>> N_c = 1.0
    >>> N_cb = 1.00030400456
    >>> e_t = 1.174005472851914
    >>> t = 0.149746202921
    >>> A = 23.9394809667
    >>> N_bb = 1.00030400456
    >>> P(N_c, N_cb, e_t, t, A, N_bb)  # doctest: +ELLIPSIS
    array([  3.0162890...e+04,   2.4237205...e+01,   1.0500000...e+00])
    """

    N_c = as_float_array(N_c)
    N_cb = as_float_array(N_cb)
    e_t = as_float_array(e_t)
    t = as_float_array(t)
    A = as_float_array(A)
    N_bb = as_float_array(N_bb)

    P_1 = ((50000 / 13) * N_c * N_cb * e_t) / t
    P_2 = A / N_bb + 0.305
    P_3 = ones(P_1.shape) * (21 / 20)

    P_n = tstack([P_1, P_2, P_3])

    return P_n
Example No. 4
    def __init__(
        self,
        matrix: Optional[ArrayLike] = None,
        offset: Optional[ArrayLike] = None,
        *args: Any,
        **kwargs: Any,
    ):
        super().__init__(*args, **kwargs)

        # TODO: Remove pragma when https://github.com/python/mypy/issues/3004
        # is resolved.
        self._matrix: NDArray = np.diag(ones(4))
        self.matrix = cast(ArrayLike,
                           optional(matrix,
                                    self._matrix))  # type: ignore[assignment]
        self._offset: NDArray = zeros(4)
        self.offset = cast(ArrayLike,
                           optional(offset,
                                    self._offset))  # type: ignore[assignment]
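The optional() calls above (from colour.utilities) presumably return their first argument unless it is None, in which case the given default is used; a hypothetical minimal stand-in for illustration:

import numpy as np

def optional_sketch(value, default):
    # Hypothetical stand-in for colour.utilities.optional.
    return default if value is None else value

matrix = optional_sketch(None, np.diag(np.ones(4)))  # falls back to the 4x4 identity
offset = optional_sketch([0.0, 0.0, 0.0, 1.0], np.zeros(4))  # caller value wins
print(matrix.shape, offset)  # (4, 4) [0.0, 0.0, 0.0, 1.0]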
Example No. 5
def primaries_whitepoint(npm):
    """
    Computes the *primaries* and *whitepoint* :math:`xy` chromaticity
    coordinates using given *Normalised Primary Matrix* (NPM).

    Parameters
    ----------
    npm : array_like, (3, 3)
        *Normalised Primary Matrix*.

    Returns
    -------
    tuple
        *Primaries* and *whitepoint* :math:`xy` chromaticity coordinates.

    References
    ----------
    :cite:`Trieu2015a`

    Examples
    --------
    >>> npm = np.array([[9.52552396e-01, 0.00000000e+00, 9.36786317e-05],
    ...                 [3.43966450e-01, 7.28166097e-01, -7.21325464e-02],
    ...                 [0.00000000e+00, 0.00000000e+00, 1.00882518e+00]])
    >>> p, w = primaries_whitepoint(npm)
    >>> p  # doctest: +ELLIPSIS
    array([[  7.3470000...e-01,   2.6530000...e-01],
           [  0.0000000...e+00,   1.0000000...e+00],
           [  1.0000000...e-04,  -7.7000000...e-02]])
    >>> w # doctest: +ELLIPSIS
    array([ 0.32168,  0.33767])
    """

    npm = npm.reshape([3, 3])

    primaries = XYZ_to_xy(np.transpose(np.dot(npm, np.identity(3))))
    whitepoint = np.squeeze(XYZ_to_xy(np.transpose(np.dot(npm, ones([3, 1])))))

    # TODO: Investigate whether an ndarray with the primaries and whitepoint
    # stacked together should be returned here.
    return primaries, whitepoint
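The whitepoint line above multiplies the NPM by a vector of ones, i.e. it sums the primaries' columns to obtain the XYZ of the RGB = [1, 1, 1] white before converting to xy; a numpy-only sketch of that step with the docstring's NPM:

import numpy as np

npm = np.array([[9.52552396e-01, 0.00000000e+00, 9.36786317e-05],
                [3.43966450e-01, 7.28166097e-01, -7.21325464e-02],
                [0.00000000e+00, 0.00000000e+00, 1.00882518e+00]])

XYZ_w = np.dot(npm, np.ones(3))  # XYZ of RGB = [1, 1, 1]
print(XYZ_w)  # ~[0.9526, 1.0000, 1.0088]; XYZ_to_xy of this gives the whitepoint above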
Example No. 6
    def test_LUT3D_Jakob2019(self):
        """
        Tests the entirety of the
        :class:`colour.recovery.jakob2019.LUT3D_Jakob2019` class.
        """

        LUT = LUT3D_Jakob2019()
        LUT.generate(self._RGB_colourspace, self._cmfs, self._sd_D65, 5)

        path = os.path.join(self._temporary_directory, 'Test_Jakob2019.coeff')

        LUT.write(path)
        LUT.read(path)

        for RGB in [
                np.array([1, 0, 0]),
                np.array([0, 1, 0]),
                np.array([0, 0, 1]),
                zeros(3),
                full(3, 0.5),
                ones(3),
        ]:
            XYZ = RGB_to_XYZ(RGB, self._RGB_colourspace.whitepoint,
                             self._xy_D65,
                             self._RGB_colourspace.matrix_RGB_to_XYZ)
            Lab = XYZ_to_Lab(XYZ, self._xy_D65)

            recovered_sd = LUT.RGB_to_sd(RGB)
            recovered_XYZ = sd_to_XYZ(recovered_sd, self._cmfs,
                                      self._sd_D65) / 100
            recovered_Lab = XYZ_to_Lab(recovered_XYZ, self._xy_D65)

            error = delta_E_CIE1976(Lab, recovered_Lab)

            if error > 2 * JND_CIE1976 / 100:
                self.fail(
                    'Delta E for RGB={0} in colourspace {1} is {2}!'.format(
                        RGB, self._RGB_colourspace.name, error))
Example No. 7
def matrix_augmented_Cheung2004(RGB, terms=3):
    """
    Performs polynomial expansion of given *RGB* colourspace array using
    *Cheung et al. (2004)* method.

    Parameters
    ----------
    RGB : array_like
        *RGB* colourspace array to expand.
    terms : int, optional
        Number of terms of the expanded polynomial, must be one of
        *[3, 5, 7, 8, 10, 11, 14, 16, 17, 19, 20, 22]*.

    Returns
    -------
    ndarray
        Expanded *RGB* colourspace array.

    Notes
    -----
    -   This definition combines the augmented matrices given in
        :cite:`Cheung2004` and :cite:`Westland2004`.

    References
    ----------
    :cite:`Cheung2004`, :cite:`Westland2004`

    Examples
    --------
    >>> RGB = np.array([0.17224810, 0.09170660, 0.06416938])
    >>> matrix_augmented_Cheung2004(RGB, terms=5)  # doctest: +ELLIPSIS
    array([ 0.1722481...,  0.0917066...,  0.0641693...,  0.0010136...,  1...])
    """

    R, G, B = tsplit(RGB)
    tail = ones(R.shape)

    existing_terms = np.array([3, 5, 7, 8, 10, 11, 14, 16, 17, 19, 20, 22])
    closest_terms = as_int(closest(existing_terms, terms))
    if closest_terms != terms:
        raise ValueError('"Cheung et al. (2004)" method does not define '
                         'an augmented matrix with {0} terms, '
                         'closest augmented matrix has {1} terms!'.format(
                             terms, closest_terms))

    if terms == 3:
        return RGB
    elif terms == 5:
        return tstack([
            R,
            G,
            B,
            R * G * B,
            tail,
        ])
    elif terms == 7:
        return tstack([
            R,
            G,
            B,
            R * G,
            R * B,
            G * B,
            tail,
        ])
    elif terms == 8:
        return tstack([
            R,
            G,
            B,
            R * G,
            R * B,
            G * B,
            R * G * B,
            tail,
        ])
    elif terms == 10:
        return tstack([
            R,
            G,
            B,
            R * G,
            R * B,
            G * B,
            R**2,
            G**2,
            B**2,
            tail,
        ])
    elif terms == 11:
        return tstack([
            R,
            G,
            B,
            R * G,
            R * B,
            G * B,
            R**2,
            G**2,
            B**2,
            R * G * B,
            tail,
        ])
    elif terms == 14:
        return tstack([
            R,
            G,
            B,
            R * G,
            R * B,
            G * B,
            R**2,
            G**2,
            B**2,
            R * G * B,
            R**3,
            G**3,
            B**3,
            tail,
        ])
    elif terms == 16:
        return tstack([
            R,
            G,
            B,
            R * G,
            R * B,
            G * B,
            R**2,
            G**2,
            B**2,
            R * G * B,
            R**2 * G,
            G**2 * B,
            B**2 * R,
            R**3,
            G**3,
            B**3,
        ])
    elif terms == 17:
        return tstack([
            R,
            G,
            B,
            R * G,
            R * B,
            G * B,
            R**2,
            G**2,
            B**2,
            R * G * B,
            R**2 * G,
            G**2 * B,
            B**2 * R,
            R**3,
            G**3,
            B**3,
            tail,
        ])
    elif terms == 19:
        return tstack([
            R,
            G,
            B,
            R * G,
            R * B,
            G * B,
            R**2,
            G**2,
            B**2,
            R * G * B,
            R**2 * G,
            G**2 * B,
            B**2 * R,
            R**2 * B,
            G**2 * R,
            B**2 * G,
            R**3,
            G**3,
            B**3,
        ])
    elif terms == 20:
        return tstack([
            R,
            G,
            B,
            R * G,
            R * B,
            G * B,
            R**2,
            G**2,
            B**2,
            R * G * B,
            R**2 * G,
            G**2 * B,
            B**2 * R,
            R**2 * B,
            G**2 * R,
            B**2 * G,
            R**3,
            G**3,
            B**3,
            tail,
        ])
    elif terms == 22:
        return tstack([
            R,
            G,
            B,
            R * G,
            R * B,
            G * B,
            R**2,
            G**2,
            B**2,
            R * G * B,
            R**2 * G,
            G**2 * B,
            B**2 * R,
            R**2 * B,
            G**2 * R,
            B**2 * G,
            R**3,
            G**3,
            B**3,
            R**2 * G * B,
            R * G**2 * B,
            R * G * B**2,
        ])
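Because tsplit and tstack operate on the last axis, the expansion also applies to image-shaped arrays; a small sketch with the definition above in scope, showing the shape change for terms=7:

import numpy as np

RGB = np.tile([0.17224810, 0.09170660, 0.06416938], (2, 2, 1))  # shape (2, 2, 3)
print(matrix_augmented_Cheung2004(RGB, terms=7).shape)  # (2, 2, 7)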
Example No. 8
def XYZ_to_Kim2009(
    XYZ: ArrayLike,
    XYZ_w: ArrayLike,
    L_A: FloatingOrArrayLike,
    media: MediaParameters_Kim2009 = MEDIA_PARAMETERS_KIM2009["CRT Displays"],
    surround: InductionFactors_Kim2009 = VIEWING_CONDITIONS_KIM2009["Average"],
    discount_illuminant: Boolean = False,
    n_c: Floating = 0.57,
) -> CAM_Specification_Kim2009:
    """
    Compute the *Kim, Weyrich and Kautz (2009)* colour appearance model
    correlates from given *CIE XYZ* tristimulus values.

    Parameters
    ----------
    XYZ
        *CIE XYZ* tristimulus values of test sample / stimulus.
    XYZ_w
        *CIE XYZ* tristimulus values of reference white.
    L_A
        Adapting field *luminance* :math:`L_A` in :math:`cd/m^2` (often taken
        to be 20% of the luminance of a white object in the scene).
    media
        Media parameters.
    surround
        Surround viewing conditions induction factors.
    discount_illuminant
        Truth value indicating if the illuminant should be discounted.
    n_c
        Cone response sigmoidal curve modulating factor :math:`n_c`.

    Returns
    -------
    :class:`colour.CAM_Specification_Kim2009`
       *Kim, Weyrich and Kautz (2009)* colour appearance model specification.

    Notes
    -----
    +------------+-----------------------+---------------+
    | **Domain** | **Scale - Reference** | **Scale - 1** |
    +============+=======================+===============+
    | ``XYZ``    | [0, 100]              | [0, 1]        |
    +------------+-----------------------+---------------+
    | ``XYZ_w``  | [0, 100]              | [0, 1]        |
    +------------+-----------------------+---------------+

    +---------------------------------+-----------------------+---------------+
    | **Range**                       | **Scale - Reference** | **Scale - 1** |
    +=================================+=======================+===============+
    | ``CAM_Specification_Kim2009.J`` | [0, 100]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Kim2009.C`` | [0, 100]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Kim2009.h`` | [0, 360]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Kim2009.s`` | [0, 100]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Kim2009.Q`` | [0, 100]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Kim2009.M`` | [0, 100]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Kim2009.H`` | [0, 400]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+

    References
    ----------
    :cite:`Kim2009`

    Examples
    --------
    >>> XYZ = np.array([19.01, 20.00, 21.78])
    >>> XYZ_w = np.array([95.05, 100.00, 108.88])
    >>> L_A = 318.31
    >>> media = MEDIA_PARAMETERS_KIM2009['CRT Displays']
    >>> surround = VIEWING_CONDITIONS_KIM2009['Average']
    >>> XYZ_to_Kim2009(XYZ, XYZ_w, L_A, media, surround)
    ... # doctest: +ELLIPSIS
    CAM_Specification_Kim2009(J=28.8619089..., C=0.5592455..., \
h=219.0480667..., s=9.3837797..., Q=52.7138883..., M=0.4641738..., \
H=278.0602824..., HC=None)
    """

    XYZ = to_domain_100(XYZ)
    XYZ_w = to_domain_100(XYZ_w)
    _X_w, Y_w, _Z_w = tsplit(XYZ_w)
    L_A = as_float_array(L_A)

    # Converting *CIE XYZ* tristimulus values to *CMCCAT2000* transform
    # sharpened *RGB* values.
    RGB = vector_dot(CAT_CAT02, XYZ)
    RGB_w = vector_dot(CAT_CAT02, XYZ_w)

    # Computing degree of adaptation :math:`D`.
    D = (
        degree_of_adaptation(surround.F, L_A)
        if not discount_illuminant
        else ones(L_A.shape)
    )

    # Computing full chromatic adaptation.
    XYZ_c = full_chromatic_adaptation_forward(RGB, RGB_w, Y_w, D)
    XYZ_wc = full_chromatic_adaptation_forward(RGB_w, RGB_w, Y_w, D)

    # Converting to *Hunt-Pointer-Estevez* colourspace.
    LMS = RGB_to_rgb(XYZ_c)
    LMS_w = RGB_to_rgb(XYZ_wc)

    # Cones absolute response.
    LMS_n_c = spow(LMS, n_c)
    LMS_w_n_c = spow(LMS_w, n_c)
    L_A_n_c = spow(L_A, n_c)
    LMS_p = LMS_n_c / (LMS_n_c + L_A_n_c)
    LMS_wp = LMS_w_n_c / (LMS_w_n_c + L_A_n_c)

    # Achromatic signal :math:`A` and :math:`A_w`.
    v_A = np.array([40, 20, 1])
    A = np.sum(v_A * LMS_p, axis=-1) / 61
    A_w = np.sum(v_A * LMS_wp, axis=-1) / 61

    # Perceived *Lightness* :math:`J_p`.
    a_j, b_j, o_j, n_j = 0.89, 0.24, 0.65, 3.65
    A_A_w = A / A_w
    J_p = spow(
        (-(A_A_w - b_j) * spow(o_j, n_j)) / (A_A_w - b_j - a_j), 1 / n_j
    )

    # Computing the media dependent *Lightness* :math:`J`.
    J = 100 * (media.E * (J_p - 1) + 1)

    # Computing the correlate of *brightness* :math:`Q`.
    n_q = 0.1308
    Q = J * spow(Y_w, n_q)

    # Opponent signals :math:`a` and :math:`b`.
    a = (1 / 11) * np.sum(np.array([11, -12, 1]) * LMS_p, axis=-1)
    b = (1 / 9) * np.sum(np.array([1, 1, -2]) * LMS_p, axis=-1)

    # Computing the correlate of *chroma* :math:`C`.
    a_k, n_k = 456.5, 0.62
    C = a_k * spow(np.sqrt(a**2 + b**2), n_k)

    # Computing the correlate of *colourfulness* :math:`M`.
    a_m, b_m = 0.11, 0.61
    M = C * (a_m * np.log10(Y_w) + b_m)

    # Computing the correlate of *saturation* :math:`s`.
    s = 100 * np.sqrt(M / Q)

    # Computing the *hue* angle :math:`h`.
    h = np.degrees(np.arctan2(b, a)) % 360

    # Computing hue :math:`h` quadrature :math:`H`.
    H = hue_quadrature(h)

    return CAM_Specification_Kim2009(
        as_float(from_range_100(J)),
        as_float(from_range_100(C)),
        as_float(from_range_degrees(h)),
        as_float(from_range_100(s)),
        as_float(from_range_100(Q)),
        as_float(from_range_100(M)),
        as_float(from_range_degrees(H, 400)),
        None,
    )
Example No. 9
def Kim2009_to_XYZ(
    specification: CAM_Specification_Kim2009,
    XYZ_w: ArrayLike,
    L_A: FloatingOrArrayLike,
    media: MediaParameters_Kim2009 = MEDIA_PARAMETERS_KIM2009["CRT Displays"],
    surround: InductionFactors_Kim2009 = VIEWING_CONDITIONS_KIM2009["Average"],
    discount_illuminant: Boolean = False,
    n_c: Floating = 0.57,
) -> NDArray:
    """
    Convert from *Kim, Weyrich and Kautz (2009)* specification to *CIE XYZ*
    tristimulus values.

    Parameters
    ----------
    specification
         *Kim, Weyrich and Kautz (2009)* colour appearance model specification.
         Correlate of *Lightness* :math:`J`, correlate of *chroma* :math:`C` or
         correlate of *colourfulness* :math:`M` and *hue* angle :math:`h` in
         degrees must be specified, e.g. :math:`JCh` or :math:`JMh`.
    XYZ_w
        *CIE XYZ* tristimulus values of reference white.
    L_A
        Adapting field *luminance* :math:`L_A` in :math:`cd/m^2` (often taken
        to be 20% of the luminance of a white object in the scene).
    media
        Media parameters.
    surround
        Surround viewing conditions induction factors.
    discount_illuminant
        Discount the illuminant.
    n_c
        Cone response sigmoidal curve modulating factor :math:`n_c`.

    Returns
    -------
    :class:`numpy.ndarray`
        *CIE XYZ* tristimulus values.

    Raises
    ------
    ValueError
        If neither the *C* nor the *M* correlate has been defined in the
        ``CAM_Specification_Kim2009`` argument.

    Notes
    -----
    +---------------------------------+-----------------------+---------------+
    | **Domain**                      | **Scale - Reference** | **Scale - 1** |
    +=================================+=======================+===============+
    | ``CAM_Specification_Kim2009.J`` | [0, 100]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Kim2009.C`` | [0, 100]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Kim2009.h`` | [0, 360]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Kim2009.s`` | [0, 100]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Kim2009.Q`` | [0, 100]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Kim2009.M`` | [0, 100]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Kim2009.H`` | [0, 400]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+
    | ``XYZ_w``                       | [0, 100]              | [0, 1]        |
    +---------------------------------+-----------------------+---------------+

    +-----------+-----------------------+---------------+
    | **Range** | **Scale - Reference** | **Scale - 1** |
    +===========+=======================+===============+
    | ``XYZ``   | [0, 100]              | [0, 1]        |
    +-----------+-----------------------+---------------+

    References
    ----------
    :cite:`Kim2009`

    Examples
    --------
    >>> specification = CAM_Specification_Kim2009(J=28.861908975839647,
    ...                                           C=0.5592455924373706,
    ...                                           h=219.04806677662953)
    >>> XYZ_w = np.array([95.05, 100.00, 108.88])
    >>> L_A = 318.31
    >>> media = MEDIA_PARAMETERS_KIM2009['CRT Displays']
    >>> surround = VIEWING_CONDITIONS_KIM2009['Average']
    >>> Kim2009_to_XYZ(specification, XYZ_w, L_A, media, surround)
    ... # doctest: +ELLIPSIS
    array([ 19.0099995...,  19.9999999...,  21.7800000...])
    """

    J, C, h, _s, _Q, M, _H, _HC = astuple(specification)

    J = to_domain_100(J)
    C = to_domain_100(C)
    h = to_domain_degrees(h)
    M = to_domain_100(M)
    L_A = as_float_array(L_A)
    XYZ_w = to_domain_100(XYZ_w)
    _X_w, Y_w, _Z_w = tsplit(XYZ_w)

    # Converting *CIE XYZ* tristimulus values to *CMCCAT2000* transform
    # sharpened *RGB* values.
    RGB_w = vector_dot(CAT_CAT02, XYZ_w)

    # Computing degree of adaptation :math:`D`.
    D = (
        degree_of_adaptation(surround.F, L_A)
        if not discount_illuminant
        else ones(L_A.shape)
    )

    # Computing full chromatic adaptation.
    XYZ_wc = full_chromatic_adaptation_forward(RGB_w, RGB_w, Y_w, D)

    # Converting to *Hunt-Pointer-Estevez* colourspace.
    LMS_w = RGB_to_rgb(XYZ_wc)

    # n_q = 0.1308
    # J = Q / spow(Y_w, n_q)
    if has_only_nan(C) and not has_only_nan(M):
        a_m, b_m = 0.11, 0.61
        C = M / (a_m * np.log10(Y_w) + b_m)
    elif has_only_nan(C):
        raise ValueError(
            'Either "C" or "M" correlate must be defined in '
            'the "CAM_Specification_Kim2009" argument!'
        )

    # Cones absolute response.
    LMS_w_n_c = spow(LMS_w, n_c)
    L_A_n_c = spow(L_A, n_c)
    LMS_wp = LMS_w_n_c / (LMS_w_n_c + L_A_n_c)

    # Achromatic signal :math:`A_w`
    v_A = np.array([40, 20, 1])
    A_w = np.sum(v_A * LMS_wp, axis=-1) / 61

    # Perceived *Lightness* :math:`J_p`.
    J_p = (J / 100 - 1) / media.E + 1

    # Achromatic signal :math:`A`.
    a_j, b_j, n_j, o_j = 0.89, 0.24, 3.65, 0.65
    J_p_n_j = spow(J_p, n_j)
    A = A_w * ((a_j * J_p_n_j) / (J_p_n_j + spow(o_j, n_j)) + b_j)

    # Opponent signals :math:`a` and :math:`b`.
    a_k, n_k = 456.5, 0.62
    C_a_k_n_k = spow(C / a_k, 1 / n_k)
    hr = np.radians(h)
    a, b = np.cos(hr) * C_a_k_n_k, np.sin(hr) * C_a_k_n_k

    # Cones absolute response.
    M = np.array(
        [
            [1.0000, 0.3215, 0.2053],
            [1.0000, -0.6351, -0.1860],
            [1.0000, -0.1568, -4.4904],
        ]
    )
    LMS_p = vector_dot(M, tstack([A, a, b]))
    LMS = spow((-spow(L_A, n_c) * LMS_p) / (LMS_p - 1), 1 / n_c)

    # Converting to *Hunt-Pointer-Estevez* colourspace.
    RGB_c = rgb_to_RGB(LMS)

    # Applying inverse full chromatic adaptation.
    RGB = full_chromatic_adaptation_inverse(RGB_c, RGB_w, Y_w, D)

    XYZ = vector_dot(CAT_INVERSE_CAT02, RGB)

    return from_range_100(XYZ)
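With XYZ_to_Kim2009 from the previous example and Kim2009_to_XYZ both in scope, the forward and inverse definitions should round-trip on the docstring values; a minimal sanity check:

import numpy as np

XYZ = np.array([19.01, 20.00, 21.78])
XYZ_w = np.array([95.05, 100.00, 108.88])
L_A = 318.31
media = MEDIA_PARAMETERS_KIM2009['CRT Displays']
surround = VIEWING_CONDITIONS_KIM2009['Average']

specification = XYZ_to_Kim2009(XYZ, XYZ_w, L_A, media, surround)
XYZ_r = Kim2009_to_XYZ(specification, XYZ_w, L_A, media, surround)

print(np.allclose(XYZ, XYZ_r, atol=1e-4))  # True, per the doctests above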
Example No. 10
def XYZ_to_CIECAM02(
    XYZ: ArrayLike,
    XYZ_w: ArrayLike,
    L_A: FloatingOrArrayLike,
    Y_b: FloatingOrArrayLike,
    surround: InductionFactors_CIECAM02 = VIEWING_CONDITIONS_CIECAM02[
        "Average"],
    discount_illuminant: Boolean = False,
) -> CAM_Specification_CIECAM02:
    """
    Compute the *CIECAM02* colour appearance model correlates from given
    *CIE XYZ* tristimulus values.

    Parameters
    ----------
    XYZ
        *CIE XYZ* tristimulus values of test sample / stimulus.
    XYZ_w
        *CIE XYZ* tristimulus values of reference white.
    L_A
        Adapting field *luminance* :math:`L_A` in :math:`cd/m^2` (often taken
        to be 20% of the luminance of a white object in the scene).
    Y_b
        Luminous factor of background :math:`Y_b` such as
        :math:`Y_b = 100 x L_b / L_w` where :math:`L_w` is the luminance of the
        light source and :math:`L_b` is the luminance of the background. For
        viewing images, :math:`Y_b` can be the average :math:`Y` value for the
        pixels in the entire image; frequently, a :math:`Y` value of 20,
        approximating an :math:`L^*` of 50, is used.
    surround
        Surround viewing conditions induction factors.
    discount_illuminant
        Truth value indicating if the illuminant should be discounted.

    Returns
    -------
    :class:`colour.CAM_Specification_CIECAM02`
        *CIECAM02* colour appearance model specification.

    Notes
    -----
    +------------+-----------------------+---------------+
    | **Domain** | **Scale - Reference** | **Scale - 1** |
    +============+=======================+===============+
    | ``XYZ``    | [0, 100]              | [0, 1]        |
    +------------+-----------------------+---------------+
    | ``XYZ_w``  | [0, 100]              | [0, 1]        |
    +------------+-----------------------+---------------+

    +----------------------------------+-----------------------\
+---------------+
    | **Range**                        | **Scale - Reference** \
| **Scale - 1** |
    +==================================+=======================\
+===============+
    | ``CAM_Specification_CIECAM02.J`` | [0, 100]              \
| [0, 1]        |
    +----------------------------------+-----------------------\
+---------------+
    | ``CAM_Specification_CIECAM02.C`` | [0, 100]              \
| [0, 1]        |
    +----------------------------------+-----------------------\
+---------------+
    | ``CAM_Specification_CIECAM02.h`` | [0, 360]              \
| [0, 1]        |
    +----------------------------------+-----------------------\
+---------------+
    | ``CAM_Specification_CIECAM02.s`` | [0, 100]              \
| [0, 1]        |
    +----------------------------------+-----------------------\
+---------------+
    | ``CAM_Specification_CIECAM02.Q`` | [0, 100]              \
| [0, 1]        |
    +----------------------------------+-----------------------\
+---------------+
    | ``CAM_Specification_CIECAM02.M`` | [0, 100]              \
| [0, 1]        |
    +----------------------------------+-----------------------\
+---------------+
    | ``CAM_Specification_CIECAM02.H`` | [0, 400]              \
| [0, 1]        |
    +----------------------------------+-----------------------\
+---------------+

    References
    ----------
    :cite:`Fairchild2004c`, :cite:`Luo2013`, :cite:`Moroneya`,
    :cite:`Wikipedia2007a`

    Examples
    --------
    >>> XYZ = np.array([19.01, 20.00, 21.78])
    >>> XYZ_w = np.array([95.05, 100.00, 108.88])
    >>> L_A = 318.31
    >>> Y_b = 20.0
    >>> surround = VIEWING_CONDITIONS_CIECAM02['Average']
    >>> XYZ_to_CIECAM02(XYZ, XYZ_w, L_A, Y_b, surround)  # doctest: +ELLIPSIS
    CAM_Specification_CIECAM02(J=41.7310911..., C=0.1047077..., \
h=219.0484326..., s=2.3603053..., Q=195.3713259..., M=0.1088421..., \
H=278.0607358..., HC=None)
    """

    XYZ = to_domain_100(XYZ)
    XYZ_w = to_domain_100(XYZ_w)
    _X_w, Y_w, _Z_w = tsplit(XYZ_w)
    L_A = as_float_array(L_A)
    Y_b = as_float_array(Y_b)

    n, F_L, N_bb, N_cb, z = viewing_condition_dependent_parameters(
        Y_b, Y_w, L_A)

    # Converting *CIE XYZ* tristimulus values to *CMCCAT2000* transform
    # sharpened *RGB* values.
    RGB = vector_dot(CAT_CAT02, XYZ)
    RGB_w = vector_dot(CAT_CAT02, XYZ_w)

    # Computing degree of adaptation :math:`D`.
    D = (degree_of_adaptation(surround.F, L_A)
         if not discount_illuminant else ones(L_A.shape))

    # Computing full chromatic adaptation.
    RGB_c = full_chromatic_adaptation_forward(RGB, RGB_w, Y_w, D)
    RGB_wc = full_chromatic_adaptation_forward(RGB_w, RGB_w, Y_w, D)

    # Converting to *Hunt-Pointer-Estevez* colourspace.
    RGB_p = RGB_to_rgb(RGB_c)
    RGB_pw = RGB_to_rgb(RGB_wc)

    # Applying forward post-adaptation non-linear response compression.
    RGB_a = post_adaptation_non_linear_response_compression_forward(RGB_p, F_L)
    RGB_aw = post_adaptation_non_linear_response_compression_forward(
        RGB_pw, F_L)

    # Converting to preliminary cartesian coordinates.
    a, b = tsplit(opponent_colour_dimensions_forward(RGB_a))

    # Computing the *hue* angle :math:`h`.
    h = hue_angle(a, b)

    # Computing hue :math:`h` quadrature :math:`H`.
    H = hue_quadrature(h)
    # TODO: Compute hue composition.

    # Computing eccentricity factor *e_t*.
    e_t = eccentricity_factor(h)

    # Computing achromatic responses for the stimulus and the whitepoint.
    A = achromatic_response_forward(RGB_a, N_bb)
    A_w = achromatic_response_forward(RGB_aw, N_bb)

    # Computing the correlate of *Lightness* :math:`J`.
    J = lightness_correlate(A, A_w, surround.c, z)

    # Computing the correlate of *brightness* :math:`Q`.
    Q = brightness_correlate(surround.c, J, A_w, F_L)

    # Computing the correlate of *chroma* :math:`C`.
    C = chroma_correlate(J, n, surround.N_c, N_cb, e_t, a, b, RGB_a)

    # Computing the correlate of *colourfulness* :math:`M`.
    M = colourfulness_correlate(C, F_L)

    # Computing the correlate of *saturation* :math:`s`.
    s = saturation_correlate(M, Q)

    return CAM_Specification_CIECAM02(
        as_float(from_range_100(J)),
        as_float(from_range_100(C)),
        as_float(from_range_degrees(h)),
        as_float(from_range_100(s)),
        as_float(from_range_100(Q)),
        as_float(from_range_100(M)),
        as_float(from_range_degrees(H, 400)),
        None,
    )
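degree_of_adaptation(surround.F, L_A) above presumably implements the usual CIECAM02 formula D = F * (1 - (1 / 3.6) * exp((-L_A - 42) / 92)); a small sketch of that formula for the docstring's adapting luminance:

import numpy as np

def degree_of_adaptation_sketch(F, L_A):
    # Hypothetical stand-in for the usual CIECAM02 degree of adaptation formula.
    return F * (1 - (1 / 3.6) * np.exp((-L_A - 42) / 92))

# F = 1 for the 'Average' surround.
print(degree_of_adaptation_sketch(1.0, 318.31))  # ~0.994, nearly complete adaptation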
Example No. 11
def primitive_grid(width=1,
                   height=1,
                   width_segments=1,
                   height_segments=1,
                   axis='+z'):
    """
    Generates vertices and indices for a filled and outlined grid primitive.

    Parameters
    ----------
    width : float, optional
        Grid width.
    height : float, optional
        Grid height.
    width_segments : int, optional
        Grid segments count along the width.
    height_segments : int, optional
        Grid segments count along the height.
    axis : unicode, optional
        **{'+z', '-x', '+x', '-y', '+y', '-z',
        'xy', 'xz', 'yz', 'yx', 'zx', 'zy'}**,
        Axis the primitive will be normal to, or plane the primitive will be
        co-planar with.

    Returns
    -------
    tuple
        Tuple of grid vertices, face indices to produce a filled grid and
        outline indices to produce an outline of the faces of the grid.

    References
    ----------
    :cite:`Cabello2015`

    Examples
    --------
    >>> vertices, faces, outline = primitive_grid()
    >>> print(vertices)
    [([-0.5,  0.5,  0. ], [ 0.,  1.], [ 0.,  0.,  1.], [ 0.,  1.,  0.,  1.])
     ([ 0.5,  0.5,  0. ], [ 1.,  1.], [ 0.,  0.,  1.], [ 1.,  1.,  0.,  1.])
     ([-0.5, -0.5,  0. ], [ 0.,  0.], [ 0.,  0.,  1.], [ 0.,  0.,  0.,  1.])
     ([ 0.5, -0.5,  0. ], [ 1.,  0.], [ 0.,  0.,  1.], [ 1.,  0.,  0.,  1.])]
    >>> print(faces)
    [[0 2 1]
     [2 3 1]]
    >>> print(outline)
    [[0 2]
     [2 3]
     [3 1]
     [1 0]]
    """

    axis = PLANE_TO_AXIS_MAPPING.get(axis, axis).lower()

    x_grid = width_segments
    y_grid = height_segments

    x_grid1 = x_grid + 1
    y_grid1 = y_grid + 1

    # Positions, normals and uvs.
    positions = zeros(x_grid1 * y_grid1 * 3)
    normals = zeros(x_grid1 * y_grid1 * 3)
    uvs = zeros(x_grid1 * y_grid1 * 2)

    y = np.arange(y_grid1) * height / y_grid - height / 2
    x = np.arange(x_grid1) * width / x_grid - width / 2

    positions[::3] = np.tile(x, y_grid1)
    positions[1::3] = -np.repeat(y, x_grid1)

    normals[2::3] = 1

    uvs[::2] = np.tile(np.arange(x_grid1) / x_grid, y_grid1)
    uvs[1::2] = np.repeat(1 - np.arange(y_grid1) / y_grid, x_grid1)

    # Faces and outline.
    faces, outline = [], []
    for i_y in range(y_grid):
        for i_x in range(x_grid):
            a = i_x + x_grid1 * i_y
            b = i_x + x_grid1 * (i_y + 1)
            c = (i_x + 1) + x_grid1 * (i_y + 1)
            d = (i_x + 1) + x_grid1 * i_y

            faces.extend([(a, b, d), (b, c, d)])
            outline.extend([(a, b), (b, c), (c, d), (d, a)])

    positions = np.reshape(positions, (-1, 3))
    uvs = np.reshape(uvs, (-1, 2))
    normals = np.reshape(normals, (-1, 3))

    faces = np.reshape(faces, (-1, 3)).astype(np.uint32)
    outline = np.reshape(outline, (-1, 2)).astype(np.uint32)

    if axis in ('-x', '+x'):
        shift, zero_axis = 1, 0
    elif axis in ('-y', '+y'):
        shift, zero_axis = -1, 1
    elif axis in ('-z', '+z'):
        shift, zero_axis = 0, 2

    sign = -1 if '-' in axis else 1

    positions = np.roll(positions, shift, -1)
    normals = np.roll(normals, shift, -1) * sign
    vertex_colours = np.ravel(positions)
    vertex_colours = np.hstack([
        np.reshape(
            np.interp(vertex_colours,
                      (np.min(vertex_colours), np.max(vertex_colours)),
                      (0, 1)), positions.shape),
        ones([positions.shape[0], 1])
    ])
    vertex_colours[..., zero_axis] = 0

    vertices = zeros(positions.shape[0], [
        ('position', DEFAULT_FLOAT_DTYPE, 3),
        ('uv', DEFAULT_FLOAT_DTYPE, 2),
        ('normal', DEFAULT_FLOAT_DTYPE, 3),
        ('colour', DEFAULT_FLOAT_DTYPE, 4),
    ])

    vertices['position'] = positions
    vertices['uv'] = uvs
    vertices['normal'] = normals
    vertices['colour'] = vertex_colours

    return vertices, faces, outline
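Following the loop above, a grid with w by h segments yields (w + 1)(h + 1) vertices, 2wh triangular faces and 4wh outline edges; a quick check with the definition above in scope:

vertices, faces, outline = primitive_grid(width_segments=2, height_segments=2)
print(len(vertices), faces.shape, outline.shape)  # 9 (8, 3) (16, 2)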
Example No. 12
def CAM16_to_XYZ(specification,
                 XYZ_w,
                 L_A,
                 Y_b,
                 surround=VIEWING_CONDITIONS_CAM16['Average'],
                 discount_illuminant=False):
    """
    Converts from *CAM16* specification to *CIE XYZ* tristimulus values.

    This is the *inverse* implementation.

    Parameters
    ----------
    specification : CAM_Specification_CAM16
        *CAM16* colour appearance model specification. Correlate of
        *Lightness* :math:`J`, correlate of *chroma* :math:`C` or correlate of
        *colourfulness* :math:`M` and *hue* angle :math:`h` in degrees must be
        specified, e.g. :math:`JCh` or :math:`JMh`.
    XYZ_w : array_like
        *CIE XYZ* tristimulus values of reference white.
    L_A : numeric or array_like
        Adapting field *luminance* :math:`L_A` in :math:`cd/m^2` (often taken
        to be 20% of the luminance of a white object in the scene).
    Y_b : numeric or array_like
        Luminous factor of background :math:`Y_b` such as
        :math:`Y_b = 100 x L_b / L_w` where :math:`L_w` is the luminance of the
        light source and :math:`L_b` is the luminance of the background. For
        viewing images, :math:`Y_b` can be the average :math:`Y` value for the
        pixels in the entire image; frequently, a :math:`Y` value of 20,
        approximating an :math:`L^*` of 50, is used.
    surround : InductionFactors_CAM16, optional
        Surround viewing conditions.
    discount_illuminant : bool, optional
        Discount the illuminant.

    Returns
    -------
    XYZ : ndarray
        *CIE XYZ* tristimulus values.

    Raises
    ------
    ValueError
        If neither the *C* nor the *M* correlate has been defined in the
        ``CAM_Specification_CAM16`` argument.

    Notes
    -----

    +-------------------------------+-----------------------+---------------+
    | **Domain**                    | **Scale - Reference** | **Scale - 1** |
    +===============================+=======================+===============+
    | ``CAM_Specification_CAM16.J`` | [0, 100]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CAM16.C`` | [0, 100]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CAM16.h`` | [0, 360]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CAM16.s`` | [0, 100]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CAM16.Q`` | [0, 100]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CAM16.M`` | [0, 100]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CAM16.H`` | [0, 400]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``XYZ_w``                     | [0, 100]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+

    +---------------------------+-----------------------+---------------+
    | **Range**                 | **Scale - Reference** | **Scale - 1** |
    +===========================+=======================+===============+
    | ``XYZ``                   | [0, 100]              | [0, 1]        |
    +---------------------------+-----------------------+---------------+

    -   ``CAM_Specification_CAM16`` can also be passed as a compatible argument
        to :func:`colour.utilities.as_namedtuple` definition.

    References
    ----------
    :cite:`Li2017`

    Examples
    --------
    >>> specification = CAM_Specification_CAM16(J=41.731207905126638,
    ...                                     C=0.103355738709070,
    ...                                     h=217.067959767393010)
    >>> XYZ_w = np.array([95.05, 100.00, 108.88])
    >>> L_A = 318.31
    >>> Y_b = 20.0
    >>> CAM16_to_XYZ(specification, XYZ_w, L_A, Y_b)  # doctest: +ELLIPSIS
    array([ 19.01...,  20...  ,  21.78...])
    """

    J, C, h, _s, _Q, M, _H, _HC = as_namedtuple(specification,
                                                CAM_Specification_CAM16)
    J = to_domain_100(J)
    C = to_domain_100(C) if C is not None else C
    h = to_domain_degrees(h)
    M = to_domain_100(M) if M is not None else M
    L_A = as_float_array(L_A)
    XYZ_w = to_domain_100(XYZ_w)
    _X_w, Y_w, _Z_w = tsplit(XYZ_w)

    # Step 0
    # Converting *CIE XYZ* tristimulus values to sharpened *RGB* values.
    RGB_w = vector_dot(MATRIX_16, XYZ_w)

    # Computing degree of adaptation :math:`D`.
    D = (np.clip(degree_of_adaptation(surround.F, L_A), 0, 1)
         if not discount_illuminant else ones(L_A.shape))

    n, F_L, N_bb, N_cb, z = tsplit(
        viewing_condition_dependent_parameters(Y_b, Y_w, L_A))

    D_RGB = (D[..., np.newaxis] * Y_w[..., np.newaxis] / RGB_w + 1 -
             D[..., np.newaxis])
    RGB_wc = D_RGB * RGB_w

    # Applying forward post-adaptation non linear response compression.
    RGB_aw = post_adaptation_non_linear_response_compression_forward(
        RGB_wc, F_L)

    # Computing achromatic responses for the whitepoint.
    A_w = achromatic_response_forward(RGB_aw, N_bb)

    # Step 1
    if C is None and M is not None:
        C = M / spow(F_L, 0.25)
    elif C is None:
        raise ValueError('Either "C" or "M" correlate must be defined in '
                         'the "CAM_Specification_CAM16" argument!')

    # Step 2
    # Computing temporary magnitude quantity :math:`t`.
    t = temporary_magnitude_quantity_inverse(C, J, n)

    # Computing eccentricity factor *e_t*.
    e_t = eccentricity_factor(h)

    # Computing achromatic response :math:`A` for the stimulus.
    A = achromatic_response_inverse(A_w, J, surround.c, z)

    # Computing *P_1* to *P_3*.
    P_n = P(surround.N_c, N_cb, e_t, t, A, N_bb)
    _P_1, P_2, _P_3 = tsplit(P_n)

    # Step 3
    # Computing opponent colour dimensions :math:`a` and :math:`b`.
    a, b = tsplit(opponent_colour_dimensions_inverse(P_n, h))

    # Step 4
    # Computing post-adaptation non linear response compression matrix.
    RGB_a = matrix_post_adaptation_non_linear_response_compression(P_2, a, b)

    # Step 5
    # Applying inverse post-adaptation non linear response compression.
    RGB_c = post_adaptation_non_linear_response_compression_inverse(RGB_a, F_L)

    # Step 6
    RGB = RGB_c / D_RGB

    # Step 7
    XYZ = vector_dot(MATRIX_INVERSE_16, RGB)

    return from_range_100(XYZ)
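As the Step 1 branch above shows, the specification may carry M instead of C (a JMh triplet), and omitting both raises the documented ValueError; a sketch with CAM_Specification_CAM16 and the definition above in scope:

import numpy as np

XYZ_w = np.array([95.05, 100.00, 108.88])
L_A = 318.31
Y_b = 20.0

# JMh instead of JCh: C is derived internally as M / F_L ** 0.25.
specification = CAM_Specification_CAM16(J=41.731207905126638,
                                        M=0.1074367,
                                        h=217.067959767393010)
print(CAM16_to_XYZ(specification, XYZ_w, L_A, Y_b))  # ~[19.01, 20.00, 21.78]

# With neither C nor M defined, the ValueError documented above is raised.
try:
    CAM16_to_XYZ(CAM_Specification_CAM16(J=41.73, h=217.07), XYZ_w, L_A, Y_b)
except ValueError as error:
    print(error)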
Example No. 13
def chromatic_adaptation(
    XYZ: ArrayLike,
    XYZ_w: ArrayLike,
    XYZ_b: ArrayLike,
    L_A: FloatingOrArrayLike,
    F_L: FloatingOrArrayLike,
    XYZ_p: Optional[ArrayLike] = None,
    p: Optional[FloatingOrArrayLike] = None,
    helson_judd_effect: Boolean = False,
    discount_illuminant: Boolean = True,
) -> NDArray:
    """
    Apply chromatic adaptation to given *CIE XYZ* tristimulus values.

    Parameters
    ----------
    XYZ
        *CIE XYZ* tristimulus values of test sample.
    XYZ_w
        *CIE XYZ* tristimulus values of reference white.
    XYZ_b
        *CIE XYZ* tristimulus values of background.
    L_A
        Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`.
    F_L
        Luminance adaptation factor :math:`F_L`.
    XYZ_p
        *CIE XYZ* tristimulus values of proximal field, assumed to be equal to
        background if not specified.
    p
        Simultaneous contrast / assimilation factor :math:`p` with value
        normalised to  domain [-1, 0] when simultaneous contrast occurs and
        normalised to domain [0, 1] when assimilation occurs.
    helson_judd_effect
        Truth value indicating whether the *Helson-Judd* effect should be
        accounted for.
    discount_illuminant
       Truth value indicating if the illuminant should be discounted.

    Returns
    -------
    :class:`numpy.ndarray`
        Adapted *CIE XYZ* tristimulus values.

    Examples
    --------
    >>> XYZ = np.array([19.01, 20.00, 21.78])
    >>> XYZ_b = np.array([95.05, 100.00, 108.88])
    >>> XYZ_w = np.array([95.05, 100.00, 108.88])
    >>> L_A = 318.31
    >>> F_L = 1.16754446415
    >>> chromatic_adaptation(XYZ, XYZ_w, XYZ_b, L_A, F_L)  # doctest: +ELLIPSIS
    array([ 6.8959454...,  6.8959991...,  6.8965708...])

    # Coverage Doctests

    >>> chromatic_adaptation(XYZ, XYZ_w, XYZ_b, L_A, F_L,
    ...                      discount_illuminant=False)  # doctest: +ELLIPSIS
    array([ 6.8525880...,  6.8874417...,  6.9461478...])
    >>> chromatic_adaptation(XYZ, XYZ_w, XYZ_b, L_A, F_L,
    ...                      helson_judd_effect=True)  # doctest: +ELLIPSIS
    array([ 6.8959454...,  6.8959991...,  6.8965708...])
    >>> chromatic_adaptation(XYZ, XYZ_w, XYZ_b, L_A, F_L,
    ...                      XYZ_p=XYZ_b, p=0.5)  # doctest: +ELLIPSIS
    array([ 9.2069020...,  9.2070219...,  9.2078373...])
    """

    XYZ_w = as_float_array(XYZ_w)
    XYZ_b = as_float_array(XYZ_b)
    L_A = as_float_array(L_A)
    F_L = as_float_array(F_L)

    rgb = XYZ_to_rgb(XYZ)
    rgb_w = XYZ_to_rgb(XYZ_w)
    Y_w = XYZ_w[..., 1]
    Y_b = XYZ_b[..., 1]

    h_rgb = 3 * rgb_w / np.sum(rgb_w, axis=-1)[..., np.newaxis]

    # Computing chromatic adaptation factors.
    if not discount_illuminant:
        L_A_p = spow(L_A, 1 / 3)
        F_rgb = (1 + L_A_p + h_rgb) / (1 + L_A_p + (1 / h_rgb))
    else:
        F_rgb = ones(h_rgb.shape)

    # Computing Helson-Judd effect parameters.
    if helson_judd_effect:
        D_rgb = f_n((Y_b / Y_w) * F_L * F_rgb[..., 1]) - f_n(
            (Y_b / Y_w) * F_L * F_rgb)
    else:
        D_rgb = zeros(F_rgb.shape)

    # Computing cone bleach factors.
    B_rgb = (10**7) / ((10**7) + 5 * L_A[..., np.newaxis] * (rgb_w / 100))

    # Computing adjusted reference white signals.
    if XYZ_p is not None and p is not None:
        rgb_p = XYZ_to_rgb(XYZ_p)
        rgb_w = adjusted_reference_white_signals(rgb_p, B_rgb, rgb_w, p)

    # Computing adapted cone responses.
    rgb_a = 1 + B_rgb * (f_n(F_L[..., np.newaxis] * F_rgb * rgb / rgb_w) +
                         D_rgb)

    return rgb_a
Example No. 14
def sd_multi_leds_Ohno2005(peak_wavelengths,
                           fwhm,
                           peak_power_ratios=None,
                           shape=SPECTRAL_SHAPE_DEFAULT):
    """
    Returns a multi *LED* spectral distribution of given spectral shape at
    given peak wavelengths and full widths at half maximum according to
    *Ohno (2005)* method.

    The multi *LED* spectral distribution is generated using many single
    *LED* spectral distributions generated with
    :func:`colour.sd_single_led_Ohno2005` definition.

    Parameters
    ----------
    peak_wavelengths : array_like
        Wavelengths the multi *LED* spectral distribution will peak at, i.e.
        the peaks for each generated single *LED* spectral distribution.
    fwhm : array_like
        Full widths at half maximum, i.e. widths of the underlying gaussian
        spectral distributions measured between those points on the *y* axis
        which are half the maximum amplitude.
    peak_power_ratios : array_like, optional
        Peak power ratios for each generated single *LED* spectral
        distribution.
    shape : SpectralShape, optional
        Spectral shape used to create the spectral distribution.

    Returns
    -------
    SpectralDistribution
        Multi *LED* spectral distribution.

    Notes
    -----
    -   By default, the spectral distribution will use the shape given by
        :attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.

    References
    ----------
    :cite:`Ohno2005`, :cite:`Ohno2008a`

    Examples
    --------
    >>> sd = sd_multi_leds_Ohno2005(
    ...     np.array([457, 530, 615]),
    ...     np.array([20, 30, 20]),
    ...     np.array([0.731, 1.000, 1.660]),
    ... )
    >>> sd.shape
    SpectralShape(360.0, 780.0, 1.0)
    >>> sd[500]  # doctest: +ELLIPSIS
    0.1295132...
    """

    peak_wavelengths = as_float_array(peak_wavelengths)
    fwhm = np.resize(fwhm, peak_wavelengths.shape)
    if peak_power_ratios is None:
        peak_power_ratios = ones(peak_wavelengths.shape)
    else:
        peak_power_ratios = np.resize(peak_power_ratios,
                                      peak_wavelengths.shape)

    sd = sd_zeros(shape)

    for (peak_wavelength, fwhm_s, peak_power_ratio) in zip(
            peak_wavelengths, fwhm, peak_power_ratios):
        sd += sd_single_led_Ohno2005(peak_wavelength,
                                     fwhm_s) * peak_power_ratio

    def _format_array(a):
        """
        Formats given array :math:`a`.

        Parameters
        ----------
        a : array_like
            Array to format.

        Returns
        -------
        unicode
            Formatted array :math:`a`.
        """

        return ', '.join([str(e) for e in a])

    sd.name = (
        '{0}nm - {1}FWHM - {2} Peak Power Ratios - LED - Ohno (2005)'.format(
            _format_array(peak_wavelengths),
            _format_array(fwhm),
            _format_array(peak_power_ratios),
        ))

    return sd
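When peak_power_ratios is omitted, the code above fills it with ones, so passing an explicit array of ones should yield an identical distribution; a quick check with the definition above in scope:

import numpy as np

peak_wavelengths = np.array([457, 530, 615])
fwhm = np.array([20, 30, 20])

sd_default = sd_multi_leds_Ohno2005(peak_wavelengths, fwhm)
sd_explicit = sd_multi_leds_Ohno2005(peak_wavelengths, fwhm, np.ones(3))

print(np.allclose(sd_default.values, sd_explicit.values))  # True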
Example No. 15
def XYZ_to_CAM16(XYZ,
                 XYZ_w,
                 L_A,
                 Y_b,
                 surround=VIEWING_CONDITIONS_CAM16['Average'],
                 discount_illuminant=False):
    """
    Computes the *CAM16* colour appearance model correlates from given
    *CIE XYZ* tristimulus values.

    This is the *forward* implementation.

    Parameters
    ----------
    XYZ : array_like
        *CIE XYZ* tristimulus values of test sample / stimulus.
    XYZ_w : array_like
        *CIE XYZ* tristimulus values of reference white.
    L_A : numeric or array_like
        Adapting field *luminance* :math:`L_A` in :math:`cd/m^2` (often taken
        to be 20% of the luminance of a white object in the scene).
    Y_b : numeric or array_like
        Luminous factor of background :math:`Y_b` such as
        :math:`Y_b = 100 x L_b / L_w` where :math:`L_w` is the luminance of the
        light source and :math:`L_b` is the luminance of the background. For
        viewing images, :math:`Y_b` can be the average :math:`Y` value for the
        pixels in the entire image; frequently, a :math:`Y` value of 20,
        approximating an :math:`L^*` of 50, is used.
    surround : InductionFactors_CAM16, optional
        Surround viewing conditions induction factors.
    discount_illuminant : bool, optional
        Truth value indicating if the illuminant should be discounted.

    Returns
    -------
    CAM_Specification_CAM16
        *CAM16* colour appearance model specification.

    Notes
    -----

    +---------------------------+-----------------------+---------------+
    | **Domain**                | **Scale - Reference** | **Scale - 1** |
    +===========================+=======================+===============+
    | ``XYZ``                   | [0, 100]              | [0, 1]        |
    +---------------------------+-----------------------+---------------+
    | ``XYZ_w``                 | [0, 100]              | [0, 1]        |
    +---------------------------+-----------------------+---------------+

    +-------------------------------+-----------------------+---------------+
    | **Range**                     | **Scale - Reference** | **Scale - 1** |
    +===============================+=======================+===============+
    | ``CAM_Specification_CAM16.J`` | [0, 100]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CAM16.C`` | [0, 100]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CAM16.h`` | [0, 360]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CAM16.s`` | [0, 100]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CAM16.Q`` | [0, 100]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CAM16.M`` | [0, 100]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_CAM16.H`` | [0, 400]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+

    References
    ----------
    :cite:`Li2017`

    Examples
    --------
    >>> XYZ = np.array([19.01, 20.00, 21.78])
    >>> XYZ_w = np.array([95.05, 100.00, 108.88])
    >>> L_A = 318.31
    >>> Y_b = 20.0
    >>> surround = VIEWING_CONDITIONS_CAM16['Average']
    >>> XYZ_to_CAM16(XYZ, XYZ_w, L_A, Y_b, surround)  # doctest: +ELLIPSIS
    CAM_Specification_CAM16(J=41.7312079..., C=0.1033557..., \
h=217.0679597..., s=2.3450150..., Q=195.3717089..., M=0.1074367..., \
H=275.5949861..., HC=None)
    """

    XYZ = to_domain_100(XYZ)
    XYZ_w = to_domain_100(XYZ_w)
    _X_w, Y_w, _Z_w = tsplit(XYZ_w)
    L_A = as_float_array(L_A)
    Y_b = as_float_array(Y_b)

    # Step 0
    # Converting *CIE XYZ* tristimulus values to sharpened *RGB* values.
    RGB_w = vector_dot(MATRIX_16, XYZ_w)

    # Computing degree of adaptation :math:`D`.
    D = (np.clip(degree_of_adaptation(surround.F, L_A), 0, 1)
         if not discount_illuminant else ones(L_A.shape))

    n, F_L, N_bb, N_cb, z = tsplit(
        viewing_condition_dependent_parameters(Y_b, Y_w, L_A))

    D_RGB = (D[..., np.newaxis] * Y_w[..., np.newaxis] / RGB_w + 1 -
             D[..., np.newaxis])
    RGB_wc = D_RGB * RGB_w

    # Applying forward post-adaptation non linear response compression.
    RGB_aw = post_adaptation_non_linear_response_compression_forward(
        RGB_wc, F_L)

    # Computing achromatic responses for the whitepoint.
    A_w = achromatic_response_forward(RGB_aw, N_bb)

    # Step 1
    # Converting *CIE XYZ* tristimulus values to sharpened *RGB* values.
    RGB = vector_dot(MATRIX_16, XYZ)

    # Step 2
    RGB_c = D_RGB * RGB

    # Step 3
    # Applying forward post-adaptation non linear response compression.
    RGB_a = post_adaptation_non_linear_response_compression_forward(RGB_c, F_L)

    # Step 4
    # Converting to preliminary cartesian coordinates.
    a, b = tsplit(opponent_colour_dimensions_forward(RGB_a))

    # Computing the *hue* angle :math:`h`.
    h = hue_angle(a, b)

    # Step 5
    # Computing eccentricity factor *e_t*.
    e_t = eccentricity_factor(h)

    # Computing hue :math:`h` quadrature :math:`H`.
    H = hue_quadrature(h)
    # TODO: Compute hue composition.

    # Step 6
    # Computing achromatic responses for the stimulus.
    A = achromatic_response_forward(RGB_a, N_bb)

    # Step 7
    # Computing the correlate of *Lightness* :math:`J`.
    J = lightness_correlate(A, A_w, surround.c, z)

    # Step 8
    # Computing the correlate of *brightness* :math:`Q`.
    Q = brightness_correlate(surround.c, J, A_w, F_L)

    # Step 9
    # Computing the correlate of *chroma* :math:`C`.
    C = chroma_correlate(J, n, surround.N_c, N_cb, e_t, a, b, RGB_a)

    # Computing the correlate of *colourfulness* :math:`M`.
    M = colourfulness_correlate(C, F_L)

    # Computing the correlate of *saturation* :math:`s`.
    s = saturation_correlate(M, Q)

    return CAM_Specification_CAM16(from_range_100(J), from_range_100(C),
                                   from_range_degrees(h), from_range_100(s),
                                   from_range_100(Q), from_range_100(M),
                                   from_range_degrees(H, 400), None)
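With CAM16_to_XYZ from the earlier example in scope, the forward and inverse definitions should round-trip on the docstring values; a minimal check:

import numpy as np

XYZ = np.array([19.01, 20.00, 21.78])
XYZ_w = np.array([95.05, 100.00, 108.88])
L_A = 318.31
Y_b = 20.0

specification = XYZ_to_CAM16(XYZ, XYZ_w, L_A, Y_b)
XYZ_r = CAM16_to_XYZ(specification, XYZ_w, L_A, Y_b)

print(np.allclose(XYZ, XYZ_r, atol=1e-4))  # True, per the doctests above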
Example No. 16
def ZCAM_to_XYZ(
    specification: CAM_Specification_ZCAM,
    XYZ_w: ArrayLike,
    L_A: FloatingOrArrayLike,
    Y_b: FloatingOrArrayLike,
    surround: InductionFactors_ZCAM = VIEWING_CONDITIONS_ZCAM["Average"],
    discount_illuminant: Boolean = False,
) -> NDArray:
    """
    Convert from *ZCAM* specification to *CIE XYZ* tristimulus values.

    Parameters
    ----------
    specification
         *ZCAM* colour appearance model specification.
         Correlate of *Lightness* :math:`J`, correlate of *chroma* :math:`C` or
         correlate of *colourfulness* :math:`M` and *hue* angle :math:`h` in
         degrees must be specified, e.g. :math:`JCh` or :math:`JMh`.
    XYZ_w
        Absolute *CIE XYZ* tristimulus values of the white under reference
        illuminant.
    L_A
        Test adapting field *luminance* :math:`L_A` in :math:`cd/m^2` such as
        :math:`L_A = L_w * Y_b / 100` (where :math:`L_w` is luminance of the
        reference white and :math:`Y_b` is the background luminance factor).
    Y_b
        Luminous factor of background :math:`Y_b` such as
        :math:`Y_b = 100 x L_b / L_w` where :math:`L_w` is the luminance of the
        light source and :math:`L_b` is the luminance of the background. For
        viewing images, :math:`Y_b` can be the average :math:`Y` value for the
        pixels in the entire image; frequently, a :math:`Y` value of 20,
        approximating an :math:`L^*` of 50, is used.
    surround
        Surround viewing conditions induction factors.
    discount_illuminant
        Truth value indicating if the illuminant should be discounted.

    Returns
    -------
    :class:`numpy.ndarray`
        *CIE XYZ* tristimulus values.

    Raises
    ------
    ValueError
        If neither the *C* nor the *M* correlate has been defined in the
        ``CAM_Specification_ZCAM`` argument.

    Warnings
    --------
    The underlying *SMPTE ST 2084:2014* transfer function is an absolute
    transfer function.

    Notes
    -----
    -   *Safdar, Hardeberg and Luo (2021)* does not specify how the chromatic
        adaptation to *CIE Standard Illuminant D65* in *Step 0* should be
        performed. A one-step *Von Kries* chromatic adaptation transform is not
        symmetrical or transitive when a degree of adaptation is involved.
        *Safdar, Hardeberg and Luo (2018)* use the *Zhai and Luo (2018)*
        two-step chromatic adaptation transform, thus it seems sensible to
        adopt this transform for the *ZCAM* colour appearance model until more
        information is available. It is worth noting that a one-step
        *Von Kries* chromatic adaptation transform with support for degree of
        adaptation produces values closer to the supplemental document than the
        *Zhai and Luo (2018)* two-step chromatic adaptation transform, but then
        the *ZCAM* colour appearance model does not round-trip properly.
    -   *Step 4* of the inverse model uses a rounded exponent of 1.3514,
        preventing the model from round-tripping properly. Given that this
        implementation takes some liberties with respect to the chromatic
        adaptation transform to use, it was deemed appropriate to use an
        exponent value that enables the *ZCAM* colour appearance model to
        round-trip.
    -   The underlying *SMPTE ST 2084:2014* transfer function is an absolute
        transfer function, thus the domain and range values for the *Reference*
        and *1* scales are only indicative that the data is not affected by
        scale transformations.

    +-------------------------------+-----------------------+---------------+
    | **Domain**                    | **Scale - Reference** | **Scale - 1** |
    +===============================+=======================+===============+
    | ``CAM_Specification_ZCAM.J``  | [UN]                  | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.C``  | [UN]                  | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.h``  | [0, 360]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.s``  | [UN]                  | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.Q``  | [UN]                  | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.M``  | [UN]                  | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.H``  | [0, 400]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.HC`` | [UN]                  | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.V``  | [UN]                  | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.K``  | [UN]                  | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.W``  | [UN]                  | [0, 1]        |
    +-------------------------------+-----------------------+---------------+

    +-----------+-----------------------+---------------+
    | **Range** | **Scale - Reference** | **Scale - 1** |
    +===========+=======================+===============+
    | ``XYZ``   | [UN]                  | [UN]          |
    +-----------+-----------------------+---------------+

    References
    ----------
    :cite:`Safdar2018`, :cite:`Safdar2021`, :cite:`Zhai2018`

    Examples
    --------
    >>> specification = CAM_Specification_ZCAM(J=92.250443780723629,
    ...                                        C=3.0216926733329013,
    ...                                        h=196.32457375575581)
    >>> XYZ_w = np.array([256, 264, 202])
    >>> L_A = 264
    >>> Y_b = 100
    >>> surround = VIEWING_CONDITIONS_ZCAM['Average']
    >>> ZCAM_to_XYZ(specification, XYZ_w, L_A, Y_b, surround)
    ... # doctest: +ELLIPSIS
    array([ 185.,  206.,  163.])
    """

    J_z, C_z, h_z, _S_z, _Q_z, M_z, _H, _H_Z, _V_z, _K_z, _W_z = astuple(
        specification)

    J_z = to_domain_1(J_z)
    C_z = to_domain_1(C_z)
    h_z = to_domain_degrees(h_z)
    M_z = to_domain_1(M_z)

    XYZ_w = to_domain_1(XYZ_w)
    _X_w, Y_w, _Z_w = tsplit(XYZ_w)
    L_A = as_float_array(L_A)
    Y_b = as_float_array(Y_b)

    F_s, F, c, N_c = surround

    # Step 0 (Forward) - Chromatic adaptation from reference illuminant to
    # "CIE Standard Illuminant D65" illuminant using "CAT02".
    # Computing degree of adaptation :math:`D`.
    D = (degree_of_adaptation(surround.F, L_A)
         if not discount_illuminant else ones(L_A.shape))

    # Step 1 (Forward) - Computing factors related with viewing conditions and
    # independent of the test stimulus.
    # Background factor :math:`F_b`
    F_b = np.sqrt(Y_b / Y_w)
    # Luminance level adaptation factor :math:`F_L`
    F_L = 0.171 * spow(L_A, 1 / 3) * (1 - np.exp(-48 / 9 * L_A))

    # Step 2 (Forward) - Computing achromatic response (:math:`I_{z,w}`),
    # redness-greenness (:math:`a_{z,w}`), and yellowness-blueness
    # (:math:`b_{z,w}`).
    with domain_range_scale("ignore"):
        I_z_w, _A_z_w, B_z_w = tsplit(
            XYZ_to_Izazbz(XYZ_w, method="Safdar 2021"))

    # Step 1 (Inverse) - Computing achromatic response (:math:`I_z`).
    Q_z_p = (1.6 * F_s) / F_b**0.12
    Q_z_m = F_s**2.2 * F_b**0.5 * spow(F_L, 0.2)
    Q_z_w = 2700 * spow(I_z_w, Q_z_p) * Q_z_m

    I_z_p = (F_b**0.12) / (1.6 * F_s)
    I_z_d = 2700 * 100 * Q_z_m

    I_z = spow((J_z * Q_z_w) / I_z_d, I_z_p)

    # Step 2 (Inverse) - Computing chroma :math:`C_z`.
    if has_only_nan(M_z) and not has_only_nan(C_z):
        M_z = (C_z * Q_z_w) / 100
    elif has_only_nan(M_z):
        raise ValueError('Either "C" or "M" correlate must be defined in '
                         'the "CAM_Specification_ZCAM" argument!')

    # Step 3 (Inverse) - Computing hue angle :math:`h_z`
    # :math:`h_z` is currently required as an input.

    # Computing eccentricity factor :math:`e_z`.
    e_z = 1.015 + np.cos(np.radians(89.038 + h_z % 360))
    h_z_r = np.radians(h_z)

    # Step 4 (Inverse) - Computing redness-greenness (:math:`a_z`), and
    # yellowness-blueness (:math:`b_z`).
    # C_z_p_e = 1.3514
    C_z_p_e = 50 / 37
    C_z_p = spow(
        (M_z * spow(I_z_w, 0.78) * F_b**0.1) /
        (100 * e_z**0.068 * spow(F_L, 0.2)),
        C_z_p_e,
    )
    a_z = C_z_p * np.cos(h_z_r)
    b_z = C_z_p * np.sin(h_z_r)

    # Step 5 (Inverse) - Computing tristimulus values :math:`XYZ_{D65}`.
    with domain_range_scale("ignore"):
        XYZ_D65 = Izazbz_to_XYZ(tstack([I_z, a_z, b_z]), method="Safdar 2021")

    XYZ = chromatic_adaptation_Zhai2018(XYZ_D65,
                                        TVS_D65,
                                        XYZ_w,
                                        D,
                                        D,
                                        transform="CAT02")

    return from_range_1(XYZ)
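The viewing conditions arguments are tied together by the relations given in the docstring above, L_A = L_w * Y_b / 100 and Y_b = 100 * L_b / L_w. A small sketch with illustrative luminances reproducing the doctest values:

# Hypothetical luminances in cd/m^2, chosen to match the doctest above.
L_w = 264.0  # luminance of the reference white
L_b = 264.0  # luminance of the background, here equal to the white

Y_b = 100 * L_b / L_w  # background luminance factor, 100.0
L_A = L_w * Y_b / 100  # adapting field luminance, 264.0
print(Y_b, L_A)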
Ejemplo n.º 17
0
def XYZ_to_ZCAM(
    XYZ: ArrayLike,
    XYZ_w: ArrayLike,
    L_A: FloatingOrArrayLike,
    Y_b: FloatingOrArrayLike,
    surround: InductionFactors_ZCAM = VIEWING_CONDITIONS_ZCAM["Average"],
    discount_illuminant: Boolean = False,
) -> CAM_Specification_ZCAM:
    """
    Compute the *ZCAM* colour appearance model correlates from given *CIE XYZ*
    tristimulus values.

    Parameters
    ----------
    XYZ
        Absolute *CIE XYZ* tristimulus values of test sample / stimulus.
    XYZ_w
        Absolute *CIE XYZ* tristimulus values of the white under reference
        illuminant.
    L_A
        Test adapting field *luminance* :math:`L_A` in :math:`cd/m^2` such as
        :math:`L_A = L_w * Y_b / 100` (where :math:`L_w` is luminance of the
        reference white and :math:`Y_b` is the background luminance factor).
    Y_b
        Luminous factor of background :math:`Y_b` such as
        :math:`Y_b = 100 * L_b / L_w` where :math:`L_w` is the luminance of the
        light source and :math:`L_b` is the luminance of the background. For
        viewing images, :math:`Y_b` can be the average :math:`Y` value for the
        pixels in the entire image, or frequently, a :math:`Y` value of 20,
        approximating an :math:`L^*` of 50, is used.
    surround
        Surround viewing conditions induction factors.
    discount_illuminant
        Truth value indicating if the illuminant should be discounted.

    Returns
    -------
    :class:`colour.CAM_Specification_ZCAM`
       *ZCAM* colour appearance model specification.

    Warnings
    --------
    The underlying *SMPTE ST 2084:2014* transfer function is an absolute
    transfer function.

    Notes
    -----
    -   *Safdar, Hardeberg and Luo (2021)* does not specify how the chromatic
        adaptation to *CIE Standard Illuminant D65* in *Step 0* should be
        performed. A one-step *Von Kries* chromatic adaptation transform is not
        symmetrical or transitive when a degree of adaptation is involved.
        *Safdar, Hardeberg and Luo (2018)* use the *Zhai and Luo (2018)*
        two-step chromatic adaptation transform, thus it seems sensible to
        adopt this transform for the *ZCAM* colour appearance model until more
        information is available. It is worth noting that a one-step
        *Von Kries* chromatic adaptation transform with support for degree of
        adaptation produces values closer to the supplemental document than the
        *Zhai and Luo (2018)* two-step chromatic adaptation transform, but then
        the *ZCAM* colour appearance model does not round-trip properly.
    -   The underlying *SMPTE ST 2084:2014* transfer function is an absolute
        transfer function, thus the domain and range values for the *Reference*
        and *1* scales are only indicative that the data is not affected by
        scale transformations.

    +------------+-----------------------+---------------+
    | **Domain** | **Scale - Reference** | **Scale - 1** |
    +============+=======================+===============+
    | ``XYZ``    | [UN]                  | [UN]          |
    +------------+-----------------------+---------------+
    | ``XYZ_w``  | [UN]                  | [UN]          |
    +------------+-----------------------+---------------+

    +-------------------------------+-----------------------+---------------+
    | **Range**                     | **Scale - Reference** | **Scale - 1** |
    +===============================+=======================+===============+
    | ``CAM_Specification_ZCAM.J``  | [UN]                  | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.C``  | [UN]                  | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.h``  | [0, 360]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.s``  | [UN]                  | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.Q``  | [UN]                  | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.M``  | [UN]                  | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.H``  | [0, 400]              | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.HC`` | [UN]                  | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.V``  | [UN]                  | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.K``  | [UN]                  | [0, 1]        |
    +-------------------------------+-----------------------+---------------+
    | ``CAM_Specification_ZCAM.W``  | [UN]                  | [0, 1]        |
    +-------------------------------+-----------------------+---------------+

    References
    ----------
    :cite:`Safdar2018`, :cite:`Safdar2021`, :cite:`Zhai2018`

    Examples
    --------
    >>> XYZ = np.array([185, 206, 163])
    >>> XYZ_w = np.array([256, 264, 202])
    >>> L_A = 264
    >>> Y_b = 100
    >>> surround = VIEWING_CONDITIONS_ZCAM['Average']
    >>> XYZ_to_ZCAM(XYZ, XYZ_w, L_A, Y_b, surround)
    ... # doctest: +ELLIPSIS
    CAM_Specification_ZCAM(J=92.2504437..., C=3.0216926..., h=196.3245737..., \
s=19.1319556..., Q=321.3408463..., M=10.5256217..., H=237.6114442..., \
HC=None, V=34.7006776..., K=25.8835968..., W=91.6821728...)
    """

    XYZ = to_domain_1(XYZ)
    XYZ_w = to_domain_1(XYZ_w)
    _X_w, Y_w, _Z_w = tsplit(XYZ_w)
    L_A = as_float_array(L_A)
    Y_b = as_float_array(Y_b)

    F_s, _F, _c, _N_c = surround

    # Step 0 (Forward) - Chromatic adaptation from reference illuminant to
    # "CIE Standard Illuminant D65" illuminant using "CAT02".
    # Computing degree of adaptation :math:`D`.
    D = (degree_of_adaptation(surround.F, L_A)
         if not discount_illuminant else ones(L_A.shape))

    XYZ_D65 = chromatic_adaptation_Zhai2018(XYZ,
                                            XYZ_w,
                                            TVS_D65,
                                            D,
                                            D,
                                            transform="CAT02")

    # Step 1 (Forward) - Computing factors related with viewing conditions and
    # independent of the test stimulus.
    # Background factor :math:`F_b`
    F_b = np.sqrt(Y_b / Y_w)
    # Luminance level adaptation factor :math:`F_L`
    F_L = 0.171 * spow(L_A, 1 / 3) * (1 - np.exp(-48 / 9 * L_A))

    # Step 2 (Forward) - Computing achromatic response (:math:`I_z` and
    # :math:`I_{z,w}`), redness-greenness (:math:`a_z` and :math:`a_{z,w}`),
    # and yellowness-blueness (:math:`b_z`, :math:`b_{z,w}`).
    with domain_range_scale("ignore"):
        I_z, a_z, b_z = tsplit(XYZ_to_Izazbz(XYZ_D65, method="Safdar 2021"))
        I_z_w, _a_z_w, b_z_w = tsplit(
            XYZ_to_Izazbz(XYZ_w, method="Safdar 2021"))

    # Step 3 (Forward) - Computing hue angle :math:`h_z`
    h_z = hue_angle(a_z, b_z)

    # Step 4 (Forward) - Computing hue quadrature :math:`H`.
    H = hue_quadrature(h_z)

    # Computing eccentricity factor :math:`e_z`.
    e_z = 1.015 + np.cos(np.radians(89.038 + h_z % 360))

    # Step 5 (Forward) - Computing brightness :math:`Q_z`,
    # lightness :math:`J_z`, colourfulness :math:`M_z`, and chroma :math:`C_z`.
    Q_z_p = (1.6 * F_s) / F_b**0.12
    Q_z_m = F_s**2.2 * F_b**0.5 * spow(F_L, 0.2)
    Q_z = 2700 * spow(I_z, Q_z_p) * Q_z_m
    Q_z_w = 2700 * spow(I_z_w, Q_z_p) * Q_z_m

    J_z = 100 * (Q_z / Q_z_w)

    M_z = (100 * (a_z**2 + b_z**2)**0.37 *
           ((spow(e_z, 0.068) * spow(F_L, 0.2)) /
            (F_b**0.1 * spow(I_z_w, 0.78))))

    C_z = 100 * (M_z / Q_z_w)

    # Step 6 (Forward) - Computing saturation :math:`S_z`,
    # vividness :math:`V_z`, blackness :math:`K_z`, and whiteness :math:`W_z`.
    S_z = 100 * spow(F_L, 0.6) * np.sqrt(M_z / Q_z)

    V_z = np.sqrt((J_z - 58)**2 + 3.4 * C_z**2)

    K_z = 100 - 0.8 * np.sqrt(J_z**2 + 8 * C_z**2)

    W_z = 100 - np.sqrt((100 - J_z)**2 + C_z**2)

    return CAM_Specification_ZCAM(
        as_float(from_range_1(J_z)),
        as_float(from_range_1(C_z)),
        as_float(from_range_degrees(h_z)),
        as_float(from_range_1(S_z)),
        as_float(from_range_1(Q_z)),
        as_float(from_range_1(M_z)),
        as_float(from_range_degrees(H, 400)),
        None,
        as_float(from_range_1(V_z)),
        as_float(from_range_1(K_z)),
        as_float(from_range_1(W_z)),
    )
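The Notes of both definitions stress that, with the exact 50 / 37 exponent and the Zhai and Luo (2018) transform, the model round-trips. A minimal check using the two definitions above and the doctest values (assuming they are importable, e.g. as colour.XYZ_to_ZCAM and colour.ZCAM_to_XYZ in a recent colour-science release):

import numpy as np

XYZ = np.array([185, 206, 163])
XYZ_w = np.array([256, 264, 202])
L_A, Y_b = 264, 100

specification = XYZ_to_ZCAM(XYZ, XYZ_w, L_A, Y_b)
XYZ_r = ZCAM_to_XYZ(specification, XYZ_w, L_A, Y_b)

# The recovered tristimulus values should closely match the input.
np.testing.assert_allclose(XYZ_r, XYZ, atol=1e-5)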
Ejemplo n.º 18
0
def plot_single_sd(sd,
                   cmfs='CIE 1931 2 Degree Standard Observer',
                   out_of_gamut_clipping=True,
                   modulate_colours_with_sd_amplitude=False,
                   equalize_sd_amplitude=False,
                   **kwargs):
    """
    Plots given spectral distribution.

    Parameters
    ----------
    sd : SpectralDistribution
        Spectral distribution to plot.
    cmfs : unicode or LMS_ConeFundamentals or \
RGB_ColourMatchingFunctions or XYZ_ColourMatchingFunctions, optional
        Standard observer colour matching functions used for computing the
        spectrum domain and colours. ``cmfs`` can be of any type or form
        supported by the :func:`colour.plotting.filter_cmfs` definition.
    out_of_gamut_clipping : bool, optional
        Whether to clip out of gamut colours; otherwise, the colours will be
        offset by the absolute minimal colour, leading to a less saturated and
        smoother rendering on a gray background.
    modulate_colours_with_sd_amplitude : bool, optional
        Whether to modulate the colours with the spectral distribution
        amplitude.
    equalize_sd_amplitude : bool, optional
        Whether to equalize the spectral distribution amplitude.
        Equalization occurs after the colours modulation thus setting both
        arguments to *True* will generate a spectrum strip where each
        wavelength colour is modulated by the spectral distribution amplitude.
        The usual 5% margin above the spectral distribution is also omitted.

    Other Parameters
    ----------------
    \\**kwargs : dict, optional
        {:func:`colour.plotting.artist`, :func:`colour.plotting.render`},
        Please refer to the documentation of the previously listed definitions.

    Returns
    -------
    tuple
        Current figure and axes.

    References
    ----------
    :cite:`Spiker2015a`

    Examples
    --------
    >>> from colour import SpectralDistribution
    >>> data = {
    ...     500: 0.0651,
    ...     520: 0.0705,
    ...     540: 0.0772,
    ...     560: 0.0870,
    ...     580: 0.1128,
    ...     600: 0.1360
    ... }
    >>> sd = SpectralDistribution(data, name='Custom')
    >>> plot_single_sd(sd)  # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)

    .. image:: ../_static/Plotting_Plot_Single_SD.png
        :align: center
        :alt: plot_single_sd
    """

    _figure, axes = artist(**kwargs)

    cmfs = first_item(filter_cmfs(cmfs).values())

    sd = sd.copy()
    sd.interpolator = LinearInterpolator
    wavelengths = cmfs.wavelengths[np.logical_and(
        cmfs.wavelengths >= max(min(cmfs.wavelengths), min(sd.wavelengths)),
        cmfs.wavelengths <= min(max(cmfs.wavelengths), max(sd.wavelengths)),
    )]
    values = sd[wavelengths]

    RGB = XYZ_to_plotting_colourspace(
        wavelength_to_XYZ(wavelengths, cmfs),
        CCS_ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['E'],
        apply_cctf_encoding=False)

    if not out_of_gamut_clipping:
        RGB += np.abs(np.min(RGB))

    RGB = normalise_maximum(RGB)

    if modulate_colours_with_sd_amplitude:
        RGB *= (values / np.max(values))[..., np.newaxis]

    RGB = CONSTANTS_COLOUR_STYLE.colour.colourspace.cctf_encoding(RGB)

    if equalize_sd_amplitude:
        values = ones(values.shape)

    margin = 0 if equalize_sd_amplitude else 0.05

    x_min, x_max = min(wavelengths), max(wavelengths)
    y_min, y_max = 0, max(values) + max(values) * margin

    polygon = Polygon(np.vstack([
        (x_min, 0),
        tstack([wavelengths, values]),
        (x_max, 0),
    ]),
                      facecolor='none',
                      edgecolor='none')
    axes.add_patch(polygon)

    padding = 0.1
    axes.bar(x=wavelengths - padding,
             height=max(values),
             width=1 + padding,
             color=RGB,
             align='edge',
             clip_path=polygon)

    axes.plot(wavelengths, values, color=CONSTANTS_COLOUR_STYLE.colour.dark)

    settings = {
        'axes': axes,
        'bounding_box': (x_min, x_max, y_min, y_max),
        'title': '{0} - {1}'.format(sd.strict_name, cmfs.strict_name),
        'x_label': 'Wavelength $\\lambda$ (nm)',
        'y_label': 'Spectral Distribution',
    }
    settings.update(kwargs)

    return render(**settings)
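As described in the docstring, setting both modulate_colours_with_sd_amplitude and equalize_sd_amplitude produces a flat spectrum strip whose colours carry the amplitude information. A short usage sketch reusing the docstring data:

from colour import SpectralDistribution

data = {500: 0.0651, 520: 0.0705, 540: 0.0772,
        560: 0.0870, 580: 0.1128, 600: 0.1360}
sd = SpectralDistribution(data, name='Custom')

# Spectrum strip: colours modulated by amplitude, heights equalized.
figure, axes = plot_single_sd(
    sd,
    modulate_colours_with_sd_amplitude=True,
    equalize_sd_amplitude=True)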
Ejemplo n.º 19
0
def sd_multi_leds_Ohno2005(
    peak_wavelengths: ArrayLike,
    fwhm: ArrayLike,
    peak_power_ratios: Optional[ArrayLike] = None,
    shape: SpectralShape = SPECTRAL_SHAPE_DEFAULT,
    **kwargs: Any,
) -> SpectralDistribution:
    """
    Return a multi *LED* spectral distribution of given spectral shape at
    given peak wavelengths and full widths at half maximum according to
    *Ohno (2005)* method.

    The multi *LED* spectral distribution is generated using many single *LED*
    spectral distributions generated with :func:`colour.sd_single_led_Ohno2005`
    definition.

    Parameters
    ----------
    peak_wavelengths
        Wavelengths the multi *LED* spectral distribution will peak at, i.e.
        the peak of each generated single *LED* spectral distribution.
    fwhm
        Full widths at half maximum, i.e. widths of the underlying Gaussian
        spectral distributions measured between those points on the *y* axis
        which are half the maximum amplitude.
    peak_power_ratios
        Peak power ratios, one for each generated single *LED* spectral
        distribution.
    shape
        Spectral shape used to create the spectral distribution.

    Other Parameters
    ----------------
    kwargs
        {:func:`colour.colorimetry.sd_single_led_Ohno2005`},
        See the documentation of the previously listed definition.

    Returns
    -------
    :class:`colour.SpectralDistribution`
        Multi *LED* spectral distribution.

    Notes
    -----
    -   By default, the spectral distribution will use the shape given by
        :attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.

    References
    ----------
    :cite:`Ohno2005`, :cite:`Ohno2008a`

    Examples
    --------
    >>> sd = sd_multi_leds_Ohno2005(
    ...     np.array([457, 530, 615]),
    ...     np.array([20, 30, 20]),
    ...     np.array([0.731, 1.000, 1.660]),
    ... )
    >>> sd.shape
    SpectralShape(360.0, 780.0, 1.0)
    >>> sd[500]  # doctest: +ELLIPSIS
    0.1295132...
    """

    peak_wavelengths = as_float_array(peak_wavelengths)
    fwhm = np.resize(fwhm, peak_wavelengths.shape)
    if peak_power_ratios is None:
        peak_power_ratios = ones(peak_wavelengths.shape)
    else:
        peak_power_ratios = np.resize(peak_power_ratios,
                                      peak_wavelengths.shape)

    sd = sd_zeros(shape)

    for (peak_wavelength, fwhm_s,
         peak_power_ratio) in zip(peak_wavelengths, fwhm, peak_power_ratios):
        sd += (  # type: ignore[misc]
            sd_single_led_Ohno2005(peak_wavelength, fwhm_s, **kwargs) *
            peak_power_ratio)

    def _format_array(a: NDArray) -> str:
        """Format given array :math:`a`."""

        return ", ".join([str(e) for e in a])

    sd.name = (f"{_format_array(peak_wavelengths)}nm - "
               f"{_format_array(fwhm)}FWHM - "
               f"{_format_array(peak_power_ratios)} Peak Power Ratios - "
               f"LED - Ohno (2005)")

    return sd
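The composition performed in the loop above can be illustrated with plain NumPy: each peak contributes a distribution of the requested full width at half maximum, scaled by its peak power ratio, and the contributions are summed. The sketch below uses simple Gaussians as stand-ins for colour.sd_single_led_Ohno2005, purely for illustration:

import numpy as np

wavelengths = np.arange(360, 781, 1)
peak_wavelengths = np.array([457, 530, 615])
fwhm = np.array([20, 30, 20])
peak_power_ratios = np.array([0.731, 1.000, 1.660])

values = np.zeros(wavelengths.shape)
for peak, width, ratio in zip(peak_wavelengths, fwhm, peak_power_ratios):
    # Gaussian stand-in with the requested full width at half maximum.
    sigma = width / (2 * np.sqrt(2 * np.log(2)))
    values += ratio * np.exp(-((wavelengths - peak) ** 2) / (2 * sigma ** 2))

print(values[wavelengths == 500])  # summed contribution around 500nm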
Ejemplo n.º 20
0
def plot_RGB_colourspaces_gamuts(
    colourspaces: Union[RGB_Colourspace, str, Sequence[Union[RGB_Colourspace,
                                                             str]]],
    reference_colourspace: Union[Literal["CAM02LCD", "CAM02SCD", "CAM02UCS",
                                         "CAM16LCD", "CAM16SCD", "CAM16UCS",
                                         "CIE XYZ", "CIE xyY", "CIE Lab",
                                         "CIE Luv", "CIE UCS", "CIE UVW",
                                         "DIN99", "Hunter Lab", "Hunter Rdab",
                                         "ICaCb", "ICtCp", "IPT", "IgPgTg",
                                         "Jzazbz", "OSA UCS", "Oklab",
                                         "hdr-CIELAB", "hdr-IPT", ],
                                 str, ] = "CIE xyY",
    segments: Integer = 8,
    show_grid: Boolean = True,
    grid_segments: Integer = 10,
    show_spectral_locus: Boolean = False,
    spectral_locus_colour: Optional[Union[ArrayLike, str]] = None,
    cmfs: Union[MultiSpectralDistributions, str, Sequence[Union[
        MultiSpectralDistributions,
        str]], ] = "CIE 1931 2 Degree Standard Observer",
    chromatically_adapt: Boolean = False,
    convert_kwargs: Optional[Dict] = None,
    **kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Plot given *RGB* colourspaces gamuts in given reference colourspace.

    Parameters
    ----------
    colourspaces
        *RGB* colourspaces to plot the gamuts. ``colourspaces`` elements
        can be of any type or form supported by the
        :func:`colour.plotting.filter_RGB_colourspaces` definition.
    reference_colourspace
        Reference colourspace model to plot the gamuts into, see
        :attr:`colour.COLOURSPACE_MODELS` attribute for the list of supported
        colourspace models.
    segments
        Edge segments count for each *RGB* colourspace cube.
    show_grid
        Whether to show a grid at the bottom of the *RGB* colourspace cubes.
    grid_segments
        Edge segments count for the grid.
    show_spectral_locus
        Whether to show the spectral locus.
    spectral_locus_colour
        Spectral locus colour.
    cmfs
        Standard observer colour matching functions used for computing the
        spectral locus boundaries. ``cmfs`` can be of any type or form
        supported by the :func:`colour.plotting.filter_cmfs` definition.
    chromatically_adapt
        Whether to chromatically adapt the *RGB* colourspaces given in
        ``colourspaces`` to the whitepoint of the default plotting colourspace.
    convert_kwargs
        Keyword arguments for the :func:`colour.convert` definition.

    Other Parameters
    ----------------
    edge_colours
        Edge colours array such as `edge_colours = (None, (0.5, 0.5, 1.0))`.
    edge_alpha
        Edge opacity value such as `edge_alpha = (0.0, 1.0)`.
    face_alpha
        Face opacity value such as `face_alpha = (0.5, 1.0)`.
    face_colours
        Face colours array such as `face_colours = (None, (0.5, 0.5, 1.0))`.
    kwargs
        {:func:`colour.plotting.artist`,
        :func:`colour.plotting.volume.nadir_grid`},
        See the documentation of the previously listed definitions.

    Returns
    -------
    :class:`tuple`
        Current figure and axes.

    Examples
    --------
    >>> plot_RGB_colourspaces_gamuts(['ITU-R BT.709', 'ACEScg', 'S-Gamut'])
    ... # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...Axes3DSubplot...>)

    .. image:: ../_static/Plotting_Plot_RGB_Colourspaces_Gamuts.png
        :align: center
        :alt: plot_RGB_colourspaces_gamuts
    """

    colourspaces = cast(
        List[RGB_Colourspace],
        list(filter_RGB_colourspaces(colourspaces).values()),
    )

    convert_kwargs = optional(convert_kwargs, {})

    count_c = len(colourspaces)

    title = (
        f"{', '.join([colourspace.name for colourspace in colourspaces])} "
        f"- {reference_colourspace} Reference Colourspace")

    illuminant = CONSTANTS_COLOUR_STYLE.colour.colourspace.whitepoint

    convert_settings = {"illuminant": illuminant}
    convert_settings.update(convert_kwargs)

    settings = Structure(
        **{
            "face_colours": [None] * count_c,
            "edge_colours": [None] * count_c,
            "face_alpha": [1] * count_c,
            "edge_alpha": [1] * count_c,
            "title": title,
        })
    settings.update(kwargs)

    figure = plt.figure()
    axes = figure.add_subplot(111, projection="3d")

    points = zeros((4, 3))
    if show_spectral_locus:
        cmfs = cast(MultiSpectralDistributions,
                    first_item(filter_cmfs(cmfs).values()))
        XYZ = cmfs.values

        points = colourspace_model_axis_reorder(
            convert(XYZ, "CIE XYZ", reference_colourspace, **convert_settings),
            reference_colourspace,
        )

        points[np.isnan(points)] = 0

        c = ((0.0, 0.0, 0.0,
              0.5) if spectral_locus_colour is None else spectral_locus_colour)

        axes.plot(
            points[..., 0],
            points[..., 1],
            points[..., 2],
            color=c,
            zorder=CONSTANTS_COLOUR_STYLE.zorder.midground_line,
        )
        axes.plot(
            (points[-1][0], points[0][0]),
            (points[-1][1], points[0][1]),
            (points[-1][2], points[0][2]),
            color=c,
            zorder=CONSTANTS_COLOUR_STYLE.zorder.midground_line,
        )

    plotting_colourspace = CONSTANTS_COLOUR_STYLE.colour.colourspace

    quads_c: List = []
    RGB_cf: List = []
    RGB_ce: List = []
    for i, colourspace in enumerate(colourspaces):

        if chromatically_adapt and not np.array_equal(
                colourspace.whitepoint, plotting_colourspace.whitepoint):
            colourspace = colourspace.chromatically_adapt(
                plotting_colourspace.whitepoint,
                plotting_colourspace.whitepoint_name,
            )

        quads_cb, RGB = RGB_identity_cube(
            width_segments=segments,
            height_segments=segments,
            depth_segments=segments,
        )

        XYZ = RGB_to_XYZ(
            quads_cb,
            colourspace.whitepoint,
            colourspace.whitepoint,
            colourspace.matrix_RGB_to_XYZ,
        )

        convert_settings = {"illuminant": colourspace.whitepoint}
        convert_settings.update(convert_kwargs)

        quads_c.extend(
            colourspace_model_axis_reorder(
                convert(XYZ, "CIE XYZ", reference_colourspace,
                        **convert_settings),
                reference_colourspace,
            ))

        if settings.face_colours[i] is not None:
            RGB = ones(RGB.shape) * settings.face_colours[i]

        RGB_cf.extend(
            np.hstack([RGB,
                       full((RGB.shape[0], 1), settings.face_alpha[i])]))

        if settings.edge_colours[i] is not None:
            RGB = ones(RGB.shape) * settings.edge_colours[i]

        RGB_ce.extend(
            np.hstack([RGB,
                       full((RGB.shape[0], 1), settings.edge_alpha[i])]))

    quads = as_float_array(quads_c)
    RGB_f = as_float_array(RGB_cf)
    RGB_e = as_float_array(RGB_ce)

    quads[np.isnan(quads)] = 0

    if quads.size != 0:
        for i, axis in enumerate("xyz"):
            min_a = np.minimum(np.min(quads[..., i]), np.min(points[..., i]))
            max_a = np.maximum(np.max(quads[..., i]), np.max(points[..., i]))
            getattr(axes, f"set_{axis}lim")((min_a, max_a))

    labels = np.array(
        COLOURSPACE_MODELS_AXIS_LABELS[reference_colourspace])[as_int_array(
            colourspace_model_axis_reorder([0, 1, 2], reference_colourspace))]
    for i, axis in enumerate("xyz"):
        getattr(axes, f"set_{axis}label")(labels[i])

    if show_grid:
        limits = np.array([[-1.5, 1.5], [-1.5, 1.5]])

        quads_g, RGB_gf, RGB_ge = nadir_grid(limits, grid_segments, labels,
                                             axes, **settings)
        quads = np.vstack([quads_g, quads])
        RGB_f = np.vstack([RGB_gf, RGB_f])
        RGB_e = np.vstack([RGB_ge, RGB_e])

    collection = Poly3DCollection(quads)
    collection.set_facecolors(RGB_f)
    collection.set_edgecolors(RGB_e)

    axes.add_collection3d(collection)

    settings.update({
        "axes": axes,
        "axes_visible": False,
        "camera_aspect": "equal"
    })
    settings.update(kwargs)

    return render(**settings)
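The face_alpha, face_colours, edge_alpha and edge_colours keyword arguments described in Other Parameters take one entry per plotted colourspace. A short usage sketch overlaying a wireframe gamut on a shaded one:

# One entry per colourspace: shaded 'ITU-R BT.709', blue wireframe 'ACEScg'.
figure, axes = plot_RGB_colourspaces_gamuts(
    ['ITU-R BT.709', 'ACEScg'],
    reference_colourspace='CIE Lab',
    face_alpha=(0.5, 0.0),
    edge_colours=(None, (0.5, 0.5, 1.0)))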
Ejemplo n.º 21
0
def ellipse_fitting_Halir1998(a):
    """
    Returns the coefficients of the implicit second-order polynomial/quadratic
    curve that fits given point array :math:`a` using
    *Halir and Flusser (1998)* method.

    The implicit second-order polynomial is expressed as follows:

    :math:`F\\left(x, y\\right) = ax^2 + bxy + cy^2 + dx + ey + f = 0`

    with an ellipse-specific constraint such as :math:`b^2 - 4ac < 0` and where
    :math:`a, b, c, d, e, f` are the coefficients of the ellipse and
    :math:`x, y` are the coordinates of points lying on it.

    Parameters
    ----------
    a : array_like
        Point array :math:`a` to be fitted.

    Returns
    -------
    ndarray
        Coefficients of the implicit second-order polynomial/quadratic
        curve that fits given point array :math:`a`.

    References
    ----------
    :cite:`Halir1998`

    Examples
    --------
    >>> a = np.array([[2, 0], [0, 1], [-2, 0], [0, -1]])
    >>> ellipse_fitting_Halir1998(a)  # doctest: +ELLIPSIS
    array([ 0.2425356...,  0.        ,  0.9701425...,  0.        ,  0.        ,
           -0.9701425...])
    >>> ellipse_coefficients_canonical_form(ellipse_fitting_Halir1998(a))
    array([-0., -0.,  2.,  1.,  0.])
    """

    x, y = tsplit(a)

    # Quadratic part of the design matrix.
    D1 = tstack([x**2, x * y, y**2])
    # Linear part of the design matrix.
    D2 = tstack([x, y, ones(x.shape)])

    D1_T = np.transpose(D1)
    D2_T = np.transpose(D2)

    # Quadratic part of the scatter matrix.
    S1 = np.dot(D1_T, D1)
    # Combined part of the scatter matrix.
    S2 = np.dot(D1_T, D2)
    # Linear part of the scatter matrix.
    S3 = np.dot(D2_T, D2)

    T = -np.dot(np.linalg.inv(S3), np.transpose(S2))

    # Reduced scatter matrix.
    M = S1 + np.dot(S2, T)
    M = np.array([M[2, :] / 2, -M[1, :], M[0, :] / 2])

    _w, v = np.linalg.eig(M)

    A1 = v[:, np.nonzero(4 * v[0, :] * v[2, :] - v[1, :]**2 > 0)[0]]
    A2 = np.dot(T, A1)

    A = np.ravel([A1, A2])

    return A
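Since the returned vector holds the coefficients a, b, c, d, e and f, a quick sanity check of a fit is to evaluate F(x, y) at the input points, which should be close to zero. A minimal sketch using the docstring data:

import numpy as np

points = np.array([[2, 0], [0, 1], [-2, 0], [0, -1]])
a, b, c, d, e, f = ellipse_fitting_Halir1998(points)

x, y = points[..., 0], points[..., 1]
residuals = a * x ** 2 + b * x * y + c * y ** 2 + d * x + e * y + f

# Points used for the fit should lie (almost) exactly on the fitted ellipse.
np.testing.assert_allclose(residuals, 0, atol=1e-10)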
Ejemplo n.º 22
0
def degrees_of_adaptation(
    LMS: ArrayLike,
    Y_n: FloatingOrArrayLike,
    v: FloatingOrArrayLike = 1 / 3,
    discount_illuminant: Boolean = False,
) -> NDArray:
    """
    Compute the degrees of adaptation :math:`p_L`, :math:`p_M` and
    :math:`p_S`.

    Parameters
    ----------
    LMS
        Cone responses.
    Y_n
        Luminance :math:`Y_n` of test adapting stimulus in :math:`cd/m^2`.
    v
        Exponent :math:`v`.
    discount_illuminant
        Truth value indicating if the illuminant should be discounted.

    Returns
    -------
    :class:`numpy.ndarray`
        Degrees of adaptation :math:`p_L`, :math:`p_M` and :math:`p_S`.

    Examples
    --------
    >>> LMS = np.array([20.00052060, 19.99978300, 19.99883160])
    >>> Y_n = 31.83
    >>> degrees_of_adaptation(LMS, Y_n)  # doctest: +ELLIPSIS
    array([ 0.9799324...,  0.9960035...,  1.0233041...])
    >>> degrees_of_adaptation(LMS, Y_n, 1 / 3, True)
    array([ 1.,  1.,  1.])
    """

    LMS = as_float_array(LMS)
    if discount_illuminant:
        return ones(LMS.shape)

    Y_n = as_float_array(Y_n)
    v = as_float_array(v)

    L, M, S = tsplit(LMS)

    # E illuminant.
    LMS_E = vector_dot(CAT_VON_KRIES, ones(LMS.shape))
    L_E, M_E, S_E = tsplit(LMS_E)

    Ye_n = spow(Y_n, v)

    def m_E(x: NDArray, y: NDArray) -> NDArray:
        """Compute the :math:`m_E` term."""

        return (3 * (x / y)) / (L / L_E + M / M_E + S / S_E)

    def P_c(x: NDArray) -> NDArray:
        """Compute the :math:`P_L`, :math:`P_M` or :math:`P_S` terms."""

        return (1 + Ye_n + x) / (1 + Ye_n + 1 / x)

    p_L = P_c(m_E(L, L_E))
    p_M = P_c(m_E(M, M_E))
    p_S = P_c(m_E(S, S_E))

    p_LMS = tstack([p_L, p_M, p_S])

    return p_LMS
Ejemplo n.º 23
0
def primitive_vertices_sphere(radius=0.5,
                              segments=8,
                              intermediate=False,
                              origin=np.array([0, 0, 0]),
                              axis='+z'):
    """
    Returns the vertices of a latitude-longitude sphere primitive.

    Parameters
    ----------
    radius : numeric, optional
        Sphere radius.
    segments : numeric, optional
        Latitude-longitude segments; if the ``intermediate`` argument is
        *True*, then the sphere will have one less segment along its longitude.
    intermediate : bool, optional
        Whether to generate the sphere vertices at the center of the faces
        outlined by the segments of a regular sphere generated without
        the ``intermediate`` argument set to *True*. The resulting sphere is
        inscribed on the regular sphere faces but possesses the same poles.
    origin : array_like, optional
        Sphere origin on the construction plane.
    axis : array_like, optional
        **{'+z', '+x', '+y', 'yz', 'xz', 'xy'}**,
        Axis (or normal of the plane) the poles of the sphere will be aligned
        with.

    Returns
    -------
    ndarray
        Sphere primitive vertices.

    Notes
    -----
    -   The sphere poles have latitude segments count - 1 co-located vertices.

    Examples
    --------
    >>> primitive_vertices_sphere(segments=4)  # doctest: +ELLIPSIS
    array([[[  0.0000000...e+00,   0.0000000...e+00,   5.0000000...e-01],
            [ -3.5355339...e-01,  -4.3297802...e-17,   3.5355339...e-01],
            [ -5.0000000...e-01,  -6.1232340...e-17,   3.0616170...e-17],
            [ -3.5355339...e-01,  -4.3297802...e-17,  -3.5355339...e-01],
            [ -6.1232340...e-17,  -7.4987989...e-33,  -5.0000000...e-01]],
    <BLANKLINE>
           [[  0.0000000...e+00,   0.0000000...e+00,   5.0000000...e-01],
            [  2.1648901...e-17,  -3.5355339...e-01,   3.5355339...e-01],
            [  3.0616170...e-17,  -5.0000000...e-01,   3.0616170...e-17],
            [  2.1648901...e-17,  -3.5355339...e-01,  -3.5355339...e-01],
            [  3.7493994...e-33,  -6.1232340...e-17,  -5.0000000...e-01]],
    <BLANKLINE>
           [[  0.0000000...e+00,   0.0000000...e+00,   5.0000000...e-01],
            [  3.5355339...e-01,   0.0000000...e+00,   3.5355339...e-01],
            [  5.0000000...e-01,   0.0000000...e+00,   3.0616170...e-17],
            [  3.5355339...e-01,   0.0000000...e+00,  -3.5355339...e-01],
            [  6.1232340...e-17,   0.0000000...e+00,  -5.0000000...e-01]],
    <BLANKLINE>
           [[  0.0000000...e+00,   0.0000000...e+00,   5.0000000...e-01],
            [  2.1648901...e-17,   3.5355339...e-01,   3.5355339...e-01],
            [  3.0616170...e-17,   5.0000000...e-01,   3.0616170...e-17],
            [  2.1648901...e-17,   3.5355339...e-01,  -3.5355339...e-01],
            [  3.7493994...e-33,   6.1232340...e-17,  -5.0000000...e-01]]])
    """

    axis = PLANE_TO_AXIS_MAPPING.get(axis, axis).lower()

    if not intermediate:
        theta = np.tile(np.radians(np.linspace(0, 180, segments + 1)),
                        (segments + 1, 1))
        phi = np.transpose(
            np.tile(np.radians(np.linspace(-180, 180, segments + 1)),
                    (segments + 1, 1)))
    else:
        theta = np.tile(
            np.radians(np.linspace(0, 180, segments * 2 + 1)[1::2][1:-1]),
            (segments + 1, 1))
        theta = np.hstack([
            zeros([segments + 1, 1]),
            theta,
            full([segments + 1, 1], np.pi),
        ])
        phi = np.transpose(
            np.tile(
                np.radians(np.linspace(-180, 180, segments + 1)) +
                np.radians(360 / segments / 2), (segments, 1)))

    rho = ones(phi.shape) * radius
    rho_theta_phi = tstack([rho, theta, phi])

    vertices = spherical_to_cartesian(rho_theta_phi)

    # Removing extra longitude vertices.
    vertices = vertices[:-1, :, :]

    if axis == '+z':
        pass
    elif axis == '+y':
        vertices = np.roll(vertices, 2, -1)
    elif axis == '+x':
        vertices = np.roll(vertices, 1, -1)
    else:
        raise ValueError('Axis must be one of "{0}"!'.format(
            ['+x', '+y', '+z']))

    vertices += origin

    return vertices
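A short usage sketch exercising the intermediate and axis arguments described above, aligning the poles with the +y axis; every generated vertex sits on the sphere of the requested radius around the default origin:

import numpy as np

vertices = primitive_vertices_sphere(
    radius=1.0, segments=6, intermediate=True, axis='+y')

# All vertices lie at the requested radius from the origin.
np.testing.assert_allclose(np.linalg.norm(vertices, axis=-1), 1.0)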
Ejemplo n.º 24
0
def matrix_augmented_Cheung2004(
    RGB: ArrayLike,
    terms: Literal[3, 5, 7, 8, 10, 11, 14, 16, 17, 19, 20, 22] = 3,
) -> NDArray:
    """
    Perform polynomial expansion of given *RGB* colourspace array using
    *Cheung et al. (2004)* method.

    Parameters
    ----------
    RGB
        *RGB* colourspace array to expand.
    terms
        Number of terms of the expanded polynomial.

    Returns
    -------
    :class:`numpy.ndarray`
        Expanded *RGB* colourspace array.

    Notes
    -----
    -   This definition combines the augmented matrices given in
        :cite:`Cheung2004` and :cite:`Westland2004`.

    References
    ----------
    :cite:`Cheung2004`, :cite:`Westland2004`

    Examples
    --------
    >>> RGB = np.array([0.17224810, 0.09170660, 0.06416938])
    >>> matrix_augmented_Cheung2004(RGB, terms=5)  # doctest: +ELLIPSIS
    array([ 0.1722481...,  0.0917066...,  0.0641693...,  0.0010136...,  1...])
    """

    RGB = as_float_array(RGB)

    R, G, B = tsplit(RGB)
    tail = ones(R.shape)

    existing_terms = np.array([3, 5, 7, 8, 10, 11, 14, 16, 17, 19, 20, 22])
    closest_terms = as_int(closest(existing_terms, terms))
    if closest_terms != terms:
        raise ValueError(
            f'"Cheung et al. (2004)" method does not define an augmented '
            f"matrix with {terms} terms, closest augmented matrix has "
            f"{closest_terms} terms!")

    if terms == 3:
        return RGB
    elif terms == 5:
        return tstack([
            R,
            G,
            B,
            R * G * B,
            tail,
        ])
    elif terms == 7:
        return tstack([
            R,
            G,
            B,
            R * G,
            R * B,
            G * B,
            tail,
        ])
    elif terms == 8:
        return tstack([
            R,
            G,
            B,
            R * G,
            R * B,
            G * B,
            R * G * B,
            tail,
        ])
    elif terms == 10:
        return tstack([
            R,
            G,
            B,
            R * G,
            R * B,
            G * B,
            R**2,
            G**2,
            B**2,
            tail,
        ])
    elif terms == 11:
        return tstack([
            R,
            G,
            B,
            R * G,
            R * B,
            G * B,
            R**2,
            G**2,
            B**2,
            R * G * B,
            tail,
        ])
    elif terms == 14:
        return tstack([
            R,
            G,
            B,
            R * G,
            R * B,
            G * B,
            R**2,
            G**2,
            B**2,
            R * G * B,
            R**3,
            G**3,
            B**3,
            tail,
        ])
    elif terms == 16:
        return tstack([
            R,
            G,
            B,
            R * G,
            R * B,
            G * B,
            R**2,
            G**2,
            B**2,
            R * G * B,
            R**2 * G,
            G**2 * B,
            B**2 * R,
            R**3,
            G**3,
            B**3,
        ])
    elif terms == 17:
        return tstack([
            R,
            G,
            B,
            R * G,
            R * B,
            G * B,
            R**2,
            G**2,
            B**2,
            R * G * B,
            R**2 * G,
            G**2 * B,
            B**2 * R,
            R**3,
            G**3,
            B**3,
            tail,
        ])
    elif terms == 19:
        return tstack([
            R,
            G,
            B,
            R * G,
            R * B,
            G * B,
            R**2,
            G**2,
            B**2,
            R * G * B,
            R**2 * G,
            G**2 * B,
            B**2 * R,
            R**2 * B,
            G**2 * R,
            B**2 * G,
            R**3,
            G**3,
            B**3,
        ])
    elif terms == 20:
        return tstack([
            R,
            G,
            B,
            R * G,
            R * B,
            G * B,
            R**2,
            G**2,
            B**2,
            R * G * B,
            R**2 * G,
            G**2 * B,
            B**2 * R,
            R**2 * B,
            G**2 * R,
            B**2 * G,
            R**3,
            G**3,
            B**3,
            tail,
        ])
    elif terms == 22:
        return tstack([
            R,
            G,
            B,
            R * G,
            R * B,
            G * B,
            R**2,
            G**2,
            B**2,
            R * G * B,
            R**2 * G,
            G**2 * B,
            B**2 * R,
            R**2 * B,
            G**2 * R,
            B**2 * G,
            R**3,
            G**3,
            B**3,
            R**2 * G * B,
            R * G**2 * B,
            R * G * B**2,
        ])
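A common use of this expansion, not shown in the snippet itself and sketched here only as an assumption, is to fit a colour correction matrix by least-squares between expanded camera RGB values and reference XYZ values:

import numpy as np

# Hypothetical training data: camera RGB samples and reference XYZ values
# produced with an arbitrary 3x3 matrix standing in for a characterisation.
RGB = np.random.default_rng(4).random((24, 3))
M_reference = np.array([[0.41, 0.36, 0.18],
                        [0.21, 0.72, 0.07],
                        [0.02, 0.12, 0.95]])
XYZ = RGB @ M_reference.T

# Expand to 5 terms (R, G, B, R*G*B, 1) and solve for the correction matrix.
RGB_e = matrix_augmented_Cheung2004(RGB, terms=5)
M, *_ = np.linalg.lstsq(RGB_e, XYZ, rcond=None)

XYZ_estimate = RGB_e @ M
print(np.max(np.abs(XYZ_estimate - XYZ)))  # small residual on training data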
Ejemplo n.º 25
0
def CIECAM02_to_XYZ(
    specification: CAM_Specification_CIECAM02,
    XYZ_w: ArrayLike,
    L_A: FloatingOrArrayLike,
    Y_b: FloatingOrArrayLike,
    surround: InductionFactors_CIECAM02 = VIEWING_CONDITIONS_CIECAM02[
        "Average"],
    discount_illuminant: Boolean = False,
) -> NDArray:
    """
    Convert from *CIECAM02* specification to *CIE XYZ* tristimulus values.

    Parameters
    ----------
    specification
        *CIECAM02* colour appearance model specification. Correlate of
        *Lightness* :math:`J`, correlate of *chroma* :math:`C` or correlate of
        *colourfulness* :math:`M` and *hue* angle :math:`h` in degrees must be
        specified, e.g. :math:`JCh` or :math:`JMh`.
    XYZ_w
        *CIE XYZ* tristimulus values of reference white.
    L_A
        Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`, (often taken
        to be 20% of the luminance of a white object in the scene).
    Y_b
        Luminous factor of background :math:`Y_b` such as
        :math:`Y_b = 100 * L_b / L_w` where :math:`L_w` is the luminance of the
        light source and :math:`L_b` is the luminance of the background. For
        viewing images, :math:`Y_b` can be the average :math:`Y` value for the
        pixels in the entire image, or frequently, a :math:`Y` value of 20,
        approximating an :math:`L^*` of 50, is used.
    surround
        Surround viewing conditions.
    discount_illuminant
        Discount the illuminant.

    Returns
    -------
    :class:`numpy.ndarray`
        *CIE XYZ* tristimulus values.

    Raises
    ------
    ValueError
        If neither the *C* nor the *M* correlate has been defined in the
        ``CAM_Specification_CIECAM02`` argument.

    Notes
    -----
    +----------------------------------+-----------------------\
+---------------+
    | **Domain**                       | **Scale - Reference** \
| **Scale - 1** |
    +==================================+=======================\
+===============+
    | ``CAM_Specification_CIECAM02.J`` | [0, 100]              \
| [0, 1]        |
    +----------------------------------+-----------------------\
+---------------+
    | ``CAM_Specification_CIECAM02.C`` | [0, 100]              \
| [0, 1]        |
    +----------------------------------+-----------------------\
+---------------+
    | ``CAM_Specification_CIECAM02.h`` | [0, 360]              \
| [0, 1]        |
    +----------------------------------+-----------------------\
+---------------+
    | ``CAM_Specification_CIECAM02.s`` | [0, 100]              \
| [0, 1]        |
    +----------------------------------+-----------------------\
+---------------+
    | ``CAM_Specification_CIECAM02.Q`` | [0, 100]              \
| [0, 1]        |
    +----------------------------------+-----------------------\
+---------------+
    | ``CAM_Specification_CIECAM02.M`` | [0, 100]              \
| [0, 1]        |
    +----------------------------------+-----------------------\
+---------------+
    | ``CAM_Specification_CIECAM02.H`` | [0, 360]              \
| [0, 1]        |
    +----------------------------------+-----------------------\
+---------------+
    | ``XYZ_w``                        | [0, 100]              \
| [0, 1]        |
    +----------------------------------+-----------------------\
+---------------+

    +-----------+-----------------------+---------------+
    | **Range** | **Scale - Reference** | **Scale - 1** |
    +===========+=======================+===============+
    | ``XYZ``   | [0, 100]              | [0, 1]        |
    +-----------+-----------------------+---------------+

    References
    ----------
    :cite:`Fairchild2004c`, :cite:`Luo2013`, :cite:`Moroneya`,
    :cite:`Wikipedia2007a`

    Examples
    --------
    >>> specification = CAM_Specification_CIECAM02(J=41.731091132513917,
    ...                                            C=0.104707757171031,
    ...                                            h=219.048432658311780)
    >>> XYZ_w = np.array([95.05, 100.00, 108.88])
    >>> L_A = 318.31
    >>> Y_b = 20.0
    >>> CIECAM02_to_XYZ(specification, XYZ_w, L_A, Y_b)  # doctest: +ELLIPSIS
    array([ 19.01...,  20...  ,  21.78...])
    """

    J, C, h, _s, _Q, M, _H, _HC = astuple(specification)

    J = to_domain_100(J)
    C = to_domain_100(C)
    h = to_domain_degrees(h)
    M = to_domain_100(M)
    L_A = as_float_array(L_A)
    XYZ_w = to_domain_100(XYZ_w)
    _X_w, Y_w, _Z_w = tsplit(XYZ_w)

    n, F_L, N_bb, N_cb, z = viewing_condition_dependent_parameters(
        Y_b, Y_w, L_A)

    if has_only_nan(C) and not has_only_nan(M):
        C = M / spow(F_L, 0.25)
    elif has_only_nan(C):
        raise ValueError('Either "C" or "M" correlate must be defined in '
                         'the "CAM_Specification_CIECAM02" argument!')

    # Converting *CIE XYZ* tristimulus values to *CMCCAT2000* transform
    # sharpened *RGB* values.
    RGB_w = vector_dot(CAT_CAT02, XYZ_w)

    # Computing degree of adaptation :math:`D`.
    D = (degree_of_adaptation(surround.F, L_A)
         if not discount_illuminant else ones(L_A.shape))

    # Computing full chromatic adaptation.
    RGB_wc = full_chromatic_adaptation_forward(RGB_w, RGB_w, Y_w, D)

    # Converting to *Hunt-Pointer-Estevez* colourspace.
    RGB_pw = RGB_to_rgb(RGB_wc)

    # Applying post-adaptation non-linear response compression.
    RGB_aw = post_adaptation_non_linear_response_compression_forward(
        RGB_pw, F_L)

    # Computing achromatic response for the whitepoint.
    A_w = achromatic_response_forward(RGB_aw, N_bb)

    # Computing temporary magnitude quantity :math:`t`.
    t = temporary_magnitude_quantity_inverse(C, J, n)

    # Computing eccentricity factor *e_t*.
    e_t = eccentricity_factor(h)

    # Computing achromatic response :math:`A` for the stimulus.
    A = achromatic_response_inverse(A_w, J, surround.c, z)

    # Computing *P_1* to *P_3*.
    P_n = P(surround.N_c, N_cb, e_t, t, A, N_bb)
    _P_1, P_2, _P_3 = tsplit(P_n)

    # Computing opponent colour dimensions :math:`a` and :math:`b`.
    a, b = tsplit(opponent_colour_dimensions_inverse(P_n, h))

    # Applying post-adaptation non-linear response compression matrix.
    RGB_a = matrix_post_adaptation_non_linear_response_compression(P_2, a, b)

    # Applying inverse post-adaptation non-linear response compression.
    RGB_p = post_adaptation_non_linear_response_compression_inverse(RGB_a, F_L)

    # Converting to *Hunt-Pointer-Estevez* colourspace.
    RGB_c = rgb_to_RGB(RGB_p)

    # Applying inverse full chromatic adaptation.
    RGB = full_chromatic_adaptation_inverse(RGB_c, RGB_w, Y_w, D)

    # Converting *CMCCAT2000* transform sharpened *RGB* values to *CIE XYZ*
    # tristimulus values.
    XYZ = vector_dot(CAT_INVERSE_CAT02, RGB)

    return from_range_100(XYZ)
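As noted in the docstring, the colourfulness correlate M can be supplied instead of chroma C; the definition then recovers C = M / F_L**0.25 internally. A round-trip sketch passing a JMh specification, assuming the forward model is available as colour.appearance.XYZ_to_CIECAM02:

import numpy as np

from colour.appearance import XYZ_to_CIECAM02

XYZ = np.array([19.01, 20.00, 21.78])
XYZ_w = np.array([95.05, 100.00, 108.88])
L_A, Y_b = 318.31, 20.0

specification = XYZ_to_CIECAM02(XYZ, XYZ_w, L_A, Y_b)

# Keep only J, M and h; C is left undefined and recovered internally.
JMh = CAM_Specification_CIECAM02(
    J=specification.J, M=specification.M, h=specification.h)

XYZ_r = CIECAM02_to_XYZ(JMh, XYZ_w, L_A, Y_b)
np.testing.assert_allclose(XYZ_r, XYZ, atol=1e-6)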
Ejemplo n.º 26
0
def primitive_cube(width=1,
                   height=1,
                   depth=1,
                   width_segments=1,
                   height_segments=1,
                   depth_segments=1,
                   planes=None):
    """
    Generates vertices and indices for a filled and outlined cube primitive.

    Parameters
    ----------
    width : float, optional
        Cube width.
    height : float, optional
        Cube height.
    depth : float, optional
        Cube depth.
    width_segments : int, optional
        Cube segments count along the width.
    height_segments : int, optional
        Cube segments count along the height.
    depth_segments : int, optional
        Cube segments count along the depth.
    planes : array_like, optional
        **{'-x', '+x', '-y', '+y', '-z', '+z',
        'xy', 'xz', 'yz', 'yx', 'zx', 'zy'}**,
        Grid primitives to include in the cube construction.

    Returns
    -------
    tuple
        Tuple of cube vertices, face indices to produce a filled cube and
        outline indices to produce an outline of the faces of the cube.

    Examples
    --------
    >>> vertices, faces, outline = primitive_cube()
    >>> print(vertices)
    [([-0.5,  0.5, -0.5], [ 0.,  1.], [-0., -0., -1.], [ 0.,  1.,  0.,  1.])
     ([ 0.5,  0.5, -0.5], [ 1.,  1.], [-0., -0., -1.], [ 1.,  1.,  0.,  1.])
     ([-0.5, -0.5, -0.5], [ 0.,  0.], [-0., -0., -1.], [ 0.,  0.,  0.,  1.])
     ([ 0.5, -0.5, -0.5], [ 1.,  0.], [-0., -0., -1.], [ 1.,  0.,  0.,  1.])
     ([-0.5,  0.5,  0.5], [ 0.,  1.], [ 0.,  0.,  1.], [ 0.,  1.,  1.,  1.])
     ([ 0.5,  0.5,  0.5], [ 1.,  1.], [ 0.,  0.,  1.], [ 1.,  1.,  1.,  1.])
     ([-0.5, -0.5,  0.5], [ 0.,  0.], [ 0.,  0.,  1.], [ 0.,  0.,  1.,  1.])
     ([ 0.5, -0.5,  0.5], [ 1.,  0.], [ 0.,  0.,  1.], [ 1.,  0.,  1.,  1.])
     ([ 0.5, -0.5, -0.5], [ 0.,  1.], [-0., -1., -0.], [ 1.,  0.,  0.,  1.])
     ([ 0.5, -0.5,  0.5], [ 1.,  1.], [-0., -1., -0.], [ 1.,  0.,  1.,  1.])
     ([-0.5, -0.5, -0.5], [ 0.,  0.], [-0., -1., -0.], [ 0.,  0.,  0.,  1.])
     ([-0.5, -0.5,  0.5], [ 1.,  0.], [-0., -1., -0.], [ 0.,  0.,  1.,  1.])
     ([ 0.5,  0.5, -0.5], [ 0.,  1.], [ 0.,  1.,  0.], [ 1.,  1.,  0.,  1.])
     ([ 0.5,  0.5,  0.5], [ 1.,  1.], [ 0.,  1.,  0.], [ 1.,  1.,  1.,  1.])
     ([-0.5,  0.5, -0.5], [ 0.,  0.], [ 0.,  1.,  0.], [ 0.,  1.,  0.,  1.])
     ([-0.5,  0.5,  0.5], [ 1.,  0.], [ 0.,  1.,  0.], [ 0.,  1.,  1.,  1.])
     ([-0.5, -0.5,  0.5], [ 0.,  1.], [-1., -0., -0.], [ 0.,  0.,  1.,  1.])
     ([-0.5,  0.5,  0.5], [ 1.,  1.], [-1., -0., -0.], [ 0.,  1.,  1.,  1.])
     ([-0.5, -0.5, -0.5], [ 0.,  0.], [-1., -0., -0.], [ 0.,  0.,  0.,  1.])
     ([-0.5,  0.5, -0.5], [ 1.,  0.], [-1., -0., -0.], [ 0.,  1.,  0.,  1.])
     ([ 0.5, -0.5,  0.5], [ 0.,  1.], [ 1.,  0.,  0.], [ 1.,  0.,  1.,  1.])
     ([ 0.5,  0.5,  0.5], [ 1.,  1.], [ 1.,  0.,  0.], [ 1.,  1.,  1.,  1.])
     ([ 0.5, -0.5, -0.5], [ 0.,  0.], [ 1.,  0.,  0.], [ 1.,  0.,  0.,  1.])
     ([ 0.5,  0.5, -0.5], [ 1.,  0.], [ 1.,  0.,  0.], [ 1.,  1.,  0.,  1.])]
    >>> print(faces)
    [[ 1  2  0]
     [ 1  3  2]
     [ 4  6  5]
     [ 6  7  5]
     [ 9 10  8]
     [ 9 11 10]
     [12 14 13]
     [14 15 13]
     [17 18 16]
     [17 19 18]
     [20 22 21]
     [22 23 21]]
    >>> print(outline)
    [[ 0  2]
     [ 2  3]
     [ 3  1]
     [ 1  0]
     [ 4  6]
     [ 6  7]
     [ 7  5]
     [ 5  4]
     [ 8 10]
     [10 11]
     [11  9]
     [ 9  8]
     [12 14]
     [14 15]
     [15 13]
     [13 12]
     [16 18]
     [18 19]
     [19 17]
     [17 16]
     [20 22]
     [22 23]
     [23 21]
     [21 20]]
    """

    planes = (
        sorted(list(PLANE_TO_AXIS_MAPPING.values())) if planes is None else
        [PLANE_TO_AXIS_MAPPING.get(plane, plane).lower() for plane in planes])

    w_s, h_s, d_s = width_segments, height_segments, depth_segments

    planes_m = []
    if '-z' in planes:
        planes_m.append(list(primitive_grid(width, depth, w_s, d_s, '-z')))
        planes_m[-1][0]['position'][..., 2] -= height / 2
        planes_m[-1][1] = np.fliplr(planes_m[-1][1])
    if '+z' in planes:
        planes_m.append(list(primitive_grid(width, depth, w_s, d_s, '+z')))
        planes_m[-1][0]['position'][..., 2] += height / 2

    if '-y' in planes:
        planes_m.append(list(primitive_grid(height, width, h_s, w_s, '-y')))
        planes_m[-1][0]['position'][..., 1] -= depth / 2
        planes_m[-1][1] = np.fliplr(planes_m[-1][1])
    if '+y' in planes:
        planes_m.append(list(primitive_grid(height, width, h_s, w_s, '+y')))
        planes_m[-1][0]['position'][..., 1] += depth / 2

    if '-x' in planes:
        planes_m.append(list(primitive_grid(depth, height, d_s, h_s, '-x')))
        planes_m[-1][0]['position'][..., 0] -= width / 2
        planes_m[-1][1] = np.fliplr(planes_m[-1][1])
    if '+x' in planes:
        planes_m.append(list(primitive_grid(depth, height, d_s, h_s, '+x')))
        planes_m[-1][0]['position'][..., 0] += width / 2

    # Aggregate the per-plane attributes into single arrays, offsetting the
    # face and outline indices by the vertex count of the preceding planes.
    positions = zeros([0, 3])
    uvs = zeros([0, 2])
    normals = zeros([0, 3])

    faces = zeros([0, 3], dtype=DEFAULT_INT_DTYPE)
    outline = zeros([0, 2], dtype=DEFAULT_INT_DTYPE)

    offset = 0
    for vertices_p, faces_p, outline_p in planes_m:
        positions = np.vstack([positions, vertices_p['position']])
        uvs = np.vstack([uvs, vertices_p['uv']])
        normals = np.vstack([normals, vertices_p['normal']])

        faces = np.vstack([faces, faces_p + offset])
        outline = np.vstack([outline, outline_p + offset])
        offset += vertices_p['position'].shape[0]

    vertices = zeros(positions.shape[0], [('position', DEFAULT_FLOAT_DTYPE, 3),
                                          ('uv', DEFAULT_FLOAT_DTYPE, 2),
                                          ('normal', DEFAULT_FLOAT_DTYPE, 3),
                                          ('colour', DEFAULT_FLOAT_DTYPE, 4)])

    # Derive vertex colours by normalising the vertex positions to the
    # [0, 1] range and appending an opaque alpha channel.
    vertex_colours = np.ravel(positions)
    vertex_colours = np.hstack([
        np.reshape(
            np.interp(vertex_colours,
                      (np.min(vertex_colours), np.max(vertex_colours)),
                      (0, 1)), positions.shape),
        ones([positions.shape[0], 1])
    ])

    vertices['position'] = positions
    vertices['uv'] = uvs
    vertices['normal'] = normals
    vertices['colour'] = vertex_colours

    return vertices, faces, outline
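
A minimal usage sketch for the definition above, assuming it is exposed as
colour.geometry.primitive_cube (the import path is an assumption of this
sketch, not shown by the snippet): it builds only the top and bottom faces and
inspects the returned structured arrays.

from colour.geometry import primitive_cube  # assumed import path

# Build only the top and bottom faces of a 2 x 1 x 2 box, one segment per
# plane.
vertices, faces, outline = primitive_cube(
    width=2, height=1, depth=2, planes=['+z', '-z'])

print(vertices.shape)  # (8,) structured array: position, uv, normal, colour
print(faces.shape)     # (4, 3), two triangles per requested plane
print(outline.shape)   # (8, 2), four edges per requested plane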
Ejemplo n.º 27
0
def nadir_grid(
    limits: Optional[ArrayLike] = None,
    segments: Integer = 10,
    labels: Optional[Sequence[str]] = None,
    axes: Optional[plt.Axes] = None,
    **kwargs: Any,
) -> Tuple[NDArray, NDArray, NDArray]:
    """
    Return a grid on the *CIE xy* plane made of quad geometric elements and
    its associated faces and edges colours. Ticks and labels are added to the
    given axes according to the extended grid settings.

    Parameters
    ----------
    limits
        Extended grid limits.
    segments
        Edge segments count for the extended grid.
    labels
        Axis labels.
    axes
        Axes to add the grid to.

    Other Parameters
    ----------------
    grid_edge_alpha
        Grid edge opacity value such as `grid_edge_alpha = 0.5`.
    grid_edge_colours
        Grid edge colours array such as
        `grid_edge_colours = (0.25, 0.25, 0.25)`.
    grid_face_alpha
        Grid face opacity value such as `grid_face_alpha = 0.1`.
    grid_face_colours
        Grid face colours array such as
        `grid_face_colours = (0.25, 0.25, 0.25)`.
    ticks_and_label_location
        Location of the *X* and *Y* axis ticks and labels such as
        `ticks_and_label_location = ('-x', '-y')`.
    x_axis_colour
        *X* axis colour array such as `x_axis_colour = (0.0, 0.0, 0.0, 1.0)`.
    x_label_colour
        *X* axis label colour array such as
        `x_label_colour = (0.0, 0.0, 0.0, 0.85)`.
    x_ticks_colour
        *X* axis ticks colour array such as
        `x_ticks_colour = (0.0, 0.0, 0.0, 0.85)`.
    y_axis_colour
        *Y* axis colour array such as `y_axis_colour = (0.0, 0.0, 0.0, 1.0)`.
    y_label_colour
        *Y* axis label colour array such as
        `y_label_colour = (0.0, 0.0, 0.0, 0.85)`.
    y_ticks_colour
        *Y* axis ticks colour array such as
        `y_ticks_colour = (0.0, 0.0, 0.0, 0.85)`.

    Returns
    -------
    :class:`tuple`
        Grid quads, faces colours, edges colours.

    Examples
    --------
    >>> nadir_grid(segments=1)
    (array([[[-1.   , -1.   ,  0.   ],
            [ 1.   , -1.   ,  0.   ],
            [ 1.   ,  1.   ,  0.   ],
            [-1.   ,  1.   ,  0.   ]],
    <BLANKLINE>
           [[-1.   , -1.   ,  0.   ],
            [ 0.   , -1.   ,  0.   ],
            [ 0.   ,  0.   ,  0.   ],
            [-1.   ,  0.   ,  0.   ]],
    <BLANKLINE>
           [[-1.   ,  0.   ,  0.   ],
            [ 0.   ,  0.   ,  0.   ],
            [ 0.   ,  1.   ,  0.   ],
            [-1.   ,  1.   ,  0.   ]],
    <BLANKLINE>
           [[ 0.   , -1.   ,  0.   ],
            [ 1.   , -1.   ,  0.   ],
            [ 1.   ,  0.   ,  0.   ],
            [ 0.   ,  0.   ,  0.   ]],
    <BLANKLINE>
           [[ 0.   ,  0.   ,  0.   ],
            [ 1.   ,  0.   ,  0.   ],
            [ 1.   ,  1.   ,  0.   ],
            [ 0.   ,  1.   ,  0.   ]],
    <BLANKLINE>
           [[-1.   , -0.001,  0.   ],
            [ 1.   , -0.001,  0.   ],
            [ 1.   ,  0.001,  0.   ],
            [-1.   ,  0.001,  0.   ]],
    <BLANKLINE>
           [[-0.001, -1.   ,  0.   ],
            [ 0.001, -1.   ,  0.   ],
            [ 0.001,  1.   ,  0.   ],
            [-0.001,  1.   ,  0.   ]]]), array([[ 0.25,  0.25,  0.25,  0.1 ],
           [ 0.  ,  0.  ,  0.  ,  0.  ],
           [ 0.  ,  0.  ,  0.  ,  0.  ],
           [ 0.  ,  0.  ,  0.  ,  0.  ],
           [ 0.  ,  0.  ,  0.  ,  0.  ],
           [ 0.  ,  0.  ,  0.  ,  1.  ],
           [ 0.  ,  0.  ,  0.  ,  1.  ]]), array([[ 0.5 ,  0.5 ,  0.5 ,  0.5 ],
           [ 0.75,  0.75,  0.75,  0.25],
           [ 0.75,  0.75,  0.75,  0.25],
           [ 0.75,  0.75,  0.75,  0.25],
           [ 0.75,  0.75,  0.75,  0.25],
           [ 0.  ,  0.  ,  0.  ,  1.  ],
           [ 0.  ,  0.  ,  0.  ,  1.  ]]))
    """

    limits = as_float_array(
        cast(ArrayLike, optional(limits, np.array([[-1, 1], [-1, 1]]))))
    labels = cast(Sequence, optional(labels, ("x", "y")))

    extent = np.max(np.abs(limits[..., 1] - limits[..., 0]))

    settings = Structure(
        **{
            "grid_face_colours": (0.25, 0.25, 0.25),
            "grid_edge_colours": (0.50, 0.50, 0.50),
            "grid_face_alpha": 0.1,
            "grid_edge_alpha": 0.5,
            "x_axis_colour": (0.0, 0.0, 0.0, 1.0),
            "y_axis_colour": (0.0, 0.0, 0.0, 1.0),
            "x_ticks_colour": (0.0, 0.0, 0.0, 0.85),
            "y_ticks_colour": (0.0, 0.0, 0.0, 0.85),
            "x_label_colour": (0.0, 0.0, 0.0, 0.85),
            "y_label_colour": (0.0, 0.0, 0.0, 0.85),
            "ticks_and_label_location": ("-x", "-y"),
        })
    settings.update(**kwargs)

    # Outer grid.
    quads_g = primitive_vertices_grid_mpl(
        origin=(-extent / 2, -extent / 2),
        width=extent,
        height=extent,
        height_segments=segments,
        width_segments=segments,
    )

    RGB_g = ones((quads_g.shape[0], quads_g.shape[-1]))
    RGB_gf = RGB_g * settings.grid_face_colours
    RGB_gf = np.hstack(
        [RGB_gf, full((RGB_gf.shape[0], 1), settings.grid_face_alpha)])
    RGB_ge = RGB_g * settings.grid_edge_colours
    RGB_ge = np.hstack(
        [RGB_ge, full((RGB_ge.shape[0], 1), settings.grid_edge_alpha)])

    # Inner grid.
    quads_gs = primitive_vertices_grid_mpl(
        origin=(-extent / 2, -extent / 2),
        width=extent,
        height=extent,
        height_segments=segments * 2,
        width_segments=segments * 2,
    )

    RGB_gs = ones((quads_gs.shape[0], quads_gs.shape[-1]))
    RGB_gsf = RGB_gs * 0
    RGB_gsf = np.hstack([RGB_gsf, full((RGB_gsf.shape[0], 1), 0)])
    RGB_gse = np.clip(RGB_gs * settings.grid_edge_colours * 1.5, 0, 1)
    RGB_gse = np.hstack(
        (RGB_gse, full((RGB_gse.shape[0], 1), settings.grid_edge_alpha / 2)))

    # Axis.
    thickness = extent / 1000
    quad_x = primitive_vertices_grid_mpl(origin=(limits[0, 0], -thickness / 2),
                                         width=extent,
                                         height=thickness)
    RGB_x = ones((quad_x.shape[0], quad_x.shape[-1] + 1))
    RGB_x = RGB_x * settings.x_axis_colour

    quad_y = primitive_vertices_grid_mpl(origin=(-thickness / 2, limits[1, 0]),
                                         width=thickness,
                                         height=extent)
    RGB_y = ones((quad_y.shape[0], quad_y.shape[-1] + 1))
    RGB_y = RGB_y * settings.y_axis_colour

    if axes is not None:
        # Ticks.
        x_s = 1 if "+x" in settings.ticks_and_label_location else -1
        y_s = 1 if "+y" in settings.ticks_and_label_location else -1
        for i, axis in enumerate("xy"):
            h_a = "center" if axis == "x" else "left" if x_s == 1 else "right"
            v_a = "center"

            ticks = list(sorted(set(quads_g[..., 0, i])))
            ticks += [ticks[-1] + ticks[-1] - ticks[-2]]
            for tick in ticks:
                x = (limits[1, 1 if x_s == 1 else 0] +
                     (x_s * extent / 25) if i else tick)
                y = (tick if i else limits[0, 1 if y_s == 1 else 0] +
                     (y_s * extent / 25))

                tick = as_int_scalar(tick) if is_integer(tick) else tick
                c = settings[f"{axis}_ticks_colour"]

                axes.text(
                    x,
                    y,
                    0,
                    tick,
                    "x",
                    horizontalalignment=h_a,
                    verticalalignment=v_a,
                    color=c,
                    clip_on=True,
                )

        # Labels.
        for i, axis in enumerate("xy"):
            h_a = "center" if axis == "x" else "left" if x_s == 1 else "right"
            v_a = "center"

            x = (limits[1, 1 if x_s == 1 else 0] +
                 (x_s * extent / 10) if i else 0)
            y = (0 if i else limits[0, 1 if y_s == 1 else 0] +
                 (y_s * extent / 10))

            c = settings[f"{axis}_label_colour"]

            axes.text(
                x,
                y,
                0,
                labels[i],
                "x",
                horizontalalignment=h_a,
                verticalalignment=v_a,
                color=c,
                size=20,
                clip_on=True,
            )

    quads = np.vstack([quads_g, quads_gs, quad_x, quad_y])
    RGB_f = np.vstack([RGB_gf, RGB_gsf, RGB_x, RGB_y])
    RGB_e = np.vstack([RGB_ge, RGB_gse, RGB_x, RGB_y])

    return quads, RGB_f, RGB_e
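
nadir_grid only returns geometry and colours; drawing is left to the caller.
A minimal rendering sketch follows, assuming Matplotlib's mplot3d toolkit is
available (the figure set-up and axis limits are illustrative only).

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Poly3DCollection

figure = plt.figure()
axes = figure.add_subplot(projection="3d")

# Build the grid geometry; passing the axes also adds ticks and labels.
quads, RGB_f, RGB_e = nadir_grid(segments=5, axes=axes)

# Draw the quads with their associated face and edge colours.
collection = Poly3DCollection(quads)
collection.set_facecolor(RGB_f)
collection.set_edgecolor(RGB_e)
axes.add_collection3d(collection)

axes.set_xlim(-1, 1)
axes.set_ylim(-1, 1)
axes.set_zlim(-1, 1)

plt.show()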
Ejemplo n.º 28
0
def plot_single_sd(
    sd: SpectralDistribution,
    cmfs: Union[MultiSpectralDistributions, str, Sequence[Union[
        MultiSpectralDistributions,
        str]], ] = "CIE 1931 2 Degree Standard Observer",
    out_of_gamut_clipping: Boolean = True,
    modulate_colours_with_sd_amplitude: Boolean = False,
    equalize_sd_amplitude: Boolean = False,
    **kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Plot given spectral distribution.

    Parameters
    ----------
    sd
        Spectral distribution to plot.
    cmfs
        Standard observer colour matching functions used for computing the
        spectrum domain and colours. ``cmfs`` can be of any type or form
        supported by the :func:`colour.plotting.filter_cmfs` definition.
    out_of_gamut_clipping
        Whether to clip out-of-gamut colours; otherwise, the colours are
        offset by the absolute minimal colour, leading to a rendering on a
        gray background that is less saturated and smoother.
    modulate_colours_with_sd_amplitude
        Whether to modulate the colours with the spectral distribution
        amplitude.
    equalize_sd_amplitude
        Whether to equalize the spectral distribution amplitude.
        Equalization occurs after the colours modulation; setting both
        arguments to *True* thus generates a spectrum strip where each
        wavelength colour is modulated by the spectral distribution amplitude.
        The usual 5% margin above the spectral distribution is also omitted.

    Other Parameters
    ----------------
    kwargs
        {:func:`colour.plotting.artist`, :func:`colour.plotting.render`},
        See the documentation of the previously listed definitions.

    Returns
    -------
    :class:`tuple`
        Current figure and axes.

    References
    ----------
    :cite:`Spiker2015a`

    Examples
    --------
    >>> from colour import SpectralDistribution
    >>> data = {
    ...     500: 0.0651,
    ...     520: 0.0705,
    ...     540: 0.0772,
    ...     560: 0.0870,
    ...     580: 0.1128,
    ...     600: 0.1360
    ... }
    >>> sd = SpectralDistribution(data, name='Custom')
    >>> plot_single_sd(sd)  # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)

    .. image:: ../_static/Plotting_Plot_Single_SD.png
        :align: center
        :alt: plot_single_sd
    """

    _figure, axes = artist(**kwargs)

    cmfs = cast(MultiSpectralDistributions,
                first_item(filter_cmfs(cmfs).values()))

    sd = cast(SpectralDistribution, sd.copy())
    sd.interpolator = LinearInterpolator
    wavelengths = cmfs.wavelengths[np.logical_and(
        cmfs.wavelengths >= max(min(cmfs.wavelengths), min(sd.wavelengths)),
        cmfs.wavelengths <= min(max(cmfs.wavelengths), max(sd.wavelengths)),
    )]
    values = as_float_array(sd[wavelengths])

    RGB = XYZ_to_plotting_colourspace(
        wavelength_to_XYZ(wavelengths, cmfs),
        CCS_ILLUMINANTS["CIE 1931 2 Degree Standard Observer"]["E"],
        apply_cctf_encoding=False,
    )

    if not out_of_gamut_clipping:
        RGB += np.abs(np.min(RGB))

    RGB = normalise_maximum(RGB)

    if modulate_colours_with_sd_amplitude:
        RGB *= (values / np.max(values))[..., np.newaxis]

    RGB = CONSTANTS_COLOUR_STYLE.colour.colourspace.cctf_encoding(RGB)

    if equalize_sd_amplitude:
        values = ones(values.shape)

    margin = 0 if equalize_sd_amplitude else 0.05

    x_min, x_max = min(wavelengths), max(wavelengths)
    y_min, y_max = 0, max(values) + max(values) * margin

    # The spectrum is drawn as coloured bars clipped by an invisible polygon
    # following the spectral distribution curve.
    polygon = Polygon(
        np.vstack([
            (x_min, 0),
            tstack([wavelengths, values]),
            (x_max, 0),
        ]),
        facecolor="none",
        edgecolor="none",
        zorder=CONSTANTS_COLOUR_STYLE.zorder.background_polygon,
    )
    axes.add_patch(polygon)

    padding = 0.1
    axes.bar(
        x=wavelengths - padding,
        height=max(values),
        width=1 + padding,
        color=RGB,
        align="edge",
        clip_path=polygon,
        zorder=CONSTANTS_COLOUR_STYLE.zorder.background_polygon,
    )

    axes.plot(
        wavelengths,
        values,
        color=CONSTANTS_COLOUR_STYLE.colour.dark,
        zorder=CONSTANTS_COLOUR_STYLE.zorder.midground_line,
    )

    settings: Dict[str, Any] = {
        "axes": axes,
        "bounding_box": (x_min, x_max, y_min, y_max),
        "title": f"{sd.strict_name} - {cmfs.strict_name}",
        "x_label": "Wavelength $\\lambda$ (nm)",
        "y_label": "Spectral Distribution",
    }
    settings.update(kwargs)

    return render(**settings)
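
A short usage sketch for the two amplitude-related arguments described above,
assuming colour.SDS_ILLUMINANTS is available to provide a measured
distribution (the chosen illuminant is illustrative only).

from colour import SDS_ILLUMINANTS

# Spectrum strip: each wavelength colour is scaled by the distribution
# amplitude, then the curve itself is flattened to a constant height.
plot_single_sd(
    SDS_ILLUMINANTS["D65"],
    modulate_colours_with_sd_amplitude=True,
    equalize_sd_amplitude=True,
)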
Ejemplo n.º 29
0
    def test_ones(self):
        """
        Tests :func:`colour.utilities.array.ones` definition.
        """

        np.testing.assert_equal(ones(3), np.ones(3))
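
A possible extension of the test above, assuming ones mirrors numpy.ones for
shaped inputs (the import path and behaviour are assumptions of this sketch).

import numpy as np

from colour.utilities import ones

# Shaped inputs should behave like their numpy counterparts.
np.testing.assert_equal(ones((2, 3)), np.ones((2, 3)))
np.testing.assert_equal(ones(4).shape, (4,))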
Ejemplo n.º 30
0
def nadir_grid(limits=None, segments=10, labels=None, axes=None, **kwargs):
    """
    Returns a grid on the *CIE xy* plane made of quad geometric elements and
    its associated faces and edges colours. Ticks and labels are added to the
    given axes according to the extended grid settings.

    Parameters
    ----------
    limits : array_like, optional
        Extended grid limits.
    segments : int, optional
        Edge segments count for the extended grid.
    labels : array_like, optional
        Axis labels.
    axes : matplotlib.axes.Axes, optional
        Axes to add the grid to.

    Other Parameters
    ----------------
    grid_face_colours : array_like, optional
        Grid face colours array such as
        `grid_face_colours = (0.25, 0.25, 0.25)`.
    grid_edge_colours : array_like, optional
        Grid edge colours array such as
        `grid_edge_colours = (0.25, 0.25, 0.25)`.
    grid_face_alpha : numeric, optional
        Grid face opacity value such as `grid_face_alpha = 0.1`.
    grid_edge_alpha : numeric, optional
        Grid edge opacity value such as `grid_edge_alpha = 0.5`.
    x_axis_colour : array_like, optional
        *X* axis colour array such as `x_axis_colour = (0.0, 0.0, 0.0, 1.0)`.
    y_axis_colour : array_like, optional
        *Y* axis colour array such as `y_axis_colour = (0.0, 0.0, 0.0, 1.0)`.
    x_ticks_colour : array_like, optional
        *X* axis ticks colour array such as
        `x_ticks_colour = (0.0, 0.0, 0.0, 0.85)`.
    y_ticks_colour : array_like, optional
        *Y* axis ticks colour array such as
        `y_ticks_colour = (0.0, 0.0, 0.0, 0.85)`.
    x_label_colour : array_like, optional
        *X* axis label colour array such as
        `x_label_colour = (0.0, 0.0, 0.0, 0.85)`.
    y_label_colour : array_like, optional
        *Y* axis label colour array such as
        `y_label_colour = (0.0, 0.0, 0.0, 0.85)`.
    ticks_and_label_location : array_like, optional
        Location of the *X* and *Y* axis ticks and labels such as
        `ticks_and_label_location = ('-x', '-y')`.

    Returns
    -------
    tuple
        Grid quads, faces colours, edges colours.

    Examples
    --------
    >>> nadir_grid(segments=1)
    (array([[[-1.   , -1.   ,  0.   ],
            [ 1.   , -1.   ,  0.   ],
            [ 1.   ,  1.   ,  0.   ],
            [-1.   ,  1.   ,  0.   ]],
    <BLANKLINE>
           [[-1.   , -1.   ,  0.   ],
            [ 0.   , -1.   ,  0.   ],
            [ 0.   ,  0.   ,  0.   ],
            [-1.   ,  0.   ,  0.   ]],
    <BLANKLINE>
           [[-1.   ,  0.   ,  0.   ],
            [ 0.   ,  0.   ,  0.   ],
            [ 0.   ,  1.   ,  0.   ],
            [-1.   ,  1.   ,  0.   ]],
    <BLANKLINE>
           [[ 0.   , -1.   ,  0.   ],
            [ 1.   , -1.   ,  0.   ],
            [ 1.   ,  0.   ,  0.   ],
            [ 0.   ,  0.   ,  0.   ]],
    <BLANKLINE>
           [[ 0.   ,  0.   ,  0.   ],
            [ 1.   ,  0.   ,  0.   ],
            [ 1.   ,  1.   ,  0.   ],
            [ 0.   ,  1.   ,  0.   ]],
    <BLANKLINE>
           [[-1.   , -0.001,  0.   ],
            [ 1.   , -0.001,  0.   ],
            [ 1.   ,  0.001,  0.   ],
            [-1.   ,  0.001,  0.   ]],
    <BLANKLINE>
           [[-0.001, -1.   ,  0.   ],
            [ 0.001, -1.   ,  0.   ],
            [ 0.001,  1.   ,  0.   ],
            [-0.001,  1.   ,  0.   ]]]), array([[ 0.25,  0.25,  0.25,  0.1 ],
           [ 0.  ,  0.  ,  0.  ,  0.  ],
           [ 0.  ,  0.  ,  0.  ,  0.  ],
           [ 0.  ,  0.  ,  0.  ,  0.  ],
           [ 0.  ,  0.  ,  0.  ,  0.  ],
           [ 0.  ,  0.  ,  0.  ,  1.  ],
           [ 0.  ,  0.  ,  0.  ,  1.  ]]), array([[ 0.5 ,  0.5 ,  0.5 ,  0.5 ],
           [ 0.75,  0.75,  0.75,  0.25],
           [ 0.75,  0.75,  0.75,  0.25],
           [ 0.75,  0.75,  0.75,  0.25],
           [ 0.75,  0.75,  0.75,  0.25],
           [ 0.  ,  0.  ,  0.  ,  1.  ],
           [ 0.  ,  0.  ,  0.  ,  1.  ]]))
    """

    if limits is None:
        limits = np.array([[-1, 1], [-1, 1]])

    if labels is None:
        labels = ('x', 'y')

    extent = np.max(np.abs(limits[..., 1] - limits[..., 0]))

    settings = Structure(
        **{
            'grid_face_colours': (0.25, 0.25, 0.25),
            'grid_edge_colours': (0.50, 0.50, 0.50),
            'grid_face_alpha': 0.1,
            'grid_edge_alpha': 0.5,
            'x_axis_colour': (0.0, 0.0, 0.0, 1.0),
            'y_axis_colour': (0.0, 0.0, 0.0, 1.0),
            'x_ticks_colour': (0.0, 0.0, 0.0, 0.85),
            'y_ticks_colour': (0.0, 0.0, 0.0, 0.85),
            'x_label_colour': (0.0, 0.0, 0.0, 0.85),
            'y_label_colour': (0.0, 0.0, 0.0, 0.85),
            'ticks_and_label_location': ('-x', '-y')
        })
    settings.update(**kwargs)

    # Outer grid.
    quads_g = primitive_vertices_grid_mpl(
        origin=(-extent / 2, -extent / 2),
        width=extent,
        height=extent,
        height_segments=segments,
        width_segments=segments)

    RGB_g = ones([quads_g.shape[0], quads_g.shape[-1]])
    RGB_gf = RGB_g * settings.grid_face_colours
    RGB_gf = np.hstack(
        [RGB_gf, full([RGB_gf.shape[0], 1], settings.grid_face_alpha)])
    RGB_ge = RGB_g * settings.grid_edge_colours
    RGB_ge = np.hstack(
        [RGB_ge, full([RGB_ge.shape[0], 1], settings.grid_edge_alpha)])

    # Inner grid.
    quads_gs = primitive_vertices_grid_mpl(
        origin=(-extent / 2, -extent / 2),
        width=extent,
        height=extent,
        height_segments=segments * 2,
        width_segments=segments * 2)

    RGB_gs = ones([quads_gs.shape[0], quads_gs.shape[-1]])
    RGB_gsf = RGB_gs * 0
    RGB_gsf = np.hstack([RGB_gsf, full([RGB_gsf.shape[0], 1], 0)])
    RGB_gse = np.clip(RGB_gs * settings.grid_edge_colours * 1.5, 0, 1)
    RGB_gse = np.hstack((RGB_gse,
                         full([RGB_gse.shape[0], 1],
                              settings.grid_edge_alpha / 2)))

    # Axis.
    thickness = extent / 1000
    quad_x = primitive_vertices_grid_mpl(
        origin=(limits[0, 0], -thickness / 2), width=extent, height=thickness)
    RGB_x = ones([quad_x.shape[0], quad_x.shape[-1] + 1])
    RGB_x = RGB_x * settings.x_axis_colour

    quad_y = primitive_vertices_grid_mpl(
        origin=(-thickness / 2, limits[1, 0]), width=thickness, height=extent)
    RGB_y = ones([quad_y.shape[0], quad_y.shape[-1] + 1])
    RGB_y = RGB_y * settings.y_axis_colour

    if axes is not None:
        # Ticks.
        x_s = 1 if '+x' in settings.ticks_and_label_location else -1
        y_s = 1 if '+y' in settings.ticks_and_label_location else -1
        for i, axis in enumerate('xy'):
            h_a = 'center' if axis == 'x' else 'left' if x_s == 1 else 'right'
            v_a = 'center'

            ticks = list(sorted(set(quads_g[..., 0, i])))
            ticks += [ticks[-1] + ticks[-1] - ticks[-2]]
            for tick in ticks:
                x = (limits[1, 1 if x_s == 1 else 0] + (x_s * extent / 25)
                     if i else tick)
                y = (tick if i else
                     limits[0, 1 if y_s == 1 else 0] + (y_s * extent / 25))

                tick = DEFAULT_INT_DTYPE(tick) if DEFAULT_FLOAT_DTYPE(
                    tick).is_integer() else tick
                c = settings['{0}_ticks_colour'.format(axis)]

                axes.text(
                    x,
                    y,
                    0,
                    tick,
                    'x',
                    horizontalalignment=h_a,
                    verticalalignment=v_a,
                    color=c,
                    clip_on=True)

        # Labels.
        for i, axis in enumerate('xy'):
            h_a = 'center' if axis == 'x' else 'left' if x_s == 1 else 'right'
            v_a = 'center'

            x = (limits[1, 1 if x_s == 1 else 0] + (x_s * extent / 10)
                 if i else 0)
            y = (0 if i else
                 limits[0, 1 if y_s == 1 else 0] + (y_s * extent / 10))

            c = settings['{0}_label_colour'.format(axis)]

            axes.text(
                x,
                y,
                0,
                labels[i],
                'x',
                horizontalalignment=h_a,
                verticalalignment=v_a,
                color=c,
                size=20,
                clip_on=True)

    quads = np.vstack([quads_g, quads_gs, quad_x, quad_y])
    RGB_f = np.vstack([RGB_gf, RGB_gsf, RGB_x, RGB_y])
    RGB_e = np.vstack([RGB_ge, RGB_gse, RGB_x, RGB_y])

    return quads, RGB_f, RGB_e