Example No. 1
    def test_as_int_array(self):
        """Test :func:`colour.utilities.array.as_int_array` definition."""

        np.testing.assert_equal(as_int_array([1.0, 2.0, 3.0]),
                                np.array([1, 2, 3]))

        self.assertEqual(as_int_array([1, 2, 3]).dtype, DEFAULT_INT_DTYPE)
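
A minimal standalone sketch of what the test above exercises, assuming
``as_int_array`` is imported from ``colour.utilities``:

from colour.utilities import as_int_array

# Floating point values are converted to the default integer dtype
# (int64 unless changed globally).
a = as_int_array([1.0, 2.0, 3.0])
print(a, a.dtype)  # [1 2 3] int64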
Example No. 2
def read_LUT_UnorderedSonySPI3D(path):
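    """
    Read given unordered *.spi3d* *LUT* file.

    Parameters
    ----------
    path : unicode
        *LUT* path.

    Returns
    -------
    LUT3D
        :class:`LUT3D` class instance.
    """
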
    title = path_to_title(path)
    domain_min, domain_max = np.array([0, 0, 0]), np.array([1, 1, 1])
    size = 2
    indexes = []
    table = []
    comments = []
    with open(path) as spi3d_file:
        lines = filter(None, (line.strip() for line in spi3d_file.readlines()))
        for line in lines:
            if line.startswith('#'):
                comments.append(line[1:].strip())
                continue
            tokens = line.split()
            if len(tokens) == 3:
                assert len(set(tokens)) == 1, (
                    'Non-uniform "LUT" shape is unsupported!')
                size = DEFAULT_INT_DTYPE(tokens[0])
            if len(tokens) == 6:
                indexes.append(as_int_array(tokens[:3]))
                table.append(as_float_array(tokens[3:]))
    indexes = as_int_array(indexes)
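    # Sort the rows so that the first index column varies slowest, matching the
    # row order of "LUT3D.linear_table(size)" reshaped to (-1, 3).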
    sorting_indexes = np.lexsort((indexes[:, 2], indexes[:, 1], indexes[:, 0]))
    assert np.array_equal(
        indexes[sorting_indexes],
        DEFAULT_INT_DTYPE(np.around(
            LUT3D.linear_table(size) * (size - 1))).reshape(
                (-1, 3))), 'Indexes do not match expected "LUT3D" indexes!'
    table = as_float_array(table)[sorting_indexes].reshape(
        [size, size, size, 3])
    return LUT3D(table,
                 title,
                 np.vstack([domain_min, domain_max]),
                 comments=comments)
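
A minimal usage sketch for the reader above, assuming a hypothetical
``grading.spi3d`` file on disk; the returned ``LUT3D`` instance exposes the
parsed title, cube size and table:

# Hypothetical usage, the path and the RGB triplet are illustrative only.
LUT = read_LUT_UnorderedSonySPI3D("grading.spi3d")
print(LUT.name, LUT.size)
print(LUT.apply([0.18, 0.18, 0.18]))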
Example No. 3
    def test_as_int_array(self):
        """
        Tests :func:`colour.utilities.array.as_int_array` definition.
        """

        np.testing.assert_equal(
            as_int_array([1.0, 2.0, 3.0]), np.array([1, 2, 3]))

        self.assertEqual(as_int_array([1, 2, 3]).dtype, DEFAULT_INT_DTYPE)
Example No. 4
    def test_set_int_precision(self):
        """
        Tests :func:`colour.utilities.array.set_int_precision` definition.
        """

        self.assertEqual(as_int_array(np.ones(3)).dtype, np.int64)

        set_int_precision(np.int32)

        self.assertEqual(as_int_array(np.ones(3)).dtype, np.int32)

        set_int_precision(np.int64)

        self.assertEqual(as_int_array(np.ones(3)).dtype, np.int64)
Example No. 5
    def test_set_default_int_dtype(self):
        """
        Test :func:`colour.utilities.array.set_default_int_dtype` definition.
        """

        self.assertEqual(as_int_array(np.ones(3)).dtype, np.int64)

        set_default_int_dtype(np.int32)

        self.assertEqual(as_int_array(np.ones(3)).dtype, np.int32)

        set_default_int_dtype(np.int64)

        self.assertEqual(as_int_array(np.ones(3)).dtype, np.int64)
Example No. 6
def scale_contour(contour: ArrayLike, factor: Floating) -> NDArray:
    """
    Scale given contour by given scale factor.

    Parameters
    ----------
    contour
        Contour to scale.
    factor
        Scale factor.

    Returns
    -------
    :class:`numpy.ndarray`
        Scaled contour.

    Examples
    --------
    >>> contour = np.array([[0, 0], [1, 0], [1, 1], [0, 1]])
    >>> scale_contour(contour, 2)
    array([[ 0.,  0.],
           [ 2.,  0.],
           [ 2.,  2.],
           [ 0.,  2.]])
    """

    centroid = as_int_array(contour_centroid(contour))
    scaled_contour = (as_float_array(contour) - centroid) * factor + centroid

    return scaled_contour
Example No. 7
def scale_contour(contour, factor):
    """
    Scales given contour by given scale factor.

    Parameters
    ----------
    contour : array_like
        Contour to scale.
    factor : numeric
        Scale factor.

    Returns
    -------
    ndarray
        Scaled contour.

    Examples
    --------
    >>> contour = np.array([[0, 0], [1, 0], [1, 1], [0, 1]])
    >>> scale_contour(contour, 2)
    array([[ 0.,  0.],
           [ 2.,  0.],
           [ 2.,  2.],
           [ 0.,  2.]])
    """

    centroid = as_int_array(contour_centroid(contour))
    scaled_contour = (as_float_array(contour) - centroid) * factor + centroid

    return scaled_contour
Example No. 8
    def _parse_table_section(lines):
        """Parse the table at given lines."""

        size = as_int_array(lines[0].split())
        table = as_float_array([line.split() for line in lines[1:]])

        return size, table
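
A small illustrative call, assuming ``_parse_table_section`` is in scope and
``as_int_array`` / ``as_float_array`` come from ``colour.utilities``; the first
line carries the grid size, the remaining lines the table rows:

lines = ["2 2 2", "0.0 0.0 0.0", "1.0 1.0 1.0"]
size, table = _parse_table_section(lines)
# size -> array([2, 2, 2]), table.shape -> (2, 3)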
Example No. 9
def swatch_masks(
    width: Integer,
    height: Integer,
    swatches_h: Integer,
    swatches_v: Integer,
    samples: Integer,
) -> Tuple[NDArray, ...]:
    """
    Return swatch masks for given image width and height and swatches count.

    Parameters
    ----------
    width
        Image width.
    height
        Image height.
    swatches_h
        Horizontal swatches count.
    swatches_v
        Vertical swatches count.
    samples
        Samples count.

    Returns
    -------
    :class:`tuple`
        Tuple of swatch masks.

    Examples
    --------
    >>> from pprint import pprint
    >>> pprint(swatch_masks(16, 8, 4, 2, 1))  # doctest: +ELLIPSIS
    (array([2, 2, 2, 2]...),
     array([2, 2, 6, 6]...),
     array([ 2,  2, 10, 10]...),
     array([ 2,  2, 14, 14]...),
     array([6, 6, 2, 2]...),
     array([6, 6, 6, 6]...),
     array([ 6,  6, 10, 10]...),
     array([ 6,  6, 14, 14]...))
    """

    samples_half = as_int(samples / 2)

    masks = []
    offset_h = width / swatches_h / 2
    offset_v = height / swatches_v / 2
    for j in np.linspace(offset_v, height - offset_v, swatches_v):
        for i in np.linspace(offset_h, width - offset_h, swatches_h):
            masks.append(
                as_int_array([
                    j - samples_half,
                    j + samples_half,
                    i - samples_half,
                    i + samples_half,
                ]))

    return tuple(masks)
Example No. 10
def read_unordered_LUT_SonySPI3D(path):
    """
    Reads given unordered *.spi3d* *LUT* file.

    Parameters
    ----------
    path : unicode
        *LUT* path.

    Returns
    -------
    LUT3D or LUT3x1D
        :class:`LUT3D` or :class:`LUT3x1D` class instance.
    """

    title = path_to_title(path)
    domain_min, domain_max = np.array([0, 0, 0]), np.array([1, 1, 1])
    indexes = []
    comments = []
    table_unordered = []
    table_ordered = []

    with open(path) as spi3d_file:
        lines = filter(None, (line.strip() for line in spi3d_file.readlines()))
        for line in lines:
            if line.startswith('#'):
                comments.append(line[1:].strip())
                continue

            tokens = line.split()
            if len(tokens) == 3:
                size = DEFAULT_INT_DTYPE(tokens[0])

            if len(tokens) == 6:
                indexes.append(as_int_array(tokens[:3]))
                table_unordered.append(as_float_array(tokens[3:]))

    test_indexes = np.around(LUT3D.linear_table(size) * (size - 1)).reshape(
        (-1, 3))
    for i in range(size ** 3):
        for j in range(size ** 3):
            if np.array_equal(test_indexes[i], indexes[j]):
                table_ordered.append(table_unordered[j])

    table_ordered = as_float_array(table_ordered).reshape(
        [size, size, size, 3])

    return LUT3D(table_ordered,
                 title,
                 np.vstack([domain_min, domain_max]),
                 comments=comments)
Example No. 11
def RGB_to_HEX(RGB: ArrayLike) -> StrOrNDArray:
    """
    Convert from *RGB* colourspace to hexadecimal representation.

    Parameters
    ----------
    RGB
        *RGB* colourspace array.

    Returns
    -------
    :class:`str` or :class:`numpy.array`
        Hexadecimal representation.

    Notes
    -----
    +------------+-----------------------+---------------+
    | **Domain** | **Scale - Reference** | **Scale - 1** |
    +============+=======================+===============+
    | ``RGB``    | [0, 1]                | [0, 1]        |
    +------------+-----------------------+---------------+

    Examples
    --------
    >>> RGB = np.array([0.66666667, 0.86666667, 1.00000000])
    >>> RGB_to_HEX(RGB)
    '#aaddff'
    """

    RGB = to_domain_1(RGB)

    if np.any(RGB < 0):
        usage_warning(
            '"RGB" array contains negative values, those will be clipped, '
            "unpredictable results may occur!")

        RGB = as_float_array(np.clip(RGB, 0, np.inf))

    if np.any(RGB > 1):
        usage_warning(
            '"RGB" array contains values over 1 and will be normalised, '
            "unpredictable results may occur!")

        RGB = eotf_inverse_sRGB(normalise_maximum(eotf_sRGB(RGB)))

    to_HEX = np.vectorize("{:02x}".format)

    HEX = to_HEX(as_int_array(RGB * 255, dtype=np.uint8)).astype(object)
    HEX = np.asarray("#") + HEX[..., 0] + HEX[..., 1] + HEX[..., 2]

    return HEX
Example No. 12
def hue_quadrature(h: FloatingOrArrayLike) -> FloatingOrNDArray:
    """
    Return the hue quadrature from given hue :math:`h` angle in degrees.

    Parameters
    ----------
    h
        Hue :math:`h` angle in degrees.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Hue quadrature.

    Examples
    --------
    >>> hue_quadrature(219.0484326582719)  # doctest: +ELLIPSIS
    278.0607358...
    """

    h = as_float_array(h)

    h_i = HUE_DATA_FOR_HUE_QUADRATURE["h_i"]
    e_i = HUE_DATA_FOR_HUE_QUADRATURE["e_i"]
    H_i = HUE_DATA_FOR_HUE_QUADRATURE["H_i"]

    # *np.searchsorted* returns an erroneous index if a *nan* is used as input.
    h[np.asarray(np.isnan(h))] = 0
    i = as_int_array(np.searchsorted(h_i, h, side="left") - 1)

    h_ii = h_i[i]
    e_ii = e_i[i]
    H_ii = H_i[i]
    h_ii1 = h_i[i + 1]
    e_ii1 = e_i[i + 1]

    H = H_ii + ((100 * (h - h_ii) / e_ii) / ((h - h_ii) / e_ii +
                                             (h_ii1 - h) / e_ii1))

    H = np.where(
        h < 20.14,
        385.9 + (14.1 * h / 0.856) / (h / 0.856 + (20.14 - h) / 0.8),
        H,
    )
    H = np.where(
        h >= 237.53,
        H_ii + ((85.9 * (h - h_ii) / e_ii) / ((h - h_ii) / e_ii +
                                              (360 - h) / 0.856)),
        H,
    )
    return as_float(H)
Example No. 13
def hue_quadrature(h):
    """
    Returns the hue quadrature from given hue :math:`h` angle in degrees.

    Parameters
    ----------
    h : numeric or array_like
        Hue :math:`h` angle in degrees.

    Returns
    -------
    numeric or ndarray
        Hue quadrature.

    Examples
    --------
    >>> hue_quadrature(219.0484326582719)  # doctest: +ELLIPSIS
    278.0607358...
    """

    h = as_float_array(h)

    h_i = HUE_DATA_FOR_HUE_QUADRATURE['h_i']
    e_i = HUE_DATA_FOR_HUE_QUADRATURE['e_i']
    H_i = HUE_DATA_FOR_HUE_QUADRATURE['H_i']

    # *np.searchsorted* returns an erroneous index if a *nan* is used as input.
    h[np.asarray(np.isnan(h))] = 0
    i = as_int_array(np.searchsorted(h_i, h, side='left') - 1)

    h_ii = h_i[i]
    e_ii = e_i[i]
    H_ii = H_i[i]
    h_ii1 = h_i[i + 1]
    e_ii1 = e_i[i + 1]

    H = H_ii + ((100 * (h - h_ii) / e_ii) / (
        (h - h_ii) / e_ii + (h_ii1 - h) / e_ii1))
    H = np.where(
        h < 20.14,
        385.9 + (14.1 * h / 0.856) / (h / 0.856 + (20.14 - h) / 0.8),
        H,
    )
    H = np.where(
        h >= 237.53,
        H_ii + ((85.9 * (h - h_ii) / e_ii) / (
            (h - h_ii) / e_ii + (360 - h) / 0.856)),
        H,
    )
    return as_float(H)
Example No. 14
def hue_quadrature(h):
    """
    Returns the hue quadrature from given hue :math:`h` angle in degrees.

    Parameters
    ----------
    h : numeric or array_like
        Hue :math:`h` angle in degrees.

    Returns
    -------
    numeric or ndarray
        Hue quadrature.

    Examples
    --------
    >>> hue_quadrature(219.0484326582719)  # doctest: +ELLIPSIS
    278.0607358...
    """

    h = as_float_array(h)

    h_i = HUE_DATA_FOR_HUE_QUADRATURE['h_i']
    e_i = HUE_DATA_FOR_HUE_QUADRATURE['e_i']
    H_i = HUE_DATA_FOR_HUE_QUADRATURE['H_i']

    # *np.searchsorted* returns an erroneous index if a *nan* is used as input.
    h[np.asarray(np.isnan(h))] = 0
    i = as_int_array(np.searchsorted(h_i, h, side='left') - 1)

    h_ii = h_i[i]
    e_ii = e_i[i]
    H_ii = H_i[i]
    h_ii1 = h_i[i + 1]
    e_ii1 = e_i[i + 1]

    H = H_ii + ((100 * (h - h_ii) / e_ii) / ((h - h_ii) / e_ii +
                                             (h_ii1 - h) / e_ii1))
    H = np.where(
        h < 20.14,
        385.9 + (14.1 * h / 0.856) / (h / 0.856 + (20.14 - h) / 0.8),
        H,
    )
    H = np.where(
        h >= 237.53,
        H_ii + ((85.9 * (h - h_ii) / e_ii) / ((h - h_ii) / e_ii +
                                              (360 - h) / 0.856)),
        H,
    )
    return as_float(H)
Example No. 15
def swatch_masks(width, height, swatches_h, swatches_v, samples):
    """
    Returns swatch masks for given image width and height and swatches count.

    Parameters
    ----------
    width : int
        Image width.
    height : int
        Image height.
    swatches_h : int
        Horizontal swatches count.
    swatches_v : int
        Vertical swatches count.
    samples : int
        Samples count.

    Returns
    -------
    list
        List of swatch masks.

    Examples
    --------
    >>> from pprint import pprint
    >>> pprint(swatch_masks(16, 8, 4, 2, 1))
    [array([2, 2, 2, 2]),
     array([2, 2, 6, 6]),
     array([ 2,  2, 10, 10]),
     array([ 2,  2, 14, 14]),
     array([6, 6, 2, 2]),
     array([6, 6, 6, 6]),
     array([ 6,  6, 10, 10]),
     array([ 6,  6, 14, 14])]
    """

    samples = as_int(samples / 2)

    masks = []
    offset_h = width / swatches_h / 2
    offset_v = height / swatches_v / 2
    for j in np.linspace(offset_v, height - offset_v, swatches_v):
        for i in np.linspace(offset_h, width - offset_h, swatches_h):
            masks.append(
                as_int_array(
                    [j - samples, j + samples, i - samples, i + samples]))

    return masks
Example No. 16
def ranges_YCbCr(bits: Integer, is_legal: Boolean, is_int: Boolean) -> NDArray:
    """
    Return the *Y'CbCr* colour encoding ranges array for given bit depth,
    range legality and representation.

    Parameters
    ----------
    bits
        Bit depth of the *Y'CbCr* colour encoding ranges array.
    is_legal
        Whether the *Y'CbCr* colour encoding ranges array is legal.
    is_int
        Whether the *Y'CbCr* colour encoding ranges array represents integer
        code values.

    Returns
    -------
    :class:`numpy.ndarray`
        *Y'CbCr* colour encoding ranges array.

    Examples
    --------
    >>> ranges_YCbCr(8, True, True)
    array([ 16, 235,  16, 240])
    >>> ranges_YCbCr(8, True, False)  # doctest: +ELLIPSIS
    array([ 0.0627451...,  0.9215686...,  0.0627451...,  0.9411764...])
    >>> ranges_YCbCr(10, False, False)
    array([ 0. ,  1. , -0.5,  0.5])
    """

    if is_legal:
        ranges = np.array([16, 235, 16, 240])
        ranges *= 2**(bits - 8)
    else:
        ranges = np.array([0, 2**bits - 1, 0, 2**bits - 1])

    if not is_int:
        ranges = as_int_array(ranges) / (2**bits - 1)

    if is_int and not is_legal:
        ranges[3] = 2**bits

    if not is_int and not is_legal:
        ranges[2] = -0.5
        ranges[3] = 0.5

    return ranges
Example No. 17
def hue_quadrature(h: FloatingOrArrayLike) -> FloatingOrNDArray:
    """
    Return the hue quadrature from given hue :math:`h` angle in degrees.

    Parameters
    ----------
    h
        Hue :math:`h` angle in degrees.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Hue quadrature.

    Examples
    --------
    >>> hue_quadrature(196.3185839)  # doctest: +ELLIPSIS
    237.6052911...
    """

    h = as_float_array(h)

    h_i = HUE_DATA_FOR_HUE_QUADRATURE["h_i"]
    e_i = HUE_DATA_FOR_HUE_QUADRATURE["e_i"]
    H_i = HUE_DATA_FOR_HUE_QUADRATURE["H_i"]

    # :math:`h_p` = :math:`h_z` + 360 if :math:`h_z` < :math:`h_1`, i.e. h_i[0].
    h[h <= h_i[0]] += 360
    # *np.searchsorted* returns an erroneous index if a *nan* is used as input.
    h[np.asarray(np.isnan(h))] = 0
    i = as_int_array(np.searchsorted(h_i, h, side="left") - 1)

    h_ii = h_i[i]
    e_ii = e_i[i]
    H_ii = H_i[i]
    h_ii1 = h_i[i + 1]
    e_ii1 = e_i[i + 1]

    H = H_ii + ((100 * (h - h_ii) / e_ii) / ((h - h_ii) / e_ii +
                                             (h_ii1 - h) / e_ii1))

    return as_float(H)
Example No. 18
def full_to_legal(
    CV: Union[FloatingOrArrayLike, IntegerOrArrayLike],
    bit_depth: Integer = 10,
    in_int: Boolean = False,
    out_int: Boolean = False,
) -> Union[FloatingOrNDArray, IntegerOrNDArray]:
    """
    Convert given code value :math:`CV` or float equivalent of a code value at
    a given bit depth from full range (full swing) to legal range
    (studio swing).

    Parameters
    ----------
    CV
        Full range code value :math:`CV` or float equivalent of a code value at
        a given bit depth.
    bit_depth
        Bit depth used for conversion.
    in_int
        Whether to treat the input value as integer code value or float
        equivalent of a code value at a given bit depth.
    out_int
        Whether to return value as integer code value or float equivalent of a
        code value at a given bit depth.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.integer` or :class:`numpy.ndarray`
        Legal range code value :math:`CV` or float equivalent of a code value
        at a given bit depth.

    Examples
    --------
    >>> full_to_legal(0.0)  # doctest: +ELLIPSIS
    0.0625610...
    >>> full_to_legal(1.0)  # doctest: +ELLIPSIS
    0.9188660...
    >>> full_to_legal(0.0, out_int=True)
    64
    >>> full_to_legal(1.0, out_int=True)
    940
    >>> full_to_legal(0, in_int=True)  # doctest: +ELLIPSIS
    0.0625610...
    >>> full_to_legal(1023, in_int=True)  # doctest: +ELLIPSIS
    0.9188660...
    >>> full_to_legal(0, in_int=True, out_int=True)
    64
    >>> full_to_legal(1023, in_int=True, out_int=True)
    940
    """

    CV = as_float_array(CV)

    MV = 2**bit_depth - 1

    CV_legal = as_int_array(np.round(CV)) / MV if in_int else CV

    B, W = CV_range(bit_depth, True, True)

    CV_legal = (W - B) * CV_legal + B

    if out_int:
        return as_int(np.round(CV_legal))
    else:
        return as_float(CV_legal / MV)
Example No. 19
def plot_RGB_colourspaces_gamuts(
    colourspaces: Union[RGB_Colourspace, str, Sequence[Union[RGB_Colourspace,
                                                             str]]],
    reference_colourspace: Union[Literal["CAM02LCD", "CAM02SCD", "CAM02UCS",
                                         "CAM16LCD", "CAM16SCD", "CAM16UCS",
                                         "CIE XYZ", "CIE xyY", "CIE Lab",
                                         "CIE Luv", "CIE UCS", "CIE UVW",
                                         "DIN99", "Hunter Lab", "Hunter Rdab",
                                         "ICaCb", "ICtCp", "IPT", "IgPgTg",
                                         "Jzazbz", "OSA UCS", "Oklab",
                                         "hdr-CIELAB", "hdr-IPT", ],
                                 str, ] = "CIE xyY",
    segments: Integer = 8,
    show_grid: Boolean = True,
    grid_segments: Integer = 10,
    show_spectral_locus: Boolean = False,
    spectral_locus_colour: Optional[Union[ArrayLike, str]] = None,
    cmfs: Union[MultiSpectralDistributions, str, Sequence[Union[
        MultiSpectralDistributions,
        str]], ] = "CIE 1931 2 Degree Standard Observer",
    chromatically_adapt: Boolean = False,
    convert_kwargs: Optional[Dict] = None,
    **kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Plot given *RGB* colourspaces gamuts in given reference colourspace.

    Parameters
    ----------
    colourspaces
        *RGB* colourspaces to plot the gamuts. ``colourspaces`` elements
        can be of any type or form supported by the
        :func:`colour.plotting.filter_RGB_colourspaces` definition.
    reference_colourspace
        Reference colourspace model to plot the gamuts into, see
        :attr:`colour.COLOURSPACE_MODELS` attribute for the list of supported
        colourspace models.
    segments
        Edge segments count for each *RGB* colourspace cubes.
    show_grid
        Whether to show a grid at the bottom of the *RGB* colourspace cubes.
    grid_segments
        Edge segments count for the grid.
    show_spectral_locus
        Whether to show the spectral locus.
    spectral_locus_colour
        Spectral locus colour.
    cmfs
        Standard observer colour matching functions used for computing the
        spectral locus boundaries. ``cmfs`` can be of any type or form
        supported by the :func:`colour.plotting.filter_cmfs` definition.
    chromatically_adapt
        Whether to chromatically adapt the *RGB* colourspaces given in
        ``colourspaces`` to the whitepoint of the default plotting colourspace.
    convert_kwargs
        Keyword arguments for the :func:`colour.convert` definition.

    Other Parameters
    ----------------
    edge_colours
        Edge colours array such as `edge_colours = (None, (0.5, 0.5, 1.0))`.
    edge_alpha
        Edge opacity value such as `edge_alpha = (0.0, 1.0)`.
    face_alpha
        Face opacity value such as `face_alpha = (0.5, 1.0)`.
    face_colours
        Face colours array such as `face_colours = (None, (0.5, 0.5, 1.0))`.
    kwargs
        {:func:`colour.plotting.artist`,
        :func:`colour.plotting.volume.nadir_grid`},
        See the documentation of the previously listed definitions.

    Returns
    -------
    :class:`tuple`
        Current figure and axes.

    Examples
    --------
    >>> plot_RGB_colourspaces_gamuts(['ITU-R BT.709', 'ACEScg', 'S-Gamut'])
    ... # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...Axes3DSubplot...>)

    .. image:: ../_static/Plotting_Plot_RGB_Colourspaces_Gamuts.png
        :align: center
        :alt: plot_RGB_colourspaces_gamuts
    """

    colourspaces = cast(
        List[RGB_Colourspace],
        list(filter_RGB_colourspaces(colourspaces).values()),
    )

    convert_kwargs = optional(convert_kwargs, {})

    count_c = len(colourspaces)

    title = (
        f"{', '.join([colourspace.name for colourspace in colourspaces])} "
        f"- {reference_colourspace} Reference Colourspace")

    illuminant = CONSTANTS_COLOUR_STYLE.colour.colourspace.whitepoint

    convert_settings = {"illuminant": illuminant}
    convert_settings.update(convert_kwargs)

    settings = Structure(
        **{
            "face_colours": [None] * count_c,
            "edge_colours": [None] * count_c,
            "face_alpha": [1] * count_c,
            "edge_alpha": [1] * count_c,
            "title": title,
        })
    settings.update(kwargs)

    figure = plt.figure()
    axes = figure.add_subplot(111, projection="3d")

    points = zeros((4, 3))
    if show_spectral_locus:
        cmfs = cast(MultiSpectralDistributions,
                    first_item(filter_cmfs(cmfs).values()))
        XYZ = cmfs.values

        points = colourspace_model_axis_reorder(
            convert(XYZ, "CIE XYZ", reference_colourspace, **convert_settings),
            reference_colourspace,
        )

        points[np.isnan(points)] = 0

        c = ((0.0, 0.0, 0.0,
              0.5) if spectral_locus_colour is None else spectral_locus_colour)

        axes.plot(
            points[..., 0],
            points[..., 1],
            points[..., 2],
            color=c,
            zorder=CONSTANTS_COLOUR_STYLE.zorder.midground_line,
        )
        axes.plot(
            (points[-1][0], points[0][0]),
            (points[-1][1], points[0][1]),
            (points[-1][2], points[0][2]),
            color=c,
            zorder=CONSTANTS_COLOUR_STYLE.zorder.midground_line,
        )

    plotting_colourspace = CONSTANTS_COLOUR_STYLE.colour.colourspace

    quads_c: List = []
    RGB_cf: List = []
    RGB_ce: List = []
    for i, colourspace in enumerate(colourspaces):

        if chromatically_adapt and not np.array_equal(
                colourspace.whitepoint, plotting_colourspace.whitepoint):
            colourspace = colourspace.chromatically_adapt(
                plotting_colourspace.whitepoint,
                plotting_colourspace.whitepoint_name,
            )

        quads_cb, RGB = RGB_identity_cube(
            width_segments=segments,
            height_segments=segments,
            depth_segments=segments,
        )

        XYZ = RGB_to_XYZ(
            quads_cb,
            colourspace.whitepoint,
            colourspace.whitepoint,
            colourspace.matrix_RGB_to_XYZ,
        )

        convert_settings = {"illuminant": colourspace.whitepoint}
        convert_settings.update(convert_kwargs)

        quads_c.extend(
            colourspace_model_axis_reorder(
                convert(XYZ, "CIE XYZ", reference_colourspace,
                        **convert_settings),
                reference_colourspace,
            ))

        if settings.face_colours[i] is not None:
            RGB = ones(RGB.shape) * settings.face_colours[i]

        RGB_cf.extend(
            np.hstack([RGB,
                       full((RGB.shape[0], 1), settings.face_alpha[i])]))

        if settings.edge_colours[i] is not None:
            RGB = ones(RGB.shape) * settings.edge_colours[i]

        RGB_ce.extend(
            np.hstack([RGB,
                       full((RGB.shape[0], 1), settings.edge_alpha[i])]))

    quads = as_float_array(quads_c)
    RGB_f = as_float_array(RGB_cf)
    RGB_e = as_float_array(RGB_ce)

    quads[np.isnan(quads)] = 0

    if quads.size != 0:
        for i, axis in enumerate("xyz"):
            min_a = np.minimum(np.min(quads[..., i]), np.min(points[..., i]))
            max_a = np.maximum(np.max(quads[..., i]), np.max(points[..., i]))
            getattr(axes, f"set_{axis}lim")((min_a, max_a))

    labels = np.array(
        COLOURSPACE_MODELS_AXIS_LABELS[reference_colourspace])[as_int_array(
            colourspace_model_axis_reorder([0, 1, 2], reference_colourspace))]
    for i, axis in enumerate("xyz"):
        getattr(axes, f"set_{axis}label")(labels[i])

    if show_grid:
        limits = np.array([[-1.5, 1.5], [-1.5, 1.5]])

        quads_g, RGB_gf, RGB_ge = nadir_grid(limits, grid_segments, labels,
                                             axes, **settings)
        quads = np.vstack([quads_g, quads])
        RGB_f = np.vstack([RGB_gf, RGB_f])
        RGB_e = np.vstack([RGB_ge, RGB_e])

    collection = Poly3DCollection(quads)
    collection.set_facecolors(RGB_f)
    collection.set_edgecolors(RGB_e)

    axes.add_collection3d(collection)

    settings.update({
        "axes": axes,
        "axes_visible": False,
        "camera_aspect": "equal"
    })
    settings.update(kwargs)

    return render(**settings)
Example No. 20
def colour_checkers_coordinates_segmentation(
        image: ArrayLike,
        additional_data: Boolean = False,
        **kwargs: Any
) -> Union[ColourCheckersDetectionData, Tuple[NDArray, ...]]:
    """
    Detect the colour checkers coordinates in given image :math:`image` using
    segmentation.

    This is the core detection definition. The process is as follows:

    -   Input image :math:`image` is converted to a grayscale image
        :math:`image_g`.
    -   Image :math:`image_g` is denoised.
    -   Image :math:`image_g` is thresholded/segmented to image
        :math:`image_s`.
    -   Image :math:`image_s` is eroded and dilated to cleanup remaining noise.
    -   Contours are detected on image :math:`image_s`.
    -   Contours are filtered to only keep squares/swatches above and below
        defined surface area.
    -   Squares/swatches are clustered to isolate regions-of-interest that are
        potentially colour checkers: contours are scaled by a third so that
        colour checker swatches are expected to be joined, creating a large
        rectangular cluster. Rectangles are fitted to the clusters.
    -   Clusters with an aspect ratio different to the expected one are
        rejected; a side-effect is that the complementary pane of the
        *X-Rite* *ColorChecker Passport* is omitted.
    -   Clusters with a number of swatches close to the expected one are
        kept.

    Parameters
    ----------
    image
        Image to detect the colour checkers in.
    additional_data
        Whether to output additional data.

    Other Parameters
    ----------------
    aspect_ratio
        Colour checker aspect ratio, e.g. 1.5.
    aspect_ratio_minimum
        Minimum colour checker aspect ratio for detection: projective geometry
        might reduce the colour checker aspect ratio.
    aspect_ratio_maximum
        Maximum colour checker aspect ratio for detection: projective geometry
        might increase the colour checker aspect ratio.
    swatches
        Colour checker swatches total count.
    swatches_horizontal
        Colour checker swatches horizontal columns count.
    swatches_vertical
        Colour checker swatches vertical row count.
    swatches_count_minimum
        Minimum swatches count to be considered for the detection.
    swatches_count_maximum
        Maximum swatches count to be considered for the detection.
    swatches_chromatic_slice
        A `slice` instance defining chromatic swatches used to detect if the
        colour checker is upside down.
    swatches_achromatic_slice
        A `slice` instance defining achromatic swatches used to detect if the
        colour checker is upside down.
    swatch_minimum_area_factor
        Swatch minimum area factor :math:`f` with the minimum area :math:`m_a`
        expressed as follows: :math:`m_a = image_w * image_h / s_c / f` where
        :math:`image_w`, :math:`image_h` and :math:`s_c` are respectively the
        image width, height and the swatches count.
    swatch_contour_scale
        As the image is filtered, the swatches area will tend to shrink, the
        generated contours can thus be scaled.
    cluster_contour_scale
        As the swatches are clustered, it might be necessary to adjust the
        cluster scale so that the masks are centred better on the swatches.
    working_width
        Size the input image is resized to for detection.
    fast_non_local_means_denoising_kwargs
        Keyword arguments for :func:`cv2.fastNlMeansDenoising` definition.
    adaptive_threshold_kwargs
        Keyword arguments for :func:`cv2.adaptiveThreshold` definition.
    interpolation_method
        Interpolation method used when resizing the images, `cv2.INTER_CUBIC`
        and `cv2.INTER_LINEAR` methods are recommended.

    Returns
    -------
    :class:`colour_checker_detection.detection.segmentation.\
ColourCheckersDetectionData` or :class:`tuple`
        Tuple of colour checkers coordinates or
        :class:`ColourCheckersDetectionData` class instance with additional
        data.

    Notes
    -----
    -   Multiple colour checkers can be detected if present in ``image``.

    Examples
    --------
    >>> import os
    >>> from colour import read_image
    >>> from colour_checker_detection import TESTS_RESOURCES_DIRECTORY
    >>> path = os.path.join(TESTS_RESOURCES_DIRECTORY,
    ...                     'colour_checker_detection', 'detection',
    ...                     'IMG_1967.png')
    >>> image = read_image(path)
    >>> colour_checkers_coordinates_segmentation(image)  # doctest: +ELLIPSIS
    (array([[ 369,  688],
           [ 382,  226],
           [1078,  246],
           [1065,  707]]...)
    """

    image = as_float_array(image, FLOAT_DTYPE_DEFAULT)[..., :3]

    settings = Structure(**SETTINGS_SEGMENTATION_COLORCHECKER_CLASSIC)
    settings.update(**kwargs)

    image = as_8_bit_BGR_image(
        adjust_image(image, settings.working_width,
                     settings.interpolation_method))

    width, height = image.shape[1], image.shape[0]
    maximum_area = width * height / settings.swatches
    minimum_area = (width * height / settings.swatches /
                    settings.swatch_minimum_area_factor)

    # Thresholding/Segmentation.
    image_g = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    image_g = cv2.fastNlMeansDenoising(
        image_g, None, **settings.fast_non_local_means_denoising_kwargs)
    image_s = cv2.adaptiveThreshold(image_g,
                                    **settings.adaptive_threshold_kwargs)
    # Cleanup.
    kernel = np.ones([3, 3], np.uint8)
    image_c = cv2.erode(image_s, kernel, iterations=1)
    image_c = cv2.dilate(image_c, kernel, iterations=1)

    # Detecting contours.
    contours, _hierarchy = cv2.findContours(image_c, cv2.RETR_TREE,
                                            cv2.CHAIN_APPROX_NONE)

    # Filtering squares/swatches contours.
    swatches = []
    for contour in contours:
        curve = cv2.approxPolyDP(contour, 0.01 * cv2.arcLength(contour, True),
                                 True)
        if minimum_area < cv2.contourArea(curve) < maximum_area and is_square(
                curve):
            swatches.append(as_int_array(cv2.boxPoints(
                cv2.minAreaRect(curve))))

    # Clustering squares/swatches.
    contours = np.zeros(image.shape, dtype=np.uint8)
    for swatch in [
            as_int_array(scale_contour(swatch, settings.swatch_contour_scale))
            for swatch in swatches
    ]:
        cv2.drawContours(contours, [swatch], -1, [255] * 3, -1)
    contours = cv2.cvtColor(contours, cv2.COLOR_RGB2GRAY)
    contours, _hierarchy = cv2.findContours(contours, cv2.RETR_EXTERNAL,
                                            cv2.CHAIN_APPROX_NONE)
    clusters = [
        as_int_array(
            scale_contour(
                cv2.boxPoints(cv2.minAreaRect(cluster)),
                settings.cluster_contour_scale,
            )) for cluster in contours
    ]

    # Filtering clusters using their aspect ratio.
    filtered_clusters = []
    for cluster in clusters[:]:
        rectangle = cv2.minAreaRect(cluster)
        width = max(rectangle[1][0], rectangle[1][1])
        height = min(rectangle[1][0], rectangle[1][1])
        ratio = width / height
        if (settings.aspect_ratio_minimum < ratio <
                settings.aspect_ratio_maximum):
            filtered_clusters.append(as_int_array(cluster))
    clusters = filtered_clusters

    # Filtering swatches within cluster.
    counts = []
    for cluster in clusters:
        count = 0
        for swatch in swatches:
            if (cv2.pointPolygonTest(cluster, contour_centroid(swatch),
                                     False) == 1):
                count += 1
        counts.append(count)

    indexes = np.where(
        np.logical_and(
            as_int_array(counts) >= settings.swatches_count_minimum,
            as_int_array(counts) <= settings.swatches_count_maximum,
        ))[0]

    colour_checkers = tuple(clusters[i] for i in indexes)

    if additional_data:
        return ColourCheckersDetectionData(tuple(colour_checkers),
                                           tuple(clusters), tuple(swatches),
                                           image_c)
    else:
        return colour_checkers
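
A hedged usage sketch building on the docstring example above: with
``additional_data=True`` the definition returns a ``ColourCheckersDetectionData``
instance carrying the intermediate clusters, swatches and segmented image
alongside the colour checker quadrilaterals.

# Hedged sketch, reusing the "image" loaded in the docstring example above.
data = colour_checkers_coordinates_segmentation(image, additional_data=True)
# "data" wraps the detected colour checkers plus the clusters, swatches and
# segmented image produced during detection.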
Example No. 21
def plot_visible_spectrum_section(
    cmfs: Union[MultiSpectralDistributions, str, Sequence[Union[
        MultiSpectralDistributions,
        str]], ] = "CIE 1931 2 Degree Standard Observer",
    illuminant: Union[SpectralDistribution, str] = "D65",
    model: Union[Literal["CAM02LCD", "CAM02SCD", "CAM02UCS", "CAM16LCD",
                         "CAM16SCD", "CAM16UCS", "CIE XYZ", "CIE xyY",
                         "CIE Lab", "CIE Luv", "CIE UCS", "CIE UVW", "DIN99",
                         "Hunter Lab", "Hunter Rdab", "ICaCb", "ICtCp", "IPT",
                         "IgPgTg", "Jzazbz", "OSA UCS", "Oklab", "hdr-CIELAB",
                         "hdr-IPT", ], str, ] = "CIE xyY",
    axis: Union[Literal["+z", "+x", "+y"], str] = "+z",
    origin: Floating = 0.5,
    normalise: Boolean = True,
    show_section_colours: Boolean = True,
    show_section_contour: Boolean = True,
    **kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Plot the visible spectrum volume, i.e. *Rösch-MacAdam* colour solid,
    section colours along given axis and origin.

    Parameters
    ----------
    cmfs
        Standard observer colour matching functions, default to the
        *CIE 1931 2 Degree Standard Observer*.  ``cmfs`` can be of any type or
        form supported by the :func:`colour.plotting.filter_cmfs` definition.
    illuminant
        Illuminant spectral distribution, default to *CIE Illuminant D65*.
        ``illuminant`` can be of any type or form supported by the
        :func:`colour.plotting.filter_illuminants` definition.
    model
        Colourspace model, see :attr:`colour.COLOURSPACE_MODELS` attribute for
        the list of supported colourspace models.
    axis
        Axis the hull section will be normal to.
    origin
        Coordinate along ``axis`` at which to plot the hull section.
    normalise
        Whether to normalise ``axis`` to the extent of the hull along it.
    show_section_colours
        Whether to show the hull section colours.
    show_section_contour
        Whether to show the hull section contour.

    Other Parameters
    ----------------
    kwargs
        {:func:`colour.plotting.artist`,
        :func:`colour.plotting.render`,
        :func:`colour.plotting.section.plot_hull_section_colours`
        :func:`colour.plotting.section.plot_hull_section_contour`},
        See the documentation of the previously listed definitions.

    Returns
    -------
    :class:`tuple`
        Current figure and axes.

    Examples
    --------
    >>> from colour.utilities import is_trimesh_installed
    >>> if is_trimesh_installed:
    ...     plot_visible_spectrum_section(
    ...         section_colours='RGB', section_opacity=0.15)
    ...     # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)

    .. image:: ../_static/Plotting_Plot_Visible_Spectrum_Section.png
        :align: center
        :alt: plot_visible_spectrum_section
    """

    import trimesh

    settings: Dict[str, Any] = {"uniform": True}
    settings.update(kwargs)

    _figure, axes = artist(**settings)

    # pylint: disable=E1102
    cmfs = reshape_msds(first_item(filter_cmfs(cmfs).values()),
                        SpectralShape(360, 780, 1))
    illuminant = cast(
        SpectralDistribution,
        first_item(filter_illuminants(illuminant).values()),
    )

    vertices = solid_RoschMacAdam(
        cmfs,
        illuminant,
        point_order="Pulse Wave Width",
        filter_jagged_points=True,
    )
    mesh = trimesh.Trimesh(vertices)
    hull = trimesh.convex.convex_hull(mesh)

    if show_section_colours:
        settings = {"axes": axes}
        settings.update(kwargs)
        settings["standalone"] = False

        plot_hull_section_colours(hull, model, axis, origin, normalise,
                                  **settings)

    if show_section_contour:
        settings = {"axes": axes}
        settings.update(kwargs)
        settings["standalone"] = False

        plot_hull_section_contour(hull, model, axis, origin, normalise,
                                  **settings)

    title = (f"Visible Spectrum Section - "
             f"{f'{origin * 100}%' if normalise else origin} - "
             f"{model} - "
             f"{cmfs.strict_name}")

    plane = MAPPING_AXIS_TO_PLANE[axis]

    labels = np.array(COLOURSPACE_MODELS_AXIS_LABELS[model])[as_int_array(
        colourspace_model_axis_reorder([0, 1, 2], model))]
    x_label, y_label = labels[plane[0]], labels[plane[1]]

    settings.update({
        "axes": axes,
        "standalone": True,
        "title": title,
        "x_label": x_label,
        "y_label": y_label,
    })
    settings.update(kwargs)

    return render(**settings)
Example No. 22
def plot_RGB_colourspaces_gamuts(colourspaces=None,
                                 reference_colourspace='CIE xyY',
                                 segments=8,
                                 show_grid=True,
                                 grid_segments=10,
                                 show_spectral_locus=False,
                                 spectral_locus_colour=None,
                                 cmfs='CIE 1931 2 Degree Standard Observer',
                                 **kwargs):
    """
    Plots given *RGB* colourspaces gamuts in given reference colourspace.

    Parameters
    ----------
    colourspaces : array_like, optional
        *RGB* colourspaces to plot the gamuts.
    reference_colourspace : unicode, optional
        **{'CIE XYZ', 'CIE xyY', 'CIE xy', 'CIE Lab', 'CIE LCHab', 'CIE Luv',
        'CIE Luv uv', 'CIE LCHuv', 'CIE UCS', 'CIE UCS uv', 'CIE UVW',
        'DIN 99', 'Hunter Lab', 'Hunter Rdab', 'IPT', 'JzAzBz', 'OSA UCS',
        'hdr-CIELAB', 'hdr-IPT'}**,
        Reference colourspace to plot the gamuts into.
    segments : int, optional
        Edge segments count for each *RGB* colourspace cubes.
    show_grid : bool, optional
        Whether to show a grid at the bottom of the *RGB* colourspace cubes.
    grid_segments : int, optional
        Edge segments count for the grid.
    show_spectral_locus : bool, optional
        Whether to show the spectral locus.
    spectral_locus_colour : array_like, optional
        Spectral locus colour.
    cmfs : unicode, optional
        Standard observer colour matching functions used for spectral locus.

    Other Parameters
    ----------------
    \\**kwargs : dict, optional
        {:func:`colour.plotting.artist`,
        :func:`colour.plotting.volume.nadir_grid`},
        Please refer to the documentation of the previously listed definitions.
    face_colours : array_like, optional
        Face colours array such as `face_colours = (None, (0.5, 0.5, 1.0))`.
    edge_colours : array_like, optional
        Edge colours array such as `edge_colours = (None, (0.5, 0.5, 1.0))`.
    face_alpha : numeric, optional
        Face opacity value such as `face_alpha = (0.5, 1.0)`.
    edge_alpha : numeric, optional
        Edge opacity value such as `edge_alpha = (0.0, 1.0)`.

    Returns
    -------
    tuple
        Current figure and axes.

    Examples
    --------
    >>> plot_RGB_colourspaces_gamuts(['ITU-R BT.709', 'ACEScg', 'S-Gamut'])
    ... # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, \
<matplotlib.axes._subplots.Axes3DSubplot object at 0x...>)

    .. image:: ../_static/Plotting_Plot_RGB_Colourspaces_Gamuts.png
        :align: center
        :alt: plot_RGB_colourspaces_gamuts
    """

    if colourspaces is None:
        colourspaces = ('ITU-R BT.709', 'ACEScg')

    colourspaces = filter_RGB_colourspaces(colourspaces).values()

    count_c = len(colourspaces)

    title = '{0} - {1} Reference Colourspace'.format(
        ', '.join([colourspace.name for colourspace in colourspaces]),
        reference_colourspace,
    )

    settings = Structure(
        **{
            'face_colours': [None] * count_c,
            'edge_colours': [None] * count_c,
            'face_alpha': [1] * count_c,
            'edge_alpha': [1] * count_c,
            'title': title,
        })
    settings.update(kwargs)

    figure = plt.figure()
    axes = figure.add_subplot(111, projection='3d')

    illuminant = COLOUR_STYLE_CONSTANTS.colour.colourspace.whitepoint

    points = np.zeros((4, 3))
    if show_spectral_locus:
        cmfs = first_item(filter_cmfs(cmfs).values())
        XYZ = cmfs.values

        points = common_colourspace_model_axis_reorder(
            XYZ_to_colourspace_model(XYZ, illuminant, reference_colourspace),
            reference_colourspace)

        points[np.isnan(points)] = 0

        c = ((0.0, 0.0, 0.0,
              0.5) if spectral_locus_colour is None else spectral_locus_colour)

        axes.plot(points[..., 0],
                  points[..., 1],
                  points[..., 2],
                  color=c,
                  zorder=10)
        axes.plot((points[-1][0], points[0][0]), (points[-1][1], points[0][1]),
                  (points[-1][2], points[0][2]),
                  color=c,
                  zorder=10)

    quads, RGB_f, RGB_e = [], [], []
    for i, colourspace in enumerate(colourspaces):
        quads_c, RGB = RGB_identity_cube(width_segments=segments,
                                         height_segments=segments,
                                         depth_segments=segments)

        XYZ = RGB_to_XYZ(
            quads_c,
            colourspace.whitepoint,
            colourspace.whitepoint,
            colourspace.RGB_to_XYZ_matrix,
        )

        quads.extend(
            common_colourspace_model_axis_reorder(
                XYZ_to_colourspace_model(
                    XYZ,
                    colourspace.whitepoint,
                    reference_colourspace,
                ), reference_colourspace))

        if settings.face_colours[i] is not None:
            RGB = np.ones(RGB.shape) * settings.face_colours[i]

        RGB_f.extend(
            np.hstack([
                RGB,
                np.full((RGB.shape[0], 1), settings.face_alpha[i],
                        DEFAULT_FLOAT_DTYPE)
            ]))

        if settings.edge_colours[i] is not None:
            RGB = np.ones(RGB.shape) * settings.edge_colours[i]

        RGB_e.extend(
            np.hstack([
                RGB,
                np.full((RGB.shape[0], 1), settings.edge_alpha[i],
                        DEFAULT_FLOAT_DTYPE)
            ]))

    quads = as_float_array(quads)
    quads[np.isnan(quads)] = 0

    if quads.size != 0:
        for i, axis in enumerate('xyz'):
            min_a = min(np.min(quads[..., i]), np.min(points[..., i]))
            max_a = max(np.max(quads[..., i]), np.max(points[..., i]))
            getattr(axes, 'set_{}lim'.format(axis))((min_a, max_a))

    labels = np.array(
        COLOURSPACE_MODELS_AXIS_LABELS[reference_colourspace])[as_int_array(
            common_colourspace_model_axis_reorder([0, 1, 2],
                                                  reference_colourspace))]
    for i, axis in enumerate('xyz'):
        getattr(axes, 'set_{}label'.format(axis))(labels[i])

    if show_grid:
        limits = np.array([[-1.5, 1.5], [-1.5, 1.5]])

        quads_g, RGB_gf, RGB_ge = nadir_grid(limits, grid_segments, labels,
                                             axes, **settings)
        quads = np.vstack([quads_g, quads])
        RGB_f = np.vstack([RGB_gf, RGB_f])
        RGB_e = np.vstack([RGB_ge, RGB_e])

    collection = Poly3DCollection(quads)
    collection.set_facecolors(RGB_f)
    collection.set_edgecolors(RGB_e)

    axes.add_collection3d(collection)

    settings.update({
        'axes': axes,
        'axes_visible': False,
        'camera_aspect': 'equal'
    })
    settings.update(kwargs)

    return render(**settings)
Example No. 23
def RGB_to_YcCbcCrc(
    RGB: ArrayLike,
    out_bits: Integer = 10,
    out_legal: Boolean = True,
    out_int: Boolean = False,
    is_12_bits_system: Boolean = False,
    **kwargs: Any,
) -> NDArray:
    """
    Convert an array of *RGB* linear values to the corresponding *Yc'Cbc'Crc'*
    colour encoding values array.

    Parameters
    ----------
    RGB
        Input *RGB* array of linear float values.
    out_bits
        Bit depth for integer output, or used in the calculation of the
        denominator for legal range float values, i.e. 8-bit means the float
        value for legal white is *235 / 255*. Ignored if ``out_legal`` and
        ``out_int`` are both *False*. Default is *10*.
    out_legal
        Whether to return legal range values. Default is *True*.
    out_int
        Whether to return values as ``out_bits`` integer code values. Default
        is *False*.
    is_12_bits_system
        *Recommendation ITU-R BT.2020* OETF (OECF) adopts different parameters
        for 10 and 12 bit systems. Default is *False*.

    Other Parameters
    ----------------
    out_range
        Array overriding the computed range such as
        *out_range = (Y_min, Y_max, C_min, C_max)*. If ``out_range`` is
        undefined, *Y_min*, *Y_max*, *C_min* and *C_max* will be computed
        using :func:`colour.models.rgb.ycbcr.ranges_YCbCr` definition.

    Returns
    -------
    :class:`numpy.ndarray`
        *Yc'Cbc'Crc'* colour encoding array of integer or float values.

    Notes
    -----
    +----------------+-----------------------+---------------+
    | **Domain \\***  | **Scale - Reference** | **Scale - 1** |
    +================+=======================+===============+
    | ``RGB``        | [0, 1]                | [0, 1]        |
    +----------------+-----------------------+---------------+

    +----------------+-----------------------+---------------+
    | **Range \\***   | **Scale - Reference** | **Scale - 1** |
    +================+=======================+===============+
    | ``YcCbcCrc``   | [0, 1]                | [0, 1]        |
    +----------------+-----------------------+---------------+

    \\* This definition has input and output integer switches, thus the
    domain-range scale information is only given for the floating point mode.

    Warnings
    --------
    This definition is specifically for usage with
    *Recommendation ITU-R BT.2020* when adopting the constant luminance
    implementation.

    References
    ----------
    :cite:`InternationalTelecommunicationUnion2015h`, :cite:`Wikipedia2004d`

    Examples
    --------
    >>> RGB = np.array([0.18, 0.18, 0.18])
    >>> RGB_to_YcCbcCrc(RGB, out_legal=True, out_bits=10, out_int=True,
    ...                 is_12_bits_system=False)
    ... # doctest: +ELLIPSIS
    array([422, 512, 512]...)
    """

    R, G, B = tsplit(to_domain_1(RGB))
    Y_min, Y_max, C_min, C_max = kwargs.get(
        "out_range", ranges_YCbCr(out_bits, out_legal, out_int))

    Yc = 0.2627 * R + 0.6780 * G + 0.0593 * B

    with domain_range_scale("ignore"):
        Yc = eotf_inverse_BT2020(Yc, is_12_bits_system=is_12_bits_system)
        R = eotf_inverse_BT2020(R, is_12_bits_system=is_12_bits_system)
        B = eotf_inverse_BT2020(B, is_12_bits_system=is_12_bits_system)

    Cbc = np.where((B - Yc) <= 0, (B - Yc) / 1.9404, (B - Yc) / 1.5816)
    Crc = np.where((R - Yc) <= 0, (R - Yc) / 1.7184, (R - Yc) / 0.9936)
    Yc *= Y_max - Y_min
    Yc += Y_min
    Cbc *= C_max - C_min
    Crc *= C_max - C_min
    Cbc += (C_max + C_min) / 2
    Crc += (C_max + C_min) / 2

    YcCbcCrc = tstack([Yc, Cbc, Crc])

    if out_int:
        return as_int_array(np.round(YcCbcCrc))
    else:
        return from_range_1(YcCbcCrc)
Example No. 24
def YCbCr_to_RGB(
    YCbCr: ArrayLike,
    K: NDArray = WEIGHTS_YCBCR["ITU-R BT.709"],
    in_bits: Integer = 8,
    in_legal: Boolean = True,
    in_int: Boolean = False,
    out_bits: Integer = 10,
    out_legal: Boolean = False,
    out_int: Boolean = False,
    **kwargs: Any,
) -> NDArray:
    """
    Convert an array of *Y'CbCr* colour encoding values to the corresponding
    *R'G'B'* values array.

    Parameters
    ----------
    YCbCr
        Input *Y'CbCr* colour encoding array of integer or float values.
    K
        Luma weighting coefficients of red and blue. See
        :attr:`colour.WEIGHTS_YCBCR` for presets. Default is
        *(0.2126, 0.0722)*, the weightings for *ITU-R BT.709*.
    in_bits
        Bit depth for integer input, or used in the calculation of the
        denominator for legal range float values, i.e. 8-bit means the float
        value for legal white is *235 / 255*. Default is *8*.
    in_legal
        Whether to treat the input values as legal range. Default is *True*.
    in_int
        Whether to treat the input values as ``in_bits`` integer code values.
        Default is *False*.
    out_bits
        Bit depth for integer output, or used in the calculation of the
        denominator for legal range float values, i.e. 8-bit means the float
        value for legal white is *235 / 255*. Ignored if ``out_legal`` and
        ``out_int`` are both *False*. Default is *10*.
    out_legal
        Whether to return legal range values. Default is *False*.
    out_int
        Whether to return values as ``out_bits`` integer code values. Default
        is *False*.

    Other Parameters
    ----------------
    in_range
        Array overriding the computed range such as
        *in_range = (Y_min, Y_max, C_min, C_max)*. If ``in_range`` is
        undefined, *Y_min*, *Y_max*, *C_min* and *C_max* will be computed using
        :func:`colour.models.rgb.ycbcr.ranges_YCbCr` definition.
    out_range
        Array overriding the computed range such as
        *out_range = (RGB_min, RGB_max)*. If ``out_range`` is undefined,
        *RGB_min* and *RGB_max* will be computed using :func:`colour.CV_range`
        definition.

    Returns
    -------
    :class:`numpy.ndarray`
        *R'G'B'* array of integer or float values.

    Notes
    -----
    +----------------+-----------------------+---------------+
    | **Domain \\***  | **Scale - Reference** | **Scale - 1** |
    +================+=======================+===============+
    | ``YCbCr``      | [0, 1]                | [0, 1]        |
    +----------------+-----------------------+---------------+

    +----------------+-----------------------+---------------+
    | **Range \\***   | **Scale - Reference** | **Scale - 1** |
    +================+=======================+===============+
    | ``RGB``        | [0, 1]                | [0, 1]        |
    +----------------+-----------------------+---------------+

    \\* This definition has input and output integer switches, thus the
    domain-range scale information is only given for the floating point mode.

    Warnings
    --------
    For *Recommendation ITU-R BT.2020*, :func:`colour.YCbCr_to_RGB`
    definition is only applicable to the non-constant luminance implementation.
    :func:`colour.YcCbcCrc_to_RGB` definition should be used for the constant
    luminance case as per :cite:`InternationalTelecommunicationUnion2015h`.

    References
    ----------
    :cite:`InternationalTelecommunicationUnion2011e`,
    :cite:`InternationalTelecommunicationUnion2015i`,
    :cite:`SocietyofMotionPictureandTelevisionEngineers1999b`,
    :cite:`Wikipedia2004d`

    Examples
    --------
    >>> YCbCr = np.array([502, 512, 512])
    >>> YCbCr_to_RGB(YCbCr, in_bits=10, in_legal=True, in_int=True)
    array([ 0.5,  0.5,  0.5])
    """

    if in_int:
        YCbCr = as_float_array(YCbCr)
    else:
        YCbCr = to_domain_1(YCbCr)

    Y, Cb, Cr = tsplit(YCbCr)
    Kr, Kb = K
    Y_min, Y_max, C_min, C_max = kwargs.get(
        "in_range", ranges_YCbCr(in_bits, in_legal, in_int))
    RGB_min, RGB_max = kwargs.get("out_range",
                                  CV_range(out_bits, out_legal, out_int))

    Y -= Y_min
    Cb -= (C_max + C_min) / 2
    Cr -= (C_max + C_min) / 2
    Y *= 1 / (Y_max - Y_min)
    Cb *= 1 / (C_max - C_min)
    Cr *= 1 / (C_max - C_min)
    R = Y + (2 - 2 * Kr) * Cr
    B = Y + (2 - 2 * Kb) * Cb
    G = (Y - Kr * R - Kb * B) / (1 - Kr - Kb)

    RGB = tstack([R, G, B])
    RGB *= RGB_max - RGB_min
    RGB += RGB_min

    RGB = as_int_array(np.round(RGB)) if out_int else from_range_1(RGB)

    return RGB
Example No. 25
def RGB_to_YCbCr(
    RGB: ArrayLike,
    K: NDArray = WEIGHTS_YCBCR["ITU-R BT.709"],
    in_bits: Integer = 10,
    in_legal: Boolean = False,
    in_int: Boolean = False,
    out_bits: Integer = 8,
    out_legal: Boolean = True,
    out_int: Boolean = False,
    **kwargs: Any,
) -> NDArray:
    """
    Convert an array of *R'G'B'* values to the corresponding *Y'CbCr* colour
    encoding values array.

    Parameters
    ----------
    RGB
        Input *R'G'B'* array of floats or integer values.
    K
        Luma weighting coefficients of red and blue. See
        :attr:`colour.WEIGHTS_YCBCR` for presets. Default is
        *(0.2126, 0.0722)*, the weightings for *ITU-R BT.709*.
    in_bits
        Bit depth for integer input, or used in the calculation of the
        denominator for legal range float values, i.e. 8-bit means the float
        value for legal white is *235 / 255*. Default is *10*.
    in_legal
        Whether to treat the input values as legal range. Default is *False*.
    in_int
        Whether to treat the input values as ``in_bits`` integer code values.
        Default is *False*.
    out_bits
        Bit depth for integer output, or used in the calculation of the
        denominator for legal range float values, i.e. 8-bit means the float
        value for legal white is *235 / 255*. Ignored if ``out_legal`` and
        ``out_int`` are both *False*. Default is *8*.
    out_legal
        Whether to return legal range values. Default is *True*.
    out_int
        Whether to return values as ``out_bits`` integer code values. Default
        is *False*.

    Other Parameters
    ----------------
    in_range
        Array overriding the computed range such as
        *in_range = (RGB_min, RGB_max)*. If ``in_range`` is undefined,
        *RGB_min* and *RGB_max* will be computed using :func:`colour.CV_range`
        definition.
    out_range
        Array overriding the computed range such as
        *out_range = (Y_min, Y_max, C_min, C_max)*. If ``out_range`` is
        undefined, *Y_min*, *Y_max*, *C_min* and *C_max* will be computed
        using :func:`colour.models.rgb.ycbcr.ranges_YCbCr` definition.

    Returns
    -------
    :class:`numpy.ndarray`
        *Y'CbCr* colour encoding array of integer or float values.

    Warnings
    --------
    For *Recommendation ITU-R BT.2020*, :func:`colour.RGB_to_YCbCr` definition
    is only applicable to the non-constant luminance implementation.
    :func:`colour.RGB_to_YcCbcCrc` definition should be used for the constant
    luminance case as per :cite:`InternationalTelecommunicationUnion2015h`.

    Notes
    -----
    +----------------+-----------------------+---------------+
    | **Domain \\***  | **Scale - Reference** | **Scale - 1** |
    +================+=======================+===============+
    | ``RGB``        | [0, 1]                | [0, 1]        |
    +----------------+-----------------------+---------------+

    +----------------+-----------------------+---------------+
    | **Range \\***   | **Scale - Reference** | **Scale - 1** |
    +================+=======================+===============+
    | ``YCbCr``      | [0, 1]                | [0, 1]        |
    +----------------+-----------------------+---------------+

    \\* This definition has input and output integer switches, thus the
    domain-range scale information is only given for the floating point mode.

    -   The default arguments, ``**{'in_bits': 10, 'in_legal': False,
        'in_int': False, 'out_bits': 8, 'out_legal': True, 'out_int': False}``
        transform a float *R'G'B'* input array normalised to domain [0, 1]
        (``in_bits`` is ignored) to a float *Y'CbCr* output array where *Y'* is
        normalised to range [16 / 255, 235 / 255] and *Cb* and *Cr* are
        normalised to range [16 / 255, 240 / 255]. The float values are
        calculated based on a [0, 255] integer range, but no 8-bit
        quantisation or clamping is performed.

    References
    ----------
    :cite:`InternationalTelecommunicationUnion2011e`,
    :cite:`InternationalTelecommunicationUnion2015i`,
    :cite:`SocietyofMotionPictureandTelevisionEngineers1999b`,
    :cite:`Wikipedia2004d`

    Examples
    --------
    >>> RGB = np.array([1.0, 1.0, 1.0])
    >>> RGB_to_YCbCr(RGB)  # doctest: +ELLIPSIS
    array([ 0.9215686...,  0.5019607...,  0.5019607...])

    Matching the float output of *The Foundry Nuke*'s *Colorspace* node set to
    *YCbCr*:

    >>> RGB_to_YCbCr(RGB,
    ...              out_range=(16 / 255, 235 / 255, 15.5 / 255, 239.5 / 255))
    ... # doctest: +ELLIPSIS
    array([ 0.9215686...,  0.5       ,  0.5       ])

    Matching the float output of *The Foundry Nuke*'s *Colorspace* node set to
    *YPbPr*:

    >>> RGB_to_YCbCr(RGB, out_legal=False, out_int=False)
    ... # doctest: +ELLIPSIS
    array([ 1.,  0.,  0.])

    Creating integer code values as per standard *10-bit SDI*:

    >>> RGB_to_YCbCr(RGB, out_legal=True, out_bits=10, out_int=True)
    ... # doctest: +ELLIPSIS
    array([940, 512, 512]...)

    For *JFIF JPEG* conversion as per *Recommendation ITU-T T.871*:

    >>> RGB = np.array([102, 0, 51])
    >>> RGB_to_YCbCr(RGB, K=WEIGHTS_YCBCR['ITU-R BT.601'], in_range=(0, 255),
    ...              out_range=(0, 255, 0, 256), out_int=True)
    ... # doctest: +ELLIPSIS
    array([ 36, 136, 175]...)

    Note the use of 256 for the maximum *Cb / Cr* value, which is required so
    that the *Cb* and *Cr* output is centred about 128. Using 255 centres it
    about 127.5, meaning that there is no integer code value to represent
    achromatic colours. This does however create the possibility of output
    integer codes with a value of 256, which cannot be stored in an 8-bit
    integer representation. *Recommendation ITU-T T.871* specifies that these
    should be clamped to 255.

    These *JFIF JPEG* ranges are also obtained as follows:

    >>> RGB_to_YCbCr(RGB, K=WEIGHTS_YCBCR['ITU-R BT.601'], in_bits=8,
    ...              in_int=True, out_legal=False, out_int=True)
    ... # doctest: +ELLIPSIS
    array([ 36, 136, 175]...)
    """

    if in_int:
        RGB = as_float_array(RGB)
    else:
        RGB = to_domain_1(RGB)

    Kr, Kb = K
    RGB_min, RGB_max = kwargs.get("in_range",
                                  CV_range(in_bits, in_legal, in_int))
    Y_min, Y_max, C_min, C_max = kwargs.get(
        "out_range", ranges_YCbCr(out_bits, out_legal, out_int))

    RGB_float = as_float_array(RGB) - RGB_min
    RGB_float *= 1 / (RGB_max - RGB_min)
    R, G, B = tsplit(RGB_float)

    Y = Kr * R + (1 - Kr - Kb) * G + Kb * B
    Cb = 0.5 * (B - Y) / (1 - Kb)
    Cr = 0.5 * (R - Y) / (1 - Kr)
    Y *= Y_max - Y_min
    Y += Y_min
    Cb *= C_max - C_min
    Cr *= C_max - C_min
    Cb += (C_max + C_min) / 2
    Cr += (C_max + C_min) / 2

    YCbCr = tstack([Y, Cb, Cr])

    if out_int:
        return as_int_array(np.round(YCbCr))
    else:
        return from_range_1(YCbCr)
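A minimal round-trip sketch pairing RGB_to_YCbCr with YCbCr_to_RGB, assuming the public colour API and NumPy are importable: a mid-grey R'G'B' value is encoded to 10-bit legal SDI code values and decoded back.

import numpy as np
from colour import RGB_to_YCbCr, YCbCr_to_RGB

RGB = np.array([0.5, 0.5, 0.5])

# Encode to 10-bit legal range integer code values, default BT.709 weights.
YCbCr = RGB_to_YCbCr(RGB, out_bits=10, out_legal=True, out_int=True)
print(YCbCr)  # [502 512 512]

# Decode back to a float R'G'B' array normalised to [0, 1].
print(YCbCr_to_RGB(YCbCr, in_bits=10, in_legal=True, in_int=True))
# [ 0.5  0.5  0.5]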
Example n. 26
0
def crop_and_level_image_with_rectangle(image, rectangle):
    """
    Crops and rotates/levels given image using given rectangle.

    Parameters
    ----------
    image : array_like
        Image to crop and rotate/level.
    rectangle : tuple
        Rectangle used to crop and rotate/level the image.

    Returns
    -------
    ndarray
        Cropped and rotated/levelled image.

    References
    ----------
    :cite:`Abecassis2011`

    Notes
    -----
    -   ``image`` is expected to be an unsigned 8-bit sRGB encoded image.

    Examples
    --------
    >>> import os
    >>> from colour import read_image
    >>> from colour_checker_detection import TESTS_RESOURCES_DIRECTORY
    >>> path = os.path.join(TESTS_RESOURCES_DIRECTORY,
    ...                     'colour_checker_detection', 'detection',
    ...                     'IMG_1967.png')
    >>> image = as_8_bit_BGR_image(adjust_image(read_image(path)))
    >>> rectangle = (
    ...     (723.29608154, 465.50939941),
    ...     (461.24377441, 696.34759522),
    ...     -88.18692780,
    ... )
    >>> print(image.shape)
    (958, 1440, 3)
    >>> image = crop_and_level_image_with_rectangle(image, rectangle)
    >>> print(image.shape)
    (461, 696, 3)
    """

    width, height = image.shape[1], image.shape[0]
    width_r, height_r = rectangle[1]
    centroid = as_int_array(contour_centroid(cv2.boxPoints(rectangle)))
    centroid = centroid[0], centroid[1]
    angle = rectangle[-1]

    if angle < -45:
        angle += 90
        width_r, height_r = height_r, width_r

    width_r, height_r = as_int_array([width_r, height_r])

    M_r = cv2.getRotationMatrix2D(centroid, angle, 1)

    image_r = cv2.warpAffine(
        image, M_r, (width, height), flags=cv2.INTER_CUBIC)
    image_c = cv2.getRectSubPix(image_r, (width_r, height_r),
                                (centroid[0], centroid[1]))

    return image_c
Example n. 27
0
def colour_checkers_coordinates_segmentation(image, additional_data=False):
    """
    Detects the colour checkers' coordinates in given image :math:`image`
    using segmentation.

    This is the core detection definition. The process is as follows:

    -   Input image :math:`image` is converted to a grayscale image
        :math:`image_g`.
    -   Image :math:`image_g` is denoised.
    -   Image :math:`image_g` is thresholded/segmented to image
        :math:`image_s`.
    -   Image :math:`image_s` is eroded and dilated to clean up the remaining
        noise.
    -   Contours are detected on image :math:`image_s`.
    -   Contours are filtered to keep only the squares/swatches whose surface
        area lies between defined minimum and maximum bounds.
    -   Squares/swatches are clustered to isolate the regions-of-interest that
        are potentially colour checkers: contours are scaled by a third so
        that the colour checker swatches are expected to join, creating large
        rectangular clusters. Rectangles are fitted to the clusters.
    -   Clusters with an aspect ratio different from the expected one are
        rejected; a side effect is that the complementary pane of the
        *X-Rite* *ColorChecker Passport* is omitted.
    -   Clusters with a number of swatches close to :attr:`SWATCHES` are
        kept.

    Parameters
    ----------
    image : array_like
        Image to detect the colour checkers in.
    additional_data : bool, optional
        Whether to output additional data.

    Returns
    -------
    list or ColourCheckersDetectionData
        List of colour checkers coordinates or
        :class:`ColourCheckersDetectionData` class instance with additional
        data.

    Notes
    -----
    -   Multiple colour checkers can be detected if present in ``image``.

    Examples
    --------
    >>> import os
    >>> from colour import read_image
    >>> from colour_checker_detection import TESTS_RESOURCES_DIRECTORY
    >>> path = os.path.join(TESTS_RESOURCES_DIRECTORY,
    ...                     'colour_checker_detection', 'detection',
    ...                     'IMG_1967.png')
    >>> image = read_image(path)
    >>> colour_checkers_coordinates_segmentation(image)
    [array([[1065,  707],
           [ 369,  688],
           [ 382,  226],
           [1078,  246]])]
    """

    image = as_8_bit_BGR_image(adjust_image(image, WORKING_WIDTH))

    width, height = image.shape[1], image.shape[0]
    maximum_area = width * height / SWATCHES
    minimum_area = width * height / SWATCHES / SWATCH_MINIMUM_AREA_FACTOR

    # "cv2.adaptiveThreshold" requires an odd block size, hence the parity
    # adjustment below.
    block_size = as_int(WORKING_WIDTH * 0.015)
    block_size = block_size - block_size % 2 + 1

    # Thresholding/Segmentation.
    image_g = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image_g = cv2.fastNlMeansDenoising(image_g, None, 10, 7, 21)
    image_s = cv2.adaptiveThreshold(image_g, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY, block_size, 3)
    # Cleanup.
    kernel = np.ones((3, 3), np.uint8)
    image_c = cv2.erode(image_s, kernel, iterations=1)
    image_c = cv2.dilate(image_c, kernel, iterations=1)

    # Detecting contours.
    _image_c, contours, _hierarchy = cv2.findContours(image_c, cv2.RETR_TREE,
                                                      cv2.CHAIN_APPROX_NONE)

    # Filtering squares/swatches contours.
    swatches = []
    for contour in contours:
        curve = cv2.approxPolyDP(contour, 0.01 * cv2.arcLength(contour, True),
                                 True)
        if minimum_area < cv2.contourArea(curve) < maximum_area and is_square(
                curve):
            swatches.append(as_int_array(cv2.boxPoints(
                cv2.minAreaRect(curve))))

    # Clustering squares/swatches.
    clusters = np.zeros(image.shape, dtype=np.uint8)
    for swatch in [
            as_int_array(scale_contour(swatch, 1 + 1 / 3))
            for swatch in swatches
    ]:
        cv2.drawContours(clusters, [swatch], -1, [255] * 3, -1)
    clusters = cv2.cvtColor(clusters, cv2.COLOR_RGB2GRAY)
    _image_c, clusters, _hierarchy = cv2.findContours(clusters,
                                                      cv2.RETR_EXTERNAL,
                                                      cv2.CHAIN_APPROX_NONE)
    clusters = [
        as_int_array(
            scale_contour(cv2.boxPoints(cv2.minAreaRect(cluster)), 0.975))
        for cluster in clusters
    ]

    # Filtering clusters using their aspect ratio.
    filtered_clusters = []
    for cluster in clusters[:]:
        rectangle = cv2.minAreaRect(cluster)
        width = max(rectangle[1][0], rectangle[1][1])
        height = min(rectangle[1][0], rectangle[1][1])
        ratio = width / height
        if ASPECT_RATIO * 0.9 < ratio < ASPECT_RATIO * 1.1:
            filtered_clusters.append(cluster)
    clusters = filtered_clusters

    # Filtering swatches within cluster.
    counts = []
    for cluster in clusters:
        count = 0
        for swatch in swatches:
            if cv2.pointPolygonTest(cluster, contour_centroid(swatch),
                                    False) == 1:
                count += 1
        counts.append(count)
    counts = np.array(counts)
    indexes = np.where(
        np.logical_and(counts >= SWATCHES * 0.75,
                       counts <= SWATCHES * 1.25))[0].tolist()

    colour_checkers = [clusters[i] for i in indexes]

    if additional_data:
        return ColourCheckersDetectionData(colour_checkers, clusters, swatches,
                                           image_c)
    else:
        return colour_checkers
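A small sketch of the aspect-ratio test above on a synthetic 3:2 cluster, assuming OpenCV and NumPy are importable; the ASPECT_RATIO value of 1.5 is an assumption standing in for the module constant.

import cv2
import numpy as np

ASPECT_RATIO = 1.5  # assumed stand-in for the module constant

# A synthetic 300 x 200 cluster, i.e. a 3:2 axis-aligned rectangle.
cluster = np.array([[0, 0], [300, 0], [300, 200], [0, 200]], dtype=np.float32)

rectangle = cv2.minAreaRect(cluster)
width = max(rectangle[1][0], rectangle[1][1])
height = min(rectangle[1][0], rectangle[1][1])
ratio = width / height

print(ASPECT_RATIO * 0.9 < ratio < ASPECT_RATIO * 1.1)  # True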
Example n. 28
0
    def test_write_LUT_SonySPI3D(self):
        """
        Tests :func:`colour.io.luts.sony_spi3d.write_LUT_SonySPI3D` definition.
        """

        LUT_r = read_LUT_SonySPI3D(
            os.path.join(LUTS_DIRECTORY, 'Colour_Correct.spi3d'))

        write_LUT_SonySPI3D(
            LUT_r,
            os.path.join(self._temporary_directory, 'Colour_Correct.spi3d'))
        LUT_t = read_LUT_SonySPI3D(
            os.path.join(self._temporary_directory, 'Colour_Correct.spi3d'))
        self.assertEqual(LUT_r, LUT_t)

        write_LUT_SonySPI3D(
            LUTSequence(LUT_r),
            os.path.join(self._temporary_directory, 'Colour_Correct.spi3d'))
        self.assertEqual(LUT_r, LUT_t)

        # Test for proper indexes sequentiality.
        path = os.path.join(self._temporary_directory, 'Size_10_Indexes.spi3d')
        write_LUT_SonySPI3D(LUT3D(size=10), path)
        indexes = []

        with open(path) as spi3d_file:
            lines = filter(None,
                           (line.strip() for line in spi3d_file.readlines()))
            for line in lines:
                if line.startswith('#'):
                    continue

                tokens = line.split()
                if len(tokens) == 6:
                    indexes.append(parse_array(tokens[:3], DEFAULT_INT_DTYPE))

        np.testing.assert_array_equal(
            as_int_array(indexes)[:200, ...],
            np.array([
                [0, 0, 0],
                [0, 0, 1],
                [0, 0, 2],
                [0, 0, 3],
                [0, 0, 4],
                [0, 0, 5],
                [0, 0, 6],
                [0, 0, 7],
                [0, 0, 8],
                [0, 0, 9],
                [0, 1, 0],
                [0, 1, 1],
                [0, 1, 2],
                [0, 1, 3],
                [0, 1, 4],
                [0, 1, 5],
                [0, 1, 6],
                [0, 1, 7],
                [0, 1, 8],
                [0, 1, 9],
                [0, 2, 0],
                [0, 2, 1],
                [0, 2, 2],
                [0, 2, 3],
                [0, 2, 4],
                [0, 2, 5],
                [0, 2, 6],
                [0, 2, 7],
                [0, 2, 8],
                [0, 2, 9],
                [0, 3, 0],
                [0, 3, 1],
                [0, 3, 2],
                [0, 3, 3],
                [0, 3, 4],
                [0, 3, 5],
                [0, 3, 6],
                [0, 3, 7],
                [0, 3, 8],
                [0, 3, 9],
                [0, 4, 0],
                [0, 4, 1],
                [0, 4, 2],
                [0, 4, 3],
                [0, 4, 4],
                [0, 4, 5],
                [0, 4, 6],
                [0, 4, 7],
                [0, 4, 8],
                [0, 4, 9],
                [0, 5, 0],
                [0, 5, 1],
                [0, 5, 2],
                [0, 5, 3],
                [0, 5, 4],
                [0, 5, 5],
                [0, 5, 6],
                [0, 5, 7],
                [0, 5, 8],
                [0, 5, 9],
                [0, 6, 0],
                [0, 6, 1],
                [0, 6, 2],
                [0, 6, 3],
                [0, 6, 4],
                [0, 6, 5],
                [0, 6, 6],
                [0, 6, 7],
                [0, 6, 8],
                [0, 6, 9],
                [0, 7, 0],
                [0, 7, 1],
                [0, 7, 2],
                [0, 7, 3],
                [0, 7, 4],
                [0, 7, 5],
                [0, 7, 6],
                [0, 7, 7],
                [0, 7, 8],
                [0, 7, 9],
                [0, 8, 0],
                [0, 8, 1],
                [0, 8, 2],
                [0, 8, 3],
                [0, 8, 4],
                [0, 8, 5],
                [0, 8, 6],
                [0, 8, 7],
                [0, 8, 8],
                [0, 8, 9],
                [0, 9, 0],
                [0, 9, 1],
                [0, 9, 2],
                [0, 9, 3],
                [0, 9, 4],
                [0, 9, 5],
                [0, 9, 6],
                [0, 9, 7],
                [0, 9, 8],
                [0, 9, 9],
                [1, 0, 0],
                [1, 0, 1],
                [1, 0, 2],
                [1, 0, 3],
                [1, 0, 4],
                [1, 0, 5],
                [1, 0, 6],
                [1, 0, 7],
                [1, 0, 8],
                [1, 0, 9],
                [1, 1, 0],
                [1, 1, 1],
                [1, 1, 2],
                [1, 1, 3],
                [1, 1, 4],
                [1, 1, 5],
                [1, 1, 6],
                [1, 1, 7],
                [1, 1, 8],
                [1, 1, 9],
                [1, 2, 0],
                [1, 2, 1],
                [1, 2, 2],
                [1, 2, 3],
                [1, 2, 4],
                [1, 2, 5],
                [1, 2, 6],
                [1, 2, 7],
                [1, 2, 8],
                [1, 2, 9],
                [1, 3, 0],
                [1, 3, 1],
                [1, 3, 2],
                [1, 3, 3],
                [1, 3, 4],
                [1, 3, 5],
                [1, 3, 6],
                [1, 3, 7],
                [1, 3, 8],
                [1, 3, 9],
                [1, 4, 0],
                [1, 4, 1],
                [1, 4, 2],
                [1, 4, 3],
                [1, 4, 4],
                [1, 4, 5],
                [1, 4, 6],
                [1, 4, 7],
                [1, 4, 8],
                [1, 4, 9],
                [1, 5, 0],
                [1, 5, 1],
                [1, 5, 2],
                [1, 5, 3],
                [1, 5, 4],
                [1, 5, 5],
                [1, 5, 6],
                [1, 5, 7],
                [1, 5, 8],
                [1, 5, 9],
                [1, 6, 0],
                [1, 6, 1],
                [1, 6, 2],
                [1, 6, 3],
                [1, 6, 4],
                [1, 6, 5],
                [1, 6, 6],
                [1, 6, 7],
                [1, 6, 8],
                [1, 6, 9],
                [1, 7, 0],
                [1, 7, 1],
                [1, 7, 2],
                [1, 7, 3],
                [1, 7, 4],
                [1, 7, 5],
                [1, 7, 6],
                [1, 7, 7],
                [1, 7, 8],
                [1, 7, 9],
                [1, 8, 0],
                [1, 8, 1],
                [1, 8, 2],
                [1, 8, 3],
                [1, 8, 4],
                [1, 8, 5],
                [1, 8, 6],
                [1, 8, 7],
                [1, 8, 8],
                [1, 8, 9],
                [1, 9, 0],
                [1, 9, 1],
                [1, 9, 2],
                [1, 9, 3],
                [1, 9, 4],
                [1, 9, 5],
                [1, 9, 6],
                [1, 9, 7],
                [1, 9, 8],
                [1, 9, 9],
            ]))
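The expected index ordering asserted above, i.e. the last component varying fastest, can be generated directly with NumPy (a sketch, not the writer's own implementation):

import numpy as np

size = 10
indexes = np.stack(
    np.meshgrid(*[np.arange(size)] * 3, indexing="ij"), axis=-1
).reshape(-1, 3)

print(indexes[:5])
# [[0 0 0]
#  [0 0 1]
#  [0 0 2]
#  [0 0 3]
#  [0 0 4]]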
Example n. 29
0
def plot_RGB_colourspace_section(
    colourspace: Union[RGB_Colourspace, str, Sequence[Union[RGB_Colourspace,
                                                            str]]],
    model: Union[
        Literal[
            "CAM02LCD", "CAM02SCD", "CAM02UCS", "CAM16LCD", "CAM16SCD",
            "CAM16UCS", "CIE XYZ", "CIE xyY", "CIE Lab", "CIE Luv", "CIE UCS",
            "CIE UVW", "DIN99", "Hunter Lab", "Hunter Rdab", "ICaCb", "ICtCp",
            "IPT", "IgPgTg", "Jzazbz", "OSA UCS", "Oklab", "hdr-CIELAB",
            "hdr-IPT",
        ],
        str,
    ] = "CIE xyY",
    axis: Union[Literal["+z", "+x", "+y"], str] = "+z",
    origin: Floating = 0.5,
    normalise: Boolean = True,
    show_section_colours: Boolean = True,
    show_section_contour: Boolean = True,
    **kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Plot given *RGB* colourspace section colours along given axis and origin.

    Parameters
    ----------
    colourspace
        *RGB* colourspace of the *RGB* array. ``colourspace`` can be of any
        type or form supported by the
        :func:`colour.plotting.filter_RGB_colourspaces` definition.
    model
        Colourspace model, see :attr:`colour.COLOURSPACE_MODELS` attribute for
        the list of supported colourspace models.
    axis
        Axis the hull section will be normal to.
    origin
        Coordinate along ``axis`` at which to plot the hull section.
    normalise
        Whether to normalise ``axis`` to the extent of the hull along it.
    show_section_colours
        Whether to show the hull section colours.
    show_section_contour
        Whether to show the hull section contour.

    Other Parameters
    ----------------
    kwargs
        {:func:`colour.plotting.artist`,
        :func:`colour.plotting.render`,
        :func:`colour.plotting.section.plot_hull_section_colours`,
        :func:`colour.plotting.section.plot_hull_section_contour`},
        See the documentation of the previously listed definitions.

    Returns
    -------
    :class:`tuple`
        Current figure and axes.

    Examples
    --------
    >>> from colour.utilities import is_trimesh_installed
    >>> if is_trimesh_installed():
    ...     plot_RGB_colourspace_section(
    ...         'sRGB', section_colours='RGB', section_opacity=0.15)
    ...     # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)

    .. image:: ../_static/Plotting_Plot_RGB_Colourspace_Section.png
        :align: center
        :alt: plot_RGB_colourspace_section
    """

    import trimesh

    settings: Dict[str, Any] = {"uniform": True}
    settings.update(kwargs)

    _figure, axes = artist(**settings)

    colourspace = cast(
        RGB_Colourspace,
        first_item(filter_RGB_colourspaces(colourspace).values()),
    )

    vertices, faces, _outline = primitive_cube(1, 1, 1, 64, 64, 64)
    XYZ_vertices = RGB_to_XYZ(
        vertices["position"] + 0.5,
        colourspace.whitepoint,
        colourspace.whitepoint,
        colourspace.matrix_RGB_to_XYZ,
    )
    hull = trimesh.Trimesh(XYZ_vertices, faces, process=False)

    if show_section_colours:
        settings = {"axes": axes}
        settings.update(kwargs)
        settings["standalone"] = False

        plot_hull_section_colours(hull, model, axis, origin, normalise,
                                  **settings)

    if show_section_contour:
        settings = {"axes": axes}
        settings.update(kwargs)
        settings["standalone"] = False

        plot_hull_section_contour(hull, model, axis, origin, normalise,
                                  **settings)

    title = (f"{colourspace.name} Section - "
             f"{f'{origin * 100}%' if normalise else origin} - "
             f"{model}")

    plane = MAPPING_AXIS_TO_PLANE[axis]

    labels = np.array(COLOURSPACE_MODELS_AXIS_LABELS[model])[as_int_array(
        colourspace_model_axis_reorder([0, 1, 2], model))]
    x_label, y_label = labels[plane[0]], labels[plane[1]]

    settings.update({
        "axes": axes,
        "standalone": True,
        "title": title,
        "x_label": x_label,
        "y_label": y_label,
    })
    settings.update(kwargs)

    return render(**settings)
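A usage sketch, assuming colour is installed together with trimesh and Matplotlib: plotting a CIE Lab section of the sRGB gamut at half of the hull extent along the section axis.

from colour.plotting import plot_RGB_colourspace_section

figure, axes = plot_RGB_colourspace_section(
    "sRGB", model="CIE Lab", axis="+z", origin=0.5, normalise=True
)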
Example n. 30
0
def legal_to_full(
    CV: Union[FloatingOrArrayLike, IntegerOrArrayLike],
    bit_depth: Integer = 10,
    in_int: Boolean = False,
    out_int: Boolean = False,
) -> Union[FloatingOrNDArray, IntegerOrNDArray]:
    """
    Convert given code value :math:`CV` or float equivalent of a code value at
    a given bit depth from legal range (studio swing) to full range
    (full swing).

    Parameters
    ----------
    CV
        Legal range code value :math:`CV` or float equivalent of a code value
        at a given bit depth.
    bit_depth
        Bit depth used for conversion.
    in_int
        Whether to treat the input value as integer code value or float
        equivalent of a code value at a given bit depth.
    out_int
        Whether to return value as integer code value or float equivalent of a
        code value at a given bit depth.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.integer` or :class:`numpy.ndarray`
        Full range code value :math:`CV` or float equivalent of a code value
        at a given bit depth.

    Examples
    --------
    >>> legal_to_full(64 / 1023)
    0.0
    >>> legal_to_full(940 / 1023)
    1.0
    >>> legal_to_full(64 / 1023, out_int=True)
    0
    >>> legal_to_full(940 / 1023, out_int=True)
    1023
    >>> legal_to_full(64, in_int=True)
    0.0
    >>> legal_to_full(940, in_int=True)
    1.0
    >>> legal_to_full(64, in_int=True, out_int=True)
    0
    >>> legal_to_full(940, in_int=True, out_int=True)
    1023
    """

    CV = as_float_array(CV)

    MV = 2**bit_depth - 1

    CV_full = as_int_array(np.round(CV)) if in_int else CV * MV

    B, W = CV_range(bit_depth, True, True)

    CV_full = (CV_full - B) / (W - B)

    if out_int:
        return as_int(np.round(CV_full * MV))
    else:
        return as_float(CV_full)
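The doctest values above can be reproduced by hand (a NumPy-only sketch that hard-codes the 10-bit legal black and white code values instead of calling CV_range):

import numpy as np

bit_depth = 10
B, W = 64, 940                 # 10-bit legal black and white code values
MV = 2 ** bit_depth - 1        # 1023

CV = np.array([64, 502, 940])  # legal range integer code values
CV_full = (CV - B) / (W - B)

print(CV_full)                                  # 0.0, 0.5, 1.0
print(np.round(CV_full * MV).astype(np.int64))  # 0, 512, 1023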
Example n. 31
0
def crop_and_level_image_with_rectangle(
    image: ArrayLike,
    rectangle: Tuple[Tuple, Tuple, Floating],
    interpolation_method: Literal[  # type: ignore[misc]
        cv2.INTER_AREA,
        cv2.INTER_BITS,
        cv2.INTER_BITS2,
        cv2.INTER_CUBIC,
        cv2.INTER_LANCZOS4,
        cv2.INTER_LINEAR,
    ] = cv2.INTER_CUBIC,
):
    """
    Crop and rotate/level given image using given rectangle.

    Parameters
    ----------
    image
        Image to crop and rotate/level.
    rectangle
        Rectangle used to crop and rotate/level the image.
    interpolation_method
        Interpolation method.

    Returns
    -------
    :class:`numpy.ndarray`
        Cropped and rotated/levelled image.

    References
    ----------
    :cite:`Abecassis2011`

    Examples
    --------
    >>> import os
    >>> from colour import read_image
    >>> from colour_checker_detection import TESTS_RESOURCES_DIRECTORY
    >>> path = os.path.join(TESTS_RESOURCES_DIRECTORY,
    ...                     'colour_checker_detection', 'detection',
    ...                     'IMG_1967.png')
    >>> image = adjust_image(read_image(path), 1440)
    >>> rectangle = (
    ...     (723.29608154, 465.50939941),
    ...     (461.24377441, 696.34759522),
    ...     -88.18692780,
    ... )
    >>> print(image.shape)
    (958, 1440, 3)
    >>> image = crop_and_level_image_with_rectangle(image, rectangle)
    >>> print(image.shape)
    (461, 696, 3)
    """

    image = as_float_array(image, FLOAT_DTYPE_DEFAULT)[..., :3]

    width, height = image.shape[1], image.shape[0]
    width_r, height_r = rectangle[1]
    centroid = contour_centroid(cv2.boxPoints(rectangle))
    angle = rectangle[-1]

    if angle < -45:
        angle += 90
        width_r, height_r = height_r, width_r

    width_r, height_r = as_int_array([width_r, height_r])

    M_r = cv2.getRotationMatrix2D(centroid, angle, 1)

    image_r = cv2.warpAffine(
        image, M_r, (width, height), flags=interpolation_method)
    image_c = cv2.getRectSubPix(image_r, (width_r, height_r),
                                (centroid[0], centroid[1]))

    return image_c
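A small sketch of the angle normalisation above, using the doctest rectangle: rotations steeper than 45 degrees are replaced by the complementary rotation, with the rectangle sides swapped accordingly.

def normalise_rectangle(width_r, height_r, angle):
    # Replace a rotation steeper than 45 degrees by the complementary one and
    # swap the rectangle sides to match.
    if angle < -45:
        angle += 90
        width_r, height_r = height_r, width_r

    return width_r, height_r, angle

print(normalise_rectangle(461.24377441, 696.34759522, -88.18692780))
# (696.34759522, 461.24377441, 1.8130722...)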
Example n. 32
0
def write_image_OpenImageIO(
    image: ArrayLike,
    path: str,
    bit_depth: Literal[
        "uint8", "uint16", "float16", "float32", "float64", "float128"
    ] = "float32",
    attributes: Optional[Sequence] = None,
) -> Boolean:  # noqa: D405,D407,D410,D411
    """
    Write given image at given path using *OpenImageIO*.

    Parameters
    ----------
    image
        Image data.
    path
        Image path.
    bit_depth
        Bit depth to write the image at; the bit depth conversion behaviour is
        ruled directly by *OpenImageIO*.
    attributes
        An array of :class:`colour.io.ImageAttribute_Specification` class
        instances used to set attributes of the image.

    Returns
    -------
    :class:`bool`
        Definition success.

    Examples
    --------
    Basic image writing:

    >>> import os
    >>> import colour
    >>> path = os.path.join(colour.__path__[0], 'io', 'tests', 'resources',
    ...                     'CMS_Test_Pattern.exr')
    >>> image = read_image(path)  # doctest: +SKIP
    >>> path = os.path.join(colour.__path__[0], 'io', 'tests', 'resources',
    ...                     'CMSTestPattern.tif')
    >>> write_image_OpenImageIO(image, path)  # doctest: +SKIP
    True

    Advanced image writing while setting attributes:

    >>> compression = ImageAttribute_Specification('Compression', 'none')
    >>> write_image_OpenImageIO(image, path, 'uint8', [compression])
    ... # doctest: +SKIP
    True

    Writing an "ACES" compliant "EXR" file:

    >>> if is_openimageio_installed():  # doctest: +SKIP
    ...     from OpenImageIO import TypeDesc
    ...     chromaticities = (
    ...         0.7347, 0.2653, 0.0, 1.0, 0.0001, -0.077, 0.32168, 0.33767)
    ...     attributes = [
    ...         ImageAttribute_Specification('acesImageContainerFlag', True),
    ...         ImageAttribute_Specification(
    ...             'chromaticities', chromaticities, TypeDesc('float[8]')),
    ...         ImageAttribute_Specification('compression', 'none')]
    ...     write_image_OpenImageIO(image, path, attributes=attributes)
    """

    from OpenImageIO import ImageOutput, ImageSpec

    image = as_float_array(image)
    path = str(path)

    attributes = cast(List, optional(attributes, []))

    bit_depth_specification = MAPPING_BIT_DEPTH[bit_depth]

    if bit_depth_specification.numpy in [np.uint8, np.uint16]:
        minimum = np.iinfo(bit_depth_specification.numpy).min
        maximum = np.iinfo(bit_depth_specification.numpy).max
        image = np.clip(image * maximum, minimum, maximum)

        image = as_int_array(image, bit_depth_specification.numpy)

    image = image.astype(bit_depth_specification.numpy)

    if image.ndim == 2:
        height, width = image.shape
        channels = 1
    else:
        height, width, channels = image.shape

    specification = ImageSpec(
        width, height, channels, bit_depth_specification.openimageio
    )
    for attribute in attributes:
        name = str(attribute.name)
        value = (
            str(attribute.value)
            if isinstance(attribute.value, str)
            else attribute.value
        )
        type_ = attribute.type_
        if attribute.type_ is None:
            specification.attribute(name, value)
        else:
            specification.attribute(name, type_, value)

    image_output = ImageOutput.create(path)

    image_output.open(path, specification)
    image_output.write_image(image)

    image_output.close()

    return True
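The integer quantisation branch above can be illustrated in isolation (a NumPy-only sketch for an 8-bit target; note that the final cast truncates rather than rounds):

import numpy as np

image = np.array([[-0.1, 0.0, 0.5, 1.0, 1.2]])

minimum, maximum = np.iinfo(np.uint8).min, np.iinfo(np.uint8).max
image = np.clip(image * maximum, minimum, maximum).astype(np.uint8)

print(image)  # [[  0   0 127 255 255]]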