Example #1
def rfl_to_rgb(rfl, spd=None, CSF=None, wl=None, normalize_to_white=True):
    """ 
    Convert spectral reflectance functions (illuminated by spd) to RGB values using a set of camera sensitivity functions.
    
    Args:
        :rfl:
            | ndarray with spectral reflectance functions (1st row is wavelengths if wl is None).
        :spd:
            | None, optional
            | ndarray with illumination spectrum
        :CSF:
            | None, optional
            | ndarray with camera sensitivity functions 
            | If None: use Nikon D700
        :wl:
            | None, optional
            | wavelengths of the spectra in :rfl:
            | If None: the first row of :rfl: is assumed to contain the wavelengths.
        :normalize_to_white:
            | True, optional
            | If True: white-balance output rgb to a perfect white diffuser.
    
    Returns:
        :rgb:
            | ndarray with rgb values for each spectral reflectance function
    """
    rfl_cp = rfl.copy()
    if (wl is None): # first row contains the wavelengths
        wl = rfl_cp[0]
        rfl_cp = rfl_cp[1:]
    wlr = getwlr(wl)
    if spd is not None:
        spd = cie_interp(spd, wlr, kind='linear')[1:] # interpolate to wlr, drop wavelength row
    else:
        spd = np.ones_like(wlr) # equal-energy illuminant
    if CSF is None: CSF = _CSF_NIKON_D700
    CSF = cie_interp(CSF, wlr, kind='linear')
    CSF[1:] = CSF[1:] * spd # weight camera sensitivities by the illumination spectrum
    rgb = rfl_cp @ CSF[1:].T # integrate rfl * spd * CSF over wavelength
    if normalize_to_white:
        white = np.ones_like(spd) # perfect white diffuser
        rgbw = white @ CSF[1:].T
        rgb = rgb / rgbw.max(axis=0, keepdims=True) # white balance against the diffuser

    return rgb
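
A minimal usage sketch (hedged: assumes a luxpy-style environment, imported here as lx, that supplies the reflectance set and illuminant used elsewhere in these examples):

import numpy as np
import luxpy as lx  # assumed to supply _CRI_RFL and _CIE_ILLUMINANTS

rfl = lx._CRI_RFL['ies-tm30']['4880']['5nm'][:11]  # wavelengths + 10 reflectances
spd = lx._CIE_ILLUMINANTS['D65']
rgb = rfl_to_rgb(rfl, spd=spd)  # CSF=None -> Nikon D700 sensitivities
print(rgb.shape)  # (10, 3): one white-balanced raw-RGB triplet per reflectance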
Example #2
def get_cie_mesopic_adaptation(Lp, Ls=None, SP=None):
    """
    Get the mesopic adaptation state according to CIE 191:2010.
    
    Args:
        :Lp: 
            | float or ndarray with photopic adaptation luminance
        :Ls: 
            | None, optional
            | float or ndarray with scotopic adaptation luminance
            | If None: SP must be supplied.
        :SP:
            | None, optional
            | S/P ratio
            | If None: Ls must be supplied.
            
    Returns:
        :Lmes: 
            | mesopic adaptation luminance
        :m: 
            | mesopic adaptation coefficient
    Reference:
        1. `CIE 191:2010. Recommended System for Mesopic Photometry Based on Visual Performance.
        (ISBN 978-3-901906-88-6) <http://cie.co.at/publications/recommended-system-mesopic-photometry-based-visual-performance>`_
    """
    Lp = np.atleast_1d(Lp)
    Ls = np.atleast_1d(Ls)
    SP = np.atleast_1d(SP)
    if not (None in SP):
        Ls = Lp * SP # scotopic luminance from S/P ratio
    elif not (None in Ls):
        SP = Ls / Lp # S/P ratio from scotopic luminance
    else:
        raise Exception(
            'Either the S/P ratio or the scotopic luminance Ls must be supplied in addition to the photopic luminance Lp'
        )
    m = np.ones_like(Ls) * np.nan
    Lmes = m.copy()
    for i in range(Lp.shape[0]):
        mi_ = 0.5 # initial guess for the mesopic adaptation coefficient
        fLmes = lambda m, Lp, Ls: (
            (m * Lp) + (1 - m) * Ls * 683 / 1699) / (m + (1 - m) * 683 / 1699)
        fm = lambda m, Lp, Ls: 0.767 + 0.3334 * np.log10(fLmes(m, Lp, Ls))
        mi = fm(mi_, Lp[i], Ls[i])
        while True: # fixed-point iteration until m converges
            if np.isclose(mi, mi_): break
            mi_ = mi
            mi = fm(mi_, Lp[i], Ls[i])
        m[i] = mi
        Lmes[i] = fLmes(mi, Lp[i], Ls[i])
    return Lmes, m
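
A worked sketch (illustrative numbers): the CIE 191:2010 relations iterated above are Lmes = (m*Lp + (1-m)*Ls*683/1699)/(m + (1-m)*683/1699) and m = 0.767 + 0.3334*log10(Lmes), so at the photopic end m approaches 1 and Lmes approaches Lp, while at the scotopic end m approaches 0 and Lmes approaches Ls = SP*Lp:

import numpy as np

Lp = np.array([0.05, 0.5, 4.0])  # photopic adaptation luminances (cd/m^2)
SP = np.array([1.5, 1.5, 1.5])   # S/P ratios of the light source
Lmes, m = get_cie_mesopic_adaptation(Lp, SP=SP)
print(Lmes, m)  # m grows toward 1 as Lp approaches the photopic range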
Example #3
def stress_F_test(stressA, stressB, N, alpha = 0.05):
    """ 
    Perform F-test on significance of difference between STRESS A and STRESS B.
    
    Args:
        :stressA, stressB:
            | ndarrays with STRESS values for A and B
        :N:
            | int or ndarray with number of samples used to determine stress values.
        :alpha:
            | 0.05, optional
            | significance level
            
    Returns:
        :Fstats:
            | Dictionary with keys:
            | - 'p': p-values
            | - 'F':  F-values
            | - 'Fc': critical values
            | - 'H': string reporting on significance of A compared to B.
    """
    N = N*np.ones(stressA.shape[0])
    Fvs = np.nan*np.ones_like(stressA)
    ps = Fvs.copy()
    Fcs = Fvs.copy()
    H = []
    for i, (stA, stB) in enumerate(zip(stressA, stressB)):
        Ni = N[i]
        Fvs[i] = stA**2/stB**2 # F = STRESS_A^2 / STRESS_B^2
        ps[i] = stats.f.sf(Fvs[i], Ni-1, Ni-1)
        Fcs[i] = stats.f.ppf(q = alpha/2, dfn = Ni - 1, dfd = Ni-1)
        if Fvs[i] < Fcs[i]:
            H_ = "A significantly better than B"
        elif Fvs[i] > 1/Fcs[i]:
            H_ = "A significantly poorer than B"
        elif (Fcs[i] <= Fvs[i]) & (Fvs[i] < 1):
            H_ = "A insignificantly better than B"
        elif (1 < Fvs[i]) & (Fvs[i] <= 1/Fcs[i]):
            H_ = "A insignificantly poorer than B"
        elif (Fvs[i] == 1):
            H_ = "A equals B"
        H.append(H_)
    Fstats = {'p': ps, 'F': Fvs, 'Fc': Fcs, 'H': H}
    return Fstats
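
A usage sketch (hedged: the STRESS values and sample count are illustrative; `stats` is scipy.stats, as the function assumes):

import numpy as np
from scipy import stats

stressA = np.array([25.1, 30.4])  # STRESS of model A on two datasets
stressB = np.array([28.3, 30.9])  # STRESS of model B on the same datasets
Fstats = stress_F_test(stressA, stressB, N=50, alpha=0.05)
print(Fstats['F'], Fstats['p'])
print(Fstats['H'])  # e.g. "A insignificantly better than B" per dataset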
Example #4
def _cij_to_gij(xyz,C):
    """ Convert from matrix elements describing the discrimination ellipses from Cij (XYZ) to gij (Yxy)"""
    SIG = xyz[...,0] + xyz[...,1] + xyz[...,2]
    M1 = np.array([SIG, -SIG*xyz[...,0]/xyz[...,1], xyz[...,0]/xyz[...,1]])
    M2 = np.array([np.zeros_like(SIG), np.zeros_like(SIG), np.ones_like(SIG)])
    M3 = np.array([-SIG, -SIG*(xyz[...,1] + xyz[...,2])/xyz[...,1], xyz[...,2]/xyz[...,1]])
    M = np.array((M1,M2,M3))
    
    M = _transpose_02(M) # move stimulus dimension to axis = 0
    
    C = _transpose_02(C) # move stimulus dimension to axis = 0
    
    # convert Cij (XYZ) to gij' (xyY):
    AM = np.einsum('ij,kjl->kil', _M_XYZ_TO_PQS, M)
    CAM = np.einsum('kij,kjl->kil', C, AM) 
    gij = np.einsum('kij,kjl->kil', np.transpose(AM,(0,2,1)), CAM) # gij = M.T @ A.T @ C @ A @ M = (A@M).T @ C @ (A@M)

    # convert gij' (xyY) to gij (Yxy):
    gij = np.roll(np.roll(gij,1,axis=2),1,axis=1)
    
    return gij
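
The helpers `_transpose_02` and `_M_XYZ_TO_PQS` are module-level and not shown here. The core step above is a per-stimulus congruence transform g_k = J_k.T @ C_k @ J_k evaluated with einsum; a self-contained check of that pattern on synthetic data:

import numpy as np

rng = np.random.default_rng(0)
C = rng.normal(size=(5, 3, 3))  # per-stimulus matrices (stimulus on axis 0)
J = rng.normal(size=(5, 3, 3))  # per-stimulus Jacobians (stand-in for A @ M)
CJ = np.einsum('kij,kjl->kil', C, J)
g = np.einsum('kij,kjl->kil', np.transpose(J, (0, 2, 1)), CJ)
assert np.allclose(g, np.transpose(J, (0, 2, 1)) @ C @ J)  # batched matmul equivalent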
Example #5
def get_superresolution_hsi(lrhsi,
                            hrci,
                            CSF,
                            wl=[380, 780, 1],
                            interp_type='nd',
                            k_neighbours=4,
                            verbosity=0):
    """ 
    Get a HighResolution HyperSpectral Image (super-resolution HSI) based on a LowResolution HSI and a HighResolution Color Image.
    
    Args:
        :lrhsi:
            | ndarray with LowResolution HSI [m,n,L].
        :hrci:
            | ndarray with HighResolution Color Image [M,N,3].
        :CSF:
            | None, optional
            | ndarray with camera sensitivity functions 
            | If None: use Nikon D700
        :wl:
            | [380,780,1], optional
            | Wavelength range and spacing or ndarray with wavelengths of HSI image.
        :interp_type:
            | 'nd', optional
            | Options:
            | - 'nd': perform n-dimensional linear interpolation using Delaunay triangulation.
            | - 'nearest': perform nearest neighbour interpolation. 
        :k_neighbours:
            | 4 or int, optional
            | Number of nearest neighbours for reflectance spectrum interpolation.
            | Neighbours are found using scipy.spatial.cKDTree
        :verbosity:
            | 0, optional
            | Verbosity level for sub-call to render_image().
            | If > 0: make a plot of the color coordinates of original and 
            | rendered image pixels.
    Returns:
        :hrhsi:
            | ndarray with HighResolution HSI [M,N,L].
        
    Procedure:
        | Call render_image(hrci, rfl = lrhsi_2d, CSF = ...) to estimate a hyperspectral image
        | from the high-resolution color image hrci, with the reflectance spectra 
        | in the low-resolution hyperspectral image as the database for the estimation.
        | Estimation is done in raw RGB space, with the lrhsi spectra converted to rgb using the
        | camera sensitivity functions in CSF.
    """
    wlr = getwlr(wl)
    eew = np.vstack((wlr, np.ones_like(wlr)))
    lrhsi_2d = np.vstack(
        (wlr,
         np.reshape(lrhsi, (lrhsi.shape[0] * lrhsi.shape[1],
                            lrhsi.shape[2]))))  # create 2D rfl database
    if CSF is None: CSF = _CSF_NIKON_D700
    hrhsi = render_image(
        hrci,
        spd=eew,
        refspd=eew,
        rfl=lrhsi_2d,
        D=None,
        interp_type=interp_type,
        k_neighbours=k_neighbours,
        verbosity=verbosity,
        CSF=CSF)  # render HR-hsi from HR-ci using LR-HSI rfls as database
    return hrhsi
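
A minimal usage sketch (hedged: synthetic data and illustrative shapes; CSF=None falls back to the Nikon D700 sensitivities, as in the code above):

import numpy as np

rng = np.random.default_rng(0)
lrhsi = rng.uniform(0.1, 0.9, size=(8, 8, 401))  # low-res HSI, 380-780 nm @ 1 nm
hrci = rng.uniform(0.1, 0.9, size=(64, 64, 3))   # high-res camera RGB of the same scene
hrhsi = get_superresolution_hsi(lrhsi, hrci, CSF=None, wl=[380, 780, 1])
print(hrhsi.shape)  # (64, 64, 401): one estimated spectrum per hrci pixel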
Example #6
def render_image(img = None, spd = None, rfl = None, out = 'img_hyp', \
                 refspd = None, D = None, cieobs = _CIEOBS, \
                 cspace = 'xyz', cspace_tf = {}, CSF = None,\
                 interp_type = 'nd', k_neighbours = 4, show = True,
                 verbosity = 0, show_ref_img = True,\
                 stack_test_ref = 12,\
                 write_to_file = None):
    """
    Render image under specified light source spd.
    
    Args:
        :img: 
            | None or str or ndarray with float (max = 1) rgb image.
            | If None: load a default image.
        :spd: 
            | None, optional
            | Light source spectrum used for rendering.
            | If None: use CIE illuminant F4.
        :rfl: 
            | ndarray, optional
            | Reflectance set for color coordinate to rfl mapping.
        :out: 
            | 'img_hyp' or str, optional
            |  (other option: 'img_ren': rendered image under :spd:)
        :refspd:
            | None, optional
            | Reference spectrum for color coordinate to rfl mapping.
            | None defaults to D65 (srgb has a D65 white point)
        :D: 
            | None, optional
            | Degree of (von Kries) adaptation from spd to refspd. 
        :cieobs:
            | _CIEOBS, optional
            | CMF set for calculation of xyz from spectral data.
        :cspace:
            | 'xyz',  optional
            | Color space for color coordinate to rfl mapping.
            | Tip: Use linear space (e.g. 'xyz', 'Yuv',...) for (interp_type == 'nd'),
            |      and perceptually uniform space (e.g. 'ipt') for (interp_type == 'nearest')
        :cspace_tf:
            | {}, optional
            | Dict with parameters for xyz_to_cspace and cspace_to_xyz transform.
        :CSF:
            | None, optional
            | RGB camera response functions.
            | If not None: the color coordinates are raw rgb values; the :cspace:
            | argument is overridden and estimation is done directly in raw rgb space!!!
        :interp_type:
            | 'nd', optional
            | Options:
            | - 'nd': perform n-dimensional linear interpolation using Delaunay triangulation.
            | - 'nearest': perform nearest neighbour interpolation. 
        :k_neighbours:
            | 4 or int, optional
            | Number of nearest neighbours for reflectance spectrum interpolation.
            | Neighbours are found using scipy.spatial.cKDTree
        :show: 
            | True, optional
            |  Show images.
        :verbosity:
            | 0, optional
            | If > 0: make a plot of the color coordinates of original and 
            | rendered image pixels.
        :show_ref_img:
            | True, optional
            | True: shows rendered image under reference spd. False: shows
            |  original image.
        :write_to_file:
            | None, optional
            | None: do nothing, else: write to filename(+path) in :write_to_file:
        :stack_test_ref: 
            | 12, optional
            |   - 12: left (test), right (ref) format for show and imwrite
            |   - 21: top (test), bottom (ref)
            |   - 1: only show/write test
            |   - 2: only show/write ref
            |   - 0: show both, write test

    Returns:
        :returns: 
            | img_hyp, img_ren, 
            | ndarrays with float hyperspectral image and rendered images 
    """

    # Get image:
    if img is not None:
        if isinstance(img, str):
            img = plt.imread(img)  # load from file using matplotlib.pyplot's imread
    else:
        img = plt.imread(_HYPSPCIM_DEFAULT_IMAGE)
    if img.dtype == np.uint8:
        img = img / 255
    elif img.dtype == np.uint16:
        img = img / (2**16 - 1)

    # Convert to 2D format:
    rgb = (img * 1.0).reshape(img.shape[0] * img.shape[1], 3)  # *1.0 forces a float copy, so the in-place edit below cannot touch the caller's img
    rgb[rgb == 0] = _EPS  # avoid division by zero for pure blacks

    # Get unique rgb values and positions:
    rgb_u, rgb_indices = np.unique(rgb, return_inverse=True, axis=0)

    # Get rfl set:
    if rfl is None:  # use IESTM30 4880 set
        rfl = _CRI_RFL['ies-tm30']['4880']['5nm']
    wlr = rfl[0]  # spectral reflectance set determines the wavelength range for estimation (xyz_to_rfl())

    # Get ref spd:
    if refspd is None:
        refspd = _CIE_ILLUMINANTS['D65'].copy()
    refspd = cie_interp(refspd, wlr, kind='linear')  # force spd to same wavelength range as rfl

    # Convert rgb_u to xyz and lab-type values under assumed refspd:
    if CSF is None:
        xyz_wr = spd_to_xyz(refspd, cieobs=cieobs, relative=True)
        xyz_ur = colortf(rgb_u * 255, tf='srgb>xyz')
    else:
        xyz_ur = rgb_u  # when CSF is not None, xyz_to_rfl() assumes its input already is raw rgb

    # Estimate rfl's for xyz_ur:
    rfl_est, xyzri = xyz_to_rfl(xyz_ur, rfl = rfl, out = 'rfl_est,xyz_est', \
                 refspd = refspd, D = D, cieobs = cieobs, \
                 cspace = cspace, cspace_tf = cspace_tf, CSF = CSF,\
                 interp_type = interp_type, k_neighbours = k_neighbours,
                 verbosity = verbosity)

    # Get default test spd if none supplied:
    if spd is None:
        spd = _CIE_ILLUMINANTS['F4']

    if CSF is None:
        # calculate xyz values under test spd:
        xyzti, xyztw = spd_to_xyz(spd, rfl=rfl_est, cieobs=cieobs, out=2)

        # Chromatic adaptation from test spd to refspd:
        if D is not None:
            xyzti = cat.apply(xyzti, xyzw1=xyztw, xyzw2=xyz_wr, D=D)

        # Convert xyzti under test spd to srgb:
        rgbti = colortf(xyzti, tf='srgb') / 255
    else:
        # Calculate rgb coordinates from camera sensitivity functions under spd:
        rgbti = rfl_to_rgb(rfl_est, spd=spd, CSF=CSF, wl=None)

        # Chromatic adaptation from test spd to refspd:
        if D is not None:
            white = np.ones_like(spd)
            white[0] = spd[0]
            rgbwr = rfl_to_rgb(white, spd=refspd, CSF=CSF, wl=None)
            rgbwt = rfl_to_rgb(white, spd=spd, CSF=CSF, wl=None)
            rgbti = cat.apply_vonkries2(rgbti,
                                        rgbwt,
                                        rgbwr,
                                        xyzw0=np.array([[1.0, 1.0, 1.0]]),
                                        in_='rgb',
                                        out_='rgb',
                                        D=1)

    # Reconstruct original locations for rendered image rgbs:
    img_ren = rgbti[rgb_indices]
    img_ren.shape = img.shape  # reshape back to 3D size of original

    # For output:
    if show_ref_img == True:
        rgb_ref = colortf(xyzri, tf='srgb') / 255 if (
            CSF is None
        ) else xyzri  # if CSF is not None: xyzri contains rgbri
        img_ref = rgb_ref[rgb_indices]
        img_ref.shape = img.shape  # reshape back to 3D size of original
        img_str = 'Rendered (under ref. spd)'
        img = img_ref
    else:
        img_str = 'Original'

    if (stack_test_ref > 0) or (show == True):
        if stack_test_ref == 21:
            img_original_rendered = np.vstack(
                (img_ren, np.ones((4, img.shape[1], 3)), img))
            img_original_rendered_str = 'Rendered (under test spd)\n ' + img_str
        elif stack_test_ref == 12:
            img_original_rendered = np.hstack(
                (img_ren, np.ones((img.shape[0], 4, 3)), img))
            img_original_rendered_str = 'Rendered (under test spd) | ' + img_str
        elif stack_test_ref == 1:
            img_original_rendered = img_ren
            img_original_rendered_str = 'Rendered (under test spd)'
        elif stack_test_ref == 2:
            img_original_rendered = img
            img_original_rendered_str = img_str
        elif stack_test_ref == 0:
            img_original_rendered = img_ren
            img_original_rendered_str = 'Rendered (under test spd)'

    if write_to_file is not None:
        # Write rendering result to image file:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            imsave(write_to_file, img_original_rendered)

    if show == True:
        # show images using pyplot.show():
        plt.figure()

        plt.imshow(img_original_rendered)
        plt.title(img_original_rendered_str)
        plt.gca().get_xaxis().set_ticklabels([])
        plt.gca().get_yaxis().set_ticklabels([])

        if stack_test_ref == 0:
            plt.figure()
            plt.imshow(img)
            plt.title(img_str)
            plt.axis('off')

    if 'img_hyp' in out.split(','):
        # Create hyperspectral image:
        rfl_image_2D = rfl_est[rgb_indices + 1, :]  # +1 skips the wavelength row; one rfl per pixel
        img_hyp = rfl_image_2D.reshape(img.shape[0], img.shape[1],
                                       rfl_image_2D.shape[1])

    # Setup output:
    if out == 'img_hyp':
        return img_hyp
    elif out == 'img_ren':
        return img_ren
    else:
        return eval(out)
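
A usage sketch (hedged: assumes the surrounding luxpy module so that _HYPSPCIM_DEFAULT_IMAGE, _CRI_RFL, _CIE_ILLUMINANTS, etc. resolve; out='img_hyp,img_ren' returns both arrays via the eval(out) branch):

img_hyp, img_ren = render_image(img=None, spd=None, out='img_hyp,img_ren',
                                show=False, verbosity=0)
print(img_ren.shape)  # (rows, cols, 3): sRGB rendering under the test spd (F4)
print(img_hyp.shape)  # (rows, cols, n_wavelengths): estimated reflectance per pixel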
Example #7
    crop = lambda im, cr, cc, h, w: im[(cr - h // 2):(cr + h // 2),
                                       (cc - w // 2):(cc + w // 2), :].copy()
    im = crop(im, cr, cc, h * n, w * n)
    print('New image shape:', im.shape)

    # simulate HR hyperspectral image:
    hrhsi = render_image(im, show=False)
    wlr = getwlr([380, 780, 1])  #  = wavelength range of default TM30 rfl set
    wlr = wlr[20:-80:10]  # wavelength range from 400nm-700nm every 10 nm
    hrhsi = hrhsi[..., 20:-80:10]  # wavelength range from 400nm-700nm every 10 nm
    print('Simulated HR-HSI shape:', hrhsi.shape)
    # np.save(file[:-4]+'.npy',{'hrhsi':hrhsi,'im':im, 'wlr':wlr})

    # Illumination spectrum of HSI:
    eew = np.vstack((wlr, np.ones_like(wlr)))

    # Create fig and axes for plots:
    if verbosity > 0: fig, axs = plt.subplots(1, 3)

    # convert HR hsi to HR rgb image:
    hrci = hsi_to_rgb(hrhsi,
                      spd=eew,
                      cieobs=cieobs,
                      wl=wlr,
                      linear_rgb=linear_rgb)
    if verbosity > 0: axs[0].imshow(hrci)

    # create LR hsi image for testing:
    dl = n
    lrhsi = hrhsi[::dl, ::dl, :]
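
A sketch of the natural next step (hedged: continues this fragment's variables lrhsi, hrci, wlr and verbosity; see Example #5):

    hrhsi_est = get_superresolution_hsi(lrhsi, hrci, CSF=None, wl=wlr,
                                        interp_type='nd', verbosity=verbosity)
    print('Estimated HR-HSI shape:', hrhsi_est.shape)  # should match hrhsi.shape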
Example #8
def fit_ellipse(xy, center_on_mean_xy = False):
    """
    Fit an ellipse to supplied data points.

    Args:
        :xy: 
            | coordinates of points to fit (Nx2 array)
        :center_on_mean_xy:
            | False, optional
            | Center ellipse on mean of xy 
            | (otherwise it might be offset due to solving 
            | the constrained minimization problem: aT*S*a, see ref. below.)
            
    Returns:
        :v:
            | vector with ellipse parameters [Rmax,Rmin, xc,yc, theta (rad.)]
            
    Reference:
        1. Fitzgibbon, A.W., Pilu, M., and Fisher, R.B., 
        Direct least squares fitting of ellipses, 
        Proc. of the 13th International Conference on Pattern Recognition, 
        pp. 253–257, Vienna, 1996.
    """
    # Fit ellipse (direct least squares, Fitzgibbon et al.):
    x, y = xy[:,0:1], xy[:,1:2]
    D = np.hstack((x * x, x * y, y * y, x, y, np.ones_like(x)))
    S, C = np.dot(D.T, D), np.zeros([6, 6])
    C[0, 2], C[2, 0], C[1, 1] = 2, 2, -1
    U, s, V = np.linalg.svd(np.dot(np.linalg.inv(S), C))
    e = U[:, 0]
        
    # get ellipse axis lengths, center and orientation:
    b, c, d, f, g, a = e[1] / 2, e[2], e[3] / 2, e[4] / 2, e[5], e[0]
    
    # get ellipse center:
    num = b * b - a * c
    if num == 0:
        xc = 0
        yc = 0
    else:
        xc = ((c * d - b * f) / num) 
        yc = ((a * f - b * d) / num) 
    
    # get ellipse orientation:
    theta = np.arctan2(np.array(2 * b), np.array((a - c))) / 2
        
    # axis lengths:
    up = 2 * (a * f * f + c * d * d + g * b * b - 2 * b * d * f - a * c * g)
    down1 = (b * b - a * c) * ((c - a) * np.sqrt(1 + 4 * b * b / ((a - c) * (a - c))) - (c + a))
    down2 = (b * b - a * c) * ((a - c) * np.sqrt(1 + 4 * b * b / ((a - c) * (a - c))) - (c + a))
    a, b  = np.sqrt((up / down1)), np.sqrt((up / down2))


    # assert that a is the major axis (otherwise swap and correct angle)
    if(b > a):
        b, a = a, b
        # ensure the angle is between 0 and 2*pi
        theta = fmod(theta, 2.0 * np.pi)
        
    if center_on_mean_xy == True:
        xc,yc = xy.mean(axis=0)

    return np.hstack((a, b, xc, yc, theta))
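
A quick self-check (synthetic, noise-free points on a known ellipse; the recovered [Rmax, Rmin, xc, yc, theta] should match closely, theta up to the code's axis/angle convention):

import numpy as np

t = np.linspace(0, 2 * np.pi, 200)
Rmax, Rmin, xc, yc, theta = 3.0, 1.0, 0.5, -0.25, np.pi / 6
x = xc + Rmax * np.cos(t) * np.cos(theta) - Rmin * np.sin(t) * np.sin(theta)
y = yc + Rmax * np.cos(t) * np.sin(theta) + Rmin * np.sin(t) * np.cos(theta)
v = fit_ellipse(np.vstack((x, y)).T)
print(v)  # ~[3.0, 1.0, 0.5, -0.25, 0.524]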
Example #9
def run(data,
        xyzw=_DEFAULT_WHITE_POINT,
        Yw=None,
        outin='J,aM,bM',
        conditions=None,
        forward=True,
        yellowbluepurplecorrect=False,
        mcat='cat02'):
    """ 
    Run CIECAM02 color appearance model in forward or backward modes.
    
    Args:
        :data:
            | ndarray with relative sample xyz values (forward mode) or J'a'b' coordinates (inverse mode)
        :xyzw:
            | ndarray with relative white point tristimulus values 
        :Yw: 
            | None, optional
            | Luminance factor of white point.
            | If None: xyz (in data) and xyzw are entered as relative tristimulus values 
            |          (normalized to Yw = 100). 
            | If not None: input tristimulus are absolute and Yw is used to
            |              rescale the absolute values to relative ones 
            |              (relative to a reference perfect white diffuser 
            |               with Ywr = 100). 
            | Yw can be < 100 for e.g. paper as white point. If Yw is None, it 
            | is assumed that the relative Y-tristimulus value in xyzw 
            | represents the luminance factor Yw.
        :conditions:
            | None, optional
            | Dictionary with viewing condition parameters for:
            |       La, Yb, D and surround.
            |  surround can contain:
            |      - str (options: 'avg','dim','dark') or 
            |      - dict with keys c, Nc, F.
            | None results in:
            |   {'La':100, 'Yb':20, 'D':1, 'surround':'avg'}
        :forward:
            | True, optional
            | If True: run in CAM in forward mode, else: inverse mode.
        :outin:
            | 'J,aM,bM', optional
            | String with requested output (e.g. "J,aM,bM,M,h") [Forward mode]
            | - attributes: 'J': lightness,'Q': brightness,
            |               'M': colorfulness,'C': chroma, 's': saturation,
            |               'h': hue angle, 'H': hue quadrature/composition,
            | String with inputs in data [inverse mode]. 
            | Input must have data.shape[-1]==3 and last dim of data must have 
            | the following structure for inverse mode: 
            |  * data[...,0] = J or Q,
            |  * data[...,1:] = (aM,bM) or (aC,bC) or (aS,bS) or (M,h) or (C, h), ...
        :yellowbluepurplecorrect:
            | False, optional
            | If False: don't correct for yellow-blue and purple problems in ciecam02. 
            | If 'brill-suss': 
            |       for yellow-blue problem, see: 
            |          - Brill [Color Res Appl, 2006; 31, 142-145] and 
            |          - Brill and Süsstrunk [Color Res Appl, 2008; 33, 424-426] 
            | If 'jiang-luo': 
            |       for yellow-blue problem + purple line problem, see:
            |          - Jiang, Jun et al. [Color Res Appl 2015: 40(5), 491-503] 
        :mcat:
            | 'cat02', optional
            | Specifies CAT sensor space.
            | - options:
            |    - None defaults to 'cat02' 
            |         (others e.g. 'cat02-bs', 'cat02-jiang',
            |         all trying to correct gamut problems of original cat02 matrix)
            |    - str: see see luxpy.cat._MCATS.keys() for options 
            |         (details on type, ?luxpy.cat)
            |    - ndarray: matrix with sensor primaries
    Returns:
        :camout: 
            | ndarray with color appearance correlates (forward mode) 
            |  or 
            | XYZ tristimulus values (inverse mode)
        
    References:
        1. `N. Moroney, M. D. Fairchild, R. W. G. Hunt, C. Li, M. R. Luo, and T. Newman, 
        "The CIECAM02 color appearance model," 
        IS&T/SID Tenth Color Imaging Conference, p. 23, 2002.
        <http://rit-mcsl.org/fairchild/PDFs/PRO19.pdf>`_
    """
    outin = outin.split(',') if isinstance(outin, str) else outin

    #--------------------------------------------
    # Get condition parameters:
    if conditions is None:
        conditions = _DEFAULT_CONDITIONS
    D, Dtype, La, Yb, surround = (conditions[x]
                                  for x in sorted(conditions.keys()))

    surround_parameters = _SURROUND_PARAMETERS
    if isinstance(surround, str):
        surround = surround_parameters[conditions['surround']]
    F, FLL, Nc, c = [surround[x] for x in sorted(surround.keys())]

    #--------------------------------------------
    # Define sensor space and cat matrices:
    # Hunt-Pointer-Estevez sensors (cone fundamentals)
    mhpe = cat._MCATS['hpe']

    # chromatic adaptation sensors:
    if (mcat is None) or (isinstance(mcat, str) and (mcat == 'cat02')):
        mcat = cat._MCATS['cat02']
        if yellowbluepurplecorrect == 'brill-suss':
            mcat = cat._MCATS[
                'cat02-bs']  # for yellow-blue problem, Brill [Color Res Appl 2006;31:142-145] and Brill and Süsstrunk [Color Res Appl 2008;33:424-426]
        elif yellowbluepurplecorrect == 'jiang-luo':
            mcat = cat._MCATS[
                'cat02-jiang-luo']  # for yellow-blue problem + purple line problem
    elif isinstance(mcat, str):
        mcat = cat._MCATS[mcat]

    #--------------------------------------------
    # pre-calculate some matrices:
    invmcat = np.linalg.inv(mcat)
    mhpe_x_invmcat = np.dot(mhpe, invmcat)
    if not forward: mcat_x_invmhpe = np.dot(mcat, np.linalg.inv(mhpe))

    #--------------------------------------------
    # Set Yw:
    if Yw is not None:
        Yw = (Yw * np.ones_like(xyzw[..., 1:2]).T)
    else:
        Yw = xyzw[..., 1:2].T

    #--------------------------------------------
    # calculate condition dependent parameters:
    k = 1.0 / (5.0 * La + 1.0)
    FL = 0.2 * (k**4.0) * (5.0 * La) + 0.1 * ((1.0 - k**4.0)**2.0) * (
        (5.0 * La)**(1.0 / 3.0))  # luminance adaptation factor
    n = Yb / Yw
    Nbb = 0.725 * (1 / n)**0.2
    Ncb = Nbb
    z = 1.48 + FLL * n**0.5
    yw = xyzw[..., 1:2].T  # original Y in xyzw (pre-transposed)

    #--------------------------------------------
    # Calculate degree of chromatic adaptation:
    if D is None:
        D = F * (1.0 - (1.0 / 3.6) * np.exp((-La - 42.0) / 92.0))

    #===================================================================
    # WHITE POINT transformations (common to forward and inverse modes):

    #--------------------------------------------
    # Normalize white point (keep transpose for next step):
    xyzw = Yw * xyzw.T / yw

    #--------------------------------------------
    # transform from xyzw to cat sensor space:
    rgbw = math.dot23(mcat, xyzw)

    #--------------------------------------------
    # apply von Kries cat:
    rgbwc = (
        (D * Yw / rgbw) + (1 - D)
    ) * rgbw  # the factor 100 from ciecam02 is replaced with Yw[i] in ciecam16, but see the 'note' in Fairchild's "Color Appearance Models" (p. 291 in 3rd ed.)

    #--------------------------------------------
    # convert from cat02 sensor space to cone sensors (hpe):
    rgbwp = math.dot23(mhpe_x_invmcat, rgbwc).T

    #--------------------------------------------
    # apply Naka-Rushton response compression to white:
    NK = lambda x, forward: naka_rushton(x,
                                         scaling=400,
                                         n=0.42,
                                         sig=27.13**(1 / 0.42),
                                         noise=0.1,
                                         forward=forward)
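    # NK implements the CIECAM02 post-adaptation compression
    #   f(x) = 400 * x**0.42 / (x**0.42 + 27.13) + 0.1  (with x = FL*rgb/100),
    # here parameterized through the generic naka_rushton() helper
    # (sig = 27.13**(1/0.42), so sig**n = 27.13).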

    pw = np.where(rgbwp < 0)

    # if requested, apply yellow-blue correction:
    if (yellowbluepurplecorrect == 'brill-suss'):  # Brill & Süsstrunk approach, for the yellow-blue problem
        rgbwp[pw] = 0.0
    rgbwpa = NK(FL * rgbwp / 100.0, True)
    rgbwpa[pw] = 0.1 - (NK(FL * np.abs(rgbwp[pw]) / 100.0, True) - 0.1)

    #--------------------------------------------
    # Calculate achromatic signal of white:
    Aw = (2.0 * rgbwpa[..., 0] + rgbwpa[..., 1] +
          (1.0 / 20.0) * rgbwpa[..., 2] - 0.305) * Nbb

    # massage shape of data for broadcasting:
    original_ndim = data.ndim
    if data.ndim == 2: data = data[:, None]

    #===================================================================
    # STIMULUS transformations
    if forward:

        #--------------------------------------------
        # Normalize xyz (keep transpose for matrix multiplication in next step):
        xyz = (Yw / yw)[..., None] * data.T

        #--------------------------------------------
        # transform from xyz to cat sensor space:
        rgb = math.dot23(mcat, xyz)

        #--------------------------------------------
        # apply von Kries cat:
        rgbc = (
            (D * Yw / rgbw)[..., None] + (1 - D)
        ) * rgb  # factor 100 from ciecam02 is replaced with Yw[i] in ciecam16, but see 'note' in Fairchild's "Color Appearance Models" (p291 ni 3ed.)

        #--------------------------------------------
        # convert from cat02 sensor space to cone sensors (hpe):
        rgbp = math.dot23(mhpe_x_invmcat, rgbc).T

        #--------------------------------------------
        # apply Naka-Rushton response compression:
        p = np.where(rgbp < 0)
        if (yellowbluepurplecorrect == 'brill-suss'):  # Brill & Süsstrunk approach, for the yellow-blue problem
            rgbp[p] = 0.0
        rgbpa = NK(FL * rgbp / 100.0, forward)
        rgbpa[p] = 0.1 - (NK(FL * np.abs(rgbp[p]) / 100.0, forward) - 0.1)

        #--------------------------------------------
        # Calculate achromatic signal:
        A = (2.0 * rgbpa[..., 0] + rgbpa[..., 1] +
             (1.0 / 20.0) * rgbpa[..., 2] - 0.305) * Nbb

        #--------------------------------------------
        # calculate initial opponent channels:
        a = rgbpa[..., 0] - 12.0 * rgbpa[..., 1] / 11.0 + rgbpa[..., 2] / 11.0
        b = (1.0 / 9.0) * (rgbpa[..., 0] + rgbpa[..., 1] - 2.0 * rgbpa[..., 2])

        #--------------------------------------------
        # calculate hue h and eccentricity factor, et:
        h = hue_angle(a, b, htype='deg')
        et = (1.0 / 4.0) * (np.cos(h * np.pi / 180 + 2.0) + 3.8)

        #--------------------------------------------
        # calculate Hue quadrature (if requested in 'out'):
        if 'H' in outin:
            H = hue_quadrature(h, unique_hue_data=_UNIQUE_HUE_DATA)
        else:
            H = None

        #--------------------------------------------
        # calculate lightness, J:
        J = 100.0 * (A / Aw)**(c * z)

        #--------------------------------------------
        # calculate brightness, Q:
        Q = (4.0 / c) * ((J / 100.0)**0.5) * (Aw + 4.0) * (FL**0.25)

        #--------------------------------------------
        # calculate chroma, C:
        t = ((50000.0 / 13.0) * Nc * Ncb * et *
             ((a**2.0 + b**2.0)**0.5)) / (rgbpa[..., 0] + rgbpa[..., 1] +
                                          (21.0 / 20.0 * rgbpa[..., 2]))
        C = (t**0.9) * ((J / 100.0)**0.5) * (1.64 - 0.29**n)**0.73

        #--------------------------------------------
        # calculate colorfulness, M:
        M = C * FL**0.25

        #--------------------------------------------
        # calculate saturation, s:
        s = 100.0 * (M / Q)**0.5
        S = s  # make extra variable, just in case 'S' is requested in outin

        #--------------------------------------------
        # calculate cartesian coordinates:
        if ('aS' in outin):
            aS = s * np.cos(h * np.pi / 180.0)
            bS = s * np.sin(h * np.pi / 180.0)

        if ('aC' in outin):
            aC = C * np.cos(h * np.pi / 180.0)
            bC = C * np.sin(h * np.pi / 180.0)

        if ('aM' in outin):
            aM = M * np.cos(h * np.pi / 180.0)
            bM = M * np.sin(h * np.pi / 180.0)

        #--------------------------------------------
        if outin != ['J', 'aM', 'bM']:
            camout = eval('ajoin((' + ','.join(outin) + '))')
        else:
            camout = ajoin((J, aM, bM))

        if (camout.shape[1] == 1) & (original_ndim < 3):
            camout = camout[:, 0, :]

        return camout

    else:  # inverse mode

        #--------------------------------------------
        # Get Lightness J from data:
        if ('J' in outin[0]):
            J = data[..., 0].copy()
        elif ('Q' in outin[0]):
            Q = data[..., 0].copy()
            J = 100.0 * (Q / ((Aw + 4.0) * (FL**0.25) * (4.0 / c)))**2.0
        else:
            raise Exception(
                'No lightness or brightness values in data. Inverse CAM-transform not possible!'
            )

        #--------------------------------------------
        if 'a' in outin[1]:
            # calculate hue h:
            h = hue_angle(data[..., 1], data[..., 2], htype='deg')

            #--------------------------------------------
            # calculate Colorfulness M or Chroma C or Saturation s from a,b:
            MCs = (data[..., 1]**2.0 + data[..., 2]**2.0)**0.5
        else:
            h = data[..., 2]
            MCs = data[..., 1]

        if ('S' in outin[1]):
            Q = (4.0 / c) * ((J / 100.0)**0.5) * (Aw + 4.0) * (FL**0.25)
            M = Q * (MCs / 100.0)**2.0
            C = M / (FL**0.25)

        if ('M' in outin[1]):  # convert M to C:
            C = MCs / (FL**0.25)

        if ('C' in outin[1]):
            C = MCs

        #--------------------------------------------
        # calculate t from J, C:
        t = (C / ((J / 100.0)**(1.0 / 2.0) * (1.64 - 0.29**n)**0.73))**(1.0 / 0.9)

        #--------------------------------------------
        # calculate eccentricity factor, et:
        et = (np.cos(h * np.pi / 180.0 + 2.0) + 3.8) / 4.0

        #--------------------------------------------
        # calculate achromatic signal, A:
        A = Aw * (J / 100.0)**(1.0 / (c * z))

        #--------------------------------------------
        # calculate temporary cart. co. at, bt and p1,p2,p3,p4,p5:
        at = np.cos(h * np.pi / 180.0)
        bt = np.sin(h * np.pi / 180.0)
        p1 = (50000.0 / 13.0) * Nc * Ncb * et / t
        p2 = A / Nbb + 0.305
        p3 = 21.0 / 20.0
        p4 = p1 / bt
        p5 = p1 / at

        #--------------------------------------------
        # boolean mask where |bt| < |at| (use the at-based expressions there):
        q = (np.abs(bt) < np.abs(at))

        b = p2 * (2.0 + p3) * (460.0 / 1403.0) / (p4 + (2.0 + p3) *
                                                  (220.0 / 1403.0) *
                                                  (at / bt) -
                                                  (27.0 / 1403.0) + p3 *
                                                  (6300.0 / 1403.0))
        a = b * (at / bt)

        a[q] = p2[q] * (2.0 + p3) * (460.0 / 1403.0) / (p5[q] + (2.0 + p3) *
                                                        (220.0 / 1403.0) -
                                                        ((27.0 / 1403.0) - p3 *
                                                         (6300.0 / 1403.0)) *
                                                        (bt[q] / at[q]))
        b[q] = a[q] * (bt[q] / at[q])

        #--------------------------------------------
        # calculate post-adaptation values
        rpa = (460.0 * p2 + 451.0 * a + 288.0 * b) / 1403.0
        gpa = (460.0 * p2 - 891.0 * a - 261.0 * b) / 1403.0
        bpa = (460.0 * p2 - 220.0 * a - 6300.0 * b) / 1403.0

        #--------------------------------------------
        # join values:
        rgbpa = ajoin((rpa, gpa, bpa))

        #--------------------------------------------
        # decompress signals:
        rgbp = (100.0 / FL) * NK(rgbpa, forward)

        # apply yellow-blue correction:
        if (yellowbluepurplecorrect == 'brill-suss'):  # Brill & Süsstrunk approach, for the yellow-blue problem
            p = np.where(rgbp < 0.0)
            rgbp[p] = 0.0

        #--------------------------------------------
        # convert from cone sensors (hpe) to cat02 sensor space:
        rgbc = math.dot23(mcat_x_invmhpe, rgbp.T)

        #--------------------------------------------
        # apply inverse von Kries cat:
        rgb = rgbc / ((D * Yw / rgbw)[..., None] + (1.0 - D))

        #--------------------------------------------
        # transform from cat sensor space to xyz:
        xyz = math.dot23(invmcat, rgb)

        #--------------------------------------------
        # unnormalize xyz:
        xyz = ((yw / Yw)[..., None] * xyz).T

        return xyz
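
A round-trip sketch (hedged: assumes the surrounding luxpy CAM module so that _DEFAULT_WHITE_POINT, _DEFAULT_CONDITIONS, cat, math.dot23, naka_rushton, hue_angle and ajoin resolve; the numbers are illustrative):

import numpy as np

xyz = np.array([[19.01, 20.00, 21.78]])    # sample tristimulus values
xyzw = np.array([[95.05, 100.0, 108.88]])  # D65-like white point
conditions = {'La': 100.0, 'Yb': 20.0, 'D': 1.0, 'Dtype': None, 'surround': 'avg'}
jab = run(xyz, xyzw=xyzw, conditions=conditions, forward=True)       # -> J, aM, bM
xyz_back = run(jab, xyzw=xyzw, conditions=conditions, forward=False) # inverse mode
assert np.allclose(xyz, xyz_back, atol=1e-4)  # forward/inverse consistency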