Example #1
def _hue_bin_data_to_rg(hue_bin_data):
    jabt_closed = np.vstack(
        (hue_bin_data['jabt_hj'], hue_bin_data['jabt_hj'][:1, ...]))
    jabr_closed = np.vstack(
        (hue_bin_data['jabr_hj'], hue_bin_data['jabr_hj'][:1, ...]))
    notnan_t = np.logical_not(np.isnan(
        jabt_closed[..., 1]))  # avoid NaN's (i.e. empty hue-bins)
    notnan_r = np.logical_not(np.isnan(jabr_closed[..., 1]))
    Rg = np.array([[
        100 * _polyarea(jabt_closed[notnan_t[:, i], i, 1],
                        jabt_closed[notnan_t[:, i], i, 2]) /
        _polyarea(jabr_closed[notnan_r[:, i], i, 1],
                  jabr_closed[notnan_r[:, i], i, 2])
        for i in range(notnan_r.shape[-1])
    ]])
    return Rg
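A minimal, self-contained sketch of the gamut-area ratio idea behind Rg, assuming the function above is in scope and that _polyarea() is a shoelace-formula polygon area; the dummy jabt_hj/jabr_hj arrays (shape: hue bins x spectra x 3) are purely illustrative:
import numpy as np

def _polyarea(x, y):
    # shoelace formula (assumption: this is what the _polyarea used above computes)
    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))

h = np.linspace(0, 2 * np.pi, 16, endpoint=False)   # 16 hue-bin centers
hue_bin_data = {
    'jabt_hj': np.dstack((np.full((16, 1), 50.0),
                          40 * np.cos(h)[:, None], 40 * np.sin(h)[:, None])),
    'jabr_hj': np.dstack((np.full((16, 1), 50.0),
                          50 * np.cos(h)[:, None], 50 * np.sin(h)[:, None])),
}
print(_hue_bin_data_to_rg(hue_bin_data))   # ≈ [[64.]], i.e. 100 * (40/50)**2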
def add_to_cmf_dict(bar=None, cieobs='indv', K=683, M=np.eye(3)):
    """
    Add set of cmfs to _CMF dict.
    
    Args:
        :bar: 
            | None, optional
            | Set of CMFs. None: initializes to empty ndarray.
        :cieobs:
            | 'indv' or str, optional
            | Name of CMF set.
        :K: 
            | 683 (lm/W), optional
            | Conversion factor from radiometric to photometric quantity.
        :M: 
            | np.eye, optional
            | Matrix for lms to xyz conversion.

    """
    if bar is None:
        wl3 = getwlr(_WL3)
        bar = np.vstack((wl3, np.empty((3, wl3.shape[0]))))
    _CMF['types'].append(cieobs)
    _CMF[cieobs] = {'bar': bar}
    _CMF[cieobs]['K'] = K
    _CMF[cieobs]['M'] = M
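A small self-contained sketch (using a mock registry, not luxpy's actual _CMF) of the entry structure this function creates; the wavelength range and the zero-valued CMFs below are dummy data:
import numpy as np

_CMF = {'types': []}                                 # mock registry
wl3 = np.arange(360., 831., 1.)                      # assumed default wavelength range
bar = np.vstack((wl3, np.zeros((3, wl3.shape[0]))))  # [wl, xbar, ybar, zbar]

_CMF['types'].append('indv')
_CMF['indv'] = {'bar': bar, 'K': 683, 'M': np.eye(3)}
print(_CMF['types'], _CMF['indv']['bar'].shape)      # ['indv'] (4, 471)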
Example #3
def v_to_cik(v, inverse = False):
    """
    Calculate 2x2 '(covariance matrix)^-1' elements cik 
    
    Args:
        :v: 
            | (Nx5) np.ndarray
            | ellipse parameters [Rmax,Rmin,xc,yc,theta]
        :inverse:
            | If True: return inverse of cik.
    
    Returns:
        :cik: 
            | 'Nx2x2' (covariance matrix)^-1
    
    Notes:
        | cik is not actually a covariance matrix,
        | only for a Gaussian or normal distribution!

    """
    v = np.atleast_2d(v)
    g11 = (1/v[:,0]*np.cos(v[:,4]))**2 + (1/v[:,1]*np.sin(v[:,4]))**2
    g22 = (1/v[:,0]*np.sin(v[:,4]))**2 + (1/v[:,1]*np.cos(v[:,4]))**2
    g12 = (1/v[:,0]**2 - 1/v[:,1]**2)*np.sin(v[:,4])*np.cos(v[:,4])
    cik = np.zeros((g11.shape[0],2,2))

    for i in range(g11.shape[0]):
        cik[i,:,:] = np.vstack((np.hstack((g11[i],g12[i])), np.hstack((g12[i],g22[i]))))
        if inverse == True:
            cik[i,:,:] = np.linalg.inv(cik[i,:,:])
    return cik
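A hedged usage sketch, assuming the function above is in scope: one ellipse with Rmax = 2, Rmin = 1, centered at the origin and rotated by 30°:
import numpy as np

v = np.array([[2.0, 1.0, 0.0, 0.0, np.pi / 6]])   # [Rmax, Rmin, xc, yc, theta]
cik = v_to_cik(v)                                 # (1, 2, 2) '(covariance matrix)^-1'
cov = v_to_cik(v, inverse=True)                   # its inverse
print(cik.shape, np.allclose(cik[0] @ cov[0], np.eye(2)))   # (1, 2, 2) True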
Example #4
def blackbody(cct, wl3 = None, n = None):
    """
    Calculate blackbody radiator spectrum for correlated color temperature (cct).
    
    Args:
        :cct: 
            | int or float 
            | (for list of cct values, use cri_ref() with ref_type = 'BB')
        :wl3: 
            | None, optional
            | New wavelength range for interpolation. 
            | Defaults to wavelengths specified by luxpy._WL3.
        :n:
            | None, optional
            | Refractive index.
            | If None: use the one stored in _BB['n']

    Returns:
        :returns:
            | ndarray with blackbody radiator spectrum
            | (:returns:[0] contains wavelengths)
            
    References:
        1. `CIE15:2018, “Colorimetry,” CIE, Vienna, Austria, 2018. <https://doi.org/10.25039/TR.015.2018>`_
    """
    cct = float(cct)
    if wl3 is None: wl3 = _WL3 
    if n is None: n = _BB['n']
    wl = getwlr(wl3)
    def fSr(x):
        return (1/np.pi)*_BB['c1']*((x*1.0e-9)**(-5))*(n**(-2.0))*(np.exp(_BB['c2']*((n*x*1.0e-9*(cct+_EPS))**(-1.0)))-1.0)**(-1.0)
    return np.vstack((wl,(fSr(wl)/fSr(560.0))))
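A standalone numeric sketch (not luxpy code) of the normalization used above: Planck's law for a 3000 K radiator, divided by its value at 560 nm; c1 and c2 below are assumed to play the same role as _BB['c1'] and _BB['c2']:
import numpy as np

h, c, kB = 6.62607015e-34, 299792458.0, 1.380649e-23
c1, c2 = 2 * np.pi * h * c**2, h * c / kB       # assumed radiation constants
cct, n = 3000.0, 1.0
wl = np.arange(360.0, 831.0, 1.0)               # nm

def Me(lam_nm):                                 # spectral exitance (relative scale)
    lam = lam_nm * 1e-9
    return (1 / np.pi) * c1 * lam**-5 * n**-2 / (np.exp(c2 / (n * lam * cct)) - 1.0)

S = np.vstack((wl, Me(wl) / Me(560.0)))         # value 1.0 at 560 nm, as in blackbody()
print(S[1, wl == 560.0])                        # [1.]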
Example #5
def _plot_target_vs_predicted_lab(labtarget, labpredicted, cspace = 'lab', verbosity = 1):
    """ Make a plot of target vs predicted color coordinates """
    if verbosity > 0:
        xylabels = _CSPACE_AXES[cspace]
        laball = np.vstack((labtarget,labpredicted))
        ml,ma,mb = laball.min(axis=0)
        Ml,Ma,Mb = laball.max(axis=0)
        fml = 0.95*ml
        fMl = 1.05*Ml
        fma = 1.05*ma if ma < 0 else 0.95*ma
        fMa = 0.95*Ma if Ma < 0 else 1.05*Ma
        fmb = 1.05*mb if mb < 0 else 0.95*mb
        fMb = 0.95*Mb if Mb < 0 else 1.05*Mb
        fig,(ax0,ax1,ax2) = plt.subplots(nrows=1,ncols=3, figsize = (15,4))
        ax0.plot(labtarget[...,1],labtarget[...,2],'bo',label = 'target')
        ax0.plot(labpredicted[...,1],labpredicted[...,2],'ro',label = 'predicted')
        ax0.axis([fma,fMa,fmb,fMb])
        ax1.plot(labtarget[...,1],labtarget[...,0],'bo',label = 'target')
        ax1.plot(labpredicted[...,1],labpredicted[...,0],'ro',label = 'predicted')
        ax1.axis([fma,fMa,fml,fMl])
        ax2.plot(labtarget[...,2],labtarget[...,0],'bo',label = 'target')
        ax2.plot(labpredicted[...,2],labpredicted[...,0],'ro',label = 'predicted')
        ax2.axis([fmb,fMb,fml,fMl])
        ax0.set_xlabel(xylabels[1])
        ax0.set_ylabel(xylabels[2])
        ax1.set_xlabel(xylabels[1])
        ax1.set_ylabel(xylabels[0])
        ax2.set_xlabel(xylabels[2])
        ax2.set_ylabel(xylabels[0])
        ax2.legend(loc='upper left')
Example #6
def _complete_ldt_lid(LDT, Isym=4):
    """
    Convert LDT LID map with Isym symmetry to a 'full' map with phi: [0,360] and theta: [0,180].
    """
    cangles = LDT['h_angs']
    tangles = LDT['v_angs']
    candela_2d = LDT['candela_2d']
    if Isym == 4:
        # complete cangles:
        a = candela_2d.copy().T
        b = np.hstack((a, a[:, (a.shape[1] - 2)::-1]))
        c = np.hstack((b, b[:, (b.shape[1] - 2):0:-1]))
        candela_2d_0C360 = np.hstack((c, c[:, :1]))
        cangles = np.hstack(
            (cangles, cangles[1:] + 90, cangles[1:] + 180, cangles[1:] + 270))
        # complete  tangles:
        a = candela_2d_0C360.copy()
        b = np.vstack((a, np.zeros(a.shape)[1:, :]))
        tangles = np.hstack((tangles, tangles[1:] + 90))
        candela_2d = b
    elif Isym == -4:
        # complete cangles:
        a = candela_2d.copy().T
        b = np.hstack((a, a[:, (a.shape[1] - 2)::-1]))
        c = np.hstack((b, b[:, (b.shape[1] - 2):0:-1]))
        candela_2d_0C360 = np.hstack((c, c[:, :1]))
        cangles = np.hstack(
            (cangles, -cangles[(cangles.shape[0] - 2)::-1] + 180))
        cangles = np.hstack(
            (cangles, -cangles[(cangles.shape[0] - 2):0:-1] + 360))
        cangles = np.hstack((cangles, cangles[:1]))
        # complete  tangles:
        a = candela_2d_0C360.copy()
        b = np.vstack((a, np.zeros(a.shape)[1:, :]))
        tangles = np.hstack(
            (tangles, -tangles[(tangles.shape[0] - 2)::-1] + 180))
        candela_2d = b
    else:
        raise Exception(
            'complete_ldt_lid(): Other "Isym" than "4", not yet implemented (31/10/2018).'
        )

    LDT['map'] = {'thetas': tangles}
    LDT['map']['phis'] = cangles
    LDT['map']['values'] = candela_2d.T
    return LDT
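A hedged usage sketch with a tiny synthetic quadrant (Isym == 4), assuming the function above is in scope; the intensity-matrix orientation (rows = C-planes in h_angs, columns = polar angles in v_angs) and the values themselves are assumptions for illustration only:
import numpy as np

LDT = {'h_angs': np.array([0.0, 45.0, 90.0]),        # C-plane angles of one quadrant
       'v_angs': np.array([0.0, 45.0, 90.0]),        # polar angles of one half-plane
       'candela_2d': np.array([[100.0, 80.0, 0.0],
                               [ 90.0, 70.0, 0.0],
                               [100.0, 80.0, 0.0]])}
LDT = _complete_ldt_lid(LDT, Isym=4)
print(LDT['map']['phis'])                            # 0 ... 360 in 45° steps
print(LDT['map']['values'].shape)                    # (9, 5): phis x thetas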
Example #7
def plotceruleanline(cieobs=_CIEOBS,
                     cspace=_CSPACE,
                     axh=None,
                     formatstr='ko-',
                     cspace_pars={}):
    """
    Plot cerulean (yellow (577 nm) - blue (472 nm)) line 
    
    | Kuehni, CRA, 2014: 
    |   Table II: spectral lights.
    
    Args: 
        :axh: 
            | None or axes handle, optional
            | Determines axes to plot data in.
            | None: make new figure.
        :cieobs:
            | luxpy._CIEOBS or str, optional
            | Determines CMF set to calculate spectrum locus or other.
        :cspace:
            | luxpy._CSPACE or str, optional
            | Determines color space / chromaticity diagram to plot data in.
            | Note that data is expected to be in specified :cspace:
        :formatstr:
            | 'k-' or str, optional
            | Format str for plotting (see ?matplotlib.pyplot.plot)
        :cspace_pars:
            | {} or dict, optional
            | Dict with parameters required by color space specified in :cspace: 
            | (for use with luxpy.colortf())
        :kwargs:
            | additional keyword arguments for use with matplotlib.pyplot.
    
    Returns:
        :returns:
            | handle to cerulean line
        
    References:
        1. `Kuehni, R. G. (2014). 
        Unique hues and their stimuli—state of the art. 
        Color Research & Application, 39(3), 279–287. 
        <https://doi.org/10.1002/col.21793>`_
        (see Table II, IV)
    """
    if isinstance(cieobs, str):
        cmf = _CMF[cieobs]['bar']
    else:
        cmf = cieobs
    p_y = cmf[0] == 577.0  #Kuehni, CRA 2014 (mean, Table IV)
    p_b = cmf[0] == 472.0  #Kuehni, CRA 2014 (mean, Table IV)
    xyz_y = cmf[1:, p_y].T
    xyz_b = cmf[1:, p_b].T
    lab = colortf(np.vstack((xyz_b, xyz_y)), tf=cspace, tfa0=cspace_pars)
    if axh is None:
        axh = plt.gca()
    hcerline = axh.plot(lab[:, 1], lab[:, 2], formatstr, label='Cerulean line')
    return hcerline
Example #8
def normalize_to_Lw(Ill, Lw, cieobs, rflM):
    xyzw = lx.spd_to_xyz(Ill, cieobs=cieobs, relative=False)
    for i in range(Ill.shape[0] - 1):
        Ill[i + 1] = Lw * Ill[i + 1] / xyzw[i, 1]
    IllM = []
    for i in range(Ill.shape[0] - 1):
        IllM.append(np.vstack((Ill[0], Ill[i + 1] * rflM[1:, :])))
    IllM = np.transpose(np.array(IllM), (1, 0, 2))
    return Ill, IllM
Example #9
def hsi_to_rgb(hsi,
               spd=None,
               cieobs=_CIEOBS,
               srgb=False,
               linear_rgb=False,
               CSF=None,
               wl=[380, 780, 1]):
    """ 
    Convert HyperSpectral Image to rgb.
    
    Args:
        :hsi:
            | ndarray with hyperspectral image [M,N,L]
        :spd:
            | None, optional
            | ndarray with illumination spectrum
        :cieobs:
            | _CIEOBS, optional
            | CMF set to convert spectral data to xyz tristimulus values.
        :srgb:
            | False, optional
            | If True: use xyz_to_srgb(spd_to_xyz(...)) to convert to srgb values.
            | If False: use the camera sensitivity functions in :CSF:.
        :linear_rgb:
            | False, optional
            | If False: use gamma = 2.4 in xyz_to_srgb; if True: use gamma = 1.
        :CSF:
            | None, optional
            | ndarray with camera sensitivity functions 
            | If None: use Nikon D700
        :wl:
            | [380,780,1], optional
            | Wavelength range and spacing or ndarray with wavelengths of HSI image.
    
    Returns:
        :rgb:
            | ndarray with rgb image [M,N,3]
    """
    if spd is None:
        spd = _CIE_E.copy()
    wlr = getwlr(wl)
    spd = cie_interp(spd, wl, kind='linear')

    hsi_2d = np.reshape(hsi, (hsi.shape[0] * hsi.shape[1], hsi.shape[2]))

    if srgb:
        xyz = spd_to_xyz(spd,
                         cieobs=cieobs,
                         relative=True,
                         rfl=np.vstack((wlr, hsi_2d)))
        gamma = 1 if linear_rgb else 2.4
        rgb = xyz_to_srgb(xyz, gamma=gamma) / 255
    else:
        if CSF is None: CSF = _CSF_NIKON_D700
        rgb = rfl_to_rgb(hsi_2d, spd=spd, CSF=CSF, wl=wl)
    return np.reshape(rgb, (hsi.shape[0], hsi.shape[1], 3))
Example #10
def spdBB(CCT=5500, wl=[400, 700, 5], Lw=25000, cieobs='1964_10'):
    wl = getwlr(wl)
    dl = wl[1] - wl[0]
    spd = 2 * np.pi * 6.626068E-34 * (299792458**2) / (
        (wl * 0.000000001)**5) / (np.exp(6.626068E-34 * 299792458 /
                                         (wl * 0.000000001) / 1.3806503E-23 / CCT) - 1)
    spd = Lw * spd / (dl * 683 * (spd * cie_interp(
        _CMF[cieobs]['bar'].copy(), wl, kind='cmf')[2, :]).sum())
    return np.vstack((wl, spd))
Example #11
def _cri_ref_i(cct,
               wl3=_WL,
               ref_type='iestm30',
               mix_range=[4000, 5000],
               cieobs='1931_2',
               force_daylight_below4000K=False,
               n=None,
               daylight_locus=None):
    """
    Calculates a reference illuminant spectrum based on cct 
    for color rendering index calculations.
    """
    if mix_range is None:
        mix_range = _CRI_REF_TYPES[ref_type]
    if (cct < mix_range[0]) | (ref_type == 'BB'):
        return blackbody(cct, wl3, n=n)
    elif (cct > mix_range[1]) | (ref_type == 'DL'):
        return daylightphase(
            cct,
            wl3,
            force_daylight_below4000K=force_daylight_below4000K,
            cieobs=cieobs,
            daylight_locus=daylight_locus)
    else:
        SrBB = blackbody(cct, wl3, n=n)
        SrDL = daylightphase(
            cct,
            wl3,
            verbosity=None,
            force_daylight_below4000K=force_daylight_below4000K,
            cieobs=cieobs,
            daylight_locus=daylight_locus)
        cmf = _CMF[cieobs]['bar'] if isinstance(cieobs, str) else cieobs
        wl = SrBB[0]
        ld = getwld(wl)

        SrBB = 100.0 * SrBB[1] / np.array(np.sum(SrBB[1] * cmf[2] * ld))
        SrDL = 100.0 * SrDL[1] / np.array(np.sum(SrDL[1] * cmf[2] * ld))
        Tb, Te = float(mix_range[0]), float(mix_range[1])
        cBB, cDL = (Te - cct) / (Te - Tb), (cct - Tb) / (Te - Tb)
        if cBB < 0.0:
            cBB = 0.0
        elif cBB > 1:
            cBB = 1.0
        if cDL < 0.0:
            cDL = 0.0
        elif cDL > 1:
            cDL = 1.0

        Sr = SrBB * cBB + SrDL * cDL
        Sr[np.isnan(Sr)] = 0.0
        Sr = np.vstack((wl, (Sr / Sr[_POS_WL560])))

        return Sr
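A small standalone sketch of the blending step above: within mix_range the reference is a linear mix of the (560 nm normalized) blackbody and daylight-phase spectra; Tb and Te below assume the nominal 'iestm30' mix range of [4000, 5000] K:
Tb, Te = 4000.0, 5000.0          # assumed mix_range for 'iestm30'
cct = 4500.0
cBB = (Te - cct) / (Te - Tb)     # 0.5
cDL = (cct - Tb) / (Te - Tb)     # 0.5
print(cBB, cDL)                  # Sr = SrBB * cBB + SrDL * cDL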
Example #12
def _rgb_delinearizer(rgblin, tr, tr_type = 'lut'):
    """ De-linearize linear rgblin using tr tone response function or lut """
    if tr_type == 'gog':
        return np.array([TRi(rgblin[:,i],*tr[i]) for i in range(3)]).T
    elif tr_type == 'lut':
        maxv = (tr.shape[0] - 1)
        bins = np.vstack((tr-np.diff(tr,axis=0,prepend=0)/2,tr[-1,:]+0.01)) # create bins
        idxs = np.array([(np.digitize(rgblin[:,i],bins[:,i]) - 1)  for i in range(3)]).T # find bin indices
        idxs[idxs>maxv] = maxv 
        rgb = np.arange(tr.shape[0])[idxs]
        return rgb
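A hedged usage sketch of the 'lut' branch, assuming the function above is in scope; the 256-entry per-channel LUT of linear values (a simple gamma 2.2 curve) is purely illustrative:
import numpy as np

tr = np.tile((np.linspace(0, 1, 256)**2.2)[:, None], (1, 3))   # (256, 3) linear LUT
rgblin = np.array([[0.0, 0.5, 1.0]])
codes = _rgb_delinearizer(rgblin, tr, tr_type='lut')           # digital codes 0..255
print(codes)                                                   # [[  0 186 255]]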
Example #13
def interpolate_efficiency_functions(wl, cs_cl_lrs):
    """
    Interpolate all spectral data in dict cs_cl_lrs to new wavelength range.
    """
    
    for key in cs_cl_lrs:
        if key[-1] == 'l': #signifies l for spectral data
            temp = np.vstack((cs_cl_lrs['WL'],cs_cl_lrs[key])) # construct [wl,S] data
            cs_cl_lrs[key] = cie_interp(temp,wl, kind = 'cmf', negative_values_allowed=True)[1:] # interpolate and store in dict
    cs_cl_lrs['WL'] = wl # store new wavelength range
    
    return  cs_cl_lrs
Example #14
def cik_to_v(cik, xyc = None, inverse = False):
    """
    Calculate v-format ellipse descriptor from 2x2 'covariance matrix'^-1 cik 
    
    Args:
        :cik: 
            | 'Nx2x2' (covariance matrix)^-1
        :inverse:
            | If True: input is inverse of cik.
              
            
    Returns:
        :v: 
            | (Nx5) np.ndarray
            | ellipse parameters [Rmax,Rmin,xc,yc,theta]

    Notes:
        | cik is not actually the inverse covariance matrix,
        | only for a Gaussian or normal distribution!

    """
    if cik.ndim < 3:
        cik = cik[None,...]
    
    if inverse == True:
        for i in range(cik.shape[0]):
            cik[i,:,:] = np.linalg.inv(cik[i,:,:])
            
    g11 = cik[:,0,0]
    g22 = cik[:,1,1] 
    g12 = cik[:,0,1]

    theta = 0.5*np.arctan2(2*g12,(g11-g22)) + (np.pi/2)*(g12<0)
    #theta = theta2 + (np.pi/2)*(g12<0)
    #theta2 = theta
    cottheta = np.cos(theta)/np.sin(theta) #np.cot(theta)
    cottheta[np.isinf(cottheta)] = 0

    a = 1/np.sqrt((g22 + g12*cottheta))
    b = 1/np.sqrt((g11 - g12*cottheta))

    # ensure largest ellipse axis is first (correct angle):
    c = b>a; a[c], b[c], theta[c] = b[c],a[c],theta[c]+np.pi/2

    v = np.vstack((a, b, np.zeros(a.shape), np.zeros(a.shape), theta)).T
    
    # add center coordinates:
    if xyc is not None:
        v[:,2:4] = xyc
    
    return v
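A hedged round-trip sketch, assuming both cik_to_v() and v_to_cik() (Example #3 above) are in scope; the centre coordinates are passed back in via :xyc::
import numpy as np

v = np.array([[2.0, 1.0, 0.5, -0.5, np.pi / 6]])   # [Rmax, Rmin, xc, yc, theta]
cik = v_to_cik(v)
v_back = cik_to_v(cik, xyc=v[:, 2:4])
print(np.allclose(v, v_back))                      # True (up to rounding)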
def lmsb_to_xyzb(lms, fieldsize=10, out='XYZ', allow_negative_values=False):
    """
    Convert from LMS cone fundamentals to XYZ color matching functions.
    
    Args:
        :lms: 
            | ndarray with lms cone fundamentals, optional
        :fieldsize: 
            | fieldsize in degrees, optional
            | Defaults to 10°.
        :out: 
            | 'xyz' or str, optional
            | Determines output.
        :allow_negative_values:
            | False, optional
            | XYZ color matching functions should not have negative values.
            |     If False: xyz[xyz<0] = 0.
    Returns:
        :returns:
            | XYZ 
            |   - XYZ: ndarray with population XYZ color matching functions.    
    
    Note: 
        For intermediate field sizes (2° < fieldsize < 10°) a conversion matrix
        is calculated by linear interpolation between 
        the _INDVCMF_M_2d and _INDVCMF_M_10d matrices.
    """
    wl = lms[None, 0]  #store wavelengths
    M = get_lms_to_xyz_matrix(fieldsize=fieldsize)
    if lms.ndim > 2:
        xyz = np.vstack((wl, math.dot23(M, lms[1:, ...], keepdims=False)))
    else:
        xyz = np.vstack((wl, np.dot(M, lms[1:, ...])))
    if allow_negative_values == False:
        xyz[np.where(xyz < 0)] = 0
    return xyz
Example #16
def crowdingdistance(F):
    """
    Computes the crowding distance of a nondominated front.
    
    | The crowding distance gives a measure of how close the individuals are
    | with regard to their neighbors. The higher this value, the greater the
    | spacing. This is used to promote better diversity in the population.

    Args:
       :F: 
           | an m x mu ndarray with mu individuals and m objectives

    Returns:
       :cdist: 
           | a mu-length vector (one crowding distance per individual)
    """
    m, mu = F.shape #gets the size of F
    
    if mu == 2:
       cdist = np.vstack((np.inf, np.inf))
       return cdist

    
    #[Fs, Is] = sort(F,2); #sorts the objectives by individuals
    Is = F.argsort(axis = 1)
    Fs = np.sort(F,axis=1)
    
    # Creates the numerator
    C = Fs[:,2:] - Fs[:,:-2]
    C = np.hstack((np.inf*np.ones((m,1)), C, np.inf*np.ones((m,1)))) #complements with inf in the extremes
    
    # Indexing to permute the C matrix in the right ordering
    Aux = np.arange(m).repeat(mu).reshape(m,mu)   
    ind = np.ravel_multi_index((Aux.flatten(),Is.flatten()),(m, mu)) #converts to lin. indexes # ind = sub2ind([m, mu], Aux(:), Is(:));
    C2 = C.flatten().copy()
    C2[ind] = C2.flatten()
    C = C2.reshape((m, mu))

    # Constructs the denominator
    den = np.repeat((Fs[:,-1] - Fs[:,0])[:,None], mu, axis = 1)
    
    # Calculates the crowding distance
    cdist = (C/den).sum(axis=0)
    cdist = cdist.flatten() #assures a column vector
    return cdist
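A hedged usage sketch, assuming the function above is in scope: a 2-objective nondominated front with 4 individuals (one per column); the extreme points get an infinite crowding distance:
import numpy as np

F = np.array([[1.0, 2.0, 3.0, 4.0],
              [4.0, 3.0, 2.0, 1.0]])   # m x mu = 2 objectives x 4 individuals
print(crowdingdistance(F))             # [inf 1.333... 1.333... inf]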
Example #17
def _get_distance_matrix_grouping(*X, metric='euclidean', Dscale=1):
    """ Get distance matrix (skbio format) and grouping indexing array from raw data"""
    # Create long format data array and grouping indices:
    ni = np.empty((len(X), ), dtype=int)
    for i, Xi in enumerate(X):
        ni[i] = Xi.shape[0]
        if i == 0:
            XY = Xi
        else:
            XY = np.vstack((XY, Xi))
    grouping = [[i] * n for i, n in enumerate(ni)]
    grouping = list(itertools.chain(*grouping))

    # Calculate pairwise distances:
    D = scipy.spatial.distance.pdist(XY, metric=metric) * Dscale
    Dsq = scipy.spatial.distance.squareform(D)

    # Get skbio distance matrix:
    Dm = skbio.stats.distance.DistanceMatrix(Dsq)

    return Dm, grouping
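A hedged usage sketch with two random groups of 2-D points; it assumes the function above is in scope together with the same third-party imports it relies on (numpy as np, scipy, itertools and skbio):
import numpy as np

X1 = np.random.rand(5, 2)          # group 0: 5 samples
X2 = np.random.rand(7, 2) + 1.0    # group 1: 7 samples, offset from group 0
Dm, grouping = _get_distance_matrix_grouping(X1, X2, metric='euclidean')
print(Dm.shape, grouping)          # (12, 12) [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]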
Example #18
def rgb_to_spec_smits(rgb, intent='rfl', bitdepth=8, wlr=_WL3, rgb2spec=None):
    """
    Convert an array of RGB values to a spectrum using a Smits like conversion as implemented in Mitsuba.
    
    Args:
        :rgb: 
            | ndarray of list of rgb values
        :intent:
            | 'rfl' (or 'spd'), optional
            | Type of requested spectrum conversion.
        :bitdepth:
            | 8, optional
            | bit depth of rgb values
        :wlr: 
            | _WL3, optional
            | desired wavelength (nm) range of spectrum.
        :rgb2spec:
            | None, optional
            | Dict with base spectra for white, cyan, magenta, yellow, blue, green and red for each intent.
            | If None: use _BASESPEC_SMITS.
        
    Returns:
        :spec: 
            | ndarray with spectrum or spectra (one for each rgb value, first row are the wavelengths)
    """
    if isinstance(rgb, list):
        rgb = np.atleast_2d(rgb)
    if rgb.max() > 1:
        rgb = rgb / (2**bitdepth - 1)
    if rgb2spec is None:
        rgb2spec = _BASESPEC_SMITS
    if not np.array_equal(rgb2spec['wlr'], getwlr(wlr)):
        rgb2spec = _convert_to_wlr(entries=copy.deepcopy(rgb2spec), wlr=wlr)
    spec = np.zeros((rgb.shape[0], rgb2spec['wlr'].shape[0]))
    for i in range(rgb.shape[0]):
        spec[i, :] = _fromLinearRGB(rgb[i, :],
                                    intent=intent,
                                    rgb2spec=rgb2spec,
                                    wlr=wlr)
    return np.vstack((rgb2spec['wlr'], spec))
Example #19
def dtlz_range(fname, M):
    """
    Returns the decision range of a DTLZ function
    
    | The range is simply [0,1] for all variables. What varies is the number 
    | of decision variables in each problem. The equation for that is
    | n = (M-1) + k
    | wherein k = 5 for DTLZ1, 10 for DTLZ2-6, and 20 for DTLZ7.
    
    Args:
        :fname: 
            | a string with the name of the function ('dtlz1', 'dtlz2' etc.)
        :M: 
            | a scalar with the number of objectives
    
    Returns:
        :lim: 
            | a 2 x n matrix wherein the first row is the lower limit (0), 
            | and the second row the upper limit of search (1).
    """
    # Check whether the string has the prefix 'dtlz' and whether the number
    # after it is greater than 7:
    fname = fname.lower()
    if (len(fname) < 5) or (fname[:4] != 'dtlz') or (float(fname[4]) > 7):
        raise Exception('Sorry, the function {:s} is not implemented.'.format(fname))

    # If the name is o.k., define the value of k:
    if fname == 'dtlz1':
        k = 5
    elif fname == 'dtlz7':
        k = 20
    else:  # any other function
        k = 10

    n = (M - 1) + k  # number of decision variables

    lim = np.vstack((np.zeros((1, n)), np.ones((1, n))))
    return lim
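A hedged usage sketch, assuming the function above is in scope: the decision range for DTLZ2 with M = 3 objectives (k = 10, so n = 12 variables):
lim = dtlz_range('dtlz2', 3)
print(lim.shape)     # (2, 12): row 0 = lower bounds (0), row 1 = upper bounds (1)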
Example #20
def get_pixel_coordinates(jab,
                          jab_ranges=None,
                          jab_deltas=None,
                          limit_grid_radius=0):
    """
    Get pixel coordinates corresponding to array of jab color coordinates.
    
    Args:
        :jab: 
            | ndarray of color coordinates
        :jab_ranges:
            | None or ndarray, optional
            | Specifies the pixelization of color space.
            |    (ndarray.shape = (3,3), with first axis: J,a,b, and second 
            |    axis: min, max, delta)
        :jab_deltas:
            | float or ndarray, optional
            | Specifies the sampling range. 
            | A float uses jab_deltas as the maximum Euclidean distance to select
            | samples around each pixel center. A ndarray of 3 deltas, uses
            | a city block sampling around each pixel center.
        :limit_grid_radius: 
            | 0, optional
            | A value of zero keeps the grid as specified by axr, bxr.
            | A value > 0 only keeps (a,b) coordinates within :limit_grid_radius: 
    
    Returns:
        :returns:
            | gridp, idxp, jabp, samplenrs, samplesIDs
            |   - :gridp: ndarray with coordinates of all pixel centers.
            |   - :idxp: list[int] with pixel index for each non-empty pixel
            |   - :jabp: ndarray with center color coordinates of non-empty pixels
            |   - :samplenrs: list[list[int]] with sample numbers belonging to each 
            |                 non-empty pixel
            |   - :sampleIDs: summarizing list, 
            |                 with column order: 'idxp, jabp, samplenrs'
    """
    if jab_deltas is None:
        jab_deltas = np.array([_VF_DELTAR, _VF_DELTAR, _VF_DELTAR])
    if jab_ranges is None:
        jab_ranges = np.vstack(
            ([0, 100, jab_deltas[0]
              ], [-_VF_MAXR, _VF_MAXR + jab_deltas[1], jab_deltas[1]],
             [-_VF_MAXR, _VF_MAXR + jab_deltas[2], jab_deltas[2]]))

    # Get pixel grid:
    gridp = generate_grid(jab_ranges=jab_ranges,
                          limit_grid_radius=limit_grid_radius)

    # determine pixel coordinates of each sample in jab:
    samplesIDs = []
    for idx in range(gridp.shape[0]):

        # get pixel coordinates:
        jp = gridp[idx, 0]
        ap = gridp[idx, 1]
        bp = gridp[idx, 2]
        #Cp = np.sqrt(ap**2+bp**2)

        if type(jab_deltas) == np.ndarray:
            sampleID = np.where(
                ((np.abs(jab[..., 0] - jp) <= jab_deltas[0] / 2) &
                 (np.abs(jab[..., 1] - ap) <= jab_deltas[1] / 2) &
                 (np.abs(jab[..., 2] - bp) <= jab_deltas[2] / 2)))
        else:
            sampleID = np.where(
                (np.sqrt((jab[..., 0] - jp)**2 + (jab[..., 1] - ap)**2 +
                         (jab[..., 2] - bp)**2) <= jab_deltas / 2))

        if (sampleID[0].shape[0] > 0):
            samplesIDs.append(
                np.hstack((idx, np.array([jp, ap, bp]), sampleID[0])))

    idxp = [int(samplesIDs[i][0]) for i in range(len(samplesIDs))]
    jabp = np.vstack([samplesIDs[i][1:4] for i in range(len(samplesIDs))])
    samplenrs = [
        np.array(samplesIDs[i][4:], dtype=int).tolist()
        for i in range(len(samplesIDs))
    ]

    return gridp, idxp, jabp, samplenrs, samplesIDs
Example #21
def _simple_cam(
        data,
        dataw=None,
        Lw=100.0,
        relative=True,
        inputtype='xyz',
        direction='forward',
        cie_illuminant='D65',
        parameters={
            'cA': 1,
            'ca': np.array([1, -1, 0]),
            'cb': (1 / 3) * np.array([0.5, 0.5, -1]),
            'n': 1 / 3,
            'Mxyz2lms': _CMF['1931_2']['M'].copy()
        },
        cieobs='2006_10',
        match_conversionmatrix_to_cieobs=True):
    """
    An example CAM illustrating the usage of the functions in luxpy.cam.helpers 
    
    | Note that this example uses NO chromatic adaptation 
    | and SIMPLE compression, opponent and correlate processing.
    | THIS IS ONLY FOR ILLUSTRATION PURPOSES !!!

    Args:
        :data: 
            | ndarray with input:
            |  - tristimulus values 
            | or
            |  - spectral data 
            | or 
            |  - input color appearance correlates
            | Can be of shape: (N [, xM], x 3), whereby: 
            | N refers to samples and M refers to light sources.
            | Note that for spectral input shape is (N x (M+1) x wl) 
        :dataw: 
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of :cie_illuminant:
        :cie_illuminant:
            | 'D65', optional
            | String corresponding to one of the illuminants (keys) 
            | in luxpy._CIE_ILLUMINANT
            | If ndarray, then use this one.
            | This is ONLY USED WHEN dataw is NONE !!!
        :Lw:
            | 100.0, optional
            | Luminance (cd/m²) of white point.
        :relative:
            | True or False, optional
            | True: data and dataw input is relative (i.e. Yw = 100)
        :parameters:
            | {'cA': 1, 'ca':np.array([1,-1,0]), 'cb':(1/3)*np.array([0.5,0.5,-1]),
            |  'n': 1/3, 'Mxyz2lms': _CMF['1931_2']['M'].copy()}
            | Dict with model parameters 
            | (For illustration purposes of match_conversionmatrix_to_cieobs, 
            |  the conversion matrix luxpy._CMF['1931_2']['M'] does NOT match
            |  the default observer specification of the input data in :cieobs: !!!)
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam
            |   -'inverse': cam -> xyz 
        :cieobs:
            | '2006_10', optional
            | CMF set to use to perform calculations where spectral data 
            | is involved (inputtype == 'spd'; dataw = None)
            | Other options: see luxpy._CMF['types']
        :match_conversionmatrix_to_cieobs:
            | True, optional
            | When changing to a different CIE observer, change the xyz_to_lms
            | matrix to the one corresponding to that observer. 
            | Set to False to keep the one in the parameter dict!
    
    Returns:
        :returns: 
            | ndarray with:
            | - color appearance correlates (:direction: == 'forward')
            |  or 
            | - XYZ tristimulus values (:direction: == 'inverse')
    """
    #--------------------------------------------------------------------------
    # Get model parameters:
    #--------------------------------------------------------------------------
    args = locals().copy(
    )  # gets all local variables (i.e. the function arguments)

    parameters = _update_parameter_dict(
        args,
        parameters=parameters,
        cieobs=cieobs,
        match_conversionmatrix_to_cieobs=match_conversionmatrix_to_cieobs,
        Mxyz2lms_whitepoint=np.array([[1, 1, 1]]))

    #unpack model parameters:
    (Mxyz2lms, cA, ca, cb,
     n) = [parameters[x] for x in sorted(parameters.keys())]

    #--------------------------------------------------------------------------
    # Setup default white point / adaptation field:
    #--------------------------------------------------------------------------
    dataw = _setup_default_adaptation_field(dataw=dataw,
                                            Lw=Lw,
                                            cie_illuminant='C',
                                            inputtype=inputtype,
                                            relative=relative,
                                            cieobs=cieobs)

    #--------------------------------------------------------------------------
    # Redimension input data to ensure most appropriate sizes
    # for easy and efficient looping and initialize output array:
    #--------------------------------------------------------------------------
    n_out = 5  # this example outputs 5 'correlates': J, a, b, C, h
    (data, dataw, camout,
     originalshape) = _massage_input_and_init_output(data,
                                                     dataw,
                                                     inputtype=inputtype,
                                                     direction=direction,
                                                     n_out=n_out)

    #--------------------------------------------------------------------------
    # Do precomputations needed for both the forward and inverse model,
    # and which do not depend on sample or light source data:
    #--------------------------------------------------------------------------
    # Create matrix with scale factors for L, M, S
    # for quick matrix multiplications to obtain neural signals:
    MAab = np.array([[cA, cA, cA], ca, cb])

    if direction == 'inverse':
        invMxyz2lms = np.linalg.inv(
            Mxyz2lms)  # Calculate the inverse lms-to-xyz conversion matrix
        invMAab = np.linalg.inv(
            MAab)  # Pre-calculate its inverse to avoid repeat in loop.

    #--------------------------------------------------------------------------
    # Apply forward/inverse model by looping over each row (=light source dim.)
    # in data:
    #--------------------------------------------------------------------------
    N = data.shape[0]
    for i in range(N):
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #  START FORWARD MODE and common part of inverse mode
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

        #-----------------------------------------------------------------------------
        # Get tristimulus values for stimulus field and white point for row i:
        #-----------------------------------------------------------------------------
        # Note that xyzt will contain a None in case of inverse mode !!!
        xyzt, xyzw, xyzw_abs = _get_absolute_xyz_xyzw(data,
                                                      dataw,
                                                      i=i,
                                                      Lw=Lw,
                                                      direction=direction,
                                                      cieobs=cieobs,
                                                      inputtype=inputtype,
                                                      relative=relative)

        #---------------------------------------------------------------------
        # stage 1 (white point): calculate lms values of white:
        #----------------------------------------------------------------------
        lmsw = np.dot(Mxyz2lms, xyzw.T).T

        #------------------------------------------------------------------
        # stage 2 (white): apply simple chromatic adaptation:
        #------------------------------------------------------------------
        lmsw_a = lmsw / lmsw

        #----------------------------------------------------------------------
        # stage 3 (white point): apply simple compression to lms values
        #----------------------------------------------------------------------
        lmsw_ac = lmsw_a**n

        #----------------------------------------------------------------------
        # stage 4 (white point): calculate achromatic A, and opponent signals a,b):
        #----------------------------------------------------------------------
        Aabw = np.dot(MAab, lmsw_ac.T).T

        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # SPLIT CALCULATION STEPS IN FORWARD AND INVERSE MODES:
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

        if direction == 'forward':
            #------------------------------------------------------------------
            # stage 1 (stimulus): calculate lms values
            #------------------------------------------------------------------
            lms = np.dot(Mxyz2lms, xyzt.T).T

            #------------------------------------------------------------------
            # stage 2 (stimulus): apply simple chromatic adaptation:
            #------------------------------------------------------------------
            lms_a = lms / lmsw

            #------------------------------------------------------------------
            # stage 3 (stimulus): apply simple compression to lms values
            #------------------------------------------------------------------
            lms_ac = lms_a**n

            #------------------------------------------------------------------
            # stage 3 (stimulus): calculate achromatic A, and opponent signals a,b:
            #------------------------------------------------------------------
            Aab = np.dot(MAab, lms_ac.T).T

            #------------------------------------------------------------------
            # stage 4 (stimulus): calculate J, C, h
            #------------------------------------------------------------------
            J = Aab[..., 0] / Aabw[..., 0]
            C = (Aab[..., 1]**2 + Aab[..., 2]**2)**0.5
            h = math.positive_arctan(Aab[..., 1], Aab[..., 2])

            # # stack together:
            camout[i] = np.vstack((J, Aab[..., 1], Aab[..., 2], C, h)).T

        #--------------------------------------
        # INVERSE MODE FROM PERCEPTUAL SIGNALS:
        #--------------------------------------
        elif direction == 'inverse':
            pass

    return _massage_output_data_to_original_shape(camout, originalshape)
Example #22

if __name__ == '__main__':

    #--------------------------------------------------------------------------
    # Code test
    #--------------------------------------------------------------------------

    import luxpy as lx
    from luxpy import np

    # Prepare some illuminant data:
    C = _CIE_ILLUMINANTS['C'].copy()
    Ill1 = C
    Ill2 = np.vstack(
        (C, lx.cie_interp(_CIE_ILLUMINANTS['D65'], C[0],
                          kind='spd')[1:], C[1:, :] * 2, C[1:, :] * 3))

    # Prepare some sample data:
    rflM = lx._MUNSELL['R'].copy()
    rflM = lx.cie_interp(rflM, C[0], kind='rfl')

    # Setup some model parameters:
    cieobs = '2006_10'
    Lw = 400

    # Create Lw normalized data:
    # Normalize to Lw:
    def normalize_to_Lw(Ill, Lw, cieobs, rflM):
        xyzw = lx.spd_to_xyz(Ill, cieobs=cieobs, relative=False)
        for i in range(Ill.shape[0] - 1):
Example #23
def _massage_input_and_init_output(data,
                                   dataw,
                                   inputtype='xyz',
                                   direction='forward',
                                   n_out=3):
    """
    Redimension input data to ensure it has the most appropriate sizes for easy and efficient looping.
    |
    | 1. Convert data and dataw to atleast_2d ndarrays
    | 2. Make axis 1 of dataw have 'same' dimensions as data
    | 3. Make dataw have same lights source axis size as data
    | 4. Flip light source axis to axis=0 for efficient looping
    | 5. Initialize output array camout to 'same' shape as data but with camout.shape[-1] == n_out
    
    Args:
        :data: 
            | ndarray with input tristimulus values 
            | or spectral data 
            | or input color appearance correlates
            | Can be of shape: (N [, xM], x 3), whereby: 
            | N refers to samples and M refers to light sources.
            | Note that for spectral input shape is (N x (M+1) x wl) 
        :dataw: 
            | None or ndarray, optional
            | Input tristimulus values or spectral data of white point.
            | None defaults to the use of CIE illuminant C.
        :inputtype:
            | 'xyz' or 'spd', optional
            | Specifies the type of input: 
            |     tristimulus values or spectral data for the forward mode.
        :direction:
            | 'forward' or 'inverse', optional
            |   -'forward': xyz -> cam
            |   -'inverse': cam -> xyz 
        :n_out:
            | 3, optional
            | output size of last dimension of camout 
            | (e.g. n_out=3 for j,a,b output or n_out = 5 for J,M,h,a,b output)
            
    Returns:
        :data:
            | ndarray with reshaped data
        :dataw:
            | ndarray with reshaped dataw
        :camout:
            | NaN filled ndarray for output of CAMv (camout.shape[-1] == Nout) 
        :originalshape:
            | original shape of data
            
    Notes:
        For an example on the use, see code _simple_cam() (type: _simple_cam??)
    """
    # Convert data and dataw to atleast_2d ndarrays:
    data = np2d(data).copy(
    )  # stimulus data (can be up to NxMx3 for xyz, or [N x (M+1) x wl] for spd)
    dataw = np2d(dataw).copy(
    )  # white point (can be up to Nx3 for xyz, or [(N+1) x wl] for spd)
    originalshape = data.shape  # to restore output to same shape

    # Make axis 1 of dataw have 'same' dimensions as data:
    if (data.ndim == 2):
        data = np.expand_dims(data, axis=1)  # add light source axis 1

    # Flip light source dim to axis 0:
    data = np.transpose(data, axes=(1, 0, 2))

    dataw = np.expand_dims(
        dataw, axis=1)  # add extra axis to move light source to axis 0

    # Make dataw have same lights source dimension size as data:
    if inputtype == 'xyz':
        if dataw.shape[0] == 1:
            dataw = np.repeat(dataw, data.shape[0], axis=0)
        if (data.shape[0] == 1) & (dataw.shape[0] > 1):
            data = np.repeat(data, dataw.shape[0], axis=0)
    else:
        dataw = np.array([
            np.vstack((dataw[:1, 0, :], dataw[i + 1:i + 2, 0, :]))
            for i in range(dataw.shape[0] - 1)
        ])
        if (data.shape[0] == 1) & (dataw.shape[0] > 1):
            data = np.repeat(data, dataw.shape[0], axis=0)

    # Initialize output array:
    if n_out is not None:
        dshape = list((data).shape)
        dshape[-1] = n_out  # requested number of correlates: e.g. j,a,b
        if (inputtype != 'xyz') & (direction == 'forward'):
            dshape[-2] = dshape[
                -2] - 1  # wavelength row doesn't count & only with forward can the input data be spectral
        camout = np.zeros(dshape)
        camout.fill(np.nan)
    else:
        camout = None
    return data, dataw, camout, originalshape
Example #24
def calculate_VF_PX_models(S, cri_type = _VF_CRI_DEFAULT, sampleset = None, pool = False, \
                           pcolorshift = {'href': np.arange(np.pi/10,2*np.pi,2*np.pi/10),\
                                          'Cref' : _VF_MAXR, 'sig' : _VF_SIG, 'labels' : '#'},\
                           vfcolor = 'k', verbosity = 0):
    """
    Calculate Vector Field and Pixel color shift models.
    
    Args:
        :cri_type: 
            | _VF_CRI_DEFAULT or str or dict, optional
            | Specifies type of color fidelity model to use. 
            | Controls choice of ref. ill., sample set, averaging, scaling, etc.
            | See luxpy.cri.spd_to_cri for more info.
        :sampleset:
            | None or str or ndarray, optional
            | Sampleset to be used when calculating vector field model.
        :pool:
            | False, optional
            | If :S: contains multiple spectra, True pools all jab data before 
            | modeling the vector field, while False models a different field 
            |  for each spectrum.
        :pcolorshift: 
            | default dict (see below) or user defined dict, optional
            | Dict containing the specification input 
            |  for apply_poly_model_at_hue_x().
            | Default dict = {'href': np.arange(np.pi/10,2*np.pi,2*np.pi/10),
            |                'Cref' : _VF_MAXR, 
            |                'sig' : _VF_SIG, 
            |                'labels' : '#'} 
            | The polynomial models of degree 5 and 6 can be fully specified or 
            | summarized by the model parameters themselves OR by calculating the
            | dCoverC and dH at resp. 5 and 6 hues.
        :vfcolor:
            | 'k', optional
            | For plotting the vector fields.
        :verbosity: 
            | 0, optional
            | Report warnings or not.
    
    Returns:
        :returns:
            | :dataVF:, :dataPX: 
            | Dicts, for more info, see output description of resp.: 
            | luxpy.cri.VF_colorshift_model() and luxpy.cri.PX_colorshift_model()
    """
    # calculate VectorField cri_color_shift model:
    dataVF = VF_colorshift_model(S,
                                 cri_type=cri_type,
                                 sampleset=sampleset,
                                 vfcolor=vfcolor,
                                 pcolorshift=pcolorshift,
                                 pool=pool,
                                 verbosity=verbosity)

    # Set jab_ranges and _deltas for PX-model pixel calculations:
    PX_jab_deltas = np.array([_VF_DELTAR, _VF_DELTAR, _VF_DELTAR
                              ])  #set same as for vectorfield generation
    PX_jab_ranges = np.vstack(
        ([0, 100, _VF_DELTAR], [-_VF_MAXR, _VF_MAXR + _VF_DELTAR, _VF_DELTAR],
         [-_VF_MAXR, _VF_MAXR + _VF_DELTAR, _VF_DELTAR]))  #IES4880 gamut

    # Calculate shift vectors using vectorfield and pixel methods:
    delta_SvsVF_vshift_ab_mean = np.zeros((len(dataVF), 1))
    delta_SvsVF_vshift_ab_mean.fill(np.nan)
    delta_SvsVF_vshift_ab_mean_normalized = delta_SvsVF_vshift_ab_mean.copy()
    delta_PXvsVF_vshift_ab_mean = np.zeros((len(dataVF), 1))
    delta_PXvsVF_vshift_ab_mean.fill(np.nan)
    delta_PXvsVF_vshift_ab_mean_normalized = delta_PXvsVF_vshift_ab_mean.copy()
    dataPX = [[] for k in range(len(dataVF))]
    for Snr in range(len(dataVF)):

        # Calculate shifts using pixel method, PX:
        dataPX[Snr] = PX_colorshift_model(dataVF[Snr]['Jab']['Jabt'][:, 0, :],
                                          dataVF[Snr]['Jab']['Jabr'][:, 0, :],
                                          jab_ranges=PX_jab_ranges,
                                          jab_deltas=PX_jab_deltas,
                                          limit_grid_radius=_VF_MAXR)

        # Calculate shift difference between Samples (S) and VectorField model predictions (VF):
        delta_SvsVF_vshift_ab = dataVF[Snr]['vshifts']['vshift_ab_s'] - dataVF[
            Snr]['vshifts']['vshift_ab_s_vf']
        delta_SvsVF_vshift_ab_mean[Snr] = np.nanmean(np.sqrt(
            (delta_SvsVF_vshift_ab[..., 1:3]**2).sum(
                axis=delta_SvsVF_vshift_ab[..., 1:3].ndim - 1)),
                                                     axis=0)
        delta_SvsVF_vshift_ab_mean_normalized[
            Snr] = delta_SvsVF_vshift_ab_mean[Snr] / dataVF[Snr]['Jab'][
                'DEi'].mean(axis=0)

        # Calculate shift difference between PiXel method (PX) and VectorField (VF):
        delta_PXvsVF_vshift_ab = dataPX[Snr]['vshifts'][
            'vectorshift_ab_J0'] - dataVF[Snr]['vshifts']['vshift_ab_vf']
        delta_PXvsVF_vshift_ab_mean[Snr] = np.nanmean(np.sqrt(
            (delta_PXvsVF_vshift_ab[..., 1:3]**2).sum(
                axis=delta_PXvsVF_vshift_ab[..., 1:3].ndim - 1)),
                                                      axis=0)
        delta_PXvsVF_vshift_ab_mean_normalized[
            Snr] = delta_PXvsVF_vshift_ab_mean[Snr] / dataVF[Snr]['Jab'][
                'DEi'].mean(axis=0)

        dataVF[Snr]['vshifts'][
            'delta_PXvsVF_vshift_ab_mean'] = delta_PXvsVF_vshift_ab_mean[Snr]
        dataVF[Snr]['vshifts'][
            'delta_SvsVF_vshift_ab_mean'] = delta_SvsVF_vshift_ab_mean[Snr]
        dataVF[Snr]['vshifts'][
            'delta_SvsVF_vshift_ab_mean_normalized'] = delta_SvsVF_vshift_ab_mean_normalized[
                Snr]
        dataVF[Snr]['vshifts'][
            'delta_PXvsVF_vshift_ab_mean_normalized'] = delta_PXvsVF_vshift_ab_mean_normalized[
                Snr]
        dataPX[Snr]['vshifts']['delta_PXvsVF_vshift_ab_mean'] = dataVF[Snr][
            'vshifts']['delta_PXvsVF_vshift_ab_mean']
        dataPX[Snr]['vshifts'][
            'delta_PXvsVF_vshift_ab_mean_normalized'] = dataVF[Snr]['vshifts'][
                'delta_PXvsVF_vshift_ab_mean_normalized']

    return dataVF, dataPX
Example #25
def subsample_RFL_set(rfl, rflpath = '', samplefcn = 'rand', S = _CIE_ILLUMINANTS['E'], \
                      jab_ranges = None, jab_deltas = None, cieobs = _VF_CIEOBS, cspace = _VF_CSPACE, \
                      ax = np.arange(-_VF_MAXR,_VF_MAXR+_VF_DELTAR,_VF_DELTAR), \
                      bx = np.arange(-_VF_MAXR,_VF_MAXR+_VF_DELTAR,_VF_DELTAR), \
                      jx = None, limit_grid_radius = 0):
    """
    Sub-samples a spectral reflectance set by pixelization of color space.
    
    Args:
        :rfl: 
            | ndarray or str
            | ndarray with, or str referring to, a set of spectral reflectance 
            |  functions to be subsampled.
            | If str to file: file must contain data as columns, with first 
            |  column the wavelengths.
        :rflpath:
            | '' or str, optional
            | Path to folder with rfl-set specified in a str :rfl: filename.
        :samplefcn:
            | 'rand' or 'mean', optional
            |   -'rand': selects a random sample from the samples within each pixel
            |   -'mean': returns the mean spectral reflectance in each pixel.
        :S: 
            | _CIE_ILLUMINANTS['E'], optional
            | Illuminant used to calculate the color coordinates of the spectral 
            |  reflectance samples.
        :jab_ranges:
            | None or ndarray, optional
            | Specifies the pixelization of color space.
            |  (ndarray.shape = (3,3), with  first axis: J,a,b, and second 
            |   axis: min, max, delta)
        :jab_deltas:
            | float or ndarray, optional
            | Specifies the sampling range. 
            | A float uses jab_deltas as the maximum Euclidean distance to select
            | samples around each pixel center. A ndarray of 3 deltas, uses
            | a city block sampling around each pixel center.
        :cspace:
            | _VF_CSPACE or dict, optional
            | Specifies color space. See _VF_CSPACE_EXAMPLE for example structure.
        :cieobs:
            | _VF_CIEOBS or str, optional
            | Specifies CMF set used to calculate color coordinates.
        :ax: 
            | default ndarray or user defined ndarray, optional
            | default = np.arange(-_VF_MAXR,_VF_MAXR+_VF_DELTAR,_VF_DELTAR) 
        :bx: 
            | default ndarray or user defined ndarray, optional
            | default = np.arange(-_VF_MAXR,_VF_MAXR+_VF_DELTAR,_VF_DELTAR) 
        :jx: 
            | None, optional
            | Note that a not-None :jab_ranges: overrides the :ax:, :bx: and :jx: input.
        :limit_grid_radius:
            | 0, optional
            | A value of zero keeps the grid as specified by axr, bxr.
            | A value > 0 only keeps (a,b) coordinates within :limit_grid_radius:
   
    Returns:
        :returns:
            | rflsampled, jabp
            | ndarrays with resp. the subsampled set of spectral reflectance 
            | functions and the pixel coordinate centers.
    """
    # Testing effects of sample set, pixel size and gamut size:
    if type(rfl) == str:
        rfl = pd.read_csv(os.path.join(rflpath, rfl),
                          header=None).values.T

    # Calculate Jab coordinates of samples:
    xyz, xyzw = spd_to_xyz(S, cieobs=cieobs, rfl=rfl.copy(), out=2)
    cspace_pars = cspace.copy()
    cspace_pars.pop('type')
    cspace_pars['xyzw'] = xyzw
    jab = colortf(xyz, tf=cspace['type'], fwtf=cspace_pars)

    # Generate grid and get samples in each grid:
    gridp, idxp, jabp, pixelsamplenrs, pixelIDs = get_pixel_coordinates(
        jab,
        jab_ranges=jab_ranges,
        jab_deltas=jab_deltas,
        limit_grid_radius=limit_grid_radius)

    # Get rfls from set using sampling function (mean or rand):
    W = rfl[:1]
    R = rfl[1:]
    rflsampled = np.zeros((len(idxp), R.shape[1]))
    rflsampled.fill(np.nan)
    for i in range(len(idxp)):
        if samplefcn == 'mean':
            rfl_i = np.nanmean(rfl[pixelsamplenrs[i], :], axis=0)
        else:
            samplenr_i = np.random.randint(len(pixelsamplenrs[i]))
            rfl_i = rfl[pixelsamplenrs[i][samplenr_i], :]
        rflsampled[i, :] = rfl_i
    rflsampled = np.vstack((W, rflsampled))
    return rflsampled, jabp
Example #26
def xyz_to_rfl(xyz, CSF = None, rfl = None, out = 'rfl_est', \
                 refspd = None, D = None, cieobs = _CIEOBS, \
                 cspace = 'xyz', cspace_tf = {},\
                 interp_type = 'nd', k_neighbours = 4, verbosity = 0):
    """
    Approximate spectral reflectance of xyz values based on nd-dimensional linear interpolation 
    or k nearest neighbour interpolation of samples from a standard reflectance set.
    
    Args:
        :xyz: 
            | ndarray with xyz values of target points.
        :CSF:
            | None, optional
            | RGB camera response functions.
            | If not None: input :xyz: contains raw rgb (float) values. Overrides the :cspace:
            | argument and performs the estimation directly in raw rgb space!!!
        :rfl: 
            | ndarray, optional
            | Reflectance set for color coordinate to rfl mapping.
        :out: 
            | 'rfl_est' or str, optional
        :refspd: 
            | None, optional
            | Reference spectrum for color coordinate to rfl mapping.
            | None defaults to D65.
        :cieobs:
            | _CIEOBS, optional
            | CMF set used for calculation of xyz from spectral data.
        :cspace:
            | 'xyz',  optional
            | Color space for color coordinate to rfl mapping.
            | Tip: Use linear space (e.g. 'xyz', 'Yuv',...) for (interp_type == 'nd'),
            |      and perceptually uniform space (e.g. 'ipt') for (interp_type == 'nearest')
        :cspace_tf:
            | {}, optional
            | Dict with parameters for xyz_to_cspace and cspace_to_xyz transform.
        :interp_type:
            | 'nd', optional
            | Options:
            | - 'nd': perform n-dimensional linear interpolation using Delaunay triangulation.
            | - 'nearest': perform nearest neighbour interpolation. 
        :k_neighbours:
            | 4 or int, optional
            | Number of nearest neighbours for reflectance spectrum interpolation.
            | Neighbours are found using scipy.spatial.cKDTree
        :verbosity:
            | 0, optional
            | If > 0: make a plot of the color coordinates of original and 
            | rendered image pixels.

    Returns:
        :returns: 
            | :rfl_est:
            | ndarrays with estimated reflectance spectra.
    """

    # get rfl set:
    if rfl is None:  # use IESTM30['4880'] set
        rfl = _CRI_RFL['ies-tm30']['4880']['5nm']

    wlr = rfl[0]

    # get Ref spd:
    if refspd is None:
        refspd = _CIE_ILLUMINANTS['D65'].copy()
    refspd = cie_interp(
        refspd, wlr,
        kind='linear')  # force spd to same wavelength range as rfl

    # Calculate rgb values of standard rfl set under refspd:
    if CSF is None:
        # Calculate lab coordinates:
        xyz_rr, xyz_wr = spd_to_xyz(refspd,
                                    relative=True,
                                    rfl=rfl,
                                    cieobs=cieobs,
                                    out=2)
        cspace_tf_copy = cspace_tf.copy()
        cspace_tf_copy[
            'xyzw'] = xyz_wr  # put correct white point in param. dict
        lab_rr = colortf(xyz_rr,
                         tf=cspace,
                         fwtf=cspace_tf_copy,
                         bwtf=cspace_tf_copy)[:, 0, :]
    else:
        # Calculate rgb coordinates from camera sensitivity functions
        rgb_rr = rfl_to_rgb(rfl, spd=refspd, CSF=CSF, wl=None)
        lab_rr = rgb_rr
        xyz = xyz
        lab_rr = np.round(lab_rr, _ROUNDING)  # speed up search

    # Convert xyz to lab-type values under refspd:
    if CSF is None:
        lab = colortf(xyz, tf=cspace, fwtf=cspace_tf_copy, bwtf=cspace_tf_copy)
    else:
        lab = xyz  # xyz contained rgb values !!!
        rgb = xyz
        lab = np.round(lab, _ROUNDING)  # speed up search

    if interp_type == 'nearest':
        # Find rfl (cfr. lab_rr) from rfl set that results in 'near' metameric
        # color coordinates for each value in lab_ur (i.e. smallest DE):
        # Construct cKDTree:
        tree = sp.spatial.cKDTree(lab_rr, copy_data=True)

        # Interpolate rfls using k nearest neighbours and inverse distance weighting:
        d, inds = tree.query(lab, k=k_neighbours)
        if k_neighbours > 1:
            d += _EPS
            w = (1.0 / d**2)[:, :, None]  # inverse distance weighting
            rfl_est = np.sum(w * rfl[inds + 1, :], axis=1) / np.sum(w, axis=1)
        else:
            rfl_est = rfl[inds + 1, :].copy()
    elif interp_type == 'nd':

        rfl_est = math.ndinterp1_scipy(lab_rr, rfl[1:], lab)

        _isnan = np.isnan(rfl_est[:, 0])

        if (
                _isnan.any()
        ):  #do nearest neighbour method for those that fail using Delaunay (i.e. ndinterp1_scipy)

            # Find rfl (cfr. lab_rr) from rfl set that results in 'near' metameric
            # color coordinates for each value in lab_ur (i.e. smallest DE):
            # Construct cKDTree:
            tree = sp.spatial.cKDTree(lab_rr, copy_data=True)

            # Interpolate rfls using k nearest neighbours and inverse distance weighting:
            d, inds = tree.query(lab[_isnan, ...], k=k_neighbours)

            if k_neighbours > 1:
                d += _EPS
                w = (1.0 / d**2)[:, :, None]  # inverse distance weighting
                rfl_est_isnan = np.sum(w * rfl[inds + 1, :], axis=1) / np.sum(
                    w, axis=1)
            else:
                rfl_est_isnan = rfl[inds + 1, :].copy()
            rfl_est[_isnan, :] = rfl_est_isnan

    else:
        raise Exception('xyz_to_rfl(): unsupported interp_type!')

    rfl_est[rfl_est < 0] = 0  # can occur for points outside the convex hull of the standard rfl set

    rfl_est = np.vstack((rfl[0], rfl_est))

    if ((verbosity > 0) | ('xyz_est' in out.split(',')) |
        ('lab_est' in out.split(',')) | ('DEi_ab' in out.split(',')) |
        ('DEa_ab' in out.split(','))) & (CSF is None):
        xyz_est, _ = spd_to_xyz(refspd,
                                rfl=rfl_est,
                                relative=True,
                                cieobs=cieobs,
                                out=2)
        cspace_tf_copy = cspace_tf.copy()
        cspace_tf_copy['xyzw'] = xyz_wr  # put correct white point in param. dict
        lab_est = colortf(xyz_est, tf=cspace, fwtf=cspace_tf_copy)[:, 0, :]
        DEi_ab = np.sqrt(((lab_est[:, 1:3] - lab[:, 1:3])**2).sum(axis=1))
        DEa_ab = DEi_ab.mean()
    elif ((verbosity > 0) | ('xyz_est' in out.split(',')) |
          ('rgb_est' in out.split(',')) | ('DEi_rgb' in out.split(',')) |
          ('DEa_rgb' in out.split(','))) & (CSF is not None):
        rgb_est = rfl_to_rgb(rfl_est[1:], spd=refspd, CSF=CSF, wl=wlr)
        xyz_est = rgb_est
        DEi_rgb = np.sqrt(((rgb_est - rgb)**2).sum(axis=1))
        DEa_rgb = DEi_rgb.mean()

    if verbosity > 0:
        if CSF is None:
            ax = plot_color_data(lab[...,1], lab[...,2], z = lab[...,0], \
                            show = False, cieobs = cieobs, cspace = cspace, \
                            formatstr = 'ro', label = 'Original')
            plot_color_data(lab_est[...,1], lab_est[...,2], z = lab_est[...,0], \
                            show = True, axh = ax, cieobs = cieobs, cspace = cspace, \
                            formatstr = 'bd', label = 'Rendered')
        else:
            n = 100  #min(rfl.shape[0]-1,rfl_est.shape[0]-1)
            s = np.random.permutation(rfl.shape[0] -
                                      1)[:min(n, rfl.shape[0] - 1)]
            st = np.random.permutation(rfl_est.shape[0] -
                                       1)[:min(n, rfl_est.shape[0] - 1)]
            fig = plt.figure()
            ax = np.zeros((3, ), dtype=object)  # note: np.object was removed in modern NumPy
            ax[0] = fig.add_subplot(131)
            ax[1] = fig.add_subplot(132)
            ax[2] = fig.add_subplot(133, projection='3d')
            ax[0].plot(rfl[0], rfl[1:][s].T, linestyle='-')
            ax[0].set_title('Original RFL set (random selection of all)')
            ax[0].set_ylim([0, 1])
            ax[1].plot(rfl_est[0], rfl_est[1:][st].T, linestyle='--')
            ax[1].set_title('Estimated RFL set (random selection of targets)')
            ax[1].set_ylim([0, 1])
            ax[2].plot(rgb[st, 0],
                       rgb[st, 1],
                       rgb[st, 2],
                       'ro',
                       label='Original')
            ax[2].plot(rgb_est[st, 0],
                       rgb_est[st, 1],
                       rgb_est[st, 2],
                       'bd',
                       label='Rendered')
            ax[2].legend()
    if out == 'rfl_est':
        return rfl_est
    elif out == 'rfl_est,xyz_est':
        return rfl_est, xyz_est
    else:
        return eval(out)
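

# --- Hypothetical usage sketch (added for illustration; not part of the original
# snippet). Assumes the module-level names used above (_CRI_RFL, _CIE_ILLUMINANTS,
# spd_to_xyz, xyz_to_rfl) are in scope, as in luxpy's hypspcim toolbox.
def _demo_xyz_to_rfl():
    rfl_db = _CRI_RFL['ies-tm30']['4880']['5nm']   # row 0: wavelengths, rows 1+: spectra
    D65 = _CIE_ILLUMINANTS['D65']
    # xyz of the first 10 database spectra under D65 (shape (10, 1, 3)):
    xyz10, _ = spd_to_xyz(D65, rfl=rfl_db[:11], relative=True, out=2)
    # Estimate metameric reflectance spectra for those xyz targets:
    rfl_est = xyz_to_rfl(xyz10[:, 0, :], rfl=rfl_db, refspd=D65,
                         interp_type='nd', k_neighbours=4, verbosity=0)
    return rfl_est  # row 0: wavelengths, rows 1-10: estimated spectra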
Ejemplo n.º 27
0
def get_superresolution_hsi(lrhsi,
                            hrci,
                            CSF,
                            wl=[380, 780, 1],
                            interp_type='nd',
                            k_neighbours=4,
                            verbosity=0):
    """ 
    Get a HighResolution HyperSpectral Image (super-resolution HSI) based on a LowResolution HSI and a HighResolution Color Image.
    
    Args:
        :lrhsi:
            | ndarray with LowResolution HSI [m,n,L].
        :hrci:
            | ndarray with HighResolution RGB Color Image [M,N,3].
        :CSF:
            | None, optional
            | ndarray with camera sensitivity functions 
            | If None: use Nikon D700
        :wl:
            | [380,780,1], optional
            | Wavelength range and spacing or ndarray with wavelengths of HSI image.
        :interp_type:
            | 'nd', optional
            | Options:
            | - 'nd': perform n-dimensional linear interpolation using Delaunay triangulation.
            | - 'nearest': perform nearest neighbour interpolation. 
        :k_neighbours:
            | 4 or int, optional
            | Number of nearest neighbours for reflectance spectrum interpolation.
            | Neighbours are found using scipy.spatial.cKDTree
        :verbosity:
            | 0, optional
            | Verbosity level for sub-call to render_image().
            | If > 0: make a plot of the color coordinates of original and 
            | rendered image pixels.
    Returns:
        :hrhsi:
            | ndarray with HighResolution HSI [M,N,L].
        
    Procedure:
        | Call render_image(hrci, rfl = lrhsi_2d, CSF = ...) to estimate a hyperspectral image
        | from the high-resolution color image hrci, using the reflectance spectra 
        | in the low-resolution hyperspectral image as the database for the estimation.
        | Estimation is done in raw RGB space, with the lrhsi converted to rgb using the
        | camera sensitivity functions in CSF.
    """
    wlr = getwlr(wl)
    eew = np.vstack((wlr, np.ones_like(wlr)))
    lrhsi_2d = np.vstack(
        (wlr,
         np.reshape(lrhsi, (lrhsi.shape[0] * lrhsi.shape[1],
                            lrhsi.shape[2]))))  # create 2D rfl database
    if CSF is None: CSF = _CSF_NIKON_D700
    hrhsi = render_image(
        hrci,
        spd=eew,
        refspd=eew,
        rfl=lrhsi_2d,
        D=None,
        interp_type=interp_type,
        k_neighbours=k_neighbours,
        verbosity=verbosity,
        CSF=CSF)  # render HR-hsi from HR-ci using LR-HSI rfls as database
    return hrhsi
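

# --- Hypothetical usage sketch (added for illustration; not part of the original
# snippet). Assumes get_superresolution_hsi and numpy (np) are in scope; the array
# shapes and random data below are made up purely to show the expected in/output shapes.
def _demo_get_superresolution_hsi():
    wl = [380, 780, 1]                        # 401 wavelengths
    lrhsi = np.random.rand(16, 16, 401)       # low-resolution hyperspectral cube
    hrci = np.random.rand(64, 64, 3)          # high-resolution RGB image
    hrhsi = get_superresolution_hsi(lrhsi, hrci, CSF=None, wl=wl,
                                    interp_type='nd', k_neighbours=4)
    return hrhsi                              # expected shape: (64, 64, 401)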
Ejemplo n.º 28
0
def spd_to_tm30(St):
    """
    Calculate IES TM30 color rendition measures (e.g. Rf, Rg) from a spectral power distribution (spd).
    """
    # calculate CIE 1931 2° white point xyz:
    xyzw_cct, _ = spd_to_xyz(St, cieobs='1931_2', relative=True, out=2)

    # calculate cct, duv:
    cct, duv = xyz_to_cct(xyzw_cct, cieobs='1931_2', out='cct,duv')

    # calculate ref illuminant:
    Sr = _cri_ref(cct, mix_range=[4000, 5000], cieobs='1931_2', wl3=St[0])

    # calculate CIE 1964 10° sample and white point xyz under test and ref. illuminants:
    xyz, xyzw = spd_to_xyz(np.vstack((St, Sr[1:])),
                           cieobs='1964_10',
                           rfl=_TM30_SAMPLE_SET,
                           relative=True,
                           out=2)
    N = St.shape[0] - 1

    xyzt, xyzr = xyz[:, :N, :], xyz[:, N:, :]
    xyzwt, xyzwr = xyzw[:N, :], xyzw[N:, :]

    # calculate CAM02-UCS coordinates
    # (standard conditions = {'La':100.0,'Yb':20.0,'surround':'avg','D':1.0):
    jabt = _xyz_to_jab_cam02ucs(xyzt, xyzw=xyzwt)
    jabr = _xyz_to_jab_cam02ucs(xyzr, xyzw=xyzwr)

    # calculate DEi, Rfi:
    DEi = (((jabt - jabr)**2).sum(axis=-1, keepdims=True)**0.5)[..., 0]
    Rfi = log_scale(DEi, scale_factor=[6.73])
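    # Note (added): log_scale applies the TM30-style mapping
    # R = 10*ln(exp((100 - 6.73*DE)/10) + 1), so DE = 0 gives R ~= 100.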

    # calculate Rf
    DEa = DEi.mean(axis=0, keepdims=True)
    Rf = log_scale(DEa, scale_factor=[6.73])

    # calculate hue-bin data:
    hue_bin_data = _get_hue_bin_data(jabt, jabr, start_hue=0, nhbins=16)

    # calculate Rg:
    Rg = _hue_bin_data_to_rg(hue_bin_data)

    # calculate local color fidelity values, Rfhj,
    # local hue shift, Rhshj and local chroma shifts, Rcshj:
    Rcshj, Rhshj, Rfhj, DEhj = _hue_bin_data_to_Rxhj(hue_bin_data,
                                                     scale_factor=[6.73])

    # Fit ellipse to gamut shape of samples under test source:
    gamut_ellipse_fit = _hue_bin_data_to_ellipsefit(hue_bin_data)
    hue_bin_data['gamut_ellipse_fit'] = gamut_ellipse_fit

    # return output dict:
    return {
        'St': St,
        'Sr': Sr,
        'xyzw_cct': xyzw_cct,
        'xyzwt': xyzwt,
        'xyzwr': xyzwr,
        'xyzt': xyzt,
        'xyzr': xyzr,
        'cct': cct.T,
        'duv': duv.T,
        'jabt': jabt,
        'jabr': jabr,
        'DEi': DEi,
        'DEa': DEa,
        'Rfi': Rfi,
        'Rf': Rf,
        'hue_bin_data': hue_bin_data,
        'Rg': Rg,
        'DEhj': DEhj,
        'Rfhj': Rfhj,
        'Rcshj': Rcshj,
        'Rhshj': Rhshj
    }
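

# --- Hypothetical usage sketch (added for illustration; not part of the original
# snippet). Assumes spd_to_tm30 and _CIE_ILLUMINANTS are in scope.
def _demo_spd_to_tm30():
    St = _CIE_ILLUMINANTS['F4']     # test spectrum: CIE illuminant F4
    tm30 = spd_to_tm30(St)
    # A few of the returned measures:
    return tm30['Rf'], tm30['Rg'], tm30['cct'], tm30['duv']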
Ejemplo n.º 29
0
def render_image(img = None, spd = None, rfl = None, out = 'img_hyp', \
                 refspd = None, D = None, cieobs = _CIEOBS, \
                 cspace = 'xyz', cspace_tf = {}, CSF = None,\
                 interp_type = 'nd', k_neighbours = 4, show = True,
                 verbosity = 0, show_ref_img = True,\
                 stack_test_ref = 12,\
                 write_to_file = None):
    """
    Render image under specified light source spd.
    
    Args:
        :img: 
            | None or str or ndarray with float (max = 1) rgb image.
            | If None: load a default image.
        :spd: 
            | ndarray, optional
            | Light source spectrum for rendering
            | If None: use CIE illuminant F4
        :rfl: 
            | ndarray, optional
            | Reflectance set for color coordinate to rfl mapping.
        :out: 
            | 'img_hyp' or str, optional
            |  (other option: 'img_ren': rendered image under :spd:)
        :refspd:
            | None, optional
            | Reference spectrum for color coordinate to rfl mapping.
            | None defaults to D65 (srgb has a D65 white point)
        :D: 
            | None, optional
            | Degree of (von Kries) adaptation from spd to refspd. 
        :cieobs:
            | _CIEOBS, optional
            | CMF set for calculation of xyz from spectral data.
        :cspace:
            | 'xyz',  optional
            | Color space for color coordinate to rfl mapping.
            | Tip: Use linear space (e.g. 'xyz', 'Yuv',...) for (interp_type == 'nd'),
            |      and perceptually uniform space (e.g. 'ipt') for (interp_type == 'nearest')
        :cspace_tf:
            | {}, optional
            | Dict with parameters for xyz_to_cspace and cspace_to_xyz transform.
        :CSF:
            | None, optional
            | RGB camera response functions.
            | If not None: the :cspace: argument is overridden and the
            | estimation is performed directly in raw rgb space!!!
        :interp_type:
            | 'nd', optional
            | Options:
            | - 'nd': perform n-dimensional linear interpolation using Delaunay triangulation.
            | - 'nearest': perform nearest neighbour interpolation. 
        :k_neighbours:
            | 4 or int, optional
            | Number of nearest neighbours for reflectance spectrum interpolation.
            | Neighbours are found using scipy.spatial.cKDTree
        :show: 
            | True, optional
            |  Show images.
        :verbosity:
            | 0, optional
            | If > 0: make a plot of the color coordinates of original and 
            | rendered image pixels.
        :show_ref_img:
            | True, optional
            | True: shows rendered image under reference spd. False: shows
            |  original image.
        :write_to_file:
            | None, optional
            | None: do nothing, else: write to filename(+path) in :write_to_file:
        :stack_test_ref: 
            | 12, optional
            |   - 12: left (test), right (ref) format for show and imwrite
            |   - 21: top (test), bottom (ref)
            |   - 1: only show/write test
            |   - 2: only show/write ref
            |   - 0: show both, write test

    Returns:
        :returns: 
            | img_hyp, img_ren, 
            | ndarrays with float hyperspectral image and rendered images 
    """

    # Get image:
    #imread = lambda x: plt.imread(x) #matplotlib.pyplot

    if img is not None:
        if isinstance(img, str):
            img = plt.imread(img)  # use matplotlib.pyplot's imread
    else:
        img = plt.imread(_HYPSPCIM_DEFAULT_IMAGE)
    if img.dtype == np.uint8:
        img = img / 255
    elif img.dtype == np.uint16:
        img = img / (2**16 - 1)

    # Convert to 2D format:
    rgb = img.reshape(img.shape[0] * img.shape[1], 3) * 1.0  # *1.0: make a float copy
    rgb[rgb == 0] = _EPS  # avoid division by zero for pure blacks.

    # Get unique rgb values and positions:
    rgb_u, rgb_indices = np.unique(rgb, return_inverse=True, axis=0)
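    # Only the unique colours are pushed through the (expensive) reflectance
    # estimation; rgb_indices maps every pixel back to its unique colour later on.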

    # get rfl set:
    if rfl is None:  # use IESTM30['4880'] set
        rfl = _CRI_RFL['ies-tm30']['4880']['5nm']
    wlr = rfl[0]  # the spectral reflectance set determines the wavelength range for estimation (xyz_to_rfl())

    # get Ref spd:
    if refspd is None:
        refspd = _CIE_ILLUMINANTS['D65'].copy()
    refspd = cie_interp(
        refspd, wlr,
        kind='linear')  # force spd to same wavelength range as rfl

    # Convert rgb_u to xyz and lab-type values under assumed refspd:
    if CSF is None:
        xyz_wr = spd_to_xyz(refspd, cieobs=cieobs, relative=True)
        xyz_ur = colortf(rgb_u * 255, tf='srgb>xyz')
    else:
        xyz_ur = rgb_u  # for input in xyz_to_rfl (when CSF is not None: this functions assumes input is indeed rgb !!!)

    # Estimate rfl's for xyz_ur:
    rfl_est, xyzri = xyz_to_rfl(xyz_ur, rfl = rfl, out = 'rfl_est,xyz_est', \
                 refspd = refspd, D = D, cieobs = cieobs, \
                 cspace = cspace, cspace_tf = cspace_tf, CSF = CSF,\
                 interp_type = interp_type, k_neighbours = k_neighbours,
                 verbosity = verbosity)

    # Get default test spd if none supplied:
    if spd is None:
        spd = _CIE_ILLUMINANTS['F4']

    if CSF is None:
        # calculate xyz values under test spd:
        xyzti, xyztw = spd_to_xyz(spd, rfl=rfl_est, cieobs=cieobs, out=2)

        # Chromatic adaptation from test spd to refspd:
        if D is not None:
            xyzti = cat.apply(xyzti, xyzw1=xyztw, xyzw2=xyz_wr, D=D)

        # Convert xyzti under test spd to srgb:
        rgbti = colortf(xyzti, tf='srgb') / 255
    else:
        # Calculate rgb coordinates from camera sensitivity functions under spd:
        rgbti = rfl_to_rgb(rfl_est, spd=spd, CSF=CSF, wl=None)

        # Chromatic adaptation from test spd to refspd:
        if D is not None:
            white = np.ones_like(spd)
            white[0] = spd[0]
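            # 'white' is a perfect (100%) reflector (row 0 = wavelengths); its rgb
            # under the reference and test spds gives the white points for von Kries: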
            rgbwr = rfl_to_rgb(white, spd=refspd, CSF=CSF, wl=None)
            rgbwt = rfl_to_rgb(white, spd=spd, CSF=CSF, wl=None)
            rgbti = cat.apply_vonkries2(rgbti,
                                        rgbwt,
                                        rgbwr,
                                        xyzw0=np.array([[1.0, 1.0, 1.0]]),
                                        in_='rgb',
                                        out_='rgb',
                                        D=1)

    # Reconstruct original locations for rendered image rgbs:
    img_ren = rgbti[rgb_indices]
    img_ren.shape = img.shape  # reshape back to 3D size of original

    # For output:
    if show_ref_img == True:
        # if CSF is not None, xyzri actually contains rgb values:
        rgb_ref = colortf(xyzri, tf='srgb') / 255 if (CSF is None) else xyzri
        img_ref = rgb_ref[rgb_indices]
        img_ref.shape = img.shape  # reshape back to 3D size of original
        img_str = 'Rendered (under ref. spd)'
        img = img_ref
    else:
        img_str = 'Original'

    if (stack_test_ref > 0) or (show == True):
        if stack_test_ref == 21:
            img_original_rendered = np.vstack(
                (img_ren, np.ones((4, img.shape[1], 3)), img))
            img_original_rendered_str = 'Rendered (under test spd)\n ' + img_str
        elif stack_test_ref == 12:
            img_original_rendered = np.hstack(
                (img_ren, np.ones((img.shape[0], 4, 3)), img))
            img_original_rendered_str = 'Rendered (under test spd) | ' + img_str
        elif stack_test_ref == 1:
            img_original_rendered = img_ren
            img_original_rendered_str = 'Rendered (under test spd)'
        elif stack_test_ref == 2:
            img_original_rendered = img
            img_original_rendered_str = img_str
        elif stack_test_ref == 0:
            img_original_rendered = img_ren
            img_original_rendered_str = 'Rendered (under test spd)'

    if write_to_file is not None:
        # Write the (stacked) rendering result to an image file:
        #print('Writing rendering results to image file: {}'.format(write_to_file))
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            imsave(write_to_file, img_original_rendered)

    if show == True:
        # show images using pyplot.show():
        plt.figure()

        plt.imshow(img_original_rendered)
        plt.title(img_original_rendered_str)
        plt.gca().get_xaxis().set_ticklabels([])
        plt.gca().get_yaxis().set_ticklabels([])

        if stack_test_ref == 0:
            plt.figure()
            plt.imshow(img)
            plt.title(img_str)
            plt.axis('off')

    if 'img_hyp' in out.split(','):
        # Create hyper_spectral image:
        rfl_image_2D = rfl_est[rgb_indices + 1, :]  # array with all rfls required for each pixel (+1 skips the wavelength row)
        img_hyp = rfl_image_2D.reshape(img.shape[0], img.shape[1],
                                       rfl_image_2D.shape[1])

    # Setup output:
    if out == 'img_hyp':
        return img_hyp
    elif out == 'img_ren':
        return img_ren
    else:
        return eval(out)
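

# --- Hypothetical usage sketch (added for illustration; not part of the original
# snippet). Assumes render_image and _CIE_ILLUMINANTS are in scope; img=None
# falls back to the toolbox's default test image.
def _demo_render_image():
    spd = _CIE_ILLUMINANTS['F4']    # light source to render under
    img_hyp, img_ren = render_image(img=None, spd=spd, out='img_hyp,img_ren',
                                    show=False, verbosity=0)
    return img_hyp, img_ren         # hyperspectral cube and rendered rgb image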
Ejemplo n.º 30
0
_HYPSPCIM_PATH = _PKG_PATH + _SEP + 'hypspcim' + _SEP
_HYPSPCIM_DEFAULT_IMAGE = _PKG_PATH + _SEP + 'toolboxes' + _SEP + 'hypspcim' + _SEP + 'data' + _SEP + 'testimage1.jpg'

_ROUNDING = 6  # to speed up xyz_to_rfl search algorithm

# Nikon D700 camera sensitivity functions:
_CSF_NIKON_D700 = np.vstack(
    (np.arange(400, 710, 10),
     np.array([[
         0.005, 0.007, 0.012, 0.015, 0.023, 0.025, 0.030, 0.026, 0.024, 0.019,
         0.010, 0.004, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
         0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
         0.000
     ],
               [
                   0.000, 0.000, 0.000, 0.000, 0.000, 0.001, 0.002, 0.003,
                   0.005, 0.007, 0.012, 0.013, 0.015, 0.016, 0.017, 0.020,
                   0.013, 0.011, 0.009, 0.005, 0.001, 0.001, 0.001, 0.001,
                   0.001, 0.001, 0.001, 0.001, 0.002, 0.002, 0.003
               ],
               [
                   0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
                   0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
                   0.001, 0.003, 0.010, 0.012, 0.013, 0.022, 0.020, 0.020,
                   0.018, 0.017, 0.016, 0.016, 0.014, 0.014, 0.013
               ]])[::-1]))
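

# --- Hypothetical usage sketch (added for illustration; not part of the original
# snippet). Assumes matplotlib.pyplot (plt) is in scope; rows 1-3 of
# _CSF_NIKON_D700 are taken to be the R, G and B channels (cf. the [::-1] row flip).
def _demo_plot_csf():
    wl, r, g, b = _CSF_NIKON_D700
    for channel, fmt in zip((r, g, b), ('r-', 'g-', 'b-')):
        plt.plot(wl, channel, fmt)
    plt.xlabel('Wavelength (nm)')
    plt.ylabel('Relative sensitivity')
    plt.title('Nikon D700 camera sensitivity functions')
    plt.show()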


def xyz_to_rfl(xyz, CSF = None, rfl = None, out = 'rfl_est', \
                 refspd = None, D = None, cieobs = _CIEOBS, \
                 cspace = 'xyz', cspace_tf = {},\
                 interp_type = 'nd', k_neighbours = 4, verbosity = 0):