def plotcircle(center = np.array([0., 0.]), radii = np.arange(0, 60, 10),
               angles = np.arange(0, 350, 10), color = 'k', linestyle = '--',
               out = None):
    """
    Plot one or more concentric circles.
    
    Args:
        :center:
            | np.array([0.,0.]) or ndarray with center coordinates, optional
        :radii:
            | np.arange(0,60,10) or ndarray with radii of circle(s), optional
        :angles:
            | np.arange(0,350,10) or ndarray with angles (°), optional
        :color:
            | 'k', optional
            | Color for plotting.
        :linestyle:
            | '--', optional
            | Linestyle of circles.
        :out:
            | None, optional
            | If None: plot circles; if 'x,y': return circle coordinates.
    """
    xs = np.array([0])
    ys = xs.copy()
    for ri in radii:
        # offset each circle by the requested center
        # (center was previously accepted but silently ignored):
        x = center[0] + ri * np.cos(angles * np.pi / 180)
        y = center[1] + ri * np.sin(angles * np.pi / 180)
        xs = np.hstack((xs, x))
        ys = np.hstack((ys, y))
        if out != 'x,y':
            plt.plot(x, y, color = color, linestyle = linestyle)
    if out == 'x,y':
        return xs, ys
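A minimal usage sketch (an illustration, not part of the library; it assumes numpy is imported as np, matplotlib.pyplot as plt, and plotcircle is in scope):

import numpy as np
import matplotlib.pyplot as plt

# Draw circles of radius 10, 20 and 30 around (0, 0):
plotcircle(radii = np.arange(10, 40, 10), color = 'b', linestyle = '-')
plt.axis('equal')

# Only retrieve boundary coordinates (no plotting); note the returned arrays
# keep a leading (0, 0) point from the accumulator initialization:
x, y = plotcircle(radii = np.array([40.0]), angles = np.arange(0, 360, 1),
                  out = 'x,y')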
def histogram(a, bins=10, bin_center = False, range=None, normed=False,
              weights=None, density=None):
    """
    Histogram function that can take as bins either the center
    (cf. matlab hist) or bin-edges.
    
    Args:
        :bin_center:
            | False, optional
            | False: if :bins: int, str or sequence of scalars:
            |   defaults to numpy.histogram (uses bin edges).
            | True: if :bins: is a sequence of scalars:
            |   bins (containing centers) are transformed to edges
            |   and numpy.histogram is run.
            |   Mimics matlab hist (uses bin centers).
        
    Note:
        For other arguments and output, see ?numpy.histogram
        
    Returns:
        :returns:
            | ndarray with histogram
    """
    if (isinstance(bins, list) | isinstance(bins, np.ndarray)) & (bin_center == True):
        if len(bins) == 1:
            edges = np.hstack((bins[0], np.inf))
        else:
            centers = bins
            d = np.diff(centers) / 2
            edges = np.hstack((centers[0] - d[0], centers[:-1] + d,
                               centers[-1] + d[-1]))
            edges[1:] = edges[1:] + np.finfo(float).eps
        return np.histogram(a, bins=edges, range=range, normed=normed,
                            weights=weights, density=density)
    else:
        return np.histogram(a, bins=bins, range=range, normed=normed,
                            weights=weights, density=density)
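A quick sketch of the MATLAB-style bin-center mode (illustrative; assumes numpy as np, histogram as defined above, and a NumPy version that still accepts the long-deprecated `normed` argument forwarded internally):

import numpy as np

a = np.random.randn(1000)
centers = np.arange(-3, 3.5, 0.5)   # bin centers, MATLAB hist style
counts, edges = histogram(a, bins = centers, bin_center = True)
# edges straddle the requested centers: edges[0] == centers[0] - 0.25, etc.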
def v_to_cik(v, inverse=False):
    """
    Calculate 2x2 '(covariance matrix)^-1' elements cik.
    
    Args:
        :v: 
            | (Nx5) np.ndarray
            | ellipse parameters [Rmax,Rmin,xc,yc,theta]
        :inverse:
            | If True: return inverse of cik.
    
    Returns:
        :cik: 
            | 'Nx2x2' (covariance matrix)^-1
    
    Notes:
        | cik is the inverse of an actual covariance matrix only
        | for a Gaussian (normal) distribution!
    """
    v = np.atleast_2d(v)
    g11 = (1 / v[:, 0] * np.cos(v[:, 4]))**2 + (1 / v[:, 1] * np.sin(v[:, 4]))**2
    g22 = (1 / v[:, 0] * np.sin(v[:, 4]))**2 + (1 / v[:, 1] * np.cos(v[:, 4]))**2
    g12 = (1 / v[:, 0]**2 - 1 / v[:, 1]**2) * np.sin(v[:, 4]) * np.cos(v[:, 4])
    cik = np.zeros((g11.shape[0], 2, 2))
    for i in range(g11.shape[0]):
        cik[i, :, :] = np.vstack((np.hstack((g11[i], g12[i])),
                                  np.hstack((g12[i], g22[i]))))
        if inverse == True:
            cik[i, :, :] = np.linalg.inv(cik[i, :, :])
    return cik
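A short sketch (illustrative; assumes numpy as np and v_to_cik in scope) for a single ellipse with semi-axes 2 and 1, rotated by 45°:

import numpy as np

v = np.array([[2.0, 1.0, 0.0, 0.0, np.pi/4]])  # [Rmax, Rmin, xc, yc, theta]
cik = v_to_cik(v)                  # shape (1, 2, 2): (covariance)^-1 elements
cov = v_to_cik(v, inverse = True)  # inverse of cik, the 'covariance'-like matrix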
def fit_ellipse(xy):
    """
    Fit an ellipse to supplied data points.

    Args:
        :xy: 
            | coordinates of points to fit (Nx2 array)
            
    Returns:
        :v:
            | vector with ellipse parameters [Rmax,Rmin, xc,yc, theta]
    """
    # remove centroid:
    center = xy.mean(axis=0)
    xy = xy - center
    
    # Fit ellipse:
    x, y = xy[:, 0:1], xy[:, 1:2]
    D = np.hstack((x * x, x * y, y * y, x, y, np.ones_like(x)))
    S, C = np.dot(D.T, D), np.zeros([6, 6])
    C[0, 2], C[2, 0], C[1, 1] = 2, 2, -1
    U, s, V = np.linalg.svd(np.dot(np.linalg.inv(S), C))
    e = U[:, 0]

    # get ellipse axis lengths, center and orientation:
    b, c, d, f, g, a = e[1] / 2, e[2], e[3] / 2, e[4] / 2, e[5], e[0]

    # get ellipse center:
    num = b * b - a * c
    xc = ((c * d - b * f) / num) + center[0]
    yc = ((a * f - b * d) / num) + center[1]

    # get ellipse orientation:
    theta = np.arctan2(np.array(2 * b), np.array((a - c))) / 2

    # axis lengths:
    up = 2 * (a * f * f + c * d * d + g * b * b - 2 * b * d * f - a * c * g)
    down1 = (b * b - a * c) * ((c - a) * np.sqrt(1 + 4 * b * b /
                                                 ((a - c) * (a - c))) - (c + a))
    down2 = (b * b - a * c) * ((a - c) * np.sqrt(1 + 4 * b * b /
                                                 ((a - c) * (a - c))) - (c + a))
    a, b = np.sqrt(up / down1), np.sqrt(up / down2)

    # assert that a is the major axis (otherwise swap and correct angle)
    if (b > a):
        b, a = a, b

    # ensure the angle is between 0 and 2*pi
    theta = np.fmod(theta, 2.0 * np.pi)
    return np.hstack((a, b, xc, yc, theta))
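A round-trip sketch (illustrative; assumes numpy as np and fit_ellipse in scope): sample a known ellipse, perturb it slightly, and recover its parameters:

import numpy as np

t = np.linspace(0, 2*np.pi, 200)
theta = np.pi / 6                                                # true orientation
x = 2 + 3*np.cos(t)*np.cos(theta) - 1*np.sin(t)*np.sin(theta)    # Rmax = 3, xc = 2
y = -1 + 3*np.cos(t)*np.sin(theta) + 1*np.sin(t)*np.cos(theta)   # Rmin = 1, yc = -1
xy = np.vstack((x, y)).T + np.random.normal(scale = 0.01, size = (t.size, 2))
v = fit_ellipse(xy)   # approx. [3, 1, 2, -1, pi/6]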
def plot(v, origin=None, ax=None, color='k', marker='.', linestyle='-',
         **kwargs):
    """
    Plot a vector from origin.
    
    Args:
        :v: 
            | vec3 vector.
        :origin:
            | vec3 vector with same size attributes as in :v:.
        :ax: 
            | None, optional
            | axes handle.
            | If None, create new figure with axes ax.
        :color:
            | 'k', optional
            | color specifier.
        :marker:
            | '.', optional
            | marker specifier.
        :linestyle:
            | '-', optional
            | linestyle specifier
        :**kwargs:
            | other keyword specifiers for plot.
          
    Returns:
        :ax:
            | handle to figure axes.
    """
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
    if origin is None:
        origin = vec3(np.zeros(v.x.shape), np.zeros(v.x.shape),
                      np.zeros(v.x.shape))
    # forward linestyle explicitly (it was accepted but silently dropped):
    ax.plot(np.hstack([origin.x, v.x]),
            np.hstack([origin.y, v.y]),
            np.hstack([origin.z, v.z]),
            color=color, marker=marker, linestyle=linestyle, **kwargs)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    return ax
def join(self, data):
    """
    Join data along last axis and return instance.
    """
    if data[0].ndim == 2:  # faster implementation
        self.value = np.transpose(
            np.concatenate(data, axis=0).reshape(
                np.hstack((len(data), data[0].shape))), (1, 2, 0))
    elif data[0].ndim == 1:
        self.value = np.concatenate(data, axis=0).reshape(
            np.hstack((len(data), data[0].shape))).T
    else:
        self.value = np.hstack(data)[0]
    return self
def vec_to_dict(vec=None, dic={}, vsize=None, keys=None):
    """
    Convert dict to vec and vice versa.
    
    Args:
        :vec: 
            | list or vector array, optional
        :dic: 
            | dict, optional
        :vsize:
            | list or vector array with size of values of dict, optional
        :keys:
            | list or vector array with keys in dict (must be provided).
    
    Returns:
        :returns:
            | x, vsize
            |   x is an array, if vec is None
            |   x is a dict, if vec is not None
    """
    if vec is not None:
        # Put values in vec into dic:
        n = 0  # keeps track of length already read from vec
        for i, v in enumerate(keys):
            dic[v] = vec[n + np.arange(vsize[i])]
            n += dic[v].shape[0]
        return dic, vsize
    else:
        # Put values of keys in dic into vec:
        vec = []
        vsize = []
        for i, v in enumerate(keys):
            vec = np.hstack((vec, dic[v]))
            vsize.append(dic[v].shape[0])
        return vec, vsize
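A round-trip sketch (illustrative; assumes numpy as np and vec_to_dict in scope):

import numpy as np

dic = {'a': np.array([1.0, 2.0]), 'b': np.array([3.0])}
keys = ['a', 'b']
vec, vsize = vec_to_dict(dic = dic, keys = keys)    # dict -> flat vector
dic2, _ = vec_to_dict(vec = vec, dic = {}, vsize = vsize, keys = keys)  # and back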
def apply_poly_model_at_hue_x(poly_model, pmodel, dCHoverC_res,
                              hx = None, Cxr = 40, sig = _VF_SIG):
    """
    Apply base color shift model at (hue,chroma) coordinates.
    
    Args:
        :poly_model: 
            | function handle to model
        :pmodel:
            | ndarray with model parameters.
        :dCHoverC_res:
            | ndarray with residuals between 'dCoverC,dH' of samples 
            | and 'dCoverC,dH' predicted by the model.
            | Note: dCoverC = (Ct - Cr)/Cr and dH = ht - hr 
            |   (predicted from model, see notes luxpy.cri.get_poly_model())
        :hx:
            | None or ndarray, optional
            | None defaults to np.arange(np.pi/10.0,2*np.pi,2*np.pi/10.0)
        :Cxr:
            | 40, optional
        :sig: 
            | _VF_SIG or float, optional
            | Determines smooth transition between hue-bin-boundaries 
            | (no hard cutoff at hue bin boundary).
        
    Returns:
        :returns: 
            | ndarrays with dCoverC_x, dCoverC_x_sig, dH_x, dH_x_sig
            | Note '_sig' denotes the uncertainty: 
            |   e.g. dH_x_sig is the uncertainty of dH at input (hue/chroma).
    """
    if hx is None:
        dh = 2 * np.pi / 10.0
        # hue angles at which to apply model, i.e. calculate 'average' measures:
        hx = np.arange(dh / 2, 2 * np.pi, dh)

    # A calculate reference coordinates:
    axr = Cxr * np.cos(hx)
    bxr = Cxr * np.sin(hx)

    # B apply model at reference coordinates to obtain test coordinates:
    axt, bxt, Cxt, hxt, axr, bxr, Cxr, hxr = apply_poly_model_at_x(poly_model,
                                                                   pmodel,
                                                                   axr, bxr)

    # C calculate dC/C, dH for test and ref at fixed hues:
    dCoverC_x = (Cxt - Cxr) / (np.hstack((Cxr + Cxt)).max())
    dH_x = (180 / np.pi) * (hxt - hxr)

    # D calculate 'average' noise measures using sig-value:
    href = dCHoverC_res[:, 0:1]
    dCoverC_res = dCHoverC_res[:, 1:2]
    dHoverC_res = dCHoverC_res[:, 2:3]

    # Gaussian hue weights using wrap-around hue distances (+/- 2*pi);
    # the third distance term was a duplicate of the second and is fixed here:
    dHsigi = np.exp((np.dstack((np.abs(hx - href),
                                np.abs(hx - href - 2 * np.pi),
                                np.abs(hx - href + 2 * np.pi))).min(axis=2)**2) / (-2) / sig)
    dH_x_sig = (180 / np.pi) * (np.sqrt((dHsigi * (dHoverC_res**2)).sum(axis=0, keepdims=True) /
                                        dHsigi.sum(axis=0, keepdims=True)))
    dCoverC_x_sig = (np.sqrt((dHsigi * (dCoverC_res**2)).sum(axis=0, keepdims=True) /
                             dHsigi.sum(axis=0, keepdims=True)))

    return dCoverC_x, dCoverC_x_sig, dH_x, dH_x_sig
def ajoin(data):
    """
    Join data on last axis.
    
    Args:
        :data: 
            | tuple (ndarray, ndarray, ...)
        
    Returns:
        :returns:
            | ndarray (shape[-1] is equal to tuple length)
    """
    if data[0].ndim == 2:  # faster implementation
        return np.transpose(
            np.concatenate(data, axis=0).reshape(
                np.hstack((len(data), data[0].shape))), (1, 2, 0))
    elif data[0].ndim == 1:
        return np.concatenate(data, axis=0).reshape(
            np.hstack((len(data), data[0].shape))).T
    else:
        return np.hstack(data)[0]
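A minimal sketch (illustrative; assumes numpy as np and ajoin in scope):

import numpy as np

a = np.ones((5, 3))
b = np.zeros((5, 3))
ab = ajoin((a, b))   # shape (5, 3, 2): the inputs stacked on a new last axis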
def _complete_ies_lid(IES, lamp_h_type='TYPE90'):
    """
    Convert IES LID map with lamp_h_type symmetry to a 'full' map
    with phi: [0,360] and theta: [0,180].
    """
    # Create full theta (0-180) and phi (0-360) sets
    # (np.matlib requires an explicit `import numpy.matlib`):
    IES['theta'] = IES['v_angs']
    if IES['lamp_h_type'] == 'TYPE90':
        IES['values'] = np.matlib.repmat(IES['candela_2d'], 4, 1)
        IES['phi'] = np.hstack((IES['h_angs'], IES['h_angs'] + 90,
                                IES['h_angs'] + 180, IES['h_angs'] + 270))
    elif IES['lamp_h_type'] == 'TYPE180':
        IES['values'] = np.matlib.repmat(IES['candela_2d'], 2, 1)
        IES['phi'] = np.hstack((IES['h_angs'], IES['h_angs'] + 180))
    else:
        IES['values'] = IES['candela_2d']
        IES['phi'] = IES['h_angs']
    IES['map']['thetas'] = IES['theta']
    IES['map']['phis'] = IES['phi']
    IES['map']['values'] = IES['values']
    return IES
def xtransform(x, params):
    """
    Convert unconstrained variables into their original domains.
    """
    xtrans = np.zeros((params['n']))
    
    # k allows some variables to be fixed, thus dropped from the optimization:
    k = 0
    for i in np.arange(params['n']):
        if params['BoundClass'][i] == 1:
            # lower bound only
            xtrans[i] = params['LB'][i] + x[k]**2
        elif params['BoundClass'][i] == 2:
            # upper bound only
            xtrans[i] = params['UB'][i] - x[k]**2
        elif params['BoundClass'][i] == 3:
            # lower and upper bounds
            xtrans[i] = (np.sin(x[k]) + 1) / 2
            xtrans[i] = xtrans[i] * (params['UB'][i] - params['LB'][i]) + params['LB'][i]
            # just in case of any floating point problems:
            xtrans[i] = np.hstack((params['LB'][i],
                                   np.hstack((params['UB'][i],
                                              xtrans[i])).min())).max()
        elif params['BoundClass'][i] == 4:
            # fixed variable, bounds are equal; set it at either bound
            xtrans[i] = params['LB'][i]
        elif params['BoundClass'][i] == 0:
            # unconstrained variable
            xtrans[i] = x[k]
        if params['BoundClass'][i] != 4:
            k += 1
    return xtrans
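A sketch with a hypothetical params dict (the 'n', 'BoundClass', 'LB' and 'UB' keys are implied by the code above; assumes numpy as np and xtransform in scope):

import numpy as np

# two variables: x0 bounded in [0, 1] (class 3), x1 with lower bound -5 (class 1):
params = {'n': 2,
          'BoundClass': [3, 1],
          'LB': np.array([0.0, -5.0]),
          'UB': np.array([1.0, np.inf])}
xtrans = xtransform(np.array([0.3, 1.2]), params)
# xtrans[0] = (sin(0.3) + 1)/2, scaled to [0, 1]; xtrans[1] = -5 + 1.2**2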
def crowdingdistance(F):
    """
    Compute the crowding distance of a nondominated front.
    
    | The crowding distance gives a measure of how close the individuals are
    | with regard to their neighbors. The higher this value, the greater the
    | spacing. This is used to promote better diversity in the population.

    Args:
       :F: 
           | an m x mu ndarray with mu individuals and m objectives

    Returns:
       :cdist: 
           | a mu-length vector with crowding distances
    """
    m, mu = F.shape  # gets the size of F
    
    if mu == 2:
        cdist = np.vstack((np.inf, np.inf))
        return cdist

    # sorts the objectives by individuals:
    Is = F.argsort(axis=1)
    Fs = np.sort(F, axis=1)
    
    # Creates the numerator:
    C = Fs[:, 2:] - Fs[:, :-2]
    # complements with inf in the extremes:
    C = np.hstack((np.inf * np.ones((m, 1)), C, np.inf * np.ones((m, 1))))
    
    # Indexing to permute the C matrix in the right ordering:
    Aux = np.arange(m).repeat(mu).reshape(m, mu)
    ind = np.ravel_multi_index((Aux.flatten(), Is.flatten()),
                               (m, mu))  # converts to linear indices
    C2 = C.flatten().copy()
    C2[ind] = C2.flatten()
    C = C2.reshape((m, mu))
    
    # Constructs the denominator:
    den = np.repeat((Fs[:, -1] - Fs[:, 0])[:, None], mu, axis=1)
    
    # Calculates the crowding distance:
    cdist = (C / den).sum(axis=0)
    cdist = cdist.flatten()  # assures a flat vector
    return cdist
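A small sketch (illustrative; assumes numpy as np and crowdingdistance in scope), with 2 objectives and 4 individuals as columns:

import numpy as np

F = np.array([[1.0, 2.0, 3.0, 4.0],
              [4.0, 3.0, 2.0, 1.0]])
cdist = crowdingdistance(F)
# boundary individuals get inf, inner ones finite values: [inf, 4/3, 4/3, inf]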
def getUSCensusAgeDist():
    """
    Get US Census Age Distribution.
    """
    t_num = _INDVCMF_DATA['USCensus2010population']
    
    list_AgeCensus = t_num[0]
    # Reduce # of populations to a manageable number (doesn't change probability);
    # cast to int, as np.repeat requires integer repeat counts:
    freq_AgeCensus = np.round(t_num[1] / 1000).astype(int)
    
    # Remove age < 10 and age > 70:
    freq_AgeCensus[:10] = 0
    freq_AgeCensus[71:] = 0
    
    list_Age = []
    for k in range(len(list_AgeCensus)):
        list_Age = np.hstack((list_Age,
                              np.repeat(list_AgeCensus[k], freq_AgeCensus[k])))
    return list_Age
def calculate_lut(ccts=None, cieobs=None, add_to_lut=True):
    """
    Function that calculates a LUT for the ccts stored in 
    ./data/cctluts/cct_lut_cctlist.dat or given as input argument.
    
    | Calculation is performed for the CMF set specified in cieobs. 
    | Adds a new (temporary) field to the _CCT_LUT dict.
    
    Args:
        :ccts: 
            | ndarray or str, optional
            | list of ccts for which to (re-)calculate the LUTs.
            | If str, ccts contains path/filename.dat to list.
        :cieobs: 
            | None or str, optional
            | str specifying cmf set.
            
    Returns:
        :returns: 
            | ndarray with cct and CIE 1960 u, v coordinates.
        
    Note:
        Function changes the global variable: _CCT_LUT!
    """
    if ccts is None:
        ccts = getdata('{}cct_lut_cctlist.dat'.format(_CCT_LUT_PATH))
    elif isinstance(ccts, str):
        ccts = getdata(ccts)

    Yuv = np.ones((ccts.shape[0], 2)) * np.nan
    for i, cct in enumerate(ccts):
        Yuv[i, :] = xyz_to_Yuv(
            spd_to_xyz(blackbody(cct, wl3=[360, 830, 1]),
                       cieobs=cieobs))[:, 1:3]
    u = Yuv[:, 0, None]                # get CIE 1960 u
    v = (2.0 / 3.0) * Yuv[:, 1, None]  # get CIE 1960 v
    cctuv = np.hstack((ccts, u, v))
    if add_to_lut == True:
        _CCT_LUT[cieobs] = cctuv
    return cctuv
def dtlz_range(fname, M):
    """
    Return the decision range of a DTLZ function.
    
    | The range is simply [0,1] for all variables. What varies is the number 
    | of decision variables in each problem. The equation for that is
    | n = (M-1) + k
    | wherein k = 5 for DTLZ1, 10 for DTLZ2-6, and 20 for DTLZ7.
    
    Args:
        :fname: 
            | a string with the name of the function ('dtlz1', 'dtlz2' etc.)
        :M: 
            | a scalar with the number of objectives
    
    Returns:
        :lim: 
            | a n x 2 matrix wherein the first column is the lower limit (0),
            | and the second column, the upper limit of search (1)
    """
    # Check that the string has the 'dtlz' prefix and that the number after it
    # is not greater than 7:
    fname = fname.lower()
    if (len(fname) < 5) or (fname[:4] != 'dtlz') or (float(fname[4]) > 7):
        raise Exception(
            'Sorry, the function {:s} is not implemented.'.format(fname))

    # If the name is o.k., define the value of k:
    if fname == 'dtlz1':
        k = 5
    elif fname == 'dtlz7':
        k = 20
    else:  # any other function
        k = 10

    n = (M - 1) + k  # number of decision variables
    lim = np.hstack((np.zeros((n, 1)), np.ones((n, 1))))
    return lim
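A one-line sketch (illustrative; assumes dtlz_range in scope):

lim = dtlz_range('dtlz2', M = 3)   # DTLZ2: k = 10, so n = 12 variables
# lim.shape == (12, 2), with lim[:, 0] == 0 and lim[:, 1] == 1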
def xyz_to_Ydlep(xyz, cieobs=_CIEOBS, xyzw=_COLORTF_DEFAULT_WHITE_POINT,
                 flip_axes=False, **kwargs):
    """
    Convert XYZ tristimulus values to Y, dominant (complementary) wavelength
    and excitation purity.

    Args:
        :xyz:
            | ndarray with tristimulus values
        :xyzw:
            | None or ndarray with tristimulus values of a single (!) native white point, optional
            | None defaults to xyz of CIE D65 using the :cieobs: observer.
        :cieobs:
            | luxpy._CIEOBS, optional
            | CMF set to use when calculating spectrum locus coordinates.
        :flip_axes:
            | False, optional
            | If True: flip axis 0 and axis 1 in Ydlep to increase speed of loop in function.
            | (single xyzw is not flipped!)

    Returns:
        :Ydlep: 
            | ndarray with Y, dominant (complementary) wavelength
            | and excitation purity
    """

    xyz3 = np3d(xyz).copy().astype(float)  # np.float was removed from NumPy

    # flip axis so that shortest dim is on axis0 (save time in looping):
    if (xyz3.shape[0] < xyz3.shape[1]) & (flip_axes == True):
        axes12flipped = True
        xyz3 = xyz3.transpose((1, 0, 2))
    else:
        axes12flipped = False

    # convert xyz to Yxy:
    Yxy = xyz_to_Yxy(xyz3)
    Yxyw = xyz_to_Yxy(xyzw)

    # get spectrum locus Y,x,y and wavelengths:
    SL = _CMF[cieobs]['bar']
    wlsl = SL[0]
    Yxysl = xyz_to_Yxy(SL[1:4].T)[:, None]

    # center on xyzw:
    Yxy = Yxy - Yxyw
    Yxysl = Yxysl - Yxyw
    Yxyw = Yxyw - Yxyw

    # split:
    Y, x, y = asplit(Yxy)
    Yw, xw, yw = asplit(Yxyw)
    Ysl, xsl, ysl = asplit(Yxysl)

    # calculate hue:
    h = math.positive_arctan(x, y, htype='deg')
    hsl = math.positive_arctan(xsl, ysl, htype='deg')
    hsl_max = hsl[0]   # max hue angle at min wavelength
    hsl_min = hsl[-1]  # min hue angle at max wavelength

    dominantwavelength = np.empty(Y.shape)
    purity = np.empty(Y.shape)
    for i in range(xyz3.shape[1]):

        # find index of complementary wavelengths/hues
        # (hues requiring complementary wavelength, i.e. on the purple line):
        pc = np.where((h[:, i] >= hsl_max) & (h[:, i] <= hsl_min + 360.0))
        # add/subtract 180° to get the positive complementary wavelength:
        h[:, i][pc] = h[:, i][pc] - np.sign(h[:, i][pc] - 180.0) * 180.0

        # find 2 closest hues in sl:
        hib, hslb = np.meshgrid(h[:, i:i + 1], hsl)
        dh = np.abs(hslb - hib)
        q1 = dh.argmin(axis=0)  # index of closest hue
        dh[q1] = 1000.0
        q2 = dh.argmin(axis=0)  # index of second closest hue

        # interpolate wavelength corresponding to h
        # (y = y1 + (y2-y1)*(x-x1)/(x2-x1)):
        dominantwavelength[:, i] = wlsl[q1] + np.divide(
            np.multiply((wlsl[q2] - wlsl[q1]), (h[:, i] - hsl[q1, 0])),
            (hsl[q2, 0] - hsl[q1, 0]))
        # complementary wavelengths are specified by '-' sign:
        dominantwavelength[:, i][pc] = -dominantwavelength[:, i][pc]

        # calculate excitation purity:
        x_dom_wl = xsl[q1, 0] + (xsl[q2, 0] - xsl[q1, 0]) * \
            (h[:, i] - hsl[q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # x of dominant wavelength
        y_dom_wl = ysl[q1, 0] + (ysl[q2, 0] - ysl[q1, 0]) * \
            (h[:, i] - hsl[q1, 0]) / (hsl[q2, 0] - hsl[q1, 0])  # y of dominant wavelength
        d_wl = (x_dom_wl**2.0 + y_dom_wl**2.0)**0.5  # distance from white point to sl
        d = (x[:, i]**2.0 + y[:, i]**2.0)**0.5       # distance from white point to test point
        purity[:, i] = d / d_wl

        # correct for those test points that have a complementary wavelength:
        # calculate intersection of line through white point and test point
        # with the purple line:
        xy = np.vstack((x[:, i], y[:, i])).T
        xyw = np.hstack((xw, yw))
        xypl1 = np.hstack((xsl[0, None], ysl[0, None]))
        xypl2 = np.hstack((xsl[-1, None], ysl[-1, None]))
        da = (xy - xyw)
        db = (xypl2 - xypl1)
        dp = (xyw - xypl1)
        T = np.array([[0.0, -1.0], [1.0, 0.0]])
        dap = np.dot(da, T)
        denom = np.sum(dap * db, axis=1, keepdims=True)
        num = np.sum(dap * dp, axis=1, keepdims=True)
        xy_linecross = (num / denom) * db + xypl1
        d_linecross = np.atleast_2d(
            (xy_linecross[:, 0]**2.0 + xy_linecross[:, 1]**2.0)**0.5).T
        purity[:, i][pc] = d[pc] / d_linecross[pc][:, 0]

    Ydlep = np.dstack((xyz3[:, :, 1], dominantwavelength, purity))

    if axes12flipped == True:
        Ydlep = Ydlep.transpose((1, 0, 2))
    else:
        Ydlep = Ydlep.transpose((0, 1, 2))
    return Ydlep.reshape(xyz.shape)
def get_poly_model(jabt, jabr, modeltype = _VF_MODEL_TYPE):
    """
    Set up base color shift model (delta_a, delta_b),
    determine model parameters and accuracy.
    
    | Calculates a base color shift (delta) from the ref. chromaticity ar, br.
    
    Args:
        :jabt: 
            | ndarray with jab color coordinates under the test SPD.
        :jabr: 
            | ndarray with jab color coordinates under the reference SPD.
        :modeltype:
            | _VF_MODEL_TYPE or 'M6' or 'M5', optional
            | Specifies degree 5 or degree 6 polynomial model in ab-coordinates.
            | (see notes below)
            
    Returns:
        :returns: 
            | (poly_model, pmodel, dab_model, dab_res, dCHoverC_res, 
            |  dab_std, dCHoverC_std)
            |
            | :poly_model: function handle to model
            | :pmodel: ndarray with model parameters
            | :dab_model: ndarray with ab model predictions from ar, br.
            | :dab_res: ndarray with residuals between 'da,db' of samples and 
            |           'da,db' predicted by the model.
            | :dCHoverC_res: ndarray with residuals between 'dCoverC,dH' 
            |                of samples and 'dCoverC,dH' predicted by the model.
            |     Note: dCoverC = (Ct - Cr)/Cr and dH = ht - hr 
            |           (predicted from model, see notes below)
            | :dab_std: ndarray with std of :dab_res:
            | :dCHoverC_std: ndarray with std of :dCHoverC_res: 

    Notes: 
        1. Model types:
            | poly5_model = lambda a,b,p:        p[0]*a + p[1]*b + p[2]*(a**2) + p[3]*a*b + p[4]*(b**2)
            | poly6_model = lambda a,b,p: p[0] + p[1]*a + p[2]*b + p[3]*(a**2) + p[4]*a*b + p[5]*(b**2)
        
        2. Calculation of dCoverC and dH:
            | dCoverC = (np.cos(hr)*da + np.sin(hr)*db)/Cr
            | dHoverC = (np.cos(hr)*db - np.sin(hr)*da)/Cr
    """
    at = jabt[..., 1]
    bt = jabt[..., 2]
    ar = jabr[..., 1]
    br = jabr[..., 2]
    
    # A. Calculate da, db:
    da = at - ar
    db = bt - br
    
    # B.1 Calculate model matrix:
    # 5-parameter model:
    M5 = np.array([[np.sum(ar*ar), np.sum(ar*br), np.sum(ar*ar**2), np.sum(ar*ar*br), np.sum(ar*br**2)],
                   [np.sum(br*ar), np.sum(br*br), np.sum(br*ar**2), np.sum(br*ar*br), np.sum(br*br**2)],
                   [np.sum((ar**2)*ar), np.sum((ar**2)*br), np.sum((ar**2)*ar**2), np.sum((ar**2)*ar*br), np.sum((ar**2)*br**2)],
                   [np.sum(ar*br*ar), np.sum(ar*br*br), np.sum(ar*br*ar**2), np.sum(ar*br*ar*br), np.sum(ar*br*br**2)],
                   [np.sum((br**2)*ar), np.sum((br**2)*br), np.sum((br**2)*ar**2), np.sum((br**2)*ar*br), np.sum((br**2)*br**2)]])
    # 6-parameter model:
    M6 = np.array([[ar.size, np.sum(1.0*ar), np.sum(1.0*br), np.sum(1.0*ar**2), np.sum(1.0*ar*br), np.sum(1.0*br**2)],
                   [np.sum(ar*1.0), np.sum(ar*ar), np.sum(ar*br), np.sum(ar*ar**2), np.sum(ar*ar*br), np.sum(ar*br**2)],
                   [np.sum(br*1.0), np.sum(br*ar), np.sum(br*br), np.sum(br*ar**2), np.sum(br*ar*br), np.sum(br*br**2)],
                   [np.sum((ar**2)*1.0), np.sum((ar**2)*ar), np.sum((ar**2)*br), np.sum((ar**2)*ar**2), np.sum((ar**2)*ar*br), np.sum((ar**2)*br**2)],
                   [np.sum(ar*br*1.0), np.sum(ar*br*ar), np.sum(ar*br*br), np.sum(ar*br*ar**2), np.sum(ar*br*ar*br), np.sum(ar*br*br**2)],
                   [np.sum((br**2)*1.0), np.sum((br**2)*ar), np.sum((br**2)*br), np.sum((br**2)*ar**2), np.sum((br**2)*ar*br), np.sum((br**2)*br**2)]])
    
    # B.2 Define model function:
    poly5_model = lambda a, b, p: p[0]*a + p[1]*b + p[2]*(a**2) + p[3]*a*b + p[4]*(b**2)
    poly6_model = lambda a, b, p: p[0] + p[1]*a + p[2]*b + p[3]*(a**2) + p[4]*a*b + p[5]*(b**2)
    
    if modeltype == 'M5':
        M = M5
        poly_model = poly5_model
    else:
        M = M6
        poly_model = poly6_model

    M = np.linalg.inv(M)

    # C.1 Data a,b analysis output:
    if modeltype == 'M5':
        da_model_parameters = np.dot(M, np.array([np.sum(da*ar), np.sum(da*br),
                                                  np.sum(da*ar**2), np.sum(da*ar*br),
                                                  np.sum(da*br**2)]))
        db_model_parameters = np.dot(M, np.array([np.sum(db*ar), np.sum(db*br),
                                                  np.sum(db*ar**2), np.sum(db*ar*br),
                                                  np.sum(db*br**2)]))
    else:
        da_model_parameters = np.dot(M, np.array([np.sum(da*1.0), np.sum(da*ar),
                                                  np.sum(da*br), np.sum(da*ar**2),
                                                  np.sum(da*ar*br), np.sum(da*br**2)]))
        db_model_parameters = np.dot(M, np.array([np.sum(db*1.0), np.sum(db*ar),
                                                  np.sum(db*br), np.sum(db*ar**2),
                                                  np.sum(db*ar*br), np.sum(db*br**2)]))
    pmodel = np.vstack((da_model_parameters, db_model_parameters))

    # D.1 Calculate model da, db:
    da_model = poly_model(ar, br, pmodel[0])
    db_model = poly_model(ar, br, pmodel[1])
    dab_model = np.hstack((da_model, db_model))

    # D.2 Calculate residuals for da & db:
    da_res = da - da_model
    db_res = db - db_model
    dab_res = np.hstack((da_res, db_res))
    dab_std = np.vstack((np.std(da_res, axis=0), np.std(db_res, axis=0)))

    # E Calculate href, Cref:
    href = np.arctan2(br, ar)
    Cref = (ar**2 + br**2)**0.5

    # F Calculate dC/C, dH/C for data and model and calculate residuals:
    dCoverC = (np.cos(href)*da + np.sin(href)*db) / Cref
    dHoverC = (np.cos(href)*db - np.sin(href)*da) / Cref
    dCoverC_model = (np.cos(href)*da_model + np.sin(href)*db_model) / Cref
    dHoverC_model = (np.cos(href)*db_model - np.sin(href)*da_model) / Cref
    dCoverC_res = dCoverC - dCoverC_model
    dHoverC_res = dHoverC - dHoverC_model
    dCHoverC_std = np.vstack((np.std(dCoverC_res, axis=0),
                              np.std(dHoverC_res, axis=0)))
    dCHoverC_res = np.hstack((href, dCoverC_res, dHoverC_res))

    return poly_model, pmodel, dab_model, dab_res, dCHoverC_res, dab_std, dCHoverC_std
def generate_grid(jab_ranges = None, out = 'grid',
                  ax = np.arange(-_VF_MAXR, _VF_MAXR + _VF_DELTAR, _VF_DELTAR),
                  bx = np.arange(-_VF_MAXR, _VF_MAXR + _VF_DELTAR, _VF_DELTAR),
                  jx = None, limit_grid_radius = 0):
    """
    Generate a grid of color coordinates.
    
    Args:
        :out:
            | 'grid' or 'vectors', optional
            |   - 'grid': outputs a single 2d numpy.nd-vector with the grid coordinates
            |   - 'vectors': outputs each dimension separately.
        :jab_ranges:
            | None or ndarray, optional
            | Specifies the pixelization of color space.
            | (ndarray.shape = (3,3), with first axis: J,a,b, and second axis: min, max, delta)
        :ax:
            | default ndarray or user defined ndarray, optional
            | default = np.arange(-_VF_MAXR,_VF_MAXR+_VF_DELTAR,_VF_DELTAR)
        :bx:
            | default ndarray or user defined ndarray, optional
            | default = np.arange(-_VF_MAXR,_VF_MAXR+_VF_DELTAR,_VF_DELTAR)
        :jx:
            | None, optional
            | Note that a not-None :jab_ranges: overrides :ax:, :bx: and :jx: input.
        :limit_grid_radius:
            | 0, optional
            | A value of zero keeps the grid as specified by ax, bx.
            | A value > 0 only keeps (a,b) coordinates within :limit_grid_radius:
            
    Returns:
        :returns: 
            | single ndarray with ax,bx [,jx] 
            | or
            | separate ndarrays for each dimension specified.
    """
    # generate grid from jab_ranges array input, otherwise use ax, bx, jx input:
    if jab_ranges is not None:
        if jab_ranges.shape[0] == 3:
            jx = np.arange(jab_ranges[0][0], jab_ranges[0][1], jab_ranges[0][2])
            ax = np.arange(jab_ranges[1][0], jab_ranges[1][1], jab_ranges[1][2])
            bx = np.arange(jab_ranges[2][0], jab_ranges[2][1], jab_ranges[2][2])
        else:
            jx = None
            ax = np.arange(jab_ranges[0][0], jab_ranges[0][1], jab_ranges[0][2])
            bx = np.arange(jab_ranges[1][0], jab_ranges[1][1], jab_ranges[1][2])

    # Generate grid from (jx), ax, bx:
    Ax, Bx = np.meshgrid(ax, bx)
    grid = np.dstack((Ax, Bx))
    grid = np.reshape(grid, (np.array(grid.shape[:-1]).prod(), grid.ndim - 1))
    if jx is not None:
        for i, v in enumerate(jx):
            gridi = np.hstack((np.ones((grid.shape[0], 1)) * v, grid))
            if i == 0:
                gridwithJ = gridi
            else:
                gridwithJ = np.vstack((gridwithJ, gridi))
        grid = gridwithJ

    if jx is None:
        ax = grid[:, 0:1]
        bx = grid[:, 1:2]
    else:
        jx = grid[:, 0:1]
        ax = grid[:, 1:2]
        bx = grid[:, 2:3]

    if limit_grid_radius > 0:  # limit radius of grid:
        Cr = (ax**2 + bx**2)**0.5
        ax = ax[Cr <= limit_grid_radius, None]
        bx = bx[Cr <= limit_grid_radius, None]
        if jx is not None:
            jx = jx[Cr <= limit_grid_radius, None]

    # create output:
    if out == 'grid':
        if jx is None:
            return np.hstack((ax, bx))
        else:
            return np.hstack((jx, ax, bx))
    else:
        if jx is None:
            return ax, bx
        else:
            return jx, ax, bx
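A sketch with hypothetical ranges (illustrative; assumes numpy as np and generate_grid in scope):

import numpy as np

# (a,b)-grid limited to a disk of radius 40:
ax, bx = generate_grid(ax = np.arange(-40, 45, 5), bx = np.arange(-40, 45, 5),
                       out = 'vectors', limit_grid_radius = 40)
grid = generate_grid(ax = np.arange(-40, 45, 5), bx = np.arange(-40, 45, 5),
                     out = 'grid', limit_grid_radius = 40)  # single (N, 2) ndarray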
def plot_shift_data(data, fieldtype = 'vectorfield', scalef = _VF_MAXR,
                    color = 'k', axtype = 'polar', ax = None,
                    hbins = 10, start_hue = 0.0, bin_labels = '#',
                    plot_center_lines = True, plot_axis_labels = False,
                    plot_edge_lines = False, plot_bin_colors = True,
                    force_CVG_layout = True):
    """
    Plot vector or circle fields generated by VFcolorshiftmodel() 
    or PXcolorshiftmodel().
     
    Args:
        :data: 
            | dict generated by VFcolorshiftmodel() or PXcolorshiftmodel()
            | Must contain 'fielddata'-key, which is a dict with possible keys:
            |   - key: 'vectorfield': ndarray with vector field data
            |   - key: 'circlefield': ndarray with circle field data
        :fieldtype:
            | 'vectorfield' or 'circlefield', optional
            | Type of field to plot.
        :scalef:
            | _VF_MAXR, optional
            | Scale factor for graphic.
        :color: 
            | 'k', optional
            | Color for plotting the vector-fields.
        :axtype: 
            | 'polar' or 'cart', optional
            | Make polar or Cartesian plot.
        :ax: 
            | None or 'new' or 'same', optional
            |   - None or 'new' creates new plot
            |   - 'same': continue plot on same axes.
            |   - axes handle: plot on specified axes.
        :hbins: 
            | 10 or ndarray with sorted hue bin centers (°), optional
        :start_hue:
            | 0.0, optional
        :plot_axis_labels:
            | False, optional
            | Turns axis ticks on/off (True/False).
        :bin_labels:
            | None or list[str] or '#', optional
            | Plots labels at the bin center hues.
            |   - None: don't plot.
            |   - list[str]: list with str for each bin. 
            |     (len(:bin_labels:) = :nhbins:)
            |   - '#': plots number.
        :plot_edge_lines:
            | True or False, optional
            | Plot grey bin edge lines with '--'.
        :plot_center_lines:
            | False or True, optional
            | Plot colored lines at 'center' of hue bin.
        :plot_bin_colors:
            | True, optional
            | Colorize hue-bins.
        :force_CVG_layout:
            | False or True, optional
            | True: Force plot of basis of CVG.
            
    Returns:
        :returns: 
            | figCVG, hax, cmap
            |   :figCVG: handle to CVG figure
            |   :hax: handle to CVG axes
            |   :cmap: list with rgb colors for hue bins 
            |          (for use in other plotting functions)
    """
    # Plot basis of CVG:
    figCVG, hax, cmap = plot_hue_bins(hbins = hbins, axtype = axtype, ax = ax,
                                      plot_center_lines = plot_center_lines,
                                      plot_edge_lines = plot_edge_lines,
                                      plot_bin_colors = plot_bin_colors,
                                      scalef = scalef,
                                      force_CVG_layout = force_CVG_layout,
                                      bin_labels = bin_labels)

    # plot vector field:
    if data is not None:
        if fieldtype is not None:
            vf = data['fielddata'][fieldtype]
            if axtype == 'polar':
                if fieldtype == 'vectorfield':
                    vfrtheta = math.positive_arctan(vf['axr'], vf['bxr'],
                                                    htype = 'rad')
                    vfrr = np.sqrt(vf['axr']**2 + vf['bxr']**2)
                    hax.quiver(vfrtheta, vfrr,
                               vf['axt'] - vf['axr'], vf['bxt'] - vf['bxr'],
                               headlength = 3, color = color, angles = 'uv',
                               scale_units = 'y', scale = 2, linewidth = 0.5)
                else:
                    vfttheta = math.positive_arctan(vf['axt'], vf['bxt'],
                                                    htype = 'rad')
                    vfrtheta = math.positive_arctan(vf['axr'], vf['bxr'],
                                                    htype = 'rad')
                    vftr = np.sqrt(vf['axt']**2 + vf['bxt']**2)
                    # hue shift:
                    dh = (math.angle_v1v2(np.hstack((vf['axt'], vf['bxt'])),
                                          np.hstack((vf['axr'], vf['bxr'])),
                                          htype = 'deg')[:, None])
                    dh = dh / np.nanmax(dh)
                    plt.set_cmap('jet')
                    hax.scatter(vfttheta, vftr, s = 100 * dh, c = dh,
                                linestyle = 'None', marker = 'o', norm = None)
                hax.set_ylim([0, 1.1 * scalef])
            else:
                if fieldtype == 'vectorfield':
                    hax.quiver(vf['axr'], vf['bxr'],
                               vf['axt'] - vf['axr'], vf['bxt'] - vf['bxr'],
                               headlength = 1, color = color, angles = 'uv',
                               scale_units = 'xy', scale = 1, linewidth = 0.5)
                else:
                    hax.plot(vf['axr'], vf['bxr'], color = color, marker = '.',
                             linestyle = 'None')

    return figCVG, hax, cmap
def plotBB(ccts = None, cieobs = _CIEOBS, cspace = _CSPACE, axh = None,
           cctlabels = True, show = True, cspace_pars = {}, formatstr = 'k-',
           **kwargs):
    """
    Plot blackbody locus.
        
    Args: 
        :ccts: 
            | None or list[float], optional
            | None defaults to [1000 to 1e19 K].
            | Range: 
            |   [1000,1500,2000,2500,3000,3500,4000,5000,6000,8000,10000] 
            |   + [15000 K to 1e19 K] in 100 steps on a log10 scale
        :cctlabels:
            | True or False, optional
            | Add cct text labels at various points along the blackbody locus.
        :axh: 
            | None or axes handle, optional
            | Determines axes to plot data in.
            | None: make new figure.
        :show:
            | True or False, optional
            | Invoke matplotlib.pyplot.show() right after plotting
        :cieobs:
            | luxpy._CIEOBS or str, optional
            | Determines CMF set to calculate spectrum locus or other.
        :cspace:
            | luxpy._CSPACE or str, optional
            | Determines color space / chromaticity diagram to plot data in.
            | Note that data is expected to be in specified :cspace:
        :formatstr:
            | 'k-' or str, optional
            | Format str for plotting (see ?matplotlib.pyplot.plot)
        :cspace_pars:
            | {} or dict, optional
            | Dict with parameters required by color space specified in :cspace:
            | (for use with luxpy.colortf())
        :kwargs: 
            | additional keyword arguments for use with matplotlib.pyplot.
    
    Returns:
        :returns: 
            | None (:show: == True) 
            | or 
            | handle to current axes (:show: == False)
    """
    if ccts is None:
        ccts1 = np.array([1000.0, 1500.0, 2000.0, 2500.0, 3000.0, 3500.0,
                          4000.0, 5000.0, 6000.0, 8000.0, 10000.0])
        # num must be an int (was 100.0):
        ccts2 = 10**np.linspace(np.log10(15000.0), np.log10(10.0**19.0), 100)
        ccts = np.hstack((ccts1, ccts2))
    else:
        ccts1 = None

    BB = cri_ref(ccts, ref_type = 'BB')
    xyz = spd_to_xyz(BB, cieobs = cieobs)
    Yxy = colortf(xyz, tf = cspace, tfa0 = cspace_pars)
    Y, x, y = asplit(Yxy)

    axh = plot_color_data(x, y, axh = axh, cieobs = cieobs, cspace = cspace,
                          show = show, formatstr = formatstr, **kwargs)

    if (cctlabels == True) & (ccts1 is not None):
        for i in range(ccts1.shape[0]):
            if ccts1[i] >= 3000.0:
                if i % 2 == 0.0:
                    plt.plot(x[i], y[i], 'k+', color = '0.5')
                    plt.text(x[i]*1.05, y[i]*0.95,
                             '{:1.0f}K'.format(ccts1[i]), color = '0.5')
        plt.plot(x[-1], y[-1], 'k+', color = '0.5')
        plt.text(x[-1]*1.05, y[-1]*0.95, '{:1.0e}K'.format(ccts[-1]),
                 color = '0.5')
    if show == False:
        return axh
def VF_colorshift_model(S, cri_type = _VF_CRI_DEFAULT,
                        model_type = _VF_MODEL_TYPE,
                        cspace = _VF_CSPACE, sampleset = None, pool = False,
                        pcolorshift = {'href': np.arange(np.pi/10, 2*np.pi, 2*np.pi/10),
                                       'Cref': _VF_MAXR, 'sig': _VF_SIG},
                        vfcolor = 'k', verbosity = 0):
    """
    Apply full vector field model calculations to spectral data.
    
    Args:
        :S: 
            | numpy.ndarray with spectral data.
        :cri_type:
            | _VF_CRI_DEFAULT or str or dict, optional
            | Specifies type of color fidelity model to use. 
            | Controls choice of ref. ill., sample set, averaging, scaling, etc.
            | See luxpy.cri.spd_to_cri for more info.
        :model_type:
            | _VF_MODEL_TYPE or 'M6' or 'M5', optional
            | Specifies degree 5 or degree 6 polynomial model in ab-coordinates.
        :cspace:
            | _VF_CSPACE or dict, optional
            | Specifies color space. See _VF_CSPACE_EXAMPLE for example structure.
        :sampleset:
            | None or str or ndarray, optional
            | Sampleset to be used when calculating vector field model.
        :pool: 
            | False, optional
            | If :S: contains multiple spectra, True pools all jab data before 
            | modeling the vector field, while False models a different field 
            | for each spectrum.
        :pcolorshift: 
            | default dict (see below) or user defined dict, optional
            | Dict containing the specification input 
            | for apply_poly_model_at_hue_x().
            | Default dict = {'href': np.arange(np.pi/10,2*np.pi,2*np.pi/10),
            |                 'Cref' : _VF_MAXR, 
            |                 'sig' : _VF_SIG, 
            |                 'labels' : '#'} 
            | The polynomial models of degree 5 and 6 can be fully specified or 
            | summarized by the model parameters themselves OR by calculating 
            | the dCoverC and dH at resp. 5 and 6 hues.
        :vfcolor:
            | 'k', optional
            | For plotting the vector fields.
        :verbosity: 
            | 0, optional
            | Report warnings or not.
            
    Returns:
        :returns: 
            | list[dict] (each list element refers to a different test SPD)
            | with the following keys:
            |   - 'Source': dict with ndarrays of the S, cct and duv of source spd.
            |   - 'metrics': dict with ndarrays for:
            |       * Rf (color fidelity: base + metameric shift)
            |       * Rt (metameric uncertainty index) 
            |       * Rfi (specific color fidelity indices)
            |       * Rti (specific metameric uncertainty indices)
            |       * cri_type (str with cri_type)
            |   - 'Jab': dict with ndarrays for Jabt, Jabr, DEi
            |   - 'dC/C_dH_x_sig': 
            |       np.vstack((dCoverC_x,dCoverC_x_sig,dH_x,dH_x_sig)).T
            |       See get_poly_model() for more info.
            |   - 'fielddata': dict with dicts containing data on the calculated 
            |     vector-field and circle-fields: 
            |       * 'vectorfield': {'axt': vfaxt, 'bxt' : vfbxt, 
            |                         'axr' : vfaxr, 'bxr' : vfbxr},
            |       * 'circlefield': {'axt': cfaxt, 'bxt' : cfbxt, 
            |                         'axr' : cfaxr, 'bxr' : cfbxr}
            |   - 'modeldata': dict with model info:
            |       {'pmodel': pmodel, 'pcolorshift' : pcolorshift, 
            |        'dab_model' : dab_model, 'dab_res' : dab_res,
            |        'dab_std' : dab_std, 'model_type' : model_type, 
            |        'fmodel' : poly_model, 'Jabtm' : Jabtm, 'Jabrm' : Jabrm, 
            |        'DEim' : DEim}
            |   - 'vshifts': dict with various vector shifts:
            |       * 'Jabshiftvector_r_to_t': ndarray with difference vectors
            |         between jabt and jabr.
            |       * 'vshift_ab_s': ab-shift vectors of samples 
            |       * 'vshift_ab_s_vf': ab-shift vectors of VF model predictions 
            |         of samples.
            |       * 'vshift_ab_vf': ab-shift vectors of VF model predictions 
            |         of vector field grid.
    """
    if type(cri_type) == str:
        cri_type_str = cri_type
    else:
        cri_type_str = None

    # Calculate Rf, Rfi and Jabr, Jabt:
    Rf, Rfi, Jabt, Jabr, cct, duv, cri_type = spd_to_cri(
        S, cri_type = cri_type,
        out = 'Rf,Rfi,jabt,jabr,cct,duv,cri_type', sampleset = sampleset)

    # In case of multiple source SPDs, pool:
    if (len(Jabr.shape) == 3) & (Jabr.shape[1] > 1) & (pool == True):
        Jabr = np.transpose(Jabr, (1, 0, 2))  # set lamps on first dimension
        Jabt = np.transpose(Jabt, (1, 0, 2))
        Jabr = Jabr.reshape(Jabr.shape[0]*Jabr.shape[1], 3)  # put all lamp data one after the other
        Jabt = Jabt.reshape(Jabt.shape[0]*Jabt.shape[1], 3)
        Jabt = Jabt[:, None, :]  # add dim = 1
        Jabr = Jabr[:, None, :]

    out = [{} for _ in range(Jabr.shape[1])]  # initialize empty list of dicts
    if pool == False:
        N = Jabr.shape[1]
    else:
        N = 1
    for i in range(N):
        Jabr_i = Jabr[:, i, :].copy()
        Jabr_i = Jabr_i[:, None, :]
        Jabt_i = Jabt[:, i, :].copy()
        Jabt_i = Jabt_i[:, None, :]

        DEi = np.sqrt((Jabr_i[..., 0] - Jabt_i[..., 0])**2 +
                      (Jabr_i[..., 1] - Jabt_i[..., 1])**2 +
                      (Jabr_i[..., 2] - Jabt_i[..., 2])**2)

        # Determine polynomial model (use the requested model_type,
        # which was previously ignored in favor of _VF_MODEL_TYPE):
        (poly_model, pmodel, dab_model, dab_res, dCHoverC_res,
         dab_std, dCHoverC_std) = get_poly_model(Jabt_i, Jabr_i,
                                                 modeltype = model_type)

        # Apply model at fixed hues:
        href = pcolorshift['href']
        Cref = pcolorshift['Cref']
        sig = pcolorshift['sig']
        dCoverC_x, dCoverC_x_sig, dH_x, dH_x_sig = apply_poly_model_at_hue_x(
            poly_model, pmodel, dCHoverC_res, hx = href, Cxr = Cref, sig = sig)

        # Calculate deshifted a,b values on original samples:
        Jt = Jabt_i[..., 0].copy()
        at = Jabt_i[..., 1].copy()
        bt = Jabt_i[..., 2].copy()
        Jr = Jabr_i[..., 0].copy()
        ar = Jabr_i[..., 1].copy()
        br = Jabr_i[..., 2].copy()
        ar = ar + dab_model[:, 0:1]  # deshift reference to model prediction
        br = br + dab_model[:, 1:2]  # deshift reference to model prediction

        Jabtm = np.hstack((Jt, at, bt))
        Jabrm = np.hstack((Jr, ar, br))

        # calculate color differences between test and deshifted ref:
        DEim = np.sqrt(0*(Jr - Jt)**2 + (at - ar)**2 + (bt - br)**2)  # J is not used

        # Apply scaling function to convert DEim to Rti:
        scale_factor = cri_type['scale']['cfactor']
        scale_fcn = cri_type['scale']['fcn']
        avg = cri_type['avg']
        Rfi_deshifted = scale_fcn(DEim, scale_factor)
        Rf_deshifted = scale_fcn(avg(DEim, axis = 0), scale_factor)

        rms = lambda x: np.sqrt(np.sum(x**2, axis = 0) / x.shape[0])
        Rf_deshifted_rms = scale_fcn(rms(DEim), scale_factor)

        # Generate vector field (a duplicated identical call was removed):
        vfaxt, vfbxt, vfaxr, vfbxr = generate_vector_field(
            poly_model, pmodel,
            axr = np.arange(-_VF_MAXR, _VF_MAXR + _VF_DELTAR, _VF_DELTAR),
            bxr = np.arange(-_VF_MAXR, _VF_MAXR + _VF_DELTAR, _VF_DELTAR),
            limit_grid_radius = _VF_MAXR, color = 0)

        # Calculate ab-shift vectors of samples and VF model predictions:
        vshift_ab_s = calculate_shiftvectors(Jabt_i, Jabr_i, average = False,
                                             vtype = 'ab')[:, 0, 0:3]
        vshift_ab_s_vf = calculate_shiftvectors(Jabtm, Jabrm, average = False,
                                                vtype = 'ab')

        # Calculate ab-shift vectors using vector field model:
        Jabt_vf = np.hstack((np.zeros((vfaxt.shape[0], 1)), vfaxt, vfbxt))
        Jabr_vf = np.hstack((np.zeros((vfaxr.shape[0], 1)), vfaxr, vfbxr))
        vshift_ab_vf = calculate_shiftvectors(Jabt_vf, Jabr_vf,
                                              average = False, vtype = 'ab')

        # Generate circle field:
        x, y = plotcircle(radii = np.arange(0, _VF_MAXR + _VF_DELTAR, 10),
                          angles = np.arange(0, 359, 1), out = 'x,y')
        cfaxt, cfbxt, cfaxr, cfbxr = generate_vector_field(
            poly_model, pmodel, make_grid = False,
            axr = x[:, None], bxr = y[:, None],
            limit_grid_radius = _VF_MAXR, color = 0)

        out[i] = {'Source': {'S': S, 'cct': cct[i], 'duv': duv[i]},
                  'metrics': {'Rf': Rf[:, i], 'Rt': Rf_deshifted,
                              'Rt_rms': Rf_deshifted_rms, 'Rfi': Rfi[:, i],
                              'Rti': Rfi_deshifted, 'cri_type': cri_type_str},
                  'Jab': {'Jabt': Jabt_i, 'Jabr': Jabr_i, 'DEi': DEi},
                  'dC/C_dH_x_sig': np.vstack((dCoverC_x, dCoverC_x_sig,
                                              dH_x, dH_x_sig)).T,
                  'fielddata': {'vectorfield': {'axt': vfaxt, 'bxt': vfbxt,
                                                'axr': vfaxr, 'bxr': vfbxr},
                                'circlefield': {'axt': cfaxt, 'bxt': cfbxt,
                                                'axr': cfaxr, 'bxr': cfbxr}},
                  'modeldata': {'pmodel': pmodel, 'pcolorshift': pcolorshift,
                                'dab_model': dab_model, 'dab_res': dab_res,
                                'dab_std': dab_std, 'model_type': model_type,
                                'fmodel': poly_model, 'Jabtm': Jabtm,
                                'Jabrm': Jabrm, 'DEim': DEim},
                  'vshifts': {'Jabshiftvector_r_to_t': np.hstack((Jt - Jr,
                                                                  at - ar,
                                                                  bt - br)),
                              'vshift_ab_s': vshift_ab_s,
                              'vshift_ab_s_vf': vshift_ab_s_vf,
                              'vshift_ab_vf': vshift_ab_vf}}
    return out
def cie2006cmfsEx(age = 32, fieldsize = 10, wl = None,
                  var_od_lens = 0, var_od_macula = 0,
                  var_od_L = 0, var_od_M = 0, var_od_S = 0,
                  var_shft_L = 0, var_shft_M = 0, var_shft_S = 0,
                  out = 'LMS', allow_negative_values = False):
    """
    Generate Individual Observer CMFs (cone fundamentals) 
    based on CIE2006 cone fundamentals and published literature 
    on observer variability in color matching and in physiological parameters.
    
    Args:
        :age: 
            | 32 or float or int, optional
            | Observer age
        :fieldsize:
            | 10, optional
            | Field size of stimulus in degrees (between 2° and 10°).
        :wl: 
            | None, optional
            | Interpolation/extrapolation of :LMS: output to specified wavelengths.
            | None: output original _WL = np.array([390,780,5])
        :var_od_lens:
            | 0, optional
            | Std Dev. in peak optical density [%] of lens.
        :var_od_macula:
            | 0, optional
            | Std Dev. in peak optical density [%] of macula.
        :var_od_L:
            | 0, optional
            | Std Dev. in peak optical density [%] of L-cone.
        :var_od_M:
            | 0, optional
            | Std Dev. in peak optical density [%] of M-cone.
        :var_od_S:
            | 0, optional
            | Std Dev. in peak optical density [%] of S-cone.
        :var_shft_L:
            | 0, optional
            | Std Dev. in peak wavelength shift [nm] of L-cone. 
        :var_shft_M:
            | 0, optional
            | Std Dev. in peak wavelength shift [nm] of M-cone.
        :var_shft_S:
            | 0, optional
            | Std Dev. in peak wavelength shift [nm] of S-cone.
        :out: 
            | 'LMS' or str, optional
            | Determines output.
        :allow_negative_values:
            | False, optional
            | Cone fundamentals or color matching functions 
            | should not have negative values.
            | If False: X[X<0] = 0.
            
    Returns:
        :returns: 
            | - 'LMS': ndarray with individual observer area-normalized 
            |   cone fundamentals. Wavelengths have been added.
            | [- 'trans_lens': ndarray with lens transmission 
            |    (no wavelengths added, no interpolation)
            |  - 'trans_macula': ndarray with macula transmission 
            |    (no wavelengths added, no interpolation)
            |  - 'sens_photopig': ndarray with photopigment sensitivity 
            |    (no wavelengths added, no interpolation)]
            
    References:
        1. `Asano Y, Fairchild MD, and Blondé L (2016). 
        Individual Colorimetric Observer Model. 
        PLoS One 11, 1–19. 
        <http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0145671>`_
        
        2. `Asano Y, Fairchild MD, Blondé L, and Morvan P (2016). 
        Color matching experiment for highlighting interobserver variability. 
        Color Res. Appl. 41, 530–539. 
        <https://onlinelibrary.wiley.com/doi/abs/10.1002/col.21975>`_
        
        3. `CIE (2006). Fundamental Chromaticity Diagram with Physiological 
        Axes - Part I (Vienna: CIE). 
        <http://www.cie.co.at/publications/fundamental-chromaticity-diagram-physiological-axes-part-1>`_ 
        
        4. `Asano's Individual Colorimetric Observer Model 
        <https://www.rit.edu/cos/colorscience/re_AsanoObserverFunctions.php>`_
    """
    fs = fieldsize
    rmd = _INDVCMF_DATA['rmd'].copy()
    LMSa = _INDVCMF_DATA['LMSa'].copy()
    docul = _INDVCMF_DATA['docul'].copy()

    # field size corrected macular density
    # (varied peak optical density of macula):
    pkOd_Macula = 0.485 * np.exp(-fs / 6.132) * (1 + var_od_macula / 100)
    corrected_rmd = rmd * pkOd_Macula

    # age corrected lens/ocular media density:
    if (age <= 60):
        correct_lomd = docul[:1] * (1 + 0.02 * (age - 32)) + docul[1:2]
    else:
        correct_lomd = docul[:1] * (1.56 + 0.0667 * (age - 60)) + docul[1:2]
    # varied overall optical density of lens:
    correct_lomd = correct_lomd * (1 + var_od_lens / 100)

    # Peak Wavelength Shift:
    wl_shifted = np.empty(LMSa.shape)
    wl_shifted[0] = _WL + var_shft_L
    wl_shifted[1] = _WL + var_shft_M
    wl_shifted[2] = _WL + var_shft_S

    LMSa_shft = np.empty(LMSa.shape)
    kind = 'cubic'
    LMSa_shft[0] = interpolate.interp1d(wl_shifted[0], LMSa[0], kind = kind,
                                        bounds_error = False,
                                        fill_value = "extrapolate")(_WL)
    LMSa_shft[1] = interpolate.interp1d(wl_shifted[1], LMSa[1], kind = kind,
                                        bounds_error = False,
                                        fill_value = "extrapolate")(_WL)
    LMSa_shft[2] = interpolate.interp1d(wl_shifted[2], LMSa[2], kind = kind,
                                        bounds_error = False,
                                        fill_value = "extrapolate")(_WL)
    # detect poor interpolation (sign switch due to instability):
    ssw = np.hstack((0, np.sign(np.diff(LMSa_shft[2, :]))))
    LMSa_shft[2, np.where((ssw >= 0) & (_WL > 560))] = np.nan

    # corrected LMS (no age correction);
    # varied peak optical densities of L-, M- and S-cones:
    pkOd_L = (0.38 + 0.54 * np.exp(-fs / 1.333)) * (1 + var_od_L / 100)
    pkOd_M = (0.38 + 0.54 * np.exp(-fs / 1.333)) * (1 + var_od_M / 100)
    pkOd_S = (0.30 + 0.45 * np.exp(-fs / 1.333)) * (1 + var_od_S / 100)

    alpha_lms = 0. * LMSa_shft
    alpha_lms[0] = 1 - 10**(-pkOd_L * (10**LMSa_shft[0]))
    alpha_lms[1] = 1 - 10**(-pkOd_M * (10**LMSa_shft[1]))
    alpha_lms[2] = 1 - 10**(-pkOd_S * (10**LMSa_shft[2]))

    # this fix is required because the above math fails for alpha_lms[2,:] == 0
    alpha_lms[2, np.where(_WL >= _WL_CRIT)] = 0

    # Corrected to Corneal Incidence:
    lms_barq = alpha_lms * (10**(-corrected_rmd - correct_lomd)) * np.ones(alpha_lms.shape)

    # Corrected to Energy Terms:
    lms_bar = lms_barq * _WL

    # Set NaN values to zero:
    lms_bar[np.isnan(lms_bar)] = 0

    # normalized:
    LMS = 100 * lms_bar / np.nansum(lms_bar, axis = 1, keepdims = True)

    # Output extra:
    trans_lens = 10**(-correct_lomd)
    trans_macula = 10**(-corrected_rmd)
    sens_photopig = alpha_lms * _WL

    # Add wavelengths:
    LMS = np.vstack((_WL, LMS))

    if ('xyz' in out.lower().split(',')):
        LMS = lmsb_to_xyzb(LMS, fieldsize, out = 'xyz',
                           allow_negative_values = allow_negative_values)
        out = out.replace('xyz', 'LMS').replace('XYZ', 'LMS')
    if ('lms' in out.lower().split(',')):
        out = out.replace('lms', 'LMS')

    # Interpolate/extrapolate:
    if wl is None:
        interpolation = None
    else:
        interpolation = 'cubic'
    LMS = spd(LMS, wl = wl, interpolation = interpolation, norm_type = 'area')

    if (out == 'LMS'):
        return LMS
    elif (out == 'LMS,trans_lens,trans_macula,sens_photopig'):
        return LMS, trans_lens, trans_macula, sens_photopig
    elif (out == 'LMS,trans_lens,trans_macula,sens_photopig,LMSa'):
        return LMS, trans_lens, trans_macula, sens_photopig, LMSa
    else:
        return eval(out)
def fit_ellipse(xy, center_on_mean_xy = False):
    """
    Fit an ellipse to supplied data points.

    Args:
        :xy: 
            | coordinates of points to fit (Nx2 array)
        :center_on_mean_xy:
            | False, optional
            | Center ellipse on mean of xy
            | (otherwise it might be offset due to solving 
            | the constrained minimization problem: aT*S*a, see ref below.)
            
    Returns:
        :v:
            | vector with ellipse parameters [Rmax,Rmin, xc,yc, theta]
            
    Reference:
        1. Fitzgibbon, A.W., Pilu, M., and Fisher, R.B., Direct least squares 
        fitting of ellipses, Proc. of the 13th International Conference on 
        Pattern Recognition, pp 253-257, Vienna, 1996.
    """
    # Fit ellipse:
    x, y = xy[:, 0:1], xy[:, 1:2]
    D = np.hstack((x * x, x * y, y * y, x, y, np.ones_like(x)))
    S, C = np.dot(D.T, D), np.zeros([6, 6])
    C[0, 2], C[2, 0], C[1, 1] = 2, 2, -1
    U, s, V = np.linalg.svd(np.dot(np.linalg.inv(S), C))
    e = U[:, 0]

    # get ellipse axis lengths, center and orientation:
    b, c, d, f, g, a = e[1] / 2, e[2], e[3] / 2, e[4] / 2, e[5], e[0]

    # get ellipse center:
    num = b * b - a * c
    if num == 0:
        xc = 0
        yc = 0
    else:
        xc = ((c * d - b * f) / num)
        yc = ((a * f - b * d) / num)

    # get ellipse orientation:
    theta = np.arctan2(np.array(2 * b), np.array((a - c))) / 2

    # axis lengths:
    up = 2 * (a * f * f + c * d * d + g * b * b - 2 * b * d * f - a * c * g)
    down1 = (b * b - a * c) * ((c - a) * np.sqrt(1 + 4 * b * b /
                                                 ((a - c) * (a - c))) - (c + a))
    down2 = (b * b - a * c) * ((a - c) * np.sqrt(1 + 4 * b * b /
                                                 ((a - c) * (a - c))) - (c + a))
    a, b = np.sqrt((up / down1)), np.sqrt((up / down2))

    # assert that a is the major axis (otherwise swap and correct angle)
    if (b > a):
        b, a = a, b

    # ensure the angle is between 0 and 2*pi
    theta = np.fmod(theta, 2.0 * np.pi)

    if center_on_mean_xy == True:
        xc, yc = xy.mean(axis = 0)

    return np.hstack((a, b, xc, yc, theta))
def spd_to_ies_tm30_metrics(SPD, cri_type = None, hbins = 16, start_hue = 0.0,
                            scalef = 100, vf_model_type = _VF_MODEL_TYPE,
                            vf_pcolorshift = _VF_PCOLORSHIFT,
                            scale_vf_chroma_to_sample_chroma = False):
    """
    Calculate IES TM30 metrics from spectral data.
    
    Args:
        :SPD:
            | numpy.ndarray with spectral data 
        :cri_type:
            | None, optional
            | If None: defaults to cri_type = 'iesrf'.
            | Not-None values of :hbins:, :start_hue: and :scalef:
            | overwrite input in cri_type['rg_pars'] 
        :hbins:
            | None or numpy.ndarray with sorted hue bin centers (°), optional
        :start_hue: 
            | None, optional
        :scalef:
            | None, optional
            | Scale factor for reference circle.
        :vf_model_type:
            | _VF_MODEL_TYPE or 'M6' or 'M5', optional
            | Specifies degree 5 or degree 6 polynomial model 
            | in ab-coordinates.
        :vf_pcolorshift:
            | _VF_PCOLORSHIFT or user defined dict, optional
            | The polynomial models of degree 5 and 6 can be fully specified or 
            | summarized by the model parameters themselves OR by calculating 
            | the dCoverC and dH at resp. 5 and 6 hues. :vf_pcolorshift: 
            | specifies these hues and chroma level.
        :scale_vf_chroma_to_sample_chroma: 
            | False, optional
            | Scale chroma of reference and test vf fields such that average of 
            | binned reference chroma equals that of the binned sample chroma
            | before calculating hue bin metrics.
            
    Returns:
        :data: 
            | dict with color rendering data:
            |   - 'SPD'  : ndarray test SPDs
            |   - 'bjabt': ndarray with binned jab data under test SPDs
            |   - 'bjabr': ndarray with binned jab data under reference SPDs
            |   - 'cct'  : ndarray with CCT of test SPD
            |   - 'duv'  : ndarray with distance to blackbody locus of test SPD
            |   - 'Rf'   : ndarray with general color fidelity indices
            |   - 'Rg'   : ndarray with gamut area indices
            |   - 'Rfi'  : ndarray with specific color fidelity indices
            |   - 'Rfhi' : ndarray with local (hue binned) fidelity indices
            |   - 'Rcshi': ndarray with local chroma shifts indices
            |   - 'Rhshi': ndarray with local hue shifts indices
            |   - 'Rt'   : ndarray with general metameric uncertainty index Rt
            |   - 'Rti'  : ndarray with specific metameric uncertainty indices Rti
            |   - 'Rfhi_vf' : ndarray with local (hue binned) fidelity indices 
            |                 obtained from VF model predictions at color space
            |                 pixel coordinates
            |   - 'Rcshi_vf': ndarray with local chroma shifts indices 
            |                 (same as above)
            |   - 'Rhshi_vf': ndarray with local hue shifts indices 
            |                 (same as above)
    """
    if cri_type is None:
        cri_type = 'iesrf'

    # Calculate color rendering measures for SPDs in data:
    out = 'Rf,Rg,cct,duv,Rfi,jabt,jabr,Rfhi,Rcshi,Rhshi,cri_type'
    if isinstance(cri_type, str):  # get dict 
        cri_type = _CRI_DEFAULTS[cri_type].copy()
    if hbins is not None:
        cri_type['rg_pars']['nhbins'] = hbins
    if start_hue is not None:
        cri_type['rg_pars']['start_hue'] = start_hue
    if scalef is not None:
        cri_type['rg_pars']['normalized_chroma_ref'] = scalef
    Rf, Rg, cct, duv, Rfi, jabt, jabr, Rfhi, Rcshi, Rhshi, cri_type = spd_to_cri(
        SPD, cri_type = cri_type, out = out)
    rg_pars = cri_type['rg_pars']

    # Calculate metameric uncertainty and base color shifts:
    dataVF = VF_colorshift_model(SPD, cri_type = cri_type,
                                 model_type = vf_model_type,
                                 cspace = cri_type['cspace'],
                                 sampleset = eval(cri_type['sampleset']),
                                 pool = False, pcolorshift = vf_pcolorshift,
                                 vfcolor = 0)
    Rf_ = np.array([dataVF[i]['metrics']['Rf'] for i in range(len(dataVF))]).T
    Rt = np.array([dataVF[i]['metrics']['Rt'] for i in range(len(dataVF))]).T
    Rti = np.array([dataVF[i]['metrics']['Rti'] for i in range(len(dataVF))][0])

    # Get normalized and sliced sample data for plotting:
    rg_pars = cri_type['rg_pars']
    nhbins, normalize_gamut, normalized_chroma_ref, start_hue = [
        rg_pars[x] for x in sorted(rg_pars.keys())]
    normalized_chroma_ref = scalef  # np.sqrt((jabr[...,1]**2 + jabr[...,2]**2)).mean(axis = 0).mean()

    if scale_vf_chroma_to_sample_chroma == True:
        normalize_gamut = False
        bjabt, bjabr = gamut_slicer(jabt, jabr, out = 'jabt,jabr',
                                    nhbins = nhbins, start_hue = start_hue,
                                    normalize_gamut = normalize_gamut,
                                    normalized_chroma_ref = normalized_chroma_ref,
                                    close_gamut = True)
        # average binned reference chroma, for rescaling the vector field:
        Cr_s = (np.sqrt(bjabr[:-1, ..., 1]**2 + bjabr[:-1, ..., 2]**2)).mean(axis = 0)

    normalize_gamut = True  # (for plotting)
    bjabt, bjabr = gamut_slicer(jabt, jabr, out = 'jabt,jabr', nhbins = nhbins,
                                start_hue = start_hue,
                                normalize_gamut = normalize_gamut,
                                normalized_chroma_ref = normalized_chroma_ref,
                                close_gamut = True)

    Rfhi_vf = np.empty(Rfhi.shape)
    Rcshi_vf = np.empty(Rcshi.shape)
    Rhshi_vf = np.empty(Rhshi.shape)
    for i in range(cct.shape[0]):

        # Get normalized and sliced VF data for hue specific metrics:
        vfjabt = np.hstack(
            (np.ones(dataVF[i]['fielddata']['vectorfield']['axt'].shape),
             dataVF[i]['fielddata']['vectorfield']['axt'],
             dataVF[i]['fielddata']['vectorfield']['bxt']))
        vfjabr = np.hstack(
            (np.ones(dataVF[i]['fielddata']['vectorfield']['axr'].shape),
             dataVF[i]['fielddata']['vectorfield']['axr'],
             dataVF[i]['fielddata']['vectorfield']['bxr']))
        nhbins, normalize_gamut, normalized_chroma_ref, start_hue = [
            rg_pars[x] for x in sorted(rg_pars.keys())]
        vfbjabt, vfbjabr, vfbDEi = gamut_slicer(
            vfjabt, vfjabr, out = 'jabt,jabr,DEi', nhbins = nhbins,
            start_hue = start_hue, normalize_gamut = normalize_gamut,
            normalized_chroma_ref = normalized_chroma_ref, close_gamut = False)

        if scale_vf_chroma_to_sample_chroma == True:
            # rescale vfbjabt and vfbjabr to the same chroma level as bjabr:
            Cr_vfb = np.sqrt(vfbjabr[..., 1]**2 + vfbjabr[..., 2]**2)
            Cr_vf = np.sqrt(vfjabr[..., 1]**2 + vfjabr[..., 2]**2)
            hr_vf = np.arctan2(vfjabr[..., 2], vfjabr[..., 1])
            Ct_vf = np.sqrt(vfjabt[..., 1]**2 + vfjabt[..., 2]**2)
            ht_vf = np.arctan2(vfjabt[..., 2], vfjabt[..., 1])
            fC = Cr_s.mean() / Cr_vfb.mean()
            vfjabr[..., 1] = fC * Cr_vf * np.cos(hr_vf)
            vfjabr[..., 2] = fC * Cr_vf * np.sin(hr_vf)
            vfjabt[..., 1] = fC * Ct_vf * np.cos(ht_vf)
            vfjabt[..., 2] = fC * Ct_vf * np.sin(ht_vf)
            vfbjabt, vfbjabr, vfbDEi = gamut_slicer(
                vfjabt, vfjabr, out = 'jabt,jabr,DEi', nhbins = nhbins,
                start_hue = start_hue, normalize_gamut = normalize_gamut,
                normalized_chroma_ref = normalized_chroma_ref,
                close_gamut = False)

        scale_factor = cri_type['scale']['cfactor']
        scale_fcn = cri_type['scale']['fcn']
        vfRfhi, vfRcshi, vfRhshi = jab_to_rhi(
            jabt = vfbjabt, jabr = vfbjabr, DEi = vfbDEi, cri_type = cri_type,
            scale_factor = scale_factor, scale_fcn = scale_fcn,
            use_bin_avg_DEi = True)  # [:-1,...] removes last row from jab as this was added to close the gamut.

        Rfhi_vf[:, i:i + 1] = vfRfhi
        Rhshi_vf[:, i:i + 1] = vfRhshi
        Rcshi_vf[:, i:i + 1] = vfRcshi

    # Create dict with CRI info (keys aligned with the docstring above;
    # 'Rchhi', 'Rfcshi_vf' and 'Rfhshi_vf' were typos):
    data = {'SPD': SPD, 'cct': cct, 'duv': duv,
            'bjabt': bjabt, 'bjabr': bjabr,
            'Rf': Rf, 'Rg': Rg, 'Rfi': Rfi,
            'Rfhi': Rfhi, 'Rcshi': Rcshi, 'Rhshi': Rhshi,
            'Rt': Rt, 'Rti': Rti,
            'Rfhi_vf': Rfhi_vf, 'Rcshi_vf': Rcshi_vf, 'Rhshi_vf': Rhshi_vf,
            'dataVF': dataVF, 'cri_type': cri_type}
    return data
def get_pixel_coordinates(jab, jab_ranges = None, jab_deltas = None,
                          limit_grid_radius = 0):
    """
    Get pixel coordinates corresponding to array of jab color coordinates.
    
    Args:
        :jab: 
            | ndarray of color coordinates
        :jab_ranges:
            | None or ndarray, optional
            | Specifies the pixelization of color space.
            | (ndarray.shape = (3,3), with first axis: J,a,b, and second 
            |  axis: min, max, delta)
        :jab_deltas:
            | float or ndarray, optional
            | Specifies the sampling range. 
            | A float uses jab_deltas as the maximum Euclidean distance to select
            | samples around each pixel center. A ndarray of 3 deltas, uses
            | a city block sampling around each pixel center.
        :limit_grid_radius:
            | 0, optional
            | A value of zero keeps the grid as specified by axr, bxr.
            | A value > 0 only keeps (a,b) coordinates within :limit_grid_radius:
            
    Returns:
        :returns:
            | gridp, idxp, jabp, samplenrs, samplesIDs
            |   - :gridp: ndarray with coordinates of all pixel centers.
            |   - :idxp: list[int] with pixel index for each non-empty pixel
            |   - :jabp: ndarray with center color coordinates of non-empty pixels
            |   - :samplenrs: list[list[int]] with sample numbers belonging to
            |     each non-empty pixel
            |   - :samplesIDs: summarizing list, 
            |     with column order: 'idxp, jabp, samplenrs'
    """
    if jab_deltas is None:
        jab_deltas = np.array([_VF_DELTAR, _VF_DELTAR, _VF_DELTAR])
    if jab_ranges is None:
        jab_ranges = np.vstack(
            ([0, 100, jab_deltas[0]],
             [-_VF_MAXR, _VF_MAXR + jab_deltas[1], jab_deltas[1]],
             [-_VF_MAXR, _VF_MAXR + jab_deltas[2], jab_deltas[2]]))

    # Get pixel grid:
    gridp = generate_grid(jab_ranges = jab_ranges,
                          limit_grid_radius = limit_grid_radius)

    # determine pixel coordinates of each sample in jab:
    samplesIDs = []
    for idx in range(gridp.shape[0]):
        # get pixel coordinates:
        jp = gridp[idx, 0]
        ap = gridp[idx, 1]
        bp = gridp[idx, 2]
        if type(jab_deltas) == np.ndarray:
            # city block sampling around each pixel center:
            sampleID = np.where(
                ((np.abs(jab[..., 0] - jp) <= jab_deltas[0] / 2) &
                 (np.abs(jab[..., 1] - ap) <= jab_deltas[1] / 2) &
                 (np.abs(jab[..., 2] - bp) <= jab_deltas[2] / 2)))
        else:
            # Euclidean sampling around each pixel center:
            sampleID = np.where(
                (np.sqrt((jab[..., 0] - jp)**2 + (jab[..., 1] - ap)**2 +
                         (jab[..., 2] - bp)**2) <= jab_deltas / 2))

        if (sampleID[0].shape[0] > 0):
            samplesIDs.append(
                np.hstack((idx, np.array([jp, ap, bp]), sampleID[0])))

    # int() instead of np.int (removed from NumPy):
    idxp = [int(samplesIDs[i][0]) for i in range(len(samplesIDs))]
    jabp = np.vstack([samplesIDs[i][1:4] for i in range(len(samplesIDs))])
    samplenrs = [np.array(samplesIDs[i][4:], dtype = int).tolist()
                 for i in range(len(samplesIDs))]

    return gridp, idxp, jabp, samplenrs, samplesIDs
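A sketch with a hypothetical coarse pixelization (illustrative; assumes numpy as np and the functions above in scope):

import numpy as np

jab = np.array([[50.0, 3.0, 4.0],
                [50.0, 4.0, 3.0],
                [50.0, -20.0, 10.0]])
jab_ranges = np.array([[0, 100, 100],   # J: a single slab
                       [-40, 45, 10],   # a
                       [-40, 45, 10]])  # b
gridp, idxp, jabp, samplenrs, samplesIDs = get_pixel_coordinates(
    jab, jab_ranges = jab_ranges, jab_deltas = np.array([100, 10, 10]))
# the first two samples share the pixel centered at (a,b) = (0,0);
# the third lands in the pixel centered at (-20, 10)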
def plotellipse(v, cspace_in='Yxy', cspace_out=None, nsamples=100,
                show=True, axh=None,
                line_color='darkgray', line_style=':',
                line_width=1, line_marker='', line_markersize=4,
                plot_center=False, center_marker='o',
                center_color='darkgray', center_markersize=4,
                show_grid=True, label_fontname='Times New Roman',
                label_fontsize=12,
                out=None):
    """
    Plot ellipse(s) given in v-format [Rmax,Rmin,xc,yc,theta].

    Args:
        :v:
            | (Nx5) ndarray
            | ellipse parameters [Rmax,Rmin,xc,yc,theta]
        :cspace_in:
            | 'Yxy', optional
            | Color space of v.
            | If None: no color space assumed. Axis labels assumed ('x','y').
        :cspace_out:
            | None, optional
            | Color space to plot ellipse(s) in.
            | If None: plot in cspace_in.
        :nsamples:
            | 100 or int, optional
            | Number of points (samples) on ellipse boundary.
        :show:
            | True or boolean, optional
            | Plot ellipse(s) (True) or not (False).
        :axh:
            | None, optional
            | Axes handle to plot ellipse(s) in.
            | If None: create new figure with axes.
        :line_color:
            | 'darkgray', optional
            | Color to plot ellipse(s) in.
        :line_style:
            | ':', optional
            | Linestyle of ellipse(s).
        :line_width:
            | 1, optional
            | Width of ellipse boundary line.
        :line_marker:
            | '', optional
            | Marker for ellipse boundary.
        :line_markersize:
            | 4, optional
            | Size of markers on ellipse boundary.
        :plot_center:
            | False, optional
            | Plot center of ellipse: yes (True) or no (False).
        :center_color:
            | 'darkgray', optional
            | Color to plot ellipse center in.
        :center_marker:
            | 'o', optional
            | Marker for ellipse center.
        :center_markersize:
            | 4, optional
            | Size of marker of ellipse center.
        :show_grid:
            | True, optional
            | Show grid (True) or not (False).
        :label_fontname:
            | 'Times New Roman', optional
            | Sets font type of axis labels.
        :label_fontsize:
            | 12, optional
            | Sets font size of axis labels.
        :out:
            | None, optional
            | Output of function.
            | If None: returns None. Can be used to output axh of newly created
            | figure axes or to return Yxys, an ndarray with coordinates of
            | ellipse boundaries in cspace_out (shape = (nsamples, N, 3)).

    Returns:
        :returns: None, or whatever is set by :out:.
    """
    Yxys = np.zeros((nsamples, 3, v.shape[0]))
    ellipse_vs = np.zeros((v.shape[0], 5))
    for i, vi in enumerate(v):

        # Set sample density of ellipse boundary:
        t = np.linspace(0, 2 * np.pi, nsamples)
        a = vi[0]  # major axis
        b = vi[1]  # minor axis
        xyc = vi[2:4, None]  # center
        theta = vi[-1]  # rotation angle

        # define rotation matrix:
        R = np.hstack((np.vstack((np.cos(theta), np.sin(theta))),
                       np.vstack((-np.sin(theta), np.cos(theta)))))

        # Calculate ellipses:
        Yxyc = np.vstack((1, xyc)).T
        Yxy = np.vstack((np.ones((1, nsamples)),
                         xyc + np.dot(R, np.vstack((a * np.cos(t),
                                                    b * np.sin(t)))))).T
        Yxys[:, :, i] = Yxy

        # Convert to requested color space:
        if (cspace_out is not None) & (cspace_in is not None):
            Yxy = colortf(Yxy, cspace_in + '>' + cspace_out)
            Yxyc = colortf(Yxyc, cspace_in + '>' + cspace_out)
            Yxys[:, :, i] = Yxy

            # get ellipse parameters in requested color space:
            ellipse_vs[i, :] = math.fit_ellipse(Yxy[:, 1:])

        # plot ellipses:
        if show == True:
            if (axh is None) & (i == 0):
                fig = plt.figure()
                axh = fig.add_subplot(111)

            if (cspace_in is None):
                xlabel = 'x'
                ylabel = 'y'
            else:
                xlabel = _CSPACE_AXES[cspace_in][1]
                ylabel = _CSPACE_AXES[cspace_in][2]

            if (cspace_out is not None):
                xlabel = _CSPACE_AXES[cspace_out][1]
                ylabel = _CSPACE_AXES[cspace_out][2]

            if plot_center == True:
                plt.plot(Yxyc[:, 1], Yxyc[:, 2], color=center_color,
                         linestyle='none', marker=center_marker,
                         markersize=center_markersize)

            plt.plot(Yxy[:, 1], Yxy[:, 2], color=line_color,
                     linestyle=line_style, linewidth=line_width,
                     marker=line_marker, markersize=line_markersize)
            plt.xlabel(xlabel, fontname=label_fontname,
                       fontsize=label_fontsize)
            plt.ylabel(ylabel, fontname=label_fontname,
                       fontsize=label_fontsize)
            if show_grid == True:
                plt.grid()

    Yxys = np.transpose(Yxys, axes=(0, 2, 1))
    if out is not None:
        return eval(out)
    else:
        return None
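# Usage sketch for plotellipse() (illustration only; the two ellipses in
# v_demo are made-up [Rmax, Rmin, xc, yc, theta] values in Yxy coordinates):
if __name__ == '__main__':
    v_demo = np.array([[0.05, 0.02, 0.35, 0.35, np.pi / 4],
                       [0.03, 0.01, 0.45, 0.40, 0.0]])
    plotellipse(v_demo, cspace_in='Yxy', plot_center=True)
    plt.show()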
def plot_chromaticity_diagram_colors(diagram_samples=256, diagram_opacity=1.0,
                                     diagram_lightness=0.25,
                                     cieobs=_CIEOBS, cspace='Yxy',
                                     cspace_pars={},
                                     show=True, axh=None,
                                     show_grid=True,
                                     label_fontname='Times New Roman',
                                     label_fontsize=12,
                                     **kwargs):
    """
    Plot the chromaticity diagram colors.

    Args:
        :diagram_samples:
            | 256, optional
            | Sampling resolution of color space.
        :diagram_opacity:
            | 1.0, optional
            | Sets opacity of chromaticity diagram.
        :diagram_lightness:
            | 0.25, optional
            | Sets lightness of chromaticity diagram.
        :axh:
            | None or axes handle, optional
            | Determines axes to plot data in.
            | None: make new figure.
        :show:
            | True or False, optional
            | Plot the diagram (True) or not (False).
        :cieobs:
            | luxpy._CIEOBS or str, optional
            | Determines CMF set to calculate spectrum locus or other.
        :cspace:
            | luxpy._CSPACE or str, optional
            | Determines color space / chromaticity diagram to plot data in.
            | Note that data is expected to be in specified :cspace:.
        :cspace_pars:
            | {} or dict, optional
            | Dict with parameters required by color space specified in :cspace:
            | (for use with luxpy.colortf()).
        :show_grid:
            | True, optional
            | Show grid (True) or not (False).
        :label_fontname:
            | 'Times New Roman', optional
            | Sets font type of axis labels.
        :label_fontsize:
            | 12, optional
            | Sets font size of axis labels.
        :kwargs:
            | additional keyword arguments for use with matplotlib.pyplot.

    Returns:
        :returns:
            | axes handle with the diagram if :show: == True, else None.
    """
    offset = _EPS
    ii, jj = np.meshgrid(
        np.linspace(offset, 1 + offset, diagram_samples),
        np.linspace(1 + offset, offset, diagram_samples))
    ij = np.dstack((ii, jj))

    # Get spectrum locus in requested color space:
    SL = _CMF[cieobs]['bar'][1:4].T
    SL = np.vstack((SL, SL[0]))  # close the locus
    SL = 100.0 * SL / SL[:, 1, None]
    SL = colortf(SL, tf=cspace, tfa0=cspace_pars)
    Y, x, y = asplit(SL)
    SL = np.vstack((x, y)).T

    # Convert chromaticity grid to an srgb image:
    ij2D = ij.reshape((diagram_samples**2, 2))
    ij2D = np.hstack((diagram_lightness * 100 * np.ones((ij2D.shape[0], 1)),
                      ij2D))
    xyz = colortf(ij2D, tf=cspace + '>xyz', tfa0=cspace_pars)
    xyz[xyz < 0] = 0
    xyz[np.isinf(xyz.sum(axis=1)), :] = np.nan
    xyz[np.isnan(xyz.sum(axis=1)), :] = offset
    srgb = xyz_to_srgb(xyz)
    srgb = srgb / srgb.max()
    srgb = srgb.reshape((diagram_samples, diagram_samples, 3))

    if show == True:
        if axh is None:
            fig = plt.figure()
            axh = fig.add_subplot(111)

        # Clip the srgb image to the spectrum locus polygon:
        polygon = Polygon(SL, facecolor='none', edgecolor='none')
        axh.add_patch(polygon)
        image = axh.imshow(srgb,
                           interpolation='bilinear',
                           extent=(0.0, 1, -0.05, 1),
                           clip_path=None,
                           alpha=diagram_opacity)
        image.set_clip_path(polygon)

        # plot spectrum locus:
        plt.plot(x, y, color='darkgray')

        if cspace == 'Yxy':
            plt.xlim([0, 1])
            plt.ylim([0, 1])
        elif cspace == 'Yuv':
            plt.xlim([0, 0.6])
            plt.ylim([0, 0.6])

        if (cspace is not None):
            xlabel = _CSPACE_AXES[cspace][1]
            ylabel = _CSPACE_AXES[cspace][2]
            if (label_fontname is not None) & (label_fontsize is not None):
                plt.xlabel(xlabel, fontname=label_fontname,
                           fontsize=label_fontsize)
                plt.ylabel(ylabel, fontname=label_fontname,
                           fontsize=label_fontsize)

        if show_grid == True:
            plt.grid()

        return axh
    else:
        return None
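# Usage sketch for plot_chromaticity_diagram_colors() (illustration only,
# using a reduced sampling resolution to keep it fast):
if __name__ == '__main__':
    axh_demo = plot_chromaticity_diagram_colors(diagram_samples=128,
                                                diagram_opacity=0.8,
                                                cspace='Yxy')
    plt.show()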
def apply(data, catmode='1>0>2', cattype='vonkries',
          xyzw1=None, xyzw2=None, xyzw0=None,
          D=None, mcat=['cat02'], normxyz0=None, outtype='xyz',
          La=None, F=None, Dtype=None):
    """
    Calculate corresponding colors by applying a von Kries chromatic
    adaptation transform (CAT), i.e. independent rescaling of
    'sensor sensitivity' to data to adapt from current adaptation
    conditions (1) to the new conditions (2).

    Args:
        :data:
            | ndarray of tristimulus values (can be NxMx3)
        :catmode:
            | '1>0>2', optional
            |    -'1>0>2': Two-step CAT
            |      from illuminant 1 to baseline illuminant 0 to illuminant 2.
            |    -'1>0': One-step CAT
            |      from illuminant 1 to baseline illuminant 0.
            |    -'0>2': One-step CAT
            |      from baseline illuminant 0 to illuminant 2.
        :cattype:
            | 'vonkries' (others: 'rlab', see Fairchild 1990), optional
        :xyzw1:
            | None, depending on :catmode: optional (can be Mx3)
        :xyzw2:
            | None, depending on :catmode: optional (can be Mx3)
        :xyzw0:
            | None, depending on :catmode: optional (can be Mx3)
        :D:
            | None, optional
            | Degrees of adaptation. Defaults to [1.0, 1.0].
        :La:
            | None, optional
            | Adapting luminances.
            | If None: xyz values are absolute or relative.
            | If not None: xyz are relative.
        :F:
            | None, optional
            | Surround parameter(s) for CAT02/CAT16 calculations
            | (:Dtype: == 'cat02' or 'cat16')
            | Defaults to [1.0, 1.0].
        :Dtype:
            | None, optional
            | Type of degree of adaptation function from literature.
            | See luxpy.cat.get_degree_of_adaptation()
        :mcat:
            | ['cat02'], optional
            | List[str] or List[ndarray] of sensor space matrices for each
            | condition pair. If len(:mcat:) == 1, the same matrix is used.
        :normxyz0:
            | None, optional
            | Set of xyz tristimulus values to normalize the sensor space
            | matrix to.
        :outtype:
            | 'xyz' or 'lms', optional
            |   - 'xyz': return corresponding tristimulus values
            |   - 'lms': return corresponding sensor space excitation values
            |            (e.g. for further calculations)

    Returns:
        :returns:
            | ndarray with corresponding colors
    """
    if (xyzw1 is None) & (xyzw2 is None):
        return data  # do nothing

    else:
        # Make data 2d:
        data = np2d(data)
        data_original_shape = data.shape
        if data.ndim < 3:
            target_shape = np.hstack((1, data.shape))
            data = data * np.ones(target_shape)
        else:
            target_shape = data.shape

        # initialize xyzw0:
        if (xyzw0 is None):  # set to illuminant E
            xyzw0 = np2d([100.0, 100.0, 100.0])
        xyzw0 = np.ones(target_shape) * xyzw0
        La0 = xyzw0[..., 1, None]

        # Determine cat-type (1-step or 2-step) + make input same shape as
        # data for block calculations:
        expansion_axis = np.abs(1 * (len(data_original_shape) == 2) - 1)
        if ((xyzw1 is not None) & (xyzw2 is not None)):
            xyzw1 = xyzw1 * np.ones(target_shape)
            xyzw2 = xyzw2 * np.ones(target_shape)
            default_La12 = [xyzw1[..., 1, None], xyzw2[..., 1, None]]

        elif (xyzw2 is None) & (xyzw1 is not None):
            # apply one-step CAT: 1-->0
            catmode = '1>0'  # override catmode input
            xyzw1 = xyzw1 * np.ones(target_shape)
            default_La12 = [xyzw1[..., 1, None], La0]

        elif (xyzw1 is None) & (xyzw2 is not None):
            raise Exception(
                "von_kries(): cat transformation '0>2' not supported, "
                "use '1>0' !")

        # Get or set La (La == None: xyz are absolute or relative,
        # La != None: xyz are relative):
        target_shape_1 = tuple(np.hstack((target_shape[:-1], 1)))
        La1, La2 = parse_x1x2_parameters(La,
                                         target_shape=target_shape_1,
                                         catmode=catmode,
                                         expand_2d_to_3d=expansion_axis,
                                         default=default_La12)

        # Set degrees of adaptation, D10, D20:
        # (note: D20 is the degree of adaptation for 2-->0!!)
        D10, D20 = parse_x1x2_parameters(D,
                                         target_shape=target_shape_1,
                                         catmode=catmode,
                                         expand_2d_to_3d=expansion_axis)

        # Set F surround in case of Dtype == 'cat02':
        F1, F2 = parse_x1x2_parameters(F,
                                       target_shape=target_shape_1,
                                       catmode=catmode,
                                       expand_2d_to_3d=expansion_axis)

        # Make xyz relative to go to relative xyz0:
        if La is None:
            data = 100 * data / La1
            xyzw1 = 100 * xyzw1 / La1
            xyzw0 = 100 * xyzw0 / La0
            if (catmode == '1>0>2') | (catmode == '1>2'):
                xyzw2 = 100 * xyzw2 / La2

        # transform data (xyz) to sensor space (lms) and perform cat:
        xyzc = np.ones(data.shape) * np.nan
        mcat = np.array(mcat)
        if (mcat.shape[0] != data.shape[1]) & (mcat.shape[0] == 1):
            mcat = np.repeat(mcat, data.shape[1], axis=0)
        elif (mcat.shape[0] != data.shape[1]) & (mcat.shape[0] > 1):
            raise Exception(
                'von_kries(): mcat.shape[0] > 1 and does not match '
                'data.shape[1]!')

        for i in range(xyzc.shape[1]):

            # get cat sensor matrix:
            if mcat[i].dtype == np.float64:
                mcati = mcat[i]
            else:
                mcati = _MCATS[mcat[i]]

            # normalize sensor matrix:
            if normxyz0 is not None:
                mcati = math.normalize_3x3_matrix(mcati, xyz0=normxyz0)

            # convert from xyz to lms:
            lms = np.dot(mcati, data[:, i].T).T
            lmsw0 = np.dot(mcati, xyzw0[:, i].T).T

            if (catmode == '1>0>2') | (catmode == '1>0'):
                lmsw1 = np.dot(mcati, xyzw1[:, i].T).T
                Dpar1 = dict(D=D10[:, i],
                             F=F1[:, i],
                             La=La1[:, i],
                             La0=La0[:, i],
                             order='1>0')
                # get degree of adaptation depending on Dtype:
                D10[:, i] = get_degree_of_adaptation(Dtype=Dtype, **Dpar1)
                lmsw2 = None  # in case of '1>0'

            if (catmode == '1>0>2'):
                lmsw2 = np.dot(mcati, xyzw2[:, i].T).T
                Dpar2 = dict(D=D20[:, i],
                             F=F2[:, i],
                             La=La2[:, i],
                             La0=La0[:, i],
                             order='0>2')
                # get degree of adaptation depending on Dtype:
                D20[:, i] = get_degree_of_adaptation(Dtype=Dtype, **Dpar2)

            if (catmode == '1>2'):
                lmsw1 = np.dot(mcati, xyzw1[:, i].T).T
                lmsw2 = np.dot(mcati, xyzw2[:, i].T).T
                Dpar12 = dict(D=D10[:, i],
                              F=F1[:, i],
                              La=La1[:, i],
                              La2=La2[:, i],
                              order='1>2')
                # get degree of adaptation depending on Dtype:
                D10[:, i] = get_degree_of_adaptation(Dtype=Dtype, **Dpar12)

            # Determine transfer function Dt:
            Dt = get_transfer_function(cattype=cattype,
                                       catmode=catmode,
                                       lmsw1=lmsw1,
                                       lmsw2=lmsw2,
                                       lmsw0=lmsw0,
                                       D10=D10[:, i],
                                       D20=D20[:, i],
                                       La1=La1[:, i],
                                       La2=La2[:, i])

            # Perform cat:
            lms = np.dot(np.diagflat(Dt[0]), lms.T).T

            # Make xyz, lms 'absolute' again:
            if (catmode == '1>0>2'):
                lms = (La2[:, i] / La1[:, i]) * lms
            elif (catmode == '1>0'):
                lms = (La0[:, i] / La1[:, i]) * lms
            elif (catmode == '1>2'):
                lms = (La2[:, i] / La1[:, i]) * lms

            # transform back from sensor space to xyz (or not):
            if outtype == 'xyz':
                xyzci = np.dot(np.linalg.inv(mcati), lms.T).T
                xyzci[np.where(xyzci < 0)] = _EPS
                xyzc[:, i] = xyzci
            else:
                xyzc[:, i] = lms

        # return data to original shape:
        if len(data_original_shape) == 2:
            xyzc = xyzc[0]
        return xyzc
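# Usage sketch for apply() (illustration only): one-step von Kries CAT of a
# single stimulus from a CIE illuminant A white point to the equal-energy
# baseline white. The stimulus tristimulus values are made-up; the A white
# point is the approximate CIE 1931 2-degree value.
if __name__ == '__main__':
    xyz_demo = np2d([[48.9, 43.6, 6.5]])       # some stimulus under illuminant A
    xyzw_a = np2d([[109.85, 100.0, 35.58]])    # approx. CIE A white point (2 deg)
    xyz_c = apply(xyz_demo, xyzw1=xyzw_a)      # xyzw2 is None: catmode -> '1>0'
    print(xyz_c)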
def minimizebnd(fun, x0, args=(), method='nelder-mead', use_bnd=True,
                bounds=(None, None), options=None,
                x0_vsize=None, x0_keys=None, **kwargs):
    """
    Minimization function that allows for bounds on any type of method in
    SciPy's minimize function by transforming the parameter values
    (see Matlab's fminsearchbnd).
    | Starting values, and lower and upper bounds can also be provided as
      a dict.

    Args:
        :x0:
            | parameter starting values
            | If x0_keys is None then :x0: is a vector; else, :x0: is a dict
            | and :x0_vsize: should be provided with the length/size of the
            | values for each of the keys in :x0: to convert it to a vector.
        :use_bnd:
            | True, optional
            | False: omits bounds and defaults to regular minimize function.
        :bounds:
            | (lower, upper), optional
            | Tuple of lists or dicts (if x0_keys is not None) of lower and
            | upper bounds for each of the parameter values.
        :kwargs:
            | allows input for other type of arguments (e.g. in OutputFcn)

    Note:
        For other input arguments, see ?scipy.minimize()

    Returns:
        :res:
            | dict with minimize() output.
            | Additionally, function value, fval, of solution is also in :res:,
            | as well as a vector or dict (if x0 was dict)
            | with final solutions (res['x_final']).
    """
    # Convert dict to vec:
    if isinstance(x0, dict):
        x0 = vec_to_dict(dic=x0, vsize=x0_vsize, keys=x0_keys)

    if use_bnd == False:
        res = minimize(fun, x0, args=args, options=options, **kwargs)
        res['fval'] = fun(res['x'], *args)
        if x0_keys is None:
            res['x_final'] = res['x']
        else:
            res['x_final'] = vec_to_dict(vec=res['x'],
                                         vsize=x0_vsize,
                                         keys=x0_keys)
        return res

    else:
        LB, UB = bounds

        # Convert dicts to vecs:
        if isinstance(LB, dict):
            LB = vec_to_dict(dic=LB, vsize=x0_vsize, keys=x0_keys)
        if isinstance(UB, dict):
            UB = vec_to_dict(dic=UB, vsize=x0_vsize, keys=x0_keys)

        # size checks:
        xsize = x0.shape
        x0 = x0.flatten()
        n = x0.shape[0]

        if LB is None:
            LB = -np.inf * np.ones(n)
        else:
            LB = LB.flatten()

        if UB is None:
            UB = np.inf * np.ones(n)
        else:
            UB = UB.flatten()

        if (n != LB.shape[0]) | (n != UB.shape[0]):
            raise Exception(
                'minimizebnd(): x0 is incompatible in size with '
                'either LB or UB.')

        # set default options if necessary:
        if options is None:
            options = {}

        # stuff into a struct to pass around:
        params = {}
        params['args'] = args
        params['LB'] = LB
        params['UB'] = UB
        params['fun'] = fun
        params['n'] = n
        params['OutputFcn'] = None

        # 0 --> unconstrained variable
        # 1 --> lower bound only
        # 2 --> upper bound only
        # 3 --> dual finite bounds
        # 4 --> fixed variable
        params['BoundClass'] = np.zeros(n)
        for i in np.arange(n):
            k = np.isfinite(LB[i]) + 2 * np.isfinite(UB[i])
            params['BoundClass'][i] = k
            if (k == 3) & (LB[i] == UB[i]):
                params['BoundClass'][i] = 4

        # transform starting values into their unconstrained
        # surrogates. Check for infeasible starting guesses.
        x0u = x0.copy()
        k = 0
        for i in np.arange(n):
            if params['BoundClass'][i] == 1:
                # lower bound only
                if x0[i] <= LB[i]:
                    # infeasible starting value. Use bound.
                    x0u[k] = 0
                else:
                    x0u[k] = np.sqrt(x0[i] - LB[i])

            elif params['BoundClass'][i] == 2:
                # upper bound only
                if x0[i] >= UB[i]:
                    # infeasible starting value. Use bound.
                    x0u[k] = 0
                else:
                    x0u[k] = np.sqrt(UB[i] - x0[i])

            elif params['BoundClass'][i] == 3:
                # lower and upper bounds
                if x0[i] <= LB[i]:
                    # infeasible starting value
                    x0u[k] = -np.pi / 2
                elif x0[i] >= UB[i]:
                    # infeasible starting value
                    x0u[k] = np.pi / 2
                else:
                    x0u[k] = 2 * (x0[i] - LB[i]) / (UB[i] - LB[i]) - 1
                    # shift by 2*pi to avoid problems at zero in fminsearch,
                    # otherwise the initial simplex is vanishingly small:
                    x0u[k] = 2 * np.pi + np.arcsin(
                        np.maximum(-1, np.minimum(1, x0u[k])))

            elif params['BoundClass'][i] == 0:
                # unconstrained variable. x0u[k] is set.
                x0u[k] = x0[i]

            if params['BoundClass'][i] != 4:
                # increment k
                k += 1
            else:
                # fixed variable. drop it before fminsearch sees it.
                # k is not incremented for this variable.
                pass

        # if any of the unknowns were fixed, then we need to shorten x0u now:
        if k < n:
            x0u = x0u[:k]

        # were all the variables fixed?
        if x0u.shape[0] == 0:
            # All variables were fixed. quit immediately, setting the
            # appropriate parameters, then return.

            # undo the variable transformations into the original space:
            x = xtransform(x0u, params)

            # final reshape:
            x = x.reshape(xsize)

            # stuff fval with the final value:
            fval = params['fun'](x, *params['args'])

            # minimize was not called:
            output = {'success': False}
            output['x'] = x
            output['iterations'] = 0
            output['funcount'] = 1
            output['algorithm'] = method
            output['message'] = ('All variables were held fixed '
                                 'by the applied bounds')
            output['status'] = 0

            # return with no call at all to minimize():
            return output

        # Check for an OutputFcn. If there is any, then substitute my
        # own wrapper function (a nested function):
        def outfun_wrapper(x, **kwargs):
            # we need to transform x first:
            xtrans = xtransform(x, params)

            # then call the user supplied OutputFcn:
            stop = params['OutputFcn'](xtrans, **kwargs)
            return stop

        if 'OutputFcn' in options:
            if options['OutputFcn'] is not None:
                params['OutputFcn'] = options['OutputFcn']
                options['OutputFcn'] = outfun_wrapper

        # now we can call minimize, but with our own
        # intra-objective function:
        res = minimize(intrafun, x0u, args=(params,), method=method,
                       options=options)

        # get function value:
        fval = intrafun(res['x'], params)

        # undo the variable transformations into the original space:
        x = xtransform(res['x'], params)

        # final reshape:
        x = x.reshape(xsize)

        res['fval'] = fval
        res['x'] = x  # overwrite x in res with the back-transformed values
        if x0_keys is None:
            res['x_final'] = res['x']
        else:
            res['x_final'] = vec_to_dict(vec=res['x'],
                                         vsize=x0_vsize,
                                         keys=x0_keys)

        return res
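# Usage sketch for minimizebnd() (illustration only): minimize a simple
# made-up convex function with box bounds; the unconstrained minimum at
# (3, -1) lies outside the bounds, so the solution should land on the bound.
if __name__ == '__main__':
    def _fdemo(x):
        # simple convex demo objective:
        return (x[0] - 3.0)**2 + (x[1] + 1.0)**2

    res_demo = minimizebnd(_fdemo, np.array([0.0, 0.0]),
                           bounds=(np.array([-2.0, -2.0]),
                                   np.array([2.0, 2.0])))
    print(res_demo['x_final'], res_demo['fval'])  # expect x ~ [2, -1], fval ~ 1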
def render_image(img=None, spd=None, rfl=None, out='img_hyp',
                 refspd=None, D=None, cieobs=_CIEOBS,
                 cspace='ipt', cspace_tf={},
                 k_neighbours=4, show=True, verbosity=0, show_ref_img=True,
                 stack_test_ref=12,
                 write_to_file=None):
    """
    Render image under specified light source spd.

    Args:
        :img:
            | None or str or ndarray with uint8 rgb image.
            | None: load a default image.
        :spd:
            | ndarray, optional
            | Light source spectrum for rendering.
        :rfl:
            | ndarray, optional
            | Reflectance set for color coordinate to rfl mapping.
        :out:
            | 'img_hyp' or str, optional
            | (other option: 'img_ren': rendered image under :spd:)
        :refspd:
            | None, optional
            | Reference spectrum for color coordinate to rfl mapping.
            | None defaults to D65 (srgb has a D65 white point).
        :D:
            | None, optional
            | Degree of (von Kries) adaptation from spd to refspd.
        :cieobs:
            | _CIEOBS, optional
            | CMF set for calculation of xyz from spectral data.
        :cspace:
            | 'ipt', optional
            | Color space for color coordinate to rfl mapping.
        :cspace_tf:
            | {}, optional
            | Dict with parameters for xyz_to_cspace and cspace_to_xyz
              transform.
        :k_neighbours:
            | 4 or int, optional
            | Number of nearest neighbours for reflectance spectrum
              interpolation.
            | Neighbours are found using scipy.spatial.cKDTree.
        :show:
            | True, optional
            | Show images.
        :verbosity:
            | 0, optional
            | If > 0: make a plot of the color coordinates of original and
              rendered image pixels.
        :show_ref_img:
            | True, optional
            | True: show rendered image under reference spd.
            | False: show original image.
        :write_to_file:
            | None, optional
            | None: do nothing, else: write to filename(+path) in
              :write_to_file:.
        :stack_test_ref:
            | 12, optional
            |   - 12: left (test), right (ref) format for show and imwrite
            |   - 21: top (test), bottom (ref)
            |   - 1: only show/write test
            |   - 2: only show/write ref
            |   - 0: show both, write test

    Returns:
        :returns:
            | img_hyp, img_ren,
            | ndarrays with the hyperspectral image and the rendered image.
    """
    # Get image:
    if img is not None:
        if isinstance(img, str):
            img = plt.imread(img)  # use matplotlib.pyplot's imread
    else:
        img = plt.imread(_HYPSPCIM_DEFAULT_IMAGE)

    # Convert to 2D format:
    rgb = img.reshape(img.shape[0] * img.shape[1], 3) * 1.0  # *1.0: make float
    rgb[rgb == 0] = _EPS  # avoid division by zero for pure blacks

    # Get unique rgb values and positions:
    rgb_u, rgb_indices = np.unique(rgb, return_inverse=True, axis=0)

    # get Ref spd:
    if refspd is None:
        refspd = _CIE_ILLUMINANTS['D65'].copy()

    # Convert rgb_u to xyz and lab-type values under assumed refspd:
    xyz_wr = spd_to_xyz(refspd, cieobs=cieobs, relative=True)
    xyz_ur = colortf(rgb_u, tf='srgb>xyz')

    # Estimate rfl's for xyz_ur:
    rfl_est, xyzri = xyz_to_rfl(xyz_ur, rfl=rfl, out='rfl_est,xyz_est',
                                refspd=refspd, D=D, cieobs=cieobs,
                                cspace=cspace, cspace_tf=cspace_tf,
                                k_neighbours=k_neighbours,
                                verbosity=verbosity)

    # Get default test spd if none supplied:
    if spd is None:
        spd = _CIE_ILLUMINANTS['F4']

    # calculate xyz values under test spd:
    xyzti, xyztw = spd_to_xyz(spd, rfl=rfl_est, cieobs=cieobs, out=2)

    # Chromatic adaptation from test spd to refspd:
    if D is not None:
        xyzti = cat.apply(xyzti, xyzw1=xyztw, xyzw2=xyz_wr, D=D)

    # Convert xyzti under test spd to srgb:
    rgbti = colortf(xyzti, tf='srgb') / 255

    # Reconstruct original locations for rendered image rgbs:
    img_ren = rgbti[rgb_indices]
    img_ren.shape = img.shape  # reshape back to 3D size of original

    # For output:
    if show_ref_img == True:
        rgb_ref = colortf(xyzri, tf='srgb') / 255
        img_ref = rgb_ref[rgb_indices]
        img_ref.shape = img.shape  # reshape back to 3D size of original
        img_str = 'Rendered (under ref. spd)'
        img = img_ref
    else:
        img_str = 'Original'
        img = img / 255

    if (stack_test_ref > 0) | (show == True):
        if stack_test_ref == 21:
            img_original_rendered = np.vstack(
                (img_ren, np.ones((4, img.shape[1], 3)), img))
            img_original_rendered_str = ('Rendered (under test spd)\n ' +
                                         img_str)
        elif stack_test_ref == 12:
            img_original_rendered = np.hstack(
                (img_ren, np.ones((img.shape[0], 4, 3)), img))
            img_original_rendered_str = ('Rendered (under test spd) | ' +
                                         img_str)
        elif stack_test_ref == 1:
            img_original_rendered = img_ren
            img_original_rendered_str = 'Rendered (under test spd)'
        elif stack_test_ref == 2:
            img_original_rendered = img
            img_original_rendered_str = img_str
        elif stack_test_ref == 0:
            img_original_rendered = img_ren
            img_original_rendered_str = 'Rendered (under test spd)'

    if write_to_file is not None:
        # Write (stacked) image to file:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            imsave(write_to_file, img_original_rendered)

    if show == True:
        # show images using pyplot:
        plt.figure()
        plt.imshow(img_original_rendered)
        plt.title(img_original_rendered_str)
        plt.gca().get_xaxis().set_ticklabels([])
        plt.gca().get_yaxis().set_ticklabels([])

        if stack_test_ref == 0:
            # show ref/original image in a separate figure:
            plt.figure()
            plt.imshow(img)
            plt.title(img_str)
            plt.axis('off')

    if 'img_hyp' in out.split(','):
        # Create hyperspectral image:
        # (+1: skip the wavelength row of rfl_est when gathering the
        #  reflectance spectrum for each pixel)
        rfl_image_2D = rfl_est[rgb_indices + 1, :]
        img_hyp = rfl_image_2D.reshape(img.shape[0], img.shape[1],
                                       rfl_image_2D.shape[1])

    # Setup output:
    if out == 'img_hyp':
        return img_hyp
    elif out == 'img_ren':
        return img_ren
    else:
        return eval(out)
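# Usage sketch for render_image() (illustration only; relies on the package's
# default image and CIE F4 illuminant, and may be slow due to the per-pixel
# reflectance estimation):
if __name__ == '__main__':
    img_hyp_demo = render_image(spd=_CIE_ILLUMINANTS['F4'], out='img_hyp',
                                stack_test_ref=12, show=True)
    print('hyperspectral image shape:', img_hyp_demo.shape)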