def hsv2rgbArray(hsv):
    """
    Transform an hsv image to an rgb array

    :param hsv: the input image.  can be pil image or numpy array
    :return rgb: the output array

    This comes from scikit-image:
        https://github.com/scikit-image/scikit-image/blob/master/skimage/color/colorconv.py
    """
    hsv = numpyArray(hsv)
    if len(hsv.shape) == 2:
        # this is a black and white image, so treat it as value with zero hue
        # and saturation, which comes out as r=g=b=value
        return np.dstack((hsv, hsv, hsv))
    hi = np.floor(hsv[:, :, 0] * 6)
    f = hsv[:, :, 0] * 6 - hi
    p = hsv[:, :, 2] * (1 - hsv[:, :, 1])
    q = hsv[:, :, 2] * (1 - f * hsv[:, :, 1])
    t = hsv[:, :, 2] * (1 - (1 - f) * hsv[:, :, 1])
    v = hsv[:, :, 2]
    hi = np.dstack([hi, hi, hi]).astype(np.uint8) % 6
    out = np.choose(hi, [
        np.dstack((v, t, p)),
        np.dstack((q, v, p)),
        np.dstack((p, v, t)),
        np.dstack((p, q, v)),
        np.dstack((t, p, v)),
        np.dstack((v, p, q))])
    return out
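# A minimal usage sketch (hypothetical helper; assumes hsv2rgbArray and np as
# defined in this file): checks the sector math above against the stdlib
# colorsys module for a single pixel.
def _exampleHsv2rgbArray():
    import colorsys
    hsv = np.array([[[0.58, 0.75, 0.9]]])  # one HSV pixel, values in 0..1
    rgb = hsv2rgbArray(hsv)[0, 0]
    expected = colorsys.hsv_to_rgb(0.58, 0.75, 0.9)
    assert np.allclose(rgb, expected, atol=1e-6)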
def rgb2cmykArray(rgb):
    """
    Takes [[[r,g,b]]] colors in range 0..1
    Returns [[[c,m,y,k]]] in range 0..1
    """
    #k=rgb.sum(-1)
    c = 1.0 - rgb[:, :, 0]
    m = 1.0 - rgb[:, :, 1]
    y = 1.0 - rgb[:, :, 2]
    minCMY = np.dstack((c, m, y)).min(-1)
    c = (c - minCMY) / (1.0 - minCMY)
    m = (m - minCMY) / (1.0 - minCMY)
    y = (y - minCMY) / (1.0 - minCMY)
    k = minCMY
    return np.dstack((c, m, y, k))
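# A quick sanity check (hypothetical helper; assumes rgb2cmykArray and np as
# defined in this file): pure red should split into full magenta and yellow
# with no cyan and no black.
def _exampleRgb2cmykArray():
    rgb = np.array([[[1.0, 0.0, 0.0]]])  # one pure red pixel
    cmyk = rgb2cmykArray(rgb)[0, 0]
    assert np.allclose(cmyk, [0.0, 1.0, 1.0, 0.0])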
def convolve(img, matrix, add: float = 0, divide: float = 1, edge: str = 'clamp'):
    """
    run a given convolution matrix

    :param matrix: can be a numerical matrix or an entry in CONVOLVE_MATTRICES
    :param add, divide, edge: currently unused (see the commented-out PIL filter path)
    """
    if isinstance(matrix, str):
        if matrix.find(',') > 0:
            # parse a matrix literal such as "[[1,2,1],[2,4,2],[1,2,1]]"
            matrix = ''.join(matrix.split())
            if matrix.startswith('[['):
                matrix = matrix[1:-1]
            matrix = [[float(col) for col in row.split(',')]
                      for row in matrix[1:-1].replace('],', '').split('[')]
        else:
            matrix = CONVOLVE_MATTRICES[matrix]
    size = len(matrix)
    border = size // 2 - 1
    #img=imageBorder(img,border,edge)
    #k=ImageFilter.Kernel((size,size),matrix,scale=divide,offset=add)
    #img=img.filter(k)
    img = numpyArray(img)
    if len(img.shape) > 2:
        # convolve each color channel separately
        ret = []
        for ch in range(img.shape[2]):
            ret.append(ndimage.convolve(img[:, :, ch], matrix))
        img = np.dstack(ret)
    else:
        img = ndimage.convolve(img, matrix)
    return img
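# A usage sketch (hypothetical helper; assumes convolve and np as defined in
# this file): runs a 3x3 box-blur kernel over a random grayscale array, first
# as a nested list and then as the equivalent string literal.
def _exampleConvolve():
    img = np.random.rand(64, 64)
    blurKernel = [[1 / 9.0] * 3 for _ in range(3)]
    blurred = convolve(img, blurKernel)
    blurredAgain = convolve(img,
        '[[0.111,0.111,0.111],[0.111,0.111,0.111],[0.111,0.111,0.111]]')
    return blurred, blurredAgain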
def normalMapFromImage(img, zPower=0.33):
    """
    Attempt to get a normal map from an image using edge detection.

    :param img: the image to create a normal map for
    :param zPower: percent of z estimation to allow (keep it low because
        this is always a total guess)

    NOTE: This is a common approach, but it is only an approximation, not
    actual height data.  It will only give you something that may or may not
    look 3d.

    The real way to do this is to measure the height data directly using
    something like a kinect or a 3d digitizer.  An acceptable alternative is
    a multi-angle photo stitcher such as 123D Catch.

    In fact, if all you have is a 2d image, something like AwesomeBump would
    be more versatile.
        https://github.com/kmkolasinski/AwesomeBump

    See also:
        https://github.com/RobertBeckebans/gimp-plugin-insanebump
    """
    img = normalize(grayscale(img))
    x = scipy.ndimage.sobel(img, 1)
    y = scipy.ndimage.sobel(img, 0)
    mag = np.hypot(x, y)
    z = normalize(scipy.ndimage.distance_transform_edt(mag)) * zPower + (
        0.5 - zPower / 2)
    return np.dstack((x, y, z))
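# A stand-alone sketch of the same Sobel-based idea (hypothetical helper; does
# not use this file's normalize()/grayscale() helpers): x and y come from edge
# gradients and z is just a constant "flatness" term, so it is illustrative only.
def _exampleSobelNormals():
    import scipy.ndimage
    gray = np.random.rand(64, 64)  # pretend grayscale height data
    x = scipy.ndimage.sobel(gray, axis=1)
    y = scipy.ndimage.sobel(gray, axis=0)
    z = np.full_like(gray, 0.5)  # crude stand-in for the z estimate
    return np.dstack((x, y, z))  # shape (64, 64, 3)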
def cmyk2rgbArray(cmyk):
    """
    Takes [[[c,m,y,k]]] colors in range 0..1
    Returns [[[r,g,b]]] in range 0..1
    """
    r = 1 - np.minimum(1, cmyk[:, :, 0] * (1 - cmyk[:, :, 3]) + cmyk[:, :, 3])
    g = 1 - np.minimum(1, cmyk[:, :, 1] * (1 - cmyk[:, :, 3]) + cmyk[:, :, 3])
    b = 1 - np.minimum(1, cmyk[:, :, 2] * (1 - cmyk[:, :, 3]) + cmyk[:, :, 3])
    return np.dstack((r, g, b))
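# A round-trip sanity check (hypothetical helper; assumes rgb2cmykArray,
# cmyk2rgbArray, and np as defined in this file): rgb -> cmyk -> rgb should
# return the original colors.  Avoid pure black, which makes the cmyk
# normalization divide by zero.
def _exampleCmykRoundTrip():
    rgb = np.array([[[0.2, 0.5, 0.8], [1.0, 0.25, 0.0]]])
    assert np.allclose(cmyk2rgbArray(rgb2cmykArray(rgb)), rgb)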
def setAlpha(image,alpha):
    """
    sets the alpha mask regardless of image type

    :param image: the image to be changed
    :param alpha: a mask image to use to cut out the image.  it can be:
        image with alpha channel to steal
        -or-
        grayscale (white=opaque, black=transparent)
    :returns: adjusted image (could be PIL image or numpy array, depending on
        what's expedient.  If you need a particular one, wrap the call in
        pilImage() or numpyArray())

    NOTE: if an alpha channel already exists, it will be darkened such that
        a hole in either mask results in a hole

    IMPORTANT: the image bits may be altered.  To prevent this, set
        image.immutable=True
    """
    if image is None or alpha is None:
        return image
    if isinstance(image,Image.Image):
        # make sure not to smash any bits we're keeping
        if hasattr(image,'immutable') and image.immutable:
            image=image.copy()
    if imageMode(alpha)!='L':
        # make sure we have a grayscale to combine
        alpha=getAlpha(alpha,alwaysCreate=False)
        if alpha is None:
            return image
    image,alpha=resizing.makeSameSize(image,alpha,(0,0,0,0))
    if hasAlpha(image):
        channels=np.asarray(image)
        alpha1=np.minimum(channels[:,:,-1],alpha) # Darken blend mode
        image=np.dstack((channels[:,:,0:-1],alpha1))
    else:
        if isinstance(image,Image.Image):
            if not isinstance(alpha,Image.Image):
                alpha=pilImage(alpha)
            image.putalpha(alpha)
        else:
            if isinstance(alpha,Image.Image):
                alpha=np.asarray(alpha)
            image=np.dstack((image,alpha))
    return image
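# A usage sketch (hypothetical helper; assumes setAlpha and its module helpers
# such as imageMode, getAlpha, hasAlpha, and resizing are available): applies
# a uniform 50% alpha to a plain RGB image.  The result may be a PIL image or
# a numpy array, so wrap it in pilImage()/numpyArray() if you need one form.
def _exampleSetAlpha():
    img = Image.new('RGB', (64, 64), (200, 80, 80))
    mask = Image.new('L', (64, 64), 128)  # gray mask = 50% opacity everywhere
    return setAlpha(img, mask)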
def _blendArray(front, back, fn, opacity: float = 1.0):
    """
    :param fn: represents the B function from the adobe blend modes
        documentation.  It takes two parameters:
            Cb - the background pixels
            Cs - the source pixels
        The documentation originally appeared at
            http://www.adobe.com/devnet/pdf/pdfs/blend_modes.pdf
        Copy included in source.  (TODO: remove for copyright reasons?)
    :param opacity: blend mode opacity

    NOTE: always creates new image
    """
    shift = 255.0
    useOpacity = False
    # find some common ground
    w = max(front.width, back.width)
    h = max(front.height, back.height)
    if front.width < w or front.height < h:
        front = extendImageCanvas(front, (0, 0, w, h))
    if back.width < w or back.height < h:
        back = extendImageCanvas(back, (0, 0, w, h))
    mode = maxMode(front, back)
    if front.mode != mode:
        front = front.convert(mode)
    if back.mode != mode:
        back = back.convert(mode)
    # convert to array
    front = np.asarray(front) / shift
    back = np.asarray(back) / shift
    # calculate the alpha channel
    comp_alpha = np.maximum(
        np.minimum(front[:, :, 3], back[:, :, 3]) * opacity, shift)
    new_alpha = front[:, :, 3] + (1.0 - front[:, :, 3]) * comp_alpha
    np.seterr(divide='ignore', invalid='ignore')
    alpha = comp_alpha / new_alpha
    alpha[np.isnan(alpha)] = 0.0
    # blend the pixels
    combined = fn(front[:, :, :3], back[:, :, :3]) * shift
    combined = np.clip(combined, 0.0, 255.0)
    # clean up and reassemble
    #ratio_rs =
    final = np.reshape(
        combined, [combined.shape[0], combined.shape[1], combined.shape[2]])
    #final = combined * ratio_rs + front[:, :, :3] * (1.0 - ratio_rs)
    if useOpacity:
        final = np.dstack((final, alpha))
    # convert back to PIL image
    if useOpacity:
        final = Image.fromarray(final.astype('uint8'), mode)
    else:
        final = Image.fromarray(final.astype('uint8'), 'RGB')
    return final
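# A usage sketch (hypothetical helper; assumes _blendArray and its helpers
# extendImageCanvas/maxMode are available): a multiply blend, where fn is the
# per-channel B(Cb, Cs) function.  Multiply is symmetric, so the argument
# order does not matter for this particular mode.
def _exampleMultiplyBlend():
    front = Image.new('RGBA', (32, 32), (255, 128, 0, 255))
    back = Image.new('RGBA', (32, 32), (0, 128, 255, 255))
    return _blendArray(front, back, lambda Cb, Cs: Cb * Cs)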
def toWavelet(img,wavelet='haar',mode='symmetric',level=None):
    """
    :param img: any supported image type to transform into wavelet space
    :param wavelet: any common, named wavelet, including:
            'Haar' (default)
            'Daubechies'
            'Symlet'
            'Coiflet'
            'Biorthogonal'
            'ReverseBiorthogonal'
            'DiscreteMeyer'
            'Gaussian'
            'MexicanHat'
            'Morlet'
            'ComplexGaussian'
            'Shannon'
            'FrequencyBSpline'
            'ComplexMorlet'
        or a custom
            [[lowpass_decomposition],
             [highpass_decomposition],
             [lowpass_reconstruction],
             [highpass_reconstruction]]
        where each is a pair of floating point values
    :param mode: str or 2-tuple of str, optional
        Signal extension mode, see Modes (default: "symmetric").
        This can also be a tuple containing a mode to apply along each
        axis in axes.
    :param level: int, optional
        Decomposition level (must be >= 0).  If level is None (default) then
        it will be calculated using the dwt_max_level function.

    See also:
        https://pywavelets.readthedocs.io/en/latest/ref/index.html
    """
    if mode is None:
        mode='symmetric'
    img=numpyArray(img)
    colorMode=imageMode(img)
    if len(colorMode)==1:
        return pywt.wavedec2(img,_wavelet(wavelet),mode,level)
    ret=[]
    for ch in range(len(colorMode)):
        ret.append(np.array(
            pywt.wavedec2(img[:,:,ch],_wavelet(wavelet),mode,level)))
    ret=np.dstack(ret)
    return ret
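# A stand-alone sketch of the underlying pywt call for one channel
# (hypothetical helper): wavedec2 returns the approximation coefficients
# followed by per-level (horizontal, vertical, diagonal) detail tuples,
# coarsest level first.
def _exampleWavedec2():
    gray = np.random.rand(64, 64)
    coeffs = pywt.wavedec2(gray, 'haar', mode='symmetric', level=2)
    cA, (cH2, cV2, cD2), (cH1, cV1, cD1) = coeffs
    return cA.shape  # (16, 16) after two halvings of a 64x64 image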
def generalBlend(topImage, mathStr, botImage,
                 opacity=1.0, position=(0, 0), resize=True):
    """
    mathStr -
        operators:
            basic math symbols ()*/%-+&|
            comparison operators == <= >= && || !=
            comma as combining operator
        functions:
            abs() sqrt() pow() min() max() count() sum()
            sin() cos() tan() if(condition,then,else)
        images: top, bottom
        channels: RGB, CMYK, HSV, A
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Notes:
        * Case sensitive
        * All values are percent values from 0..1
        * After this operation, values will be cropped to 0..1
        * Functions have two modes: they work between two channels if two are
          given; if one is given, they work on all values of that channel.
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Examples:
        RGB=top.RGB/bottom.RGB ........... simple divide blend mode
        RGB=min(top.RGB,bottom.RGB) ...... min blend mode
        RGB=(top.RGB-min(top.RGB)) ....... use min in the other mode to normalize black values
        A=1-top.V ........................ extract inverted black levels to alpha channel
        RGBA=top.RGB/bottom.RGB,1-top.V .. use commas to specify different operations for different channels
    """
    shift = 255.0
    mathStr = mathStr.replace('\n', '').replace(' ', '').replace('\t', '')
    resultForm, equation = mathStr.split('=', 1)
    # find some common ground
    w = max(topImage.width, botImage.width)
    h = max(topImage.height, botImage.height)
    if topImage.width < w or topImage.height < h:
        topImage = extendImageCanvas(topImage, (0, 0, w, h))
    if botImage.width < w or botImage.height < h:
        botImage = extendImageCanvas(botImage, (0, 0, w, h))
    mode = 'RGBA'  #maxMode(topImage,botImage)
    if topImage.mode != mode:
        topImage = topImage.convert(mode)
    if botImage.mode != mode:
        botImage = botImage.convert(mode)
    # convert to arrays (only the colorspaces the equation actually uses)
    topRGBA = np.asarray(topImage) / shift
    for tag in 'HSV':
        if equation.find('top.' + tag) >= 0:
            topHSV = rgb2hsvArray(topRGBA)
            break
    for tag in 'CMYK':
        if equation.find('top.' + tag) >= 0:
            topCMYK = rgb2cmykArray(topRGBA)
            break
    botRGBA = np.asarray(botImage) / shift
    for tag in 'HSV':
        if equation.find('bottom.' + tag) >= 0:
            botHSV = rgb2hsvArray(botRGBA)
            break
    for tag in 'CMYK':
        if equation.find('bottom.' + tag) >= 0:
            botCMYK = rgb2cmykArray(botRGBA)
            break
    # convert the equation into python code
    import re
    tokenizer = re.compile(r"([!<>=|&]+|[,()%*+/-])")
    equation = tokenizer.split(equation)
    replacements = {
        'min': 'np.minimum', 'max': 'np.maximum',
        'abs': 'np.abs', 'sqrt': 'np.sqrt', 'pow': 'np.power',
        'count': 'np.count_nonzero', 'sum': 'np.sum',
        'sin': 'np.sin', 'cos': 'np.cos', 'tan': 'np.tan',
        'if': 'np.where',
        'top.RGBA': 'topRGBA[:,:,:]', 'top.RGB': 'topRGBA[:,:,:3]',
        'top.R': 'topRGBA[:,:,0]', 'top.G': 'topRGBA[:,:,1]',
        'top.B': 'topRGBA[:,:,2]', 'top.A': 'topRGBA[:,:,3]',
        'top.CMYK': 'topCMYK[:,:,:]', 'top.CMY': 'topCMYK[:,:,:3]',
        'top.C': 'topCMYK[:,:,0]', 'top.M': 'topCMYK[:,:,1]',
        'top.Y': 'topCMYK[:,:,2]', 'top.K': 'topCMYK[:,:,3]',
        'top.HSV': 'topHSV[:,:,:]',
        'top.H': 'topHSV[:,:,0]', 'top.S': 'topHSV[:,:,1]',
        'top.V': 'topHSV[:,:,2]',
        'bottom.RGBA': 'botRGBA[:,:,:]', 'bottom.RGB': 'botRGBA[:,:,:3]',
        'bottom.R': 'botRGBA[:,:,0]', 'bottom.G': 'botRGBA[:,:,1]',
        'bottom.B': 'botRGBA[:,:,2]', 'bottom.A': 'botRGBA[:,:,3]',
        'bottom.CMYK': 'botCMYK[:,:,:]', 'bottom.CMY': 'botCMYK[:,:,:3]',
        'bottom.C': 'botCMYK[:,:,0]', 'bottom.M': 'botCMYK[:,:,1]',
        'bottom.Y': 'botCMYK[:,:,2]', 'bottom.K': 'botCMYK[:,:,3]',
        'bottom.HSV': 'botHSV[:,:,:]',
        'bottom.H': 'botHSV[:,:,0]', 'bottom.S': 'botHSV[:,:,1]',
        'bottom.V': 'botHSV[:,:,2]',
    }
    for i, val in enumerate(equation):
        if val and val[0] not in r'0123456789,()%*-+/!<>=|&':
            if val not in replacements:
                raise Exception('ERR: illegal value in equation "' + val + '"')
            equation[i] = replacements[val]
    equation = '(' + (''.join(equation)) + ')'
    # run the operation and join the results with dstack()
    final = None
    for channelSet in eval(equation):
        if final is None:
            final = channelSet
        else:
            final = np.dstack((final, channelSet))
    # convert to RGB colorspace if necessary
    if resultForm == 'HSV':
        final = hsv2rgbArray(final)
    elif resultForm == 'CMYK':
        final = cmyk2rgbArray(final)
    final = final * shift
    # if alpha channel was missing, add one
    if final.shape[-1] < 4:
        # calculate the alpha channel
        comp_alpha = np.maximum(
            np.minimum(topRGBA[:, :, 3], botRGBA[:, :, 3]) * opacity, shift)
        new_alpha = topRGBA[:, :, 3] + (1.0 - topRGBA[:, :, 3]) * comp_alpha
        np.seterr(divide='ignore', invalid='ignore')
        alpha = comp_alpha / new_alpha
        alpha[np.isnan(alpha)] = 0.0
        # blend the pixels
        combined = final
        combined = np.clip(combined, 0.0, 255.0)
        # clean up and reassemble
        #ratio_rs=
        final = np.reshape(
            combined, [combined.shape[0], combined.shape[1], combined.shape[2]])
        #final=combined*ratio_rs+topRGBA[:,:,:3]*(1.0-ratio_rs)
        final = np.dstack((final, alpha))
    # convert the final result back into a PIL image
    final = Image.fromarray(final.astype('uint8'), mode)
    return final
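# A usage sketch (hypothetical helper; assumes generalBlend and its module
# helpers are available): a multiply blend written as an equation string,
# keeping the top image's alpha channel via the comma operator.
def _exampleGeneralBlend():
    top = Image.new('RGBA', (32, 32), (255, 128, 0, 255))
    bottom = Image.new('RGBA', (32, 32), (64, 200, 255, 255))
    return generalBlend(top, 'RGBA=top.RGB*bottom.RGB,top.A', bottom)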
def _color(bottom, top):
    """take hue and saturation from `bottom` and value from `top`"""
    # TODO: Very close, but seems to lose some blue values compared to gimp
    bottomH = rgb2hsvArray(bottom)
    topH = rgb2hsvArray(top)
    return hsv2rgbArray(np.dstack((bottomH[:, :, :2], topH[:, :, 2])))
def _value(bottom, top):
    """take hue and saturation from `top` and value from `bottom`"""
    bottomH = rgb2hsvArray(bottom)
    topH = rgb2hsvArray(top)
    return hsv2rgbArray(np.dstack((topH[:, :, :2], bottomH[:, :, 2])))
def _saturation(bottom, top):
    """keep hue and value from `top`, take saturation from `bottom`"""
    bottomH = rgb2hsvArray(bottom)
    topH = rgb2hsvArray(top)
    return hsv2rgbArray(
        np.dstack((topH[:, :, 0], bottomH[:, :, 1], topH[:, :, 2])))
def _hue(bottom, top):
    """take hue from `bottom`, saturation and value from `top`"""
    bottomH = rgb2hsvArray(bottom)
    topH = rgb2hsvArray(top)
    return hsv2rgbArray(np.dstack((bottomH[:, :, 0], topH[:, :, 1:])))
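# A usage sketch for the hue/saturation/value/color helpers above
# (hypothetical helper; assumes rgb2hsvArray, hsv2rgbArray, and np as defined
# in this file): they take and return numpy RGB arrays with values in 0..1.
def _exampleHsvBlendHelpers():
    bottom = np.random.rand(8, 8, 3)
    top = np.random.rand(8, 8, 3)
    blended = _hue(bottom, top)  # hue from bottom, saturation+value from top
    return blended.shape  # (8, 8, 3)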