def __init_image(self, image, image_offset=(0, 0), image_reference=None, **kwargs):
    self._image = None
    self._drawshape = None
    if image is not None:
        # Collect all "image_"-prefixed keyword arguments
        img_kwargs = {}
        for k, v in kwargs.items():
            if k.startswith('image_'):
                img_kwargs[k[6:]] = v
        if isinstance(image, str):
            image = Image(image, self.pos, **img_kwargs)
        else:
            raise NotImplementedError
        self._image = image

        offset = asvector(image_offset)
        if image_reference in ['pos_ne', 'pos_nw', 'pos_se', 'pos_sw',
                               'pos_left', 'pos_right', 'pos_up', 'pos_down']:
            pos_ref = getattr(self, image_reference)
            pos_img_ref = getattr(image, image_reference)
            offset += pos_ref - pos_img_ref
        elif image_reference not in ['middle', None]:
            raise ValueError('invalid image reference: %r' % image_reference)
        # Note: ImageChops.offset returns a new image; the result is
        # discarded here, so the computed offset is never applied
        ImageChops.offset(image.texture._pil, int(offset[0]), int(offset[1]))
    else:
        self._drawshape = self._init_drawshape(color=self.color or black,
                                               linecolor=self.linecolor,
                                               linewidth=self.linewidth)
def trim(_directory, legend=True):
    os.chdir(_directory)
    for subdir, dirs, files in os.walk(_directory):
        for fn in files:
            fn = os.path.join(subdir, fn)
            if os.path.splitext(fn)[1] == '.png':
                im = Image.open(fn)
                # Trim the border matching the top-left pixel color
                bg = Image.new(im.mode, im.size, im.getpixel((0, 0)))
                diff = ImageChops.difference(im, bg)
                diff = ImageChops.add(diff, diff, 2.0, -100)
                bbox = diff.getbbox()
                im = im.crop(bbox)
                data = np.asarray(im)
                # Heuristic: detect a legend column on the right edge
                if legend and (np.sum(data[:, data.shape[1] - 1, 0]) > 255 * data.shape[0] * .1 and
                               np.sum(data[:, data.shape[1] - 1, 0]) < 255 * data.shape[0] * .75) or \
                        np.sum(data[:, data.shape[1] - 1, 0]) == 0 or \
                        re.search("TAT", os.path.splitext(fn)[0]):
                    i = data.shape[1] - 1
                    # Scan left past the legend (non-white columns), then past
                    # the all-white gap separating it from the plot
                    while np.sum(data[:, i, :]) != 255 * data.shape[2] * data.shape[0]:
                        i = i - 1
                    while np.sum(data[:, i, :]) == 255 * data.shape[2] * data.shape[0]:
                        i = i - 1
                    w, h = im.size
                    im = im.crop((0, 0, i, h))
                im.save(os.path.splitext(fn)[0] + "_cut" + os.path.splitext(fn)[1])
def trim(im, color):
    bg = Image.new(im.mode, im.size, 0)
    diff = ImageChops.difference(im, bg)
    diff = ImageChops.add(diff, diff, 2.0, -100)
    bbox = diff.getbbox()
    if bbox:
        return color.crop(bbox)
def trim(origin_im, blur=True, pre_percentage=PERCENTAGE_TO_CROP_SCAN_IMG, upper_lower_cut=True):
    im = crop_by_percentage(origin_im, pre_percentage)
    if blur:
        im_blurred = im.filter(ImageFilter.GaussianBlur(radius=2))
        bg = Image.new(im_blurred.mode, im_blurred.size, im.getpixel((0, 0)))
        diff = ImageChops.difference(im_blurred, bg)
        diff = ImageChops.add(diff, diff, 2.0, -100)
        bbox = diff.getbbox()
    else:
        bg = Image.new(im.mode, im.size, im.getpixel((0, 0)))
        diff = ImageChops.difference(im, bg)
        bbox = diff.getbbox()
    if bbox:
        if upper_lower_cut:
            return im.crop(bbox)
        else:
            # Keep the full height, pad the horizontal bounds by 5% of the width
            width, height = im.size
            bbox = list(bbox)
            bbox[0] = max(0, bbox[0] - int(width * 0.05))
            bbox[1] = 0
            bbox[2] = min(width, bbox[2] + int(width * 0.05))
            bbox[3] = height
            return im.crop(bbox)
    else:
        # raise Exception("Error while cropping image, there is no bounding box to crop")
        return im
def getAdjacent(self, provinceID):
    """ Returns a list of adjacent provinceIDs. """
    # get province bounding box
    provinceColorImage = ImageChops.constant(self.provinceImage,
                                             self.provinceColorFromID[provinceID])
    mask = ImageChops.invert(neImage(self.provinceImage, provinceColorImage))
    xMin, yMin, xMax, yMax = mask.getbbox()
    # grow box
    # TODO: wraparound
    xMin = max(0, xMin - 1)
    yMin = max(0, yMin - 1)
    xMax = min(self.provinceImage.size[0] - 1, xMax + 1)
    yMax = min(self.provinceImage.size[1] - 1, yMax + 1)
    box = (xMin, yMin, xMax, yMax)
    # crop to area
    mask = mask.crop(box)
    growFilter = ImageFilter.Kernel((3, 3), (0, 1, 0, 1, 0, 1, 0, 1, 0))
    mask = mask.filter(growFilter)
    provinceColorImage = provinceColorImage.crop(box)
    blackImage = ImageChops.constant(provinceColorImage, (0, 0, 0))
    provinceColorImage = Image.composite(blackImage, provinceColorImage, mask)
    borderColors = provinceColorImage.getcolors()
    result = [color for (count, color) in borderColors]
    return result
def crop_image(self):
    try:
        image = Image.open(io.BytesIO(urllib.request.urlopen(self.image).read()))
    except:
        raise
    else:
        image_out = io.BytesIO()
        # Crop away the uniform border (color of the top-left pixel)
        border = Image.new(image.mode, image.size, image.getpixel((0, 0)))
        diff = ImageChops.difference(image, border)
        diff = ImageChops.add(diff, diff, 2.0, -100)
        bbox = diff.getbbox()
        if bbox:
            image = image.crop(bbox)
            image.save(image_out, 'JPEG')
        else:
            raise ValueError('Unable to determine image bounding box')
        s3 = boto3.resource('s3')
        region = os.environ.get('AWS_DEFAULT_REGION')
        bucket = s3.Bucket(os.environ.get('AWS_S3_BUCKET'))
        key = (self.venue.replace(" ", "_") + '-' +
               self.title_raw.replace(" ", "_") + '-' +
               self.start.strftime("%y-%m-%d") + '.jpg')
        try:
            bucket.put_object(Key=key, Body=image_out.getvalue(), ACL='public-read')
        except:
            raise
        else:
            self.image = 'http://s3-{}.amazonaws.com/{}/{}'.format(region, bucket.name, key)
            self.save()
def are_images_equal(img_actual, img_expected, result):
    """
    :param img_actual: the image we want to compare
    :param img_expected: the base image
    :param result: path to save the result image; it will look black in
        places where the two images match
    :return: True if the images are identical (all pixels in the
        difference image are zero)
    """
    result_flag = False
    # Check that img_actual exists
    if not os.path.exists(img_actual):
        print('Could not locate the generated image: %s' % img_actual)
    # Check that img_expected exists
    if not os.path.exists(img_expected):
        print('Could not locate the baseline image: %s' % img_expected)
    if os.path.exists(img_actual) and os.path.exists(img_expected):
        actual = Image.open(img_actual)
        expected = Image.open(img_expected)
        result_image = ImageChops.difference(actual, expected)
        # Where the real magic happens
        if ImageChops.difference(actual, expected).getbbox() is None:
            result_flag = True
        # Store the overlay: the result image will look black in places
        # where the two images match
        color_matrix = [0] + ([255] * 255)
        result_image = result_image.convert('L')
        result_image = result_image.point(color_matrix)
        result_image.save(result)  # Save the result image
    return result_flag
def DetectBorder(image):
    """Determine whether or not an image contains a border.

    Args:
        image: A PIL Image object.

    Returns:
        A boolean representing whether or not the image has a border.
    """
    image_width, image_height = image.size
    bg = Image.new(image.mode, image.size, image.getpixel((0, 0)))
    diff = ImageChops.difference(image, bg)
    # Adjusting the offset to 115 correctly identified one of the clear cases.
    # Could potentially make this a flag in order to test thresholds.
    diff = ImageChops.add(diff, diff, 2.0, -115)
    bbox = diff.getbbox()
    if bbox is None:
        # A fully uniform image produces no diff bbox; treat it as borderless
        return False
    return all((
        # Ensure that the upper-left bounding box coordinate has no value on
        # the X- or Y-axis (non-zero).
        bbox[0], bbox[1],
        # Ensure that the sum of the upper-left X and lower-right X is less
        # than the original width.
        (bbox[0] + bbox[2]) <= image_width,
        # Ensure that the sum of the upper-left Y and lower-right Y is less
        # than the original height.
        (bbox[1] + bbox[3]) <= image_height
    ))
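# A minimal usage sketch (not from the original sources) for the border
# detector above; the file name is hypothetical and the PIL imports are
# assumed (`from PIL import Image, ImageChops`).
def _detect_border_demo(path="photo.png"):
    im = Image.open(path).convert("RGB")
    # DetectBorder compares the image against a canvas filled with its
    # top-left pixel; a diff bbox strictly inside the frame means a border
    print("border detected:", DetectBorder(im))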
def trim(im, color):
    # crop based on the binary image to zoom into the largest area
    bg = Image.new(im.mode, im.size, 0)
    diff = ImageChops.difference(im, bg)
    diff = ImageChops.add(diff, diff, 2.0, -100)
    bbox = diff.getbbox()
    if bbox:
        return color.crop(bbox)  # actually returns the cropped image color, not im
def add_images(self, im1, im2):
    offset = self.findOffset(im1, im2)
    if offset == (0, 0):
        return ImageChops.add(im1, im2)
    else:
        return ImageChops.add(im1, ImageChops.offset(im2, offset[0], offset[1]))
def trim(im):
    bg = Image.new(im.mode, im.size, im.getpixel((0, 0)))
    diff = ImageChops.difference(im, bg)
    diff = ImageChops.add(diff, diff, 50.0, -1)
    bbox = diff.getbbox()
    if bbox:
        return im.crop(bbox)
def CompositeTiles(self, key):
    """Composite together all the tiles in this cell into a single image."""
    composite = None
    numLayers = len(self.tiles[key])
    numOverlays = len(self.overlays)
    for layer in sorted(self.tiles[key]):
        image = self.tiles[key][layer]
        if not composite:
            composite = image.copy()  # Create output image buffer
        else:
            try:
                # Paste the layer using its alpha channel, scaled by the
                # overlay's opacity, as the mask
                composite.paste(
                    image, (0, 0),
                    ImageChops.multiply(
                        image.split()[3],
                        ImageChops.constant(image, int(self.overlays[layer].opacity * 255))))
            except:
                # TODO: Why do we get errors here after deleting overlays?
                pass
    return composite
def chops_filter(img):
    chops = ["invert", "offset"]
    ch = random.choice(chops)
    if ch == "invert":
        return ImageChops.invert(img)
    if ch == "offset":
        return ImageChops.offset(img, random.randint(0, img.size[0]))
def meteorStack():
    files = glob.glob("./IMG*.*.JPG")
    files.sort()
    finalimage = Image.open(files[0])
    initialFrames = 20
    for i in range(0, initialFrames):
        print(str(i) + " of " + str(initialFrames))
        currentimage = Image.open("./" + files[i])
        # Keep the brightest value of each pixel across frames
        finalimage = ImageChops.lighter(finalimage, currentimage)
        filename = "stackmax" + str(i).zfill(3) + ".jpg"
        finalimage.save(filename, "JPEG")
    start = initialFrames
    stop = initialFrames * 2
    for i in range(initialFrames, len(files)):
        finalimage = Image.open(files[i])
        print('start stop: ' + str(start) + ' ' + str(stop))
        for j in range(start, stop):
            print(str(j))
            # currentimage = Image.open("./" + files[start - j])
            currentimage = np.asarray(Image.open("./" + files[start - j]))
            currentimage = currentimage.astype('uint32')
            # currentimage = currentimage / j
            currentimage = Image.fromarray(currentimage.astype('uint8'))
            finalimage = ImageChops.lighter(finalimage, currentimage)
        filename = "stackmax" + str(i).zfill(3) + ".jpg"
        finalimage.save(filename, "JPEG")
        start += 1
        stop = start + initialFrames
    print("haven't quite got start stop and the iteration direction correct yet!")
def makeDiffImages(self, aDestDir):
    diffBands = list(self._getDiff().split())
    assert len(diffBands) in (1, 3)
    diffs = {}
    baseDiffName = "Diff_" + self.source + "_" + self.targetImage
    # Invert the diffs.
    for i in range(len(diffBands)):
        diffBands[i] = ImageChops.invert(diffBands[i])
    temp = ["R", "G", "B"]
    for i in range(len(diffBands)):
        name = temp[i] + baseDiffName
        # Highlight the differing pixels
        if self.PYRAMID_DIFF not in self.diffsInUse and self.DIFF_SCORE not in self.diffsInUse:
            diffBands[i] = Image.eval(
                diffBands[i],
                lambda x: (x // (255 - self.PIXEL_DIFF_PASS_LIMIT)) * 255)
        # Following line commented as we don't need to save bitmaps for the
        # separate R, G or B channels.
        # diffBands[i].save(aDestDir + name, "BMP")
        diffs[temp[i]] = name
    if len(diffBands) == 3:
        rgbDiff = ImageChops.darker(diffBands[0],
                                    ImageChops.darker(diffBands[1], diffBands[2]))
    else:
        rgbDiff = diffBands[0]
    rgbDiffName = "RGB" + baseDiffName
    rgbDiff.save(aDestDir + rgbDiffName, "BMP")
    diffs["RGB"] = rgbDiffName
    self.diffImages = diffs
    return diffs
def diff_images(self, im1, im2):
    offset = self.findOffset(im1, im2)
    if offset == (0, 0):
        return ImageChops.difference(im1, im2)
    else:
        return ImageChops.difference(im1, ImageChops.offset(im2, offset[0], offset[1]))
def autocrop_image(inputfilename, outputfilename=None, color='white',
                   newWidth=None, doShow=False):
    im = Image.open(inputfilename)
    try:
        # get hex colors
        rgbcolor = hex_to_rgb(color)
    except Exception:
        if color not in _all_possible_colornames:
            raise ValueError("Error, color name = %s not in valid set of color names." % color)
        rgbcolor = webcolors.name_to_rgb(color)
    bg = Image.new(im.mode, im.size, rgbcolor)
    diff = ImageChops.difference(im, bg)
    diff = ImageChops.add(diff, diff, 2.0, -100)
    bbox = diff.getbbox()
    if bbox:
        cropped = im.crop(bbox)
        if newWidth is not None:
            height = int(newWidth * 1.0 / cropped.size[0] * cropped.size[1])
            cropped = cropped.resize((newWidth, height))
        if outputfilename is None:
            cropped.save(inputfilename)
        else:
            cropped.save(os.path.expanduser(outputfilename))
        if doShow:
            cropped.show()
        return True
    else:
        return False
def _apply_rad_mask(self, kaleidoscope_image):
    msf = self.mask_size_factor
    mbl = self.mask_blur
    mstr = self.mask_strength
    # generate mask and fill white
    mask = Image.new(kaleidoscope_image.mode, kaleidoscope_image.size)
    draw = ImageDraw.Draw(mask)
    draw.rectangle([(0, 0), kaleidoscope_image.size], fill=(255, 255, 255, 255))
    # draw filled circle
    f_size = (1 - msf) / 2
    size = min(kaleidoscope_image.size)
    low = size * f_size
    high = size - (size * f_size)
    grey_val = int((1 - mstr) * 255)
    grey_col = (grey_val, grey_val, grey_val, 255)
    draw.pieslice([(low, low), (high, high)], 0, 360, fill=grey_col)
    # blur, apply and return
    mask = mask.filter(ImageFilter.GaussianBlur(radius=mbl))
    enh = ImageEnhance.Contrast(kaleidoscope_image)
    im_contrast = enh.enhance(0.7)
    kaleidoscope_image = ImageChops.darker(im_contrast, kaleidoscope_image)
    multiplied_image = ImageChops.multiply(kaleidoscope_image, mask)
    return ImageChops.multiply(multiplied_image, kaleidoscope_image)
def show_heat_map(image, patches, color, output_filename):
    scale = 4
    image_dim = 200 * scale
    im = image.convert("RGB")
    if max(im.size) != image_dim:
        # Resize so the longer side equals image_dim (integer division keeps
        # pixel dimensions integral)
        if im.size[0] > im.size[1]:
            new_height = (image_dim * im.size[1]) // im.size[0]
            new_dim = (image_dim, new_height)
        else:
            new_width = (image_dim * im.size[0]) // im.size[1]
            new_dim = (new_width, image_dim)
        print('Resizing image from (%d, %d) to (%d, %d).' %
              (im.size[0], im.size[1], new_dim[0], new_dim[1]))
        im = im.resize(new_dim, Image.ANTIALIAS)
    base_c = 40
    heatmap = Image.new("RGB", im.size, (base_c, base_c, base_c))
    draw = ImageDraw.Draw(heatmap, "RGBA")
    coeff = 2 * (255 - base_c) // len(patches)
    coeff = max(coeff, 160)
    for patch in patches:
        pos = patch[0] * scale
        size = patch[1] * scale
        endPos = pos + size
        draw.rectangle([pos[0], pos[1], endPos[0], endPos[1]],
                       fill=(255, 255, 255, coeff))
    ImageChops.multiply(im, heatmap).save(output_filename, "jpeg")
    print("Saved image " + output_filename)
def write_image(image):
    """Writes a 1024x1280 PIL.Image object to EPD format."""
    config = load_config()
    clean_output_dir()
    output_path = os.path.join(config.get("output_dir"), "out.epd")
    panel_header_data = [
        0x3D,    # panel_type
        0x0400,  # x_res
        0x0500,  # y_res
        0x01,    # color_depth
        0x00     # pixel_data_format_type
    ]
    panel_header_data_reserved_filler = 0x00  # RFU - 9 filler bytes
    with open(output_path, "bw") as epdfile:
        bits = panel_header_data + [panel_header_data_reserved_filler for i in range(9)]
        header = struct.pack('b h h b b b b b b b b b b b', *bits)
        payload = header + image.tobytes()
        try:
            epdfile.write(payload)
            ImageChops.invert(image).save(os.path.join("output", "preview.png"))
        except (IOError, OSError):
            print("Error writing output image to epd file.")
            raise
def subtract_grayscale(img, threshold=20):
    # Threshold ~= max( max(R,G,B) - min(R,G,B) ) of grays.
    # For a mapimage (high quality image), a threshold of 14 seems to work
    # well. A value of 17 seems good and conservative for an indoor iPhoto,
    # while 20, or 22, should be the max threshold even for poor quality
    # photos.
    # split the image into individual bands
    source = img.split()
    R, G, B, A = 0, 1, 2, 3
    # Create gray subtraction mask using "difference" and "point" methods.
    # The function given in the point method sets all pixel values
    # satisfying the expression (i <= threshold) to 255, else 0.
    maskRG = ImageChops.difference(
        source[R], source[G]).point(lambda i: i <= threshold and 255)
    maskGB = ImageChops.difference(
        source[G], source[B]).point(lambda i: i <= threshold and 255)
    maskBR = ImageChops.difference(
        source[B], source[R]).point(lambda i: i <= threshold and 255)
    maskRGB = ImageChops.multiply(maskRG, maskGB)
    maskRGBR = ImageChops.multiply(maskRGB, maskBR)
    # Paste transparent white into gray areas of original image, as
    # represented by mask
    img.paste((255, 255, 255, 0), None, maskRGBR)
    return img
def __rgbColorFinder(rgbImg, colormin=(0, 0, 0), colormax=(255, 255, 255),
                     allbands=1, rmode='1'):
    '''Analyzes an RGB image and returns an image of the same size where each
    pixel is WHITE if the pixel in rgbImg MATCHES the color range
    colormin-to-colormax, or BLACK if it DOES NOT MATCH.

    A pixel MATCHES the color range if allbands != 0 and, for EVERY band i,
    colormin[i] <= pixel[i] <= colormax[i]; or if allbands == 0 and, for ANY
    band i, colormin[i] <= pixel[i] <= colormax[i].

    rmode determines the mode of the returned image ("1", "L" or "RGB").
    '''
    rgbImg.load()
    inbands = rgbImg.split()
    outbands = []
    for srcband, cmin, cmax in zip(inbands, colormin, colormax):
        outbands.append(srcband.point(
            lambda v1, v2=cmin, v3=cmax: v2 <= v1 <= v3 and 255))
    if allbands == 0:
        # ANY band in range: per-pixel maximum across bands
        tband = ImageChops.lighter(ImageChops.lighter(outbands[0], outbands[1]), outbands[2])
    else:
        # EVERY band in range: per-pixel minimum across bands
        tband = ImageChops.darker(ImageChops.darker(outbands[0], outbands[1]), outbands[2])
    if rmode == 'L':
        return tband
    elif rmode == 'RGB':
        return Image.merge('RGB', (tband, tband, tband))
    else:  # rmode == '1'
        return tband.convert('1')
def get_adjacent(self, province_id):
    """ Returns a list of adjacent province_ids. """
    # get province bounding box
    province_color_image = ImageChops.constant(self.province_image,
                                               self.province_color_by_id[province_id])
    mask = ImageChops.invert(ne_image(self.province_image, province_color_image))
    x_min, y_min, x_max, y_max = mask.getbbox()
    # grow box
    # TODO: wraparound
    x_min = max(0, x_min - 1)
    y_min = max(0, y_min - 1)
    x_max = min(self.province_image.size[0] - 1, x_max + 1)
    y_max = min(self.province_image.size[1] - 1, y_max + 1)
    box = (x_min, y_min, x_max, y_max)
    # crop to area
    mask = mask.crop(box)
    grow_filter = ImageFilter.Kernel((3, 3), (0, 1, 0, 1, 0, 1, 0, 1, 0))
    mask = mask.filter(grow_filter)
    province_color_image = province_color_image.crop(box)
    black_image = ImageChops.constant(province_color_image, (0, 0, 0))
    province_color_image = Image.composite(black_image, province_color_image, mask)
    border_colors = province_color_image.getcolors()
    result = [color for (count, color) in border_colors]
    return result
def matchTemplate(searchImage, templateImage):
    minScore = -1000
    matching_xs = 0
    matching_ys = 0
    # convert images to "L" to reduce computation by factor 3 "RGB"->"L"
    searchImage = searchImage.convert(mode="L")
    templateImage = templateImage.convert(mode="L")
    searchWidth, searchHeight = searchImage.size
    templateWidth, templateHeight = templateImage.size
    # make a copy of templateImage and fill with color=1
    templateMask = Image.new(mode="L", size=templateImage.size, color=1)
    # loop over each pixel in the search image
    for xs in range(searchWidth - templateWidth + 1):
        for ys in range(searchHeight - templateHeight + 1):
            # set some kind of score variable to "All equal"
            score = templateWidth * templateHeight
            # crop the part from searchImage
            searchCrop = searchImage.crop((xs, ys, xs + templateWidth, ys + templateHeight))
            diff = ImageChops.difference(templateImage, searchCrop)
            notequal = ImageChops.darker(diff, templateMask)
            countnotequal = sum(notequal.getdata())
            score -= countnotequal
            if minScore < score:
                minScore = score
                matching_xs = xs
                matching_ys = ys
    print("Location=", (matching_xs, matching_ys), "Score=", minScore)
    im1 = Image.new('RGB', (searchWidth, searchHeight), (80, 147, 0))
    im1.paste(templateImage, (matching_xs, matching_ys))
    im1.save('template_matched_in_search.png')
def autoCrop(self, image):
    bg = Image.new(image.mode, image.size, image.getpixel((0, 0)))
    diff = ImageChops.difference(image, bg)
    diff = ImageChops.add(diff, diff, 2.0, -200)
    bbox = diff.getbbox()
    if bbox:
        return image.crop(bbox)
def merge_stack(images):
    '''
    Merges a series of *images* by keeping the darkest value of each pixel.

    Parameters
    ----------
    images : iterable of :py:class:`PIL.Image.Image`
        Image stack

    Returns
    -------
    :py:class:`PIL.Image.Image`
        Merged image
    '''
    # Guards
    if not images:
        raise ValueError('No images provided')
    elif len(images) == 1:
        return images[0]
    # stack up, keeping darkest pixels
    composite = ImageChops.darker(images[0], images[1])
    for im in images[2:]:
        composite = ImageChops.darker(composite, im)
    return composite
def main():
    # create an image via the Image library
    image = Image.new("RGB", (W, H), "black")
    # build the scene
    lumiere = Lumiere(Vector(-10, -20, 50), 1000000000)
    origin = Vector(0, 0, 55)
    fov = 90 * 3.14 / 180
    direction = Vector(0, 0, 1).getNormalized
    up = Vector(0, 1, 0).getNormalized
    right = direction.cross(up) * (-1)
    camera = Camera(origin, fov, direction, up, right)
    materiau_opaque1 = Materiau([1, 0, 0], False, 0)  # red
    materiau_opaque2 = Materiau([0, 1, 0], False, 0)  # green
    materiau_opaque3 = Materiau([0, 0, 1], False, 0)  # blue
    materiau_opaque4 = Materiau([0, 1, 1], False, 0)  # cyan
    materiau_opaque5 = Materiau([1, 1, 1], False, 0)  # white
    materiau_opaque6 = Materiau([1, 1, 0], False, 0)  # yellow
    materiau_reflechissant = Materiau([1, 1, 1], True, 0)
    materiau_transparent = Materiau([1, 1, 1], False, 1.5)
    s1 = Sphere(Vector(0, -2, 25), 10, materiau_transparent)
    s2 = Sphere(Vector(0, 0, 1000), 940, materiau_opaque5)   # back
    s3 = Sphere(Vector(0, 0, -1000), 940, materiau_opaque5)  # front
    s4 = Sphere(Vector(1000, 0, 0), 940, materiau_opaque3)   # right
    s5 = Sphere(Vector(-1000, 0, 0), 940, materiau_opaque1)  # left
    s6 = Sphere(Vector(0, 1000, 0), 990, materiau_opaque6)   # below
    s7 = Sphere(Vector(0, -1000, 0), 940, materiau_opaque2)  # above
    scene = Scene([s1, s2, s3, s4, s5, s6, s7], lumiere, camera)
    # multiprocessing setup
    out_q = Queue()
    imageProcess = []
    imageQuadrant = []
    # start one process per quadrant
    for i in range(n_quadrants):
        imageProcess.append(Process(target=scene.getImage,
                                    args=(image, n_rebonds, i, out_q,
                                          n_echantillons, diffus)))
        imageProcess[i].start()
    for i in range(n_quadrants):
        imageQuadrant.append(out_q.get())
    for i in range(n_quadrants):
        imageProcess[i].join()
    # rebuild the image from the quadrants
    image_haute = ImageChops.add(imageQuadrant[0], imageQuadrant[1])
    image_basse = ImageChops.add(imageQuadrant[2], imageQuadrant[3])
    image = ImageChops.add(image_haute, image_basse)
    image.show()
    image.save("image.jpg")
def multiply_image(img, file, amount=1.0):
    path = "%s%s" % (filter_path, file)
    filterImg = Image.open(path)
    if amount != 1.0:
        # Brighten the filter toward white as `amount` decreases, so a lower
        # amount weakens the multiply effect
        filterImg = ImageChops.add(
            filterImg,
            ImageChops.constant(filterImg, int((1 - amount) * 255)).convert("RGB"))
    return ImageChops.multiply(filterImg, img)
def trim(im):
    """Trim whitespace from image."""
    bg = Image.new(im.mode, im.size, im.getpixel((0, 0)))
    diff = ImageChops.difference(im, bg)
    diff = ImageChops.add(diff, diff, 2.0, -100)
    bbox = diff.getbbox()
    if bbox:
        return im.crop(bbox)
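# Usage sketch for the trim() pattern above (hypothetical file name; assumes
# `from PIL import Image, ImageChops`). The difference against a background
# filled with the corner pixel is non-zero only over the content, and
# add(diff, diff, 2.0, -100) doubles the diff then subtracts 100 so that
# near-background noise drops to zero before getbbox() locates the content.
def _trim_demo(path="scan.png"):
    im = Image.open(path)
    trimmed = trim(im)
    if trimmed is not None:  # trim() returns None for a fully uniform image
        trimmed.save("scan_trimmed.png")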
def MakeGlitchGifVSH(image, len_=60, blockSize=16, sigma=10, iterations=300,
                     random_=True, Glitch_=False):
    im = Image.open(image)
    VSH = imageio.mimread(os.path.join('IMAGES', 'vsh.gif'))
    VSH = extendgif(VSH, len_)
    nFrames = []
    glitchVar = 0
    path = '/'.join(image.split('/')[:-1])
    name = image.split('/')[-1]
    fname = name.split('.')[0]
    path += '/glitch_' + fname + '.gif'
    frames = [im.copy() for a in range(len_)]
    i = 0
    for frame in frames:
        i += 1
        # Randomly start a glitch burst lasting up to `sigma` frames
        if random.randint(0, 15) >= 10 and glitchVar == 0:
            glitchVar = random.randint(1, sigma)
        if glitchVar != 0:
            frame = GlitchRet(frame.convert('RGB'), Glitch_=Glitch_,
                              sigma=glitchVar, blockSize=blockSize,
                              iterations=iterations, random_=random_)
            glitchVar -= 1
        # Multiply in the matching VHS-noise frame
        frame = ImageChops.multiply(
            frame, Image.fromarray(VSH[i]).resize(frame.size).convert('RGB'))
        nFrames.append(np.asarray(frame.convert('RGB')))
    imageio.mimwrite(path, nFrames)
    return path
def notBlack(img):
    notblack = {}
    i = 0
    while i < len(img):
        x = 0
        y = 0
        # Compare each frame with the previous one (index -1 wraps to the
        # last frame on the first iteration)
        for pixel in ImageChops.difference(img[i - 1], img[i]).getdata():
            if greyscale(pixel) > 50:
                if (x, y) in notblack:
                    notblack[(x, y)] = notblack[(x, y)] + 1
                else:
                    notblack[(x, y)] = 1
            if x < 352:
                x += 1
            else:
                x = 0
                y += 1
        i += 1
    return notblack
def visual_diff(self, name):
    if vdiffs is not True:
        return
    path = os.path.join(self.path, 'selenium')
    file = os.path.join(path, name)
    if os.path.exists(file):
        path = self.take_screenshot(name)
        original = Image.open(file)
        current = Image.open(path)
        diff = ImageChops.difference(original, current)
        if diff.getbbox():
            path = os.path.join(os.path.dirname(path), 'vdiff.' + name)
            diff.save(path)
            raise Exception('Visual difference found, saved to ' + path)
        os.remove(path)
    else:
        self.take_screenshot(name, path, False)
def compare_images(image_a, image_b):
    """!
    Compare pillow image objects.
    Returns a difference image object if there are differences, or None if not.
    """
    diff_count = 0
    image_diff = ImageChops.difference(image_a, image_b)
    nx, ny = image_diff.size
    for x in range(0, int(nx)):
        for y in range(0, int(ny)):
            pixel = image_diff.getpixel((x, y))
            if pixel != 0 and pixel != (0, 0, 0, 0) and pixel != (0, 0, 0):
                print(f"Difference pixel: {pixel}")
                diff_count += 1
    if diff_count:
        print(f"ERROR: Found {diff_count} differences between images")
        return image_diff
    return None
def _compute_diff_box(cls, a, b, round_to=4):
    '''
    Find the four coordinates giving the bounding box of differences between
    a and b, making sure they are divisible by round_to

    Parameters
    ----------
    a : PIL.Image
        The first image
    b : PIL.Image
        The second image
    round_to : int
        The multiple to align the bbox to
    '''
    box = ImageChops.difference(a, b).getbbox()
    if box is None:
        return None
    return cls._round_bbox(box, a.width, a.height, round_to)
def make_thumb(image, size=(80, 80), pad=False):
    # http://stackoverflow.com/questions/9103257/resize-image-
    # maintaining-aspect-ratio-and-making-portrait-and-landscape-images-e
    image.thumbnail(size, Image.BILINEAR)
    image_size = image.size
    if pad:
        thumb = image.crop((0, 0, size[0], size[1]))
        # Integer offsets are required; center the thumbnail in the padded box
        offset_x = max((size[0] - image_size[0]) // 2, 0)
        offset_y = max((size[1] - image_size[1]) // 2, 0)
        thumb = ImageChops.offset(thumb, offset_x, offset_y)
    else:
        thumb = ImageOps.fit(image, size, Image.BILINEAR, (0.5, 0.5))
    return thumb
def thumb_resize(f_in, f_out, size=(img_width, img_height), pad=False):
    image = Image.open(f_in)
    image.thumbnail(size, Image.ANTIALIAS)
    image_size = image.size
    if pad:
        thumb = image.crop((0, 0, size[0], size[1]))
        # Integer offsets are required; center the thumbnail in the padded box
        offset_x = max((size[0] - image_size[0]) // 2, 0)
        offset_y = max((size[1] - image_size[1]) // 2, 0)
        thumb = ImageChops.offset(thumb, offset_x, offset_y)
    else:
        thumb = ImageOps.fit(image, size, Image.ANTIALIAS, (0.5, 0.5))
    thumb.save(f_out)
def track(somlist):
    max_frame = 250
    for i in range(24, max_frame):
        path = "./Data/images/tracking/ducks/ducks" + "{0:0=5d}".format(i) + ".png"
        current = ColoredImage(path)
        imglist = (ColoredImage(path, channel=0),
                   ColoredImage(path, channel=1),
                   ColoredImage(path, channel=2))
        img_compressed = compress_all(somlist, imglist)
        out = ImageChops.difference(current.im, img_compressed)
        out = out.convert("L")
        tile1.set_image(ImageQt(current.im))
        tile2.set_image(ImageQt(img_compressed))
        tile4.set_image(ImageQt(out))
        dnf_update(out)
        out.save(output_path + "out.png")
def flairAdded(newImage):
    """Compare fetch of flairs with an existing copy to see if new flairs were added.

    Args:
        newImage (Image): picture containing result of request for flairs.

    Returns:
        Boolean: result of comparing images.
    """
    # Look for differences between images.
    diff = ImageChops.difference(newImage, Image.open('original.png'))
    # New flair was added.
    if diff.getbbox():
        # Overwrite backup with new image.
        newImage.save('original.png', 'PNG')
        return True
    # No difference between the images.
    else:
        return False
def __getitem__(self, idx):
    imgname = os.path.join(self.root_dir, self.files[idx])
    image_input = Image.open(imgname)
    try:
        image_seg_target = Image.open(imgname.split('.')[0] + '_mask.png')
        image_seg_target = image_seg_target.convert('1')
    except:
        image_seg_target = Image.new(mode='1', size=image_input.size, color=0)
    image_input = resize(image_input)
    image_seg_target = resize(image_seg_target)
    image_input = center_crop(image_input)
    image_seg_target = center_crop(image_seg_target)
    image_seg_target = ImageChops.invert(image_seg_target)
    image_missed_input = self.mask_image(image_input, image_seg_target)
    image_missed_input = np.array(image_missed_input)
    # Replace masked-out (black) pixels with white
    for i in range(image_missed_input.shape[0]):
        for j in range(image_missed_input.shape[1]):
            if (image_missed_input[i, j, :] == [0, 0, 0]).all():
                image_missed_input[i, j, :] = [255, 255, 255]
    image_missed_input = Image.fromarray(image_missed_input)
    image_seg_target = to_tensor(image_seg_target)
    image_missed_input = to_tensor(image_missed_input)
    image_missed_input = normalize(image_missed_input)
    image_input = to_tensor(image_input)
    image_input = normalize(image_input)
    image_seg_target = image_seg_target.expand_as(image_missed_input)
    sample = {
        'input': image_missed_input,
        'mask': image_seg_target,
        'target': image_input,
        'name': self.files[idx]
    }
    return sample
def event(self):
    '''
    Determines if there is an event going on in the quad, based on the color
    and Euclidean distance of two images in the quad.

    :return: True if there is an event, False otherwise.

    Also displays the test cases: the image with the grey square is the
    baseline everything is compared to (the grey was the most common color in
    a cropped version of the size of the square), and the image with the
    white square is the case where there is an event.
    '''
    while len(self.img_intensity) < 1:
        pass
    pxl_coor = (250, 365, 500, 470)
    img_grey_large = np.asarray(self.img[-1])
    img_event = np.asarray(self.img[-1])
    img = self.img[-1].crop(pxl_coor)
    baseline = np.asarray(img)
    baseline.setflags(write=1)
    img_grey_large.setflags(write=1)
    img_event.setflags(write=1)
    # Fill the baseline crop with the reference grey
    for i in range(len(baseline[1, :])):
        for j in range(len(baseline[:, i])):
            baseline[j, i] = [170, 170, 168]
    # Paint the test squares into the full-size copies
    for i in range(249, 500):
        for j in range(365, 470):
            img_grey_large[j, i] = [170, 170, 168]
            img_event[j, i] = [255, 255, 255]
    img_grey = Image.fromarray(baseline, 'RGB')
    img_grey_large = Image.fromarray(img_grey_large, 'RGB')
    img_event = Image.fromarray(img_event, 'RGB')
    img_compare = ImageChops.subtract(img, img_grey)
    euclidean_dist = mth.sqrt(np.sum(np.array(img_compare.getdata()) ** 2))
    img_grey_large.show()
    img_event.show()
    return euclidean_dist > 8000
def UpdateTrainerLevel():
    img = GetScreen()
    TrainerLevelZone = (65, 730, 65 + 45, 730 + 25)
    img = img.crop(TrainerLevelZone)
    img = ImageOps.grayscale(img)
    HighContrast(img, 220)
    img = ImageChops.invert(img)
    NewTrainerLevel = ImgToString(img, "0123456789")
    try:
        NewTrainerLevel = int(NewTrainerLevel)
        if 1 <= NewTrainerLevel <= 40 and NewTrainerLevel >= GetTrainerLevel():
            SetTrainerLevel(NewTrainerLevel)
            return True
    except:
        pass
    return False
def compareTwoImages(pathToImgTest, pathToImgSource, mode):
    global TRIGGER
    imgTest = Image.open(pathToImgTest)
    imgSource = Image.open(pathToImgSource)
    # Root-mean-square of the difference histogram: bucket i of the
    # difference image's histogram counts the pixels that differ by i
    hist = ImageChops.difference(imgTest, imgSource).histogram()
    rms = math.sqrt(
        sum(count * (i ** 2) for i, count in enumerate(hist)) /
        (float(imgTest.size[0]) * imgTest.size[1]))
    imgTest.close()
    imgSource.close()
    if mode == "watching":
        return rms
    elif mode == "analysis":
        if rms > 19:
            TRIGGER = TRIGGER + 1
        return rms
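# Usage sketch for the histogram-RMS comparison above (hypothetical paths;
# assumes `import math` and `from PIL import Image, ImageChops`). An RMS of 0
# means the images are pixel-identical; the 19 used above is this script's
# empirically chosen trigger threshold.
def _rms_demo():
    score = compareTwoImages("frame_001.png", "frame_002.png", "watching")
    print("rms difference:", score)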
def get_differences(self, screenshots):
    from PIL import Image, ImageChops, ImageStat
    lhs = Image.open(io.BytesIO(base64.b64decode(screenshots[0]))).convert("RGB")
    rhs = Image.open(io.BytesIO(base64.b64decode(screenshots[1]))).convert("RGB")
    diff = ImageChops.difference(lhs, rhs)
    minimal_diff = diff.crop(diff.getbbox())
    mask = minimal_diff.convert("L", dither=None)
    stat = ImageStat.Stat(minimal_diff, mask)
    per_channel = max(item[1] for item in stat.extrema)
    count = stat.count[0]
    self.logger.info(
        "Found %s pixels different, maximum difference per channel %s" %
        (count, per_channel))
    return per_channel, count
def diff_offset(driver):
    pic1 = get_image(driver, 'geetest_canvas_bg geetest_absolute')
    pic2 = get_image(driver, 'geetest_canvas_fullbg geetest_fade geetest_absolute')
    captcha_bg = Image.open(pic1)
    captcha = Image.open(pic2)
    diff = ImageChops.difference(captcha, captcha_bg)
    im = np.array(diff)
    width, height = diff.size
    diff = []
    for i in range(height):
        for j in range(width):
            # black is not only (0, 0, 0); allow for compression noise
            if im[i, j, 0] > 15 or im[i, j, 1] > 15 or im[i, j, 2] > 15:
                diff.append(j)
                break
    return min(diff)
def lazybrush(self):
    (img, result) = lazybrush(self.sketch, self.colors, self.sigmabar.get(),
                              self.lambdabar.get(), self.Kbar.get(),
                              self.uselogsketch.get())
    print(result.shape)
    mode = "RGBA" if self.showlogsketch.get() else "RGB"
    self.output_img = Image.fromarray(result, "RGBA").convert(mode)
    wdt = self.image.width()
    hgt = self.image.height()
    if self.showlogsketch.get():
        img = Image.fromarray(img, "L")
    else:
        img = self.sketch
    print(wdt, hgt)
    self.result = ImageChops.multiply(self.output_img, img.convert(mode))
    self.resultTk = ImageTk.PhotoImage(self.result)
    self.resultsprite.create_image((wdt / 2, hgt / 2), image=self.resultTk)
def __init__(self, ref_image, src_image, alpha=False):
    # Accept either file paths or already-opened PIL images
    if isinstance(ref_image, str):
        self.ref_im = Image.open(ref_image)
    else:
        self.ref_im = ref_image
    if isinstance(src_image, str):
        self.src_im = Image.open(src_image)
    else:
        self.src_im = src_image
    # Ignore the alpha channel unless requested
    if not alpha:
        self.ref_im = self.ref_im.convert('RGB')
        self.src_im = self.src_im.convert('RGB')
    self.diff = ImageChops.difference(self.src_im, self.ref_im)
def image_voffset(generator, prob=0.4, max_pixels=5):
    ''' Generator that vertically offsets an image '''
    for inp, out in generator:
        img = inp[1]
        if prob < random.uniform(0, 1):
            image_offset = random.randrange(-max_pixels, max_pixels)
            # Paste the wrapped-around offset image onto a random solid color
            dst_im = Image.new("RGBA", img.size,
                               (ri(0, 255), ri(0, 255), ri(0, 255)))
            rot = ImageChops.offset(img, 0, image_offset)
            dst_im.paste(rot)
            img = dst_im.convert('RGB')
        yield [inp[0], img], out
def redraw_required(self, image):
    """
    Calculates the difference from the previous image and returns a boolean
    indicating whether a redraw is required. A side effect is that the
    ``bounding_box`` and ``image`` attributes are updated accordingly, as is
    priming :py:func:`getdata`.

    :param image: An image to render
    :type image: PIL.Image.Image
    :returns: ``True`` or ``False``
    """
    self.bounding_box = ImageChops.difference(self.image, image).getbbox()
    if self.bounding_box is not None:
        self.image = image.copy()
        return True
    else:
        return False
def do_it():
    nonlocal img
    im = Image.new('RGBA', img.size, color='#7289DA')
    # convert() returns an image whose format attribute is None, so check
    # the format before converting
    is_gif = img.format == 'GIF'
    img = img.convert('RGBA')
    if is_gif:
        def multiply(frame):
            return ImageChops.multiply(frame, im)

        data = func_to_gif(img, multiply, get_raw=True)
        name = 'blurple.gif'
    else:
        img = ImageChops.multiply(img, im)
        data = self.save_image(img)
        name = 'blurple.png'
    return data, name
def image_combo(self, prob_fig, prob_array, ans_fig, ans_array):
    # Per-pixel darker() intersections of the problem figures
    AB = self.centerImageArray(
        np.array(ImageChops.darker(prob_fig['A'], prob_fig['B'])))
    BC = self.centerImageArray(
        np.array(ImageChops.darker(prob_fig['B'], prob_fig['C'])))
    AC = self.centerImageArray(
        np.array(ImageChops.darker(prob_fig['A'], prob_fig['C'])))
    GH = []
    HI = []
    GI = []
    i = 0
    while i < len(ans_array):
        GH.append(self.centerImageArray(
            np.array(ImageChops.darker(prob_fig['G'], ans_fig[str(i)]))))
        HI.append(self.centerImageArray(
            np.array(ImageChops.darker(prob_fig['H'], ans_fig[str(i)]))))
        GI.append(self.centerImageArray(
            np.array(ImageChops.darker(prob_fig['G'], ans_fig[str(i)]))))
        i += 1
    if AB == prob_array['C']:
        g = 0
        while g < len(GH):
            if GH[g] == ans_array[str(g + 1)]:
                answer = g + 1
                print(answer)
            g += 1
        return answer
    elif BC == prob_array['A']:
        g = 0
        while g < len(HI):
            if HI[g] == ans_array[str(g + 1)]:
                answer = g + 1
                print(answer)
            g += 1
        return answer
    elif AC == prob_array['B']:
        g = 0
        while g < len(GI):
            if GI[g] == ans_array[str(g + 1)]:
                answer = g + 1
                print(answer)
            g += 1
        return answer
def run(self):
    f = self.sock.makefile('rwb')
    camera = PiCamera()
    camera.resolution = (1280, 720)
    camera.start_preview()
    time.sleep(2)
    camera.stop_preview()
    camera.capture('foo.jpg')
    stream = io.BytesIO()
    count = 0
    print('start sending image')
    try:
        for frame in camera.capture_continuous(stream, 'jpeg'):
            # check whether the image has changed
            prev_image = Image.open('foo.jpg')
            curr_image = Image.open(stream)
            diff = ImageChops.difference(prev_image, curr_image)
            if True:  # diff.getbbox():
                # if images are different, send the size of the file
                print("images are different")
                # size as a little-endian unsigned long
                f.write(struct.pack('<L', stream.tell()))
                f.flush()
                stream.seek(0)
                # wait for a request from the receiver
                request = f.read(struct.calcsize('<L'))
                if request == struct.pack('<L', ack):  # got request
                    f.write(stream.read())  # send image data
                    curr_image.save('foo.jpg')
                    print('sending image ', count)
                    stream.seek(0)
                    stream.truncate()
                    count += 1
                else:
                    # no request, buffer file
                    print('no request received, buffer file')
            else:
                print("images are the same")
    except:
        print('Oops!', sys.exc_info()[0], 'occurred')
    f.close()
    self.sock.close()
def process(self, image):
    """
    @param image -- The image to process.

    Returns a single image, or a list containing one or more images.
    """
    BaseFilter.process(self, image)
    if self.mode != 'gray':
        raise RuntimeError("NormalizeContrast only supports grayscale images.")
    if self.region == 'bbox':
        bbox = image.split()[1].getbbox()
        croppedImage = image.crop(bbox)
        croppedImage.load()
        alpha = croppedImage.split()[1]
        croppedImage = ImageOps.autocontrast(croppedImage.split()[0], cutoff=self.cutoff)
        croppedImage.putalpha(alpha)
        image.paste(croppedImage, bbox)
    elif self.region == 'mask':
        bbox = image.split()[1].getbbox()
        croppedImage = image.crop(bbox)
        croppedImage.load()
        alpha = croppedImage.split()[1]
        # Fill in the part of the cropped image outside the bounding box with
        # a uniform shade of gray
        grayImage = ImageChops.constant(croppedImage, 128)
        compositeImage = Image.composite(croppedImage, grayImage, alpha)
        # Equalize the composite image
        compositeImage = ImageOps.autocontrast(compositeImage.split()[0], cutoff=self.cutoff)
        # Paste the part of the equalized image within the mask back
        # into the cropped image
        croppedImage = Image.composite(compositeImage, croppedImage, alpha)
        croppedImage.putalpha(alpha)
        # Paste the cropped image back into the full image
        image.paste(croppedImage, bbox)
    elif self.region == 'all':
        alpha = image.split()[1]
        image = ImageOps.autocontrast(image.split()[0], cutoff=self.cutoff)
        image.putalpha(alpha)
    return image
def load_glyph_tiles_from(filename, cellw, cellh, fmt):
    """Load tiles from glyphs in filename.

    filename -- PIL image object in indexed color, or a filename to load
    cellw -- width of each glyph box, multiple of 8
    cellh -- height of glyph box, multiple of 8
    fmt -- a pilbmp2nes planemap string, such as "0,1" for GB
           or "0;1" for NES

    The glyphs in the image are left-aligned in their boxes, using color 0
    for transparent and the highest color value for space between glyphs.

    Return a list of lists of tiles in column-major order.
    """
    if isinstance(filename, str):
        im = Image.open(filename)
    else:
        im, filename = filename, '<image>'
    if im.mode != 'P':
        raise ValueError("%s: not indexed color" % filename)
    st = ImageStat.Stat(im)
    bordercolor = st.extrema[0][1]
    # Crop each glyph out of the image
    portions = (im.crop((x, y, x + cellw, y + cellh))
                for y in range(0, im.size[1], cellh)
                for x in range(0, im.size[0], cellw))
    # Crop out the internal border to right of each glyph
    # ImageChops.difference(im, flat).getbbox(): thanks Eugene Nagorny
    # http://stackoverflow.com/q/10615901/2738262
    flatborder = Image.new(im.mode, (cellw, cellh), bordercolor)
    bboxes = ((portion, ImageChops.difference(portion, flatborder).getbbox())
              for portion in portions)
    portionsC = (portion.crop(bb) if bb is not None else None
                 for portion, bb in bboxes)
    # Now break each glyph down into tiles
    fmtTile = lambda im: formatTilePlanar(im, fmt)
    portionsT = [pilbmp2chr(portion, 8, 32, fmtTile) if portion is not None else []
                 for portion in portionsC]
    return portionsT
def smart_update(self, image):
    """
    Display a frame, automatically deciding which refresh method to use.

    If `fast_refresh` is enabled, it will use optimized LUTs that shorten the
    refresh cycle and skip the full "inverse, black, white, black again, then
    draw" flush cycle. The fast refresh mode is much faster, but causes the
    image to appear gray instead of black, and can cause burn-in if it's
    overused. It's recommended to do a full flush "soon" after using the fast
    mode, to avoid degrading the panel. You can tweak `partial_refresh_limit`.
    """
    if self._last_frame is None or \
            self._partial_refresh_count == self.partial_refresh_limit:
        # Do a full refresh when:
        # - No frame has been displayed in this run
        # - The display has been partially refreshed more than LIMIT times
        #   since the last full refresh (to prevent burn-in)
        self.display_frame(image)
    else:
        # Partial update. Start by figuring out the bounding box of the
        # changed area
        difference = ImageChops.difference(self._last_frame, image)
        bbox = difference.getbbox()
        if bbox is not None:
            # The old and new pictures differ, so a partial update is
            # needed. Get the update area; x and w have to be multiples of 8
            # as per the spec, so round down for x and round up for w.
            x = _nearest_mult_of_8(bbox[0], False)
            y = bbox[1]
            w = _nearest_mult_of_8(bbox[2] - x)
            if w > self.width:
                w = self.width
            h = bbox[3] - y
            if h > self.height:
                h = self.height
            # Now figure out whether fast mode is an option: if the area was
            # all white before, fast mode can be used; otherwise a slow
            # refresh is used (to avoid ghosting). Since the image has a
            # single band, each pixel is either 0 or 255, so this convenient
            # one-liner works:
            fast = 0 not in self._last_frame.crop(bbox).getdata() and self.fast_refresh
            self.display_partial_frame(image, x, y, h, w, fast)
def compareUnequal(image1, image2, numberOfRequiredUnequalPixels,
                   percentageOfRGBDifferenceRequiredPerPixel):
    log.info("Requiring {}% difference on RGBA value per pixel, and require {} really distinct pixels"
             .format(percentageOfRGBDifferenceRequiredPerPixel * 100,
                     numberOfRequiredUnequalPixels))
    if not _checkImageSizeEqual(image1, image2):
        return False
    nrEqualPixels = 0
    nrTooSimilarPixels = 0
    totalNumberOfPixels = image1.width * image1.height
    # PIL image comparison is well optimized -> early out if images are identical
    if image1 == image2:
        nrEqualPixels = totalNumberOfPixels
        nrTooSimilarPixels = totalNumberOfPixels
    else:
        imageDiff = ImageChops.difference(image1.convert("RGBA"), image2.convert("RGBA"))
        imageData = imageDiff.getdata()
        percentageOfRGBDifferenceRequiredPerPixelScaled = \
            int(percentageOfRGBDifferenceRequiredPerPixel * 255)
        for i in range(0, image1.width * image1.height):
            chMax = max(imageData[i])
            if chMax < percentageOfRGBDifferenceRequiredPerPixelScaled:
                nrTooSimilarPixels += 1
                if chMax == 0:
                    nrEqualPixels += 1
    log.important_info("Comparison stats: Percentage of too similar pixels: {}% ({})"
                       .format(float(nrTooSimilarPixels) / totalNumberOfPixels * 100,
                               nrTooSimilarPixels))
    log.important_info("Comparison stats: Percentage of exactly equal pixels: {}% ({})"
                       .format(float(nrEqualPixels) / totalNumberOfPixels * 100,
                               nrEqualPixels))
    if totalNumberOfPixels - nrTooSimilarPixels < numberOfRequiredUnequalPixels:
        log.error("compareUnequal: Not enough unequal pixels, aborting...")
        return False
    return True
def testFoo(self):
    for image_number_1 in range(9):
        for image_number_2 in range(9):
            if image_number_1 != image_number_2:
                im1 = self.get_item("randoms", image_number_1)
                im2 = self.get_item("randoms", image_number_2)
                # RMS difference via the difference-image histogram (from the
                # Pillow guide): bucket idx counts pixels whose per-channel
                # difference is idx % 256
                h = ImageChops.difference(im1, im2).histogram()
                sq = (value * ((idx % 256) ** 2) for idx, value in enumerate(h))
                sum_of_squares = sum(sq)
                rms = math.sqrt(sum_of_squares / float(im1.size[0] * im1.size[1]))
                print(rms)
def calculate_fitness_contrast_mean_difference(original_image, generated_image):
    if original_image.width != generated_image.width:
        print('FITNESS ERROR! Images are different sizes.')
    # Increase contrast of both images
    original_image = ImageEnhance.Contrast(original_image).enhance(1.5)
    generated_image = ImageEnhance.Contrast(generated_image).enhance(1.5)
    # Calculate the difference between the two images
    difference = ImageChops.difference(original_image, generated_image)
    # The average brightness of the differential image tells us how similar
    # the images are
    stats = ImageStat.Stat(difference)
    average_brightness = (stats.rms[0] + stats.rms[1] + stats.rms[2]) / 3
    # Return a fitness value in [0, 1]; 1 means identical
    return (255 - average_brightness) / 255
def test_scroll_wrap_return_to_start(makeScroll):
    sw = makeScroll('High', (19, 8))
    startImg = sw.render()[0]
    images = []
    for i in range(19):
        img, res = sw.render()
        if res:
            images.append(img)
    flag = False
    for img in images:
        # An empty diff bbox means this frame matches the starting image
        bbox = ImageChops.difference(img, startImg).getbbox()
        if not bbox:
            flag = True
            break
    assert flag, "scroll didn't return to start"
def apply(self, image, **kwargs):
    super(XORTransformation, self)._validate(**kwargs)
    # Convert each image into a black and white bi-level representation.
    # Use a custom threshold since Pillow dithers the image, adding noise.
    # Reference: https://stackoverflow.com/a/50090612
    image = image.copy().point(lambda x: 255 if x > 200 else 0).convert('1')
    other = kwargs['other'].copy().point(lambda x: 255 if x > 200 else 0).convert('1')
    # Apply the logical XOR to find all pixels that differ between the two
    # frames, convert it back to grayscale, and apply a Gaussian blur to
    # emphasize the differences. The blur is useful because some random
    # pixels might be flagged as differing even though the difference is not
    # perceptible to the human eye; with the blur, only the strongest
    # differences are kept, e.g. a full white or black shape.
    return ImageChops.logical_xor(image, other).convert('L').filter(
        ImageFilter.GaussianBlur(radius=10))
def compare(self, cam_ip):
    red = '\033[91m'
    green = '\033[92m'
    reset = '\033[0m'
    for i in range(1, 120, 2):
        if self.kill:
            return 0
        time.sleep(2)
        imgno1, imgno2 = "%03d" % (i,), "%03d" % (i + 1,)
        img1 = Image.open('image_{0}-{1}.jpg'.format(cam_ip.split('.')[-1], imgno1))
        img2 = Image.open('image_{0}-{1}.jpg'.format(cam_ip.split('.')[-1], imgno2))
        img = ImageChops.difference(img1, img2)
        image_ent = self.image_entropy(img)
        if image_ent < 0.8:
            colour = green
        else:
            colour = red
        motionTime = int(time.time())
        diff = ImageChops.difference(img1, img2)
        print(diff.getbbox())
        # Copy/save the diff image and both source frames
        img.save('{0}_{1}.png'.format(cam_ip.split('.')[-1], motionTime))
        shutil.copy2('image_{0}-{1}.jpg'.format(cam_ip.split('.')[-1], imgno1),
                     '{0}_1_{1}.jpg'.format(cam_ip.split('.')[-1], motionTime))
        shutil.copy2('image_{0}-{1}.jpg'.format(cam_ip.split('.')[-1], imgno2),
                     '{0}_2_{1}.jpg'.format(cam_ip.split('.')[-1], motionTime))
        print('{0}Motion detected - saving image to {1}_{2}.png{3}'.format(
            red, cam_ip.split('.')[-1], motionTime, reset))
        print("IP: {0}\t Entropy: {1}{2}{3}".format(cam_ip, colour, image_ent, reset))
        # Remove the source images
        os.unlink('image_{0}-{1}.jpg'.format(cam_ip.split('.')[-1], imgno1))
        os.unlink('image_{0}-{1}.jpg'.format(cam_ip.split('.')[-1], imgno2))