def make(src_path, out_path):
    src = Image.open(src_path)
    src = src.copy()
    (srcr, srcg, srcb, srca) = src.split()
    white = ImageChops.constant(src, 255)

    outr = cast_gradation(srcr, 0, 90)
    outg = cast_gradation(srcg, 0, 90)
    outb = cast_gradation(srcb, 0, 90)
    outa = srca.copy()

    outr = ImageChops.composite(srcr, white, srca)
    outg = ImageChops.composite(srcg, white, srca)
    outb = ImageChops.composite(srcb, white, srca)

    (shadow_a, shadow) = make_inset_shadow(srca)
    outr = ImageChops.subtract(outr, shadow, 1, 0)
    outg = ImageChops.subtract(outg, shadow, 1, 0)
    outb = ImageChops.subtract(outb, shadow, 1, 0)
    outa = ImageChops.lighter(outa, shadow_a)

    (highlight_a, highlight) = make_highlight(srca)
    outa = ImageChops.lighter(outa, highlight)

    outa = ImageChops.subtract(outa, ImageChops.constant(outa, 25), 1, 0)

    out = Image.merge('RGBA', (outr, outg, outb, outa))
    out.save(out_path)
def generate_cloud_nm_channel(srcImg):
    # channels
    r, g, b, a = srcImg.split()

    # helper images
    gray = Image.new('L', srcImg.size, (127))
    yellowRGB = Image.new('RGB', srcImg.size, (255, 255, 0))

    # discard 'too yellow' values
    oneMinusYellowness = ImageChops.difference(Image.merge('RGB', (r, g, b)), yellowRGB)
    yR, yG, yB = oneMinusYellowness.split()
    oneMinusYellowness = ImageChops.lighter(yR, yG)
    yellowness = ImageChops.invert(oneMinusYellowness)
    yellowness = ImageChops.lighter(yellowness, gray)
    yellowness = ImageChops.subtract(yellowness, gray)
    yellowness = ImageChops.add(yellowness, yellowness)
    #yellowness.save("Y:/art/source/particles/textures/clouds/yellowness.png")

    halfRed = ImageChops.multiply(r, gray)    # 50% red
    halfGreen = ImageChops.multiply(g, gray)  # 50% green

    # compose
    dstImg = ImageChops.subtract(ImageChops.add(gray, halfRed), halfGreen)
    dstImg = ImageChops.composite(gray, dstImg, yellowness)

    return dstImg
def test_uri(self, name, uri): filename = os.path.join(self.directory, name) file1 = filename+'.runner1.png' file2 = filename+'.runner2.png' self.c1.doURI(uri, file1) self.c2.doURI(uri, file2) while ( file1 not in self.saved_release_images and file2 not in self.saved_nightly_images ) and ( uri not in self.framebusters) and (uri not in self.timeouts): sleep(1) if (uri in self.framebusters): print "FrameBusted "+uri return if (uri in self.timeouts): print "Timeout "+uri return rms = None counter = 0 while rms is None and counter < 6: try: rms, image1, image2, hist1, hist2 = diff_images(file1, file2) except: print 'Image is not ready, waiting 10 seconds.' sleep(10) counter += 1 if counter >= 6: print "Timeout exceeded waiting for image to be saved" return result = {"uri":uri, "release_image":file1, "nightly_image":file2, "difference":rms} if rms != 0: result["images_differ"] = True image1RGB = image1.convert('RGB') image2RGB = image2.convert('RGB') ImageChops.difference(image1RGB, image2RGB).save(filename+'.diff.difference.png') result["diff_difference_image"] = filename+'.diff.difference.png' ImageChops.multiply(image1, image2).save(filename+'.diff.multiply.png') result["diff_multiply_image"] = filename+'.diff.multiply.png' ImageChops.screen(image1, image2).save(filename+'.diff.screen.png') result["diff_screen_image"] = filename+'.diff.screen.png' ImageChops.add(image1, image2).save(filename+'.diff.add.png') result["diff_add_image"] = filename+'.diff.add.png' ImageChops.subtract(image1RGB, image2RGB).save(filename+'.diff.subtract.png') result["diff_subtract_image"] = filename+'.diff.subtract.png' ImageChops.lighter(image1, image2).save(filename+'.diff.lighter.png') result["diff_lighter_image"] = filename+'.diff.lighter.png' ImageChops.darker(image1, image2).save(filename+'.diff.darker.png') result["diff_darker_image"] = filename+'.diff.darker.png' else: result["images_differ"] = False return result
def cameraRun():
    log("Will Now Wait Ten Seconds")
    time.sleep(DELAY_RUN)  # Delay to take new pictures
    camera.capture('dump.jpg')
    time.sleep(3)
    camera.capture('orig.jpg')
    time.sleep(3)
    camera.capture('update.jpg')  # capture new image whenever there is a change
    img1 = Image.open('orig.jpg')
    img2 = Image.open('update.jpg')
    log("Captured Images")
    img1 = img1.filter(ImageFilter.FIND_EDGES)
    img1.save('orig.jpg')
    img2 = img2.filter(ImageFilter.FIND_EDGES)
    img2.save('update.jpg')
    diff = ImageChops.subtract(img2, img1)
    diff.save('diff.jpg')
    diff = Image.open("diff.jpg").convert('1')
    black, white = diff.getcolors()
    finalThresholdValue = thresholdCalc(white[0])  # Calling thresholdCalc()
    log("finalThresholdValue : ", str(finalThresholdValue))
    RDVtoPost = roomDeterminedValue(finalThresholdValue, white[0])  # Getting the value to post to the view
    log("RDVtoPost : ", str(RDVtoPost))
    log("Number of Black Pixels: ", str(black[0]))
    log("Number of White Pixels: ", str(white[0]))
    myData = {'pixels': RDVtoPost, 'roomid': 0}
    # The following try block makes sure the server is up and running to prevent program crashes
    try:
        rsp = requests.post('http://trambel.us/ustat/upload', data=myData, headers=HEADERS)  # graph data
        log("Posted New Pixel Count To Server")
    except:
        log("Connection Failed: Check Server Status")
        pass
def generateData(root, browser, line):
    for i in range(case_number):
        img = Image.open(open_root + "images/origins/" + str(line) + '_' + str(i) + '.png')
        img.save(root + str(browser) + '_' + str(i) + '_0.png')  # origin picture

        sub = ImageChops.difference(standard_pics[i], img)
        sub = sub.convert('RGB')
        sub.save(root + str(browser) + '_' + str(i) + '_1.png')  # edge picture

        sub = ImageChops.subtract(standard_pics[i], img, 0.005)
        sub = sub.convert('RGB')
        sub.save(root + str(browser) + '_' + str(i) + '_2.png')  # standard - img

        sub = ImageChops.subtract(img, standard_pics[i], 0.005)
        sub = sub.convert('RGB')
        sub.save(root + str(browser) + '_' + str(i) + '_3.png')  # img - standard
def apply_mask(frame, bg, key_color, tolerance):
    # open files
    fg_img = frame.convert('YCbCr')
    bg_img = Image.open(bg).convert('RGB')

    img_size = 400, 300
    # resize image to fit homer.gif pixel size
    if bg_img.size != img_size:
        bg_img = bg_img.resize((img_size), Image.ANTIALIAS)
        bg_img = bg_img.crop((0, 0) + img_size)

    [Y_key, Cb_key, Cr_key] = key_color
    [tol_a, tol_b] = tolerance
    (x, y) = fg_img.size

    fg_data = numpy.array(fg_img.getdata())
    mask_vector = numpy.vectorize(color_close)
    alpha_mask = mask_vector(fg_data[:, 1], fg_data[:, 2], Cb_key, Cr_key, tol_a, tol_b)
    alpha_mask.shape = (y, x)
    img_mask = Image.fromarray(numpy.uint8(alpha_mask))
    invert_mask = Image.fromarray(numpy.uint8(255 - 255 * (alpha_mask / 255)))

    # create images for color mask
    color_mask = Image.new('RGB', (x, y), (0, 0, 0))
    all_green = Image.new('YCbCr', (x, y), key_color)
    color_mask.paste(all_green, invert_mask)

    fg_img = fg_img.convert('RGB')
    cleaned = ImageChops.subtract(fg_img, color_mask)
    bg_img.paste(cleaned, img_mask)
    return bg_img
def makeAlpha(whiteFile, blackFile):
    from PIL import Image, ImageChops
    W = Image.open(whiteFile)
    Wdata = W.getdata()
    B = Image.open(blackFile)
    Bdata = B.getdata()
    alpha = ImageChops.subtract(W, B)
    alpha = alpha.convert("L")
    alphaData = alpha.getdata()
    out = Image.new("RGBA", W.size)
    outData = list(out.getdata())
    size = W.size
    count = 0
    for y in range(size[1]):
        for x in range(size[0]):
            Bp = B.getpixel((x, y))
            a = alpha.getpixel((x, y))
            r = Bp[0] / 255.
            g = Bp[1] / 255.
            b = Bp[2] / 255.
            a = a / 255.
            a = 1 - a
            if a > 0.0:
                r = r / a
                g = g / a
                b = b / a
            r = int(r * 256)
            g = int(g * 256)
            b = int(b * 256)
            a = int(a * 256)
            outData[count] = (r, g, b, a)
            count += 1
    out.putdata(outData)
    return out
def CmpTrackerSubtr(fileName1="get_plot1.png", fileName2="get_plot2.png",
                    result="difference.png", offset=0, debug=False):
    """ Compares two images """
    if debug:
        print "open ", fileName1
    im1 = Image.open(fileName1)
    if debug:
        print "open ", fileName2
    im2 = Image.open(fileName2)
    if debug:
        print "sizes: ", im1.size, " ", im2.size
        print "info: ", im1.info, " ", im2.info
        print "mode: ", im1.mode, " ", im2.mode
    im1replaced = __rgbColorReplacer(im1, colormin=(0, 0, 0), colormax=(0, 0, 0),
                                     colornew=(255, 255, 255), allbands=1)
    im3 = ImageChops.subtract(im1replaced, im2, offset=offset)
    im3.save(result)
    __txt2img(result, "The more lighter plots, the higher the difference.",
              FontSize=15, pos=(530, 65))
    __txt2img(result, "More RED = First plot values higher, more BLUE = Second plot values higher.",
              FontSize=15, pos=(530, 85))
    return result
def refresh(self, matrix): if self.terminated: raise KeyboardInterrupt self._receive_events() frame = get_frame() frame = frame.convert("L") frame = frame.resize((self.width, self.height), PIL.Image.BILINEAR) last_frame = self.last_orig_frame self.last_orig_frame = frame if last_frame: frame = ImageChops.subtract(last_frame, frame, 0.05) r, g, b = colorsys.hsv_to_rgb(self.hue, 1.0, 1.0) self.hue += self.hue_rotation if self.hue > 1.0: self.hue -= 1.0 if self.hue < 0.0: self.hue += 1.0 r = int(r * 256) g = int(g * 256) b = int(b * 256) frame = ImageOps.colorize(frame, (0, 0, 0), (r, g, b)).convert("RGB") image = numpy.asarray(frame) faded = (self.last_final_array * self.fade).astype(matrix_module.DTYPE) image_mask = numpy.any(image > self.brightness_threshold, axis=2, keepdims=True) movement = numpy.count_nonzero(image_mask) >= self.min_move_count now = time.time() if movement: self.last_move = now seconds_since_movement = now - self.last_move seconds_since_sleep = now - self.last_sleep saw_movement = seconds_since_movement < self.movement_timeout and seconds_since_sleep >= self.min_sleep if saw_movement: self.move_count += 1 else: self.move_count = 0 enough_movement = self.showing or self.move_count >= self.min_wake_move show_mirror = enough_movement and saw_movement if show_mirror and not self.screen_save: if not self.showing: pass # Wake up self.showing = True self.last_final_array = numpy.where(image_mask, image, faded) matrix.buf.buf = numpy.copy(self.last_final_array) controls = self._render_controls() if controls is not None: matrix.buf.buf |= numpy.asarray(controls) else: if self.showing: # Enter dream state. matrix.buf.buf = numpy.empty(shape=(self.height, self.width, 3), dtype=buffer.DTYPE) self.last_sleep = now self.showing = False self.rotator.refresh(matrix)
def make_inset_shadow(alpha):
    mc = alpha.copy()
    for i in xrange(6):
        mc = mc.filter(ImageFilter.SMOOTH_MORE)
    mc = ImageChops.subtract(alpha, mc)
    mcb = ImageEnhance.Brightness(mc).enhance(0.35)

    m1 = alpha.copy()
    for i in xrange(6):
        m1 = m1.filter(ImageFilter.SMOOTH_MORE)
    m1 = ImageChops.offset(m1, 0, OFFSET_S)
    m1 = ImageChops.subtract(alpha, m1)
    m1b = ImageEnhance.Brightness(m1).enhance(0.35)

    m = ImageChops.lighter(mc, m1)
    mb = ImageChops.lighter(mcb, m1b)
    return (m, mb)
def Cimg_images():
    img1 = Image.open('/Users/enochbyers/Desktop/GONetwork/DarkFrames/darkframe14.png')
    i = 0
    for filename in os.listdir('/Users/enochbyers/Desktop/GONetwork/RawImages'):
        i += 1
        if filename.endswith(".png"):
            img2 = Image.open('/Users/enochbyers/Desktop/GONetwork/RawImages/%s' % filename)
            Cimg = ImageChops.subtract(img2, img1)
            Cimg.save("CorrectedImage%s.png" % i)
def subtractImage(self, im1, im2):
    imagediff = ImageChops.subtract(im1, im2)
    """Return the number of pixels in img that are not black.
    img must be a PIL.Image object in mode RGB.
    """
    bbox = imagediff.getbbox()
    if not bbox:
        return 0
    return sum(imagediff.crop(bbox).point(lambda x: 255 if x else 0).convert("L").point(bool).getdata())
def live_judge(name1, name2, object_list, save_flag, save_location=''):
    from PIL import Image, ImageChops
    im1 = Image.open(name1)
    im2 = Image.open(name2)
    im0 = ImageChops.subtract(im1, im2)
    im3 = two_convert(im0, 30, 230)
    if save_flag:
        im3.save(save_location)
    return dead_or_live(object_list, im3)
def test_subtract_clip(self):
    # Arrange
    im1 = hopper()
    im2 = Image.open("Tests/images/imagedraw_chord_RGB.png")

    # Act
    new = ImageChops.subtract(im1, im2)

    # Assert
    self.assertEqual(new.getpixel((50, 50)), (0, 0, 127))
def do_subtract(self):
    """usage: subtract <image:pic1> <image:pic2> <int:offset> <float:scale>

    Pop the two top images, produce the scaled difference with offset.
    """
    from PIL import ImageChops
    image1 = self.do_pop()
    image2 = self.do_pop()
    scale = float(self.do_pop())
    offset = int(self.do_pop())
    self.push(ImageChops.subtract(image1, image2, scale, offset))
def test_subtract_scale_offset(self):
    # Arrange
    im1 = Image.open("Tests/images/imagedraw_chord_RGB.png")
    im2 = Image.open("Tests/images/imagedraw_outline_chord_RGB.png")

    # Act
    new = ImageChops.subtract(im1, im2, scale=2.5, offset=100)

    # Assert
    self.assertEqual(new.getbbox(), (0, 0, 100, 100))
    self.assertEqual(new.getpixel((50, 50)), (100, 202, 100))
def test_subtract(self):
    # Arrange
    im1 = Image.open("Tests/images/imagedraw_chord_RGB.png")
    im2 = Image.open("Tests/images/imagedraw_outline_chord_RGB.png")

    # Act
    new = ImageChops.subtract(im1, im2)

    # Assert
    self.assertEqual(new.getbbox(), (25, 50, 76, 76))
    self.assertEqual(new.getpixel((50, 50)), GREEN)
    self.assertEqual(new.getpixel((50, 51)), BLACK)
def test_sanity(self):
    im = hopper("L")

    ImageChops.constant(im, 128)
    ImageChops.duplicate(im)
    ImageChops.invert(im)
    ImageChops.lighter(im, im)
    ImageChops.darker(im, im)
    ImageChops.difference(im, im)
    ImageChops.multiply(im, im)
    ImageChops.screen(im, im)

    ImageChops.add(im, im)
    ImageChops.add(im, im, 2.0)
    ImageChops.add(im, im, 2.0, 128)
    ImageChops.subtract(im, im)
    ImageChops.subtract(im, im, 2.0)
    ImageChops.subtract(im, im, 2.0, 128)

    ImageChops.add_modulo(im, im)
    ImageChops.subtract_modulo(im, im)

    ImageChops.blend(im, im, 0.5)
    ImageChops.composite(im, im, im)

    ImageChops.offset(im, 10)
    ImageChops.offset(im, 10, 20)
def postProcess(filename, dst, dt, tv, iso):
    ### edit exif / get exif data
    exif = GExiv2.Metadata(filename)
    av = exif['Exif.Photo.FNumber']

    img = Image.open(filename)

    ### subtract dark
    try:
        dark = Image.open(os.path.join(DARKDIR, 'dark.png'))
        img = ImageChops.subtract(img, dark)
        del dark
    except Exception, e:
        print e
def event(self): ''' Determines if there is an event going on in the quad. Based on the color and euclidean distance of two images in the quad :return: True if there is an event, false if otherwise Also displays the test cases. :return The image with the grey square is the baseline from which everything is compared to. The grey was the most common color in a cropped version of the size of the square :return The image with the white square is the case where there is an event ''' while len(self.img_intensity) < 1: pass pxl_coor = (250, 365, 500, 470) img_grey_large = np.asarray(self.img[-1]) img_event = np.asarray(self.img[-1]) img = self.img[-1].crop(pxl_coor) baseline = np.asarray(img) baseline.setflags(write=1) img_grey_large.setflags(write=1) img_event.setflags(write=1) for i in range(len(baseline[1, :])): for j in range(len(baseline[:, i])): baseline[j, i] = [170, 170, 168] for i in range(249, 500): for j in range(365, 470): img_grey_large[j, i] = [170, 170, 168] img_event[j, i] = [255, 255, 255] img_grey = Image.fromarray(baseline, 'RGB') img_grey_large = Image.fromarray(img_grey_large, 'RGB') img_event = Image.fromarray(img_event, 'RGB') img_compare = ImageChops.subtract(img, img_grey) euclidean_dist = mth.sqrt(np.sum(np.array(img_compare.getdata())**2)) img_grey_large.show() img_event.show() if euclidean_dist > 8000: return True else: return False
def _load_img_as_tensor(file_name, r1, r2, r3, startx=0, starty=0, noiseLevel=0, size=512): output_size = 336 with Image.open(file_name) as img: img = T.functional.crop(img, startx, starty, size, size) #img = T.functional.resize(img,output_size) angle = r1 * 90 img = T.functional.rotate(img, angle) if r2 == 1: img = T.functional.hflip(img) if r3 == 1: img = T.functional.vflip(img) # remove noise (all intensity lower than 10) if noiseLevel > 0: img = ImageChops.subtract(img, ImageChops.constant(img, noiseLevel)) # Contrast stretching img = ImageOps.autocontrast(img, cutoff=1, ignore=None) # VERY VERY slow #img = ndimage.median_filter(img, 3) #m1 = np.percentile(img,1) #m99 = np.percentile(img,99) #print(f'GG: {m1} {m99}') #img = np.where(img<m1,m1,img) #img = np.where(img>m99,m99,img) # transform = T.Compose([T.ToTensor(), normalize]) # transform = T.Compose([T.RandomVerticalFlip(), T.RandomHorizontalFlip(), T.ToTensor(), normalize]) img = T.ToTensor()(img) return img
def GetSegmentsInfo_fast(filename): # Open the segmentation image containing the labels for all segments (one label per pixel) seg_img = Image.open(filename) seg_img = seg_img.convert('L') # Open the txt file to see how many segments are there in the segmentation txt_fn = filename[0:len(filename) - len('.png')] + '.txt' num_seg = sum(1 for line in open( txt_fn)) # the num. of segments is equal to that of lines seg_info_list = [SegInfo() for i in range(num_seg)] # Open the colored all segment mask image all_seg_mask = seg_img.convert('RGB') cp = sns.color_palette("hls", config.num_col) # color palette cpi = [(int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)) for x in cp] # Create a subtracter image (all 1) used to peel a segment subtracter_img = Image.new('L', size=seg_img.size, color=1) # Create a background mask: 1 for foreground (segments) and 0 for background (non-segments) background_mask = seg_img.point(lambda p: p > 0 and 255).convert( '1', dither=Image.NONE) # Loop over all segments for idx in range(num_seg): # Peel one segment by subtract the all-1 subtracter image seg_img = ImageChops.subtract(seg_img, subtracter_img) # Binarize the segmentation image with the current segment peeled bi_seg_img = seg_img.point(lambda p: p > 0 and 255) # Invert the peeled, binarized segmentation image bool_img = ImgOps.invert(bi_seg_img).convert('1', dither=Image.NONE) # Logical AND between the peeled segmentation and background mask gets the segment image (1 for the segment) bool_img = ImageChops.logical_and(bool_img, background_mask) # Obtain the bounding box of the segment region (region with non-zero pixels) bbox = bool_img.getbbox() if bbox: seg_info_list[idx].minx = bbox[0] seg_info_list[idx].miny = bbox[1] seg_info_list[idx].maxx = bbox[2] seg_info_list[idx].maxy = bbox[3] # Count the number of non-zero pixels in that region seg_info_list[idx].numpnt = sum( bool_img.crop(bbox).point(bool).getdata()) seg_info_list[idx].mask = bool_img # Update the background mask for the next loop (subtract the current segment) background_mask = bi_seg_img.convert('1') all_seg_mask.paste( Image.new('RGB', all_seg_mask.size, (cpi[idx % config.num_col])), bool_img) return seg_info_list, all_seg_mask
def main():
    try:
        image = Image.open(sys.argv[1])
    except IOError:
        print("Could not open the input \nUsage tick_jpg inputfile.")
        sys.exit()
    # have an open image right now.
    im = np.real(np.array(image.convert('L')))
    b = jacobi_step(im, 10)
    inew = Image.fromarray(np.uint8(rescale(im, 255.0)))
    inew.save('before.jpg')
    iafter = Image.fromarray(np.uint8(rescale(b, 255.0)))
    iafter.save('after.jpg')
    ix = ImageChops.subtract(iafter, inew, 0.1)
    ix.save('difference.jpg')
    inew = Image.fromarray(np.uint8(rescale(generate_psf(im), 255.0)))
    inew.save('psf.jpg')
def combine_ffx_normals(fileMaskForward, fileMaskInverted):
    nm1 = Image.open(fileMaskForward.format("combined"))   # forward
    nm2 = Image.open(fileMaskInverted.format("combined"))  # inverted

    r, g, b, a = nm1.split()
    nm1 = Image.merge("RGB", (r, g, b))
    r, g, b, a = nm2.split()
    nm2 = Image.merge("RGB", (r, g, b))

    gray = Image.new("RGB", nm1.size, (128, 128, 128))
    white = Image.new("RGB", nm1.size, (255, 255, 255))

    h1 = ImageChops.multiply(nm1, gray)
    h2 = ImageChops.multiply(ImageChops.subtract(white, nm2), gray)

    r, g, b = ImageChops.add(h2, h1).split()
    out = Image.merge("RGBA", (r, g, b, a))
    out.save("C:/Projects/ffx/images/ffx_nm_final.tga")
def create_bin_mask_Oxford_flowers_102(args): """ Create binary masks. :param args: :return: """ def get_id(pathx, basex): """ Get the id of a sample. :param pathx: :return: """ rpath = relpath(pathx, basex) basen = basename(rpath) id = splitext(basen)[0].split('_')[1] return id baseurl = args.baseurl imgs = find_files_pattern(join(baseurl, 'jpg'), '*.jpg') bin_fd = join(baseurl, 'segmim_bin') if not os.path.exists(bin_fd): os.makedirs(bin_fd) else: # End. print('Conversion to binary mask has already been done. [OK]') return 0 # Background color [ 0 0 254]. (blue) print('Start converting the provided masks into binary masks ....') for im in tqdm.tqdm(imgs, ncols=80, total=len(imgs)): id_im = get_id(im, baseurl) mask = join(baseurl, 'segmim', 'segmim_{}.jpg'.format(id_im)) assert isfile(mask), 'File {} does not exist. Inconsistent logic. .... [NOT OK]'.format(mask) msk_in = Image.open(mask, 'r').convert('RGB') arr_ = np.array(msk_in) arr_[:, :, 0] = 0 arr_[:, :, 1] = 0 arr_[:, :, 2] = 254 blue = Image.fromarray(arr_.astype(np.uint8), mode='RGB') dif = ImageChops.subtract(msk_in, blue) x_arr = np.array(dif) x_arr = np.mean(x_arr, axis=2) x_arr = (x_arr != 0).astype(np.uint8) img_bin = Image.fromarray(x_arr * 255, mode='L') img_bin.save(join(bin_fd, 'segmim_{}.jpg'.format(id_im)), 'JPEG')
def motion(self):
    '''
    Determines whether or not motion took place between two images.
    Waits until 25 images have been retrieved to make sure the two images
    that are being compared are actually different. Webcam updates about
    once a minute so 25 images should be long enough.
    :return: True if motion occurred, False if motion did not occur
    '''
    while len(self.img) < 25:
        pass
    img1 = self.img[-25]
    img2 = self.img[-1]
    img3 = ImageChops.subtract(img1, img2)
    self.euclidean_dist = mth.sqrt(np.sum(np.array(img3.getdata())**2))
    if self.euclidean_dist > 8000:
        return True
    else:
        return False
def imagecopy(self, outputimagedata, imagedata, option): pastex = 0 pastey = 0 if re.compile("north").search(option): pastey = 0 elif re.compile("south").search(option): pastey = self.ledoptions_rows - imagedata.height else: pastey = int((float(self.ledoptions_rows) / float(2)) - (float(imagedata.height) / float(2))) if re.compile("east").search(option): pastex = 0 elif re.compile("west").search(option): pastex = self.ledoptions_cols - imagedata.width else: pastex = int((float(self.ledoptions_cols) / float(2)) - (float(imagedata.width) / float(2))) if re.compile("alpha").search(option): outputimagedata.paste(imagedata, (pastex, pastey), mask=imagedata) elif re.compile("diff").search(option): outputimagedata = ImageChops.difference( outputimagedata.convert("RGB"), imagedata.convert("RGB")) elif re.compile("add").search(option): outputimagedata = ImageChops.add(outputimagedata.convert("RGB"), imagedata.convert("RGB")) elif re.compile("subtract").search(option): outputimagedata = ImageChops.subtract( outputimagedata.convert("RGB"), imagedata.convert("RGB")) elif re.compile("multiply").search(option): outputimagedata = ImageChops.multiply( outputimagedata.convert("RGB"), imagedata.convert("RGB")) elif re.compile("screen").search(option): outputimagedata = ImageChops.screen(outputimagedata.convert("RGB"), imagedata.convert("RGB")) elif re.compile("lighter").search(option): outputimagedata = ImageChops.lighter( outputimagedata.convert("RGB"), imagedata.convert("RGB")) elif re.compile("darker").search(option): outputimagedata = ImageChops.darker(outputimagedata.convert("RGB"), imagedata.convert("RGB")) else: outputimagedata.paste(imagedata, (pastex, pastey)) return (outputimagedata)
def refresh(self, img, draw):
    # type: (Widget, Image, ImageDraw) -> None
    buf = Image.new('1', self._size)
    buf_draw = ImageDraw.Draw(buf)
    sz = (max(0, self._size[0] - 1), max(0, self._size[1] - 1))
    self._draw(buf, buf_draw)
    if self._draw_border:
        buf_draw.line([(0, 0), (sz[0], 0), sz, (0, sz[1]), (0, 0)], fill=1)
    if self._invert:
        buf2 = Image.new('1', self._size)
        buf2_draw = ImageDraw.Draw(buf2)
        buf2_draw.rectangle([(0, 0), sz], outline=255, fill=1)
        buf = ImageChops.subtract(buf2, buf)
    img.paste(buf, self._position)
    self._need_refresh = False
def GreenScreen(infile, inbg, outfile='/tmp/output.png', keyColor=None, tolerance=None): #open files inDataFG = Image.open(infile).convert('YCbCr') BG = Image.open(inbg).convert('RGB') #make sure values are set if keyColor == None: keyColor = inDataFG.getpixel((1, 1)) if tolerance == None: tolerance = [50, 130] [Y_key, Cb_key, Cr_key] = keyColor [tola, tolb] = tolerance (x, y) = inDataFG.size #get dimensions foreground = numpy.array(inDataFG.getdata()) #make array from image maskgen = numpy.vectorize(colorclose) #vectorize masking function alphaMask = maskgen(foreground[:, 1], foreground[:, 2], Cb_key, Cr_key, tola, tolb) #generate mask alphaMask.shape = (y, x) #make mask dimensions of original image imMask = Image.fromarray(numpy.uint8(alphaMask)) #convert array to image invertMask = Image.fromarray( numpy.uint8(255 - 255 * (alphaMask / 255))) #create inverted mask with extremes #create images for color mask colorMask = Image.new('RGB', (x, y), tuple([0, 0, 0])) allgreen = Image.new('YCbCr', (x, y), tuple(keyColor)) colorMask.paste( allgreen, invertMask) #make color mask green in green values on image inDataFG = inDataFG.convert( 'RGB') #convert input image to RGB for ease of working with cleaned = ImageChops.subtract(inDataFG, colorMask) #subtract greens from input BG.paste(cleaned, imMask) #paste masked foreground over background BG.show() #display cleaned image BG.save(outfile, "PNG") #save cleaned image
def get_game_screen_with_delta(self):
    curr_frame = self.vm.get_game_screen(return_im=True)
    if not self.last_frame:
        self.last_frame = curr_frame
    diff_frame = ImageChops.subtract(self.last_frame, curr_frame).convert("L")
    rgb_arr = np.array(curr_frame.getdata()).astype(np.float32)
    dlt_arr = np.array(diff_frame.getdata()).astype(np.float32).reshape(-1, 1)
    res_arr = np.concatenate((rgb_arr, dlt_arr), axis=1)
    res_arr *= (2.0 / 255.0)
    res_arr -= 1  # normalize to [-1,1]
    res_arr = res_arr.reshape((1, 4, curr_frame.size[1], curr_frame.size[0]))  # (bN,C,H,W)
    # add coordconv (they are created statically at the top of the file!)
    res_arr = np.concatenate((res_arr, coord_conv_layer), axis=1)
    self.last_frame = curr_frame
    return torch.tensor(res_arr)
def noise(image, minimal=0.5, maximal=1.0, palette=GRAYSCALE, mode=MULTIPLY):
    minimal, maximal = map(lambda v: int(v * 255), [minimal, maximal])

    if palette == GRAYSCALE:
        noise_image = _create_gray_noise(image.size, (minimal, maximal))
    if palette == RGB:
        if isinstance(minimal, (float, int)):
            minimal = (minimal, ) * 3
        if isinstance(maximal, (float, int)):
            maximal = (maximal, ) * 3
        noise_image = _create_rgb_noise(image.size, (minimal, maximal))

    if mode == MULTIPLY:
        return ImageChops.multiply(image, noise_image)
    if mode == ADD:
        return ImageChops.add(image, noise_image)
    if mode == SUBTRACT:
        return ImageChops.subtract(image, noise_image)
    if mode == SCREEN:
        return ImageChops.screen(image, noise_image)
    if mode == DIFFERENCE:
        return ImageChops.difference(image, noise_image)
def load_img(path, target_mode=None, target_size=None, num_frames=1, frame_ind=0):
    from PIL import Image, ImageChops
    #print(path)
    img_orig = Image.open(path)
    imgs = []
    width, height = img_orig.size
    for i in xrange(num_frames):
        imgs.append(img_orig.crop((i * width / num_frames, 0,
                                   (i + 1) * width / num_frames, height)))
    for i, img in enumerate(imgs):
        if target_mode:
            imgs[i] = img.convert(target_mode)
        if target_size:
            imgs[i] = img.resize((target_size[1], target_size[0]))
    imgs[frame_ind] = ImageChops.subtract(imgs[frame_ind], imgs[frame_ind + 1])
    return [imgs[frame_ind], imgs[frame_ind + 1]]
def saturation(image, amount=50):
    """Adjust brightness from black to white
    - amount: -1(black) 0 (unchanged) 1(white)
    - repeat: how many times it should be repeated"""
    if amount == 0:
        return image

    amount /= 100.0
    grey = image.convert("L").convert(image.mode)
    if amount < 0:
        #grayscale
        im = imtools.blend(image, grey, -amount)
    else:
        #overcolored = Image - (alpha * Grey) / (1 - alpha)
        alpha = 0.7
        alpha_g = grey.point(lambda x: x * alpha)
        i_minus_alpha_g = ImageChops.subtract(image, alpha_g)
        overcolored = i_minus_alpha_g.point(lambda x: x / (1 - alpha))
        im = imtools.blend(image, overcolored, amount)
    #fix image transparency mask
    if image.mode == 'RGBA':
        im.putalpha(imtools.get_alpha(image))
    return im
def __modelFeature(self, usebase, model='vgg'): scaler = transforms.Resize((224, 224)) normalizer = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) to_tensorer = transforms.ToTensor() modal = None if model == 'vgg': modal = models.vgg16(pretrained=True) modal.classifier = nn.Sequential( *list(modal.classifier.children())[:-1]) elif model == 'resnet': modal = models.resnet152(pretrained=True) modal = nn.Sequential(*list(modal.children())[:-1]) modal.eval() for p in modal.parameters(): p.requires_grad = False base = Image.open(self.base).convert('RGB') feature_arr = [] for i in range(1, len(self.sequence)): img = Image.open(self.sequence[i]).convert('RGB') x, y, w, h = self.__extractFacial(self.sequence[i]) img = img.crop((x, y, x + w, y + h)) if x > 0 else img t_base = base.crop((x, y, x + w, y + h)) if x > 0 else base if usebase: img = ImageChops.subtract(img, t_base) t_img = Variable(normalizer(to_tensorer(scaler(img))).unsqueeze(0)) preds = modal(t_img) feature_arr.append(self.__normalize(preds.data.numpy().flatten())) if model == 'vgg': self.vgg = feature_arr elif model == 'resnet': self.resnet = feature_arr
def get_object_rect_from_depth(depth, lightning_variation_threshold=24): background_depth_img = np.load('background_depth.npy') background_depth_img = cv2.applyColorMap(cv2.convertScaleAbs(background_depth_img, alpha=0.5), cv2.COLORMAP_TURBO) background_depth_img = cv2.cvtColor(background_depth_img, cv2.COLOR_BGR2RGB) background_depth_img = Image.fromarray(background_depth_img) # convert depth image to color coded image if depth.shape[0] == 1: depth = cv2.applyColorMap(cv2.convertScaleAbs(depth[0] * 430.0, alpha=0.5), cv2.COLORMAP_TURBO) else: depth = cv2.applyColorMap(cv2.convertScaleAbs(depth * 430.0, alpha=0.5), cv2.COLORMAP_TURBO) depth = cv2.cvtColor(depth, cv2.COLOR_BGR2RGB) depth_img = Image.fromarray(depth) image = ImageChops.subtract(depth_img, background_depth_img) mask1 = Image.eval(image, lambda a: 0 if a <=24 else 255) mask2 = mask1.convert('1') mask2 = np.array(mask2).astype('uint8') cnts = cv2.findContours(mask2.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) cnts = imutils.grab_contours(cnts) if len(cnts) < 1: return None area_array = [] for i, c in enumerate(cnts): area = cv2.contourArea(c) area_array.append(area) sorteddata = sorted(zip(area_array, cnts), key=lambda x: x[0], reverse=True) c = sorteddata[0][1] # M = cv2.moments(c) # center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])) rect = cv2.minAreaRect(c) binary_mask = np.zeros((depth_img.size[1], depth_img.size[0], 1), np.uint8) cv2.drawContours(binary_mask, [c], -1, (255, 255, 255), -1) return binary_mask, rect
def process_frames(self): assert all(f.size == self.frames[0].size for f in self.frames), \ 'all animation frames must be the same size' w, h = self.frames[0].size first = self.frames[0].getdata() px_size = len(first) // (w * h) base = self.frames[0].convert('RGBA') min_x, min_y = w, h max_x, max_y = 0, 0 for f in self.frames[1:]: data = f.getdata() for i in range(w * h): if data[i] != first[i]: i //= px_size x = i % w y = i // w min_x = min(min_x, x) min_y = min(min_y, y) max_x = max(max_x, x + 1) max_y = max(max_y, y + 1) base.paste((0, 0, 0, 0), (x, y, x + 1, y + 1)) self.static_base = base self.anim_offset = (min_x, min_y) self.anim_size = (max_x - min_x, max_y - min_y) sheet = Image.new('RGBA', (len(self.frames) * (max_x - min_x), max_y - min_y)) for i, f in enumerate(self.frames): tmp = ImageChops.subtract(f, base) tmp = tmp.crop((min_x, min_y, max_x, max_y)) sheet.paste(tmp, ((max_x - min_x) * i, 0)) self.anim_sheet = sheet
def diff(root_folder, sub_path, window_lenth=1):
    org_path = root_folder + sub_path
    collude_path = root_folder + "collude/" + sub_path
    start_time = time.clock()
    print("Starting get diff for", sub_path, "at", start_time, "s")
    sub_folders = [f for f in os.listdir(org_path)]
    for f in sub_folders:
        file_list = [sf for sf in listdir(org_path + f + "/")
                     if isfile(join(org_path + f, sf))]
        for im_path in file_list:
            tail = f + "/" + im_path
            url = root_folder + "diff/" + sub_path + f + "/"
            im1 = Image.open(org_path + tail)
            im2 = Image.open(collude_path + tail)
            idiff = ImageChops.subtract(im1, im2)
            createFolder(url)
            idiff.save(url + im_path)
    end_time = time.clock()
    print("End at", end_time, ", completed in", end_time - start_time, "s")
def __getitem__(self, idx): target = self.target.iloc[idx].values.tolist() image = self._get_image(self.features.iloc[idx]) # If n_step = 0 just use the gray frame image otherwise take the n_step difference if self.n_steps != 0: image_diff = np.array(ImageChops.subtract(Image.fromarray(image), Image.fromarray(self.prev_frame))) self.prev_frame = image if self.augment: if random.choice([True, False]): image_diff = image_diff[:, ::-1] # im = Image.fromarray(image_diff) # im.save(f'flipped+{idx}.png') target[1] *= -1. # Add some noise to the steering. target[1] += np.random.normal(loc=0, scale=self.sigma) # Clip between -1, 1 target[1] = np.clip(target[1], -1.0, 1.0) # If DO_AUGMENTATION is true, jitter will be applied to images, else just a resize # will be applied. # ToTensor() converts into 0-1 range. image_diff = self.transform(Image.fromarray(image_diff)) if self.debug: save_image(image_diff, os.path.join(self.cwd, f'train-images/image+{idx}.png')) # Train on both throttle, steer or just steer target = target if self.throttle_include else np.array([target[1]]) target = torch.FloatTensor(target) return image_diff, target
def highlight_motion(self):
    '''
    Creates an image that highlights the motion between 2 webcam images in red.
    Waits until 25 images have been retrieved to make sure the two images
    that are being compared are actually different. Webcam updates about
    once a minute so 25 images should be long enough.
    :return: The second picture, but with the different pixels highlighted in red
    '''
    while len(self.img) < 25:
        pass
    img1 = self.img[-25]
    img2 = self.img[-1]
    img3 = ImageChops.subtract(img1, img2)
    img2_data = np.asarray(img2)
    img3_data = np.asarray(img3)
    img2_data.setflags(write=1)
    for i in range(len(img3_data[1, :])):
        for j in range(len(img3_data[:, i])):
            avg = np.mean(img3_data[j, i])
            if avg > 35 and j > 250:
                img2_data[j, i] = [255, 0, 0]
    img_new = Image.fromarray(img2_data, 'RGB')
    img_new.show()
def image_compare(test_img: str, ref_img: str, tolerance: int = 2):
    try:
        out = Image.open(test_img)
    except Exception as ex:
        raise FileNotFoundError("Can't open {}".format(sanitise_filename(test_img)))

    try:
        ref = Image.open(ref_img)
    except Exception as ex:
        raise FileNotFoundError("Can't open {}".format(sanitise_filename(ref_img)))

    if out.mode != ref.mode or out.size != ref.size:
        return False

    # Generate the difference
    diff = ImageChops.difference(out, ref)

    # Subtract N from the difference, to allow for off-by-N errors that can be caused by rounding.
    # For example clearing to 0.5, 0.5, 0.5 has two valid representations: 127,127,127 and 128,128,128
    # which are equally far from true 0.5.
    diff = ImageChops.subtract(diff, Image.new(diff.mode, (diff.width, diff.height),
                                               (tolerance, tolerance, tolerance, tolerance)))

    # If the diff fails, dump the difference to a file
    diff_file = get_tmp_path('diff.png')
    if os.path.exists(diff_file):
        os.remove(diff_file)

    if sum(ImageStat.Stat(diff).sum) > 0:
        # this does (img1 + img2) / scale, so scale=0.5 means we multiply the image by 2/0.5 = 4
        diff = ImageChops.add(diff, diff, scale=0.5)
        diff.convert("RGB").save(diff_file)
        return False

    return True
def cropRadius(img, radius=0.0): img = img.copy() if radius > 1: radius = 1 elif radius < 0: radius = 0 width, height = img.size if radius != 0: scale = 8 # Antialiasing Drawing size_anti = width * scale, height * scale r = int(round(radius * min(width, height) * scale / 2)) old_mask = img.split()[-1].resize(size_anti) mask = Image.new('L', size_anti, 255) draw = ImageDraw.Draw(mask) draw.rectangle((0, r, size_anti[0], size_anti[1] - r), fill=0) draw.rectangle((r, 0, size_anti[0] - r, size_anti[1]), fill=0) draw.ellipse((0, 0, 2 * r, 2 * r), fill=0) draw.ellipse( (0 + size_anti[0] - 2 * r, 0, 2 * r + size_anti[0] - 2 * r, 2 * r), fill=0) draw.ellipse( (0, 0 + size_anti[1] - 2 * r, 2 * r, 2 * r + size_anti[1] - 2 * r), fill=0) draw.ellipse( (0 + size_anti[0] - 2 * r, 0 + size_anti[1] - 2 * r, 2 * r + size_anti[0] - 2 * r, 2 * r + size_anti[1] - 2 * r), fill=0) try: mask = ImageChops.subtract(old_mask, mask) except Exception as e: print(e) mask = mask.resize((width, height), Image.ANTIALIAS) img.putalpha(mask) return img else: return img
def main(): try: image = Image.open(sys.argv[1]) except IOError: print("Could not open the input \nUsage tick_jpg inputfile.") sys.exit() # have an open image right now. # old b&w code # im = np.real(np.array(image.convert('L'))) # b = jacobi_step(im, 10) # inew = Image.fromarray(np.uint8(rescale(im,255.0))) # inew.save('before.jpg') # iafter = Image.fromarray(np.uint8(rescale(b,255.0))) # iafter.save('after.jpg') # ix = ImageChops.subtract(iafter, inew,0.1) # ix.save('difference.jpg') # inew = Image.fromarray(np.uint8(rescale(generate_psf(im),255.0))) # inew.save('psf.jpg') # # imshow(im) r, g, b = image.split() rr = np.real(np.array(r)) gr = np.real(np.array(g)) br = np.real(np.array(b)) rp = jacobi_step(rr, 5) gp = jacobi_step(gr, 5) bp = jacobi_step(br, 5) rn = Image.fromarray(np.uint8(rescale(rp, 255.0))) gn = Image.fromarray(np.uint8(rescale(gp, 255.0))) bn = Image.fromarray(np.uint8(rescale(bp, 255.0))) inew = Image.merge("RGB", (rn, gn, bn)) inew.save('after.jpg') ix = ImageChops.subtract(inew, image, 0.1) ix.save('difference.jpg')
def draw_vector_mask(layer):
    from PIL import Image, ImageChops
    width = layer._psd.width
    height = layer._psd.height
    color = 255 * layer.vector_mask.initial_fill_rule

    mask = Image.new('L', (width, height), color)
    first = True
    for subpath in layer.vector_mask.paths:
        plane = _draw_subpath(subpath, width, height)
        if subpath.operation == 0:
            mask = ImageChops.difference(mask, plane)
        elif subpath.operation == 1:
            mask = ImageChops.lighter(mask, plane)
        elif subpath.operation == 2:
            if first:
                mask = ImageChops.invert(mask)
            mask = ImageChops.subtract(mask, plane)
        elif subpath.operation == 3:
            if first:
                mask = ImageChops.invert(mask)
            mask = ImageChops.darker(mask, plane)
        first = False
    return mask.crop(layer.bbox)
def __getitem__(self, index):
    self.lastLabel = self.label_list[index]
    img0_tuple = self.ref_list[index]
    img1_tuple = self.sim_list[index]

    stable = Image.open('/home/jasper/Documents/BP_Jasp/project/util/stable.png')
    img0 = Image.open(IMAGE_DIR + img0_tuple)
    img1 = Image.open(IMAGE_DIR + img1_tuple)
    diff = preProcess(img0, img1)

    if (GrayScale):
        diff = diff.convert("L")
        stable = stable.convert("L")
        img1 = ImageChops.subtract(img0, img1)

    if self.should_invert:
        stable = PIL.ImageOps.invert(stable)
        diff = PIL.ImageOps.invert(diff)

    if self.transform is not None:
        stable = self.transform(stable)
        diff = self.transform(diff)

    return stable, diff, torch.from_numpy(
        np.array([int(self.label_list[index])], dtype=np.float32))
def generateData(self, root, browser, line): for i in range(self.case_number): img = Image.open(self.open_root + str(line) + '_' + str(i) + '.png') img.save(root + str(browser) + '_' + str(i) + '_0.png') #origin picture edge = self.getEdge(img) if i == 3 or i == 4: sub = ImageChops.subtract(self.standard_pics[browser][i],img, 0.01) subt = ImageChops.subtract(img, self.standard_pics[browser][i], 0.01) ImageChops.add(sub, subt).convert('RGB').save(root + str(browser) + '_' + str(i) + '_1.png') #edge picture else: edge.save(root + str(browser) + '_' + str(i) + '_1.png') #edge picture sub = ImageChops.subtract(self.standard_pics[browser][i],img, 0.01) if i == 3 or i == 4: sub = ImageChops.subtract(sub, edge) sub = sub.convert('RGB') sub.save(root + str(browser) + '_' + str(i) + '_2.png') #standard - img sub = ImageChops.subtract(img, self.standard_pics[browser][i], 0.01) if i == 3 or i == 4: sub = ImageChops.subtract(sub, edge) sub = sub.convert('RGB') sub.save(root + str(browser) + '_' + str(i) + '_3.png') #img - standard
def threshold(img, thres):
    return ImageChops.subtract(img, Image.new(img.mode, img.size, BWtoRGB(thres)),
                               scale=(255. - thres) / 255)
def rasterize(path, pitch, origin, resolution=None, fill=True, width=None): """ Rasterize a Path2D object into a boolean image ("mode 1"). Parameters ------------ path: Path2D object pitch: float, length in model space of a pixel edge origin: (2,) float, origin position in model space resolution: (2,) int, resolution in pixel space fill: bool, if True will return closed regions as filled width: int, if not None will draw outline this wide (pixels) Returns ------------ raster: PIL.Image object, mode 1 """ # check inputs pitch = float(pitch) origin = np.asanyarray(origin, dtype=np.float64) # if resolution is None make it larget than path if resolution is None: span = np.vstack((path.bounds, origin)).ptp(axis=0) resolution = np.ceil(span / pitch) + 2 resolution = np.asanyarray(resolution, dtype=np.int64) resolution = tuple(resolution.tolist()) # convert all discrete paths to pixel space discrete = [((i - origin) / pitch).astype(np.int) for i in path.discrete] # draw the exteriors exteriors = Image.new(mode='1', size=resolution) edraw = ImageDraw.Draw(exteriors) # if a width is specified draw the outline if width is not None: width = int(width) for coords in discrete: edraw.line(coords.flatten().tolist(), fill=1, width=width) # if we are not filling the polygon exit if not fill: del edraw return exteriors # the path indexes that are exteriors # needed to know what to fill/empty but expensive roots = path.root # draw the interiors interiors = Image.new(mode='1', size=resolution) idraw = ImageDraw.Draw(interiors) for i, points in enumerate(discrete): # draw the polygon on either the exterior or # interior image buffer if i in roots: edraw.polygon(points.flatten().tolist(), fill=1) else: idraw.polygon(points.flatten().tolist(), fill=1) # clean up the draw objects # this is in their examples, I have no idea if # it is actually necessary del edraw del idraw # the final result is the exteriors minus the interiors raster = ImageChops.subtract(exteriors, interiors) return raster
source = 'cell.jpg'
output = ''

img = cv2.imread(source)
blur = cv2.GaussianBlur(img, (25, 25), 0)
#cv2.imwrite('blur.jpeg', blur)

sourcePIL = Image.open(source).convert('RGB')
blurPIL = Image.fromarray(blur).convert('RGB')

# Subtract blur from source
# Dependent on scale/offset
scale = .2
offset = 10
diff = ImageChops.subtract(sourcePIL, blurPIL, scale, offset)
diff.save("s" + str(scale) + "-o" + str(offset) + ".jpeg")

# Different blurs to try
''''
median_blur = cv2.medianBlur(img,15)
median_string = 'median_blur'
cv2.putText(median_blur,median_string,(20,20),cv2.FONT_HERSHEY_COMPLEX_SMALL,1,(255,255,255))
cv2.imshow('Blur',median_blur)
cv2.waitKey(3000)

sub_median = cv2.absdiff(img, median_blur)
def getDifference(img1, img2):
    sub = ImageChops.subtract(img1, img2, 0.005)
    subt = ImageChops.subtract(img2, img1, 0.005)
    return ImageChops.add(sub, subt).convert('RGB')  # edge picture
def build_map(slam_log_data, map_image_data): """ Parses the slam log to get the vacuum path and draws the path into the map. Returns the new map as a BytesIO. Thanks to CodeKing for the algorithm! https://github.com/dgiese/dustcloud/issues/22#issuecomment-367618008 """ map_image = Image.open(io.BytesIO(map_image_data)) map_image = map_image.convert('RGBA') # calculate center of the image center_x = map_image.size[0] / 2 center_y = map_image.size[0] / 2 # rotate image by -90° map_image = map_image.rotate(-90) red = (255, 0, 0, 255) grey = (125, 125, 125, 255) # background color transparent = (0, 0, 0, 0) # prepare for drawing draw = ImageDraw.Draw(map_image) # loop each line of slam log prev_pos = None for line in slam_log_data.split("\n"): # find positions if 'estimate' in line: d = line.split('estimate')[1].strip() # extract x & y y, x, z = map(float, d.split(' ')) # set x & y by center of the image # 20 is the factor to fit coordinates in in map x = center_x + (x * 20) y = center_y + (y * 20) pos = (x, y) if prev_pos: draw.line([prev_pos, pos], red) prev_pos = pos # draw current position def ellipsebb(x, y): return x-3, y-3, x+3, y+3 draw.ellipse(ellipsebb(x, y), red) # rotate image back by 90° map_image = map_image.rotate(90) # crop image bgcolor_image = Image.new('RGBA', map_image.size, grey) cropbox = ImageChops.subtract(map_image, bgcolor_image).getbbox() map_image = map_image.crop(cropbox) # and replace background with transparent pixels pixdata = map_image.load() for y in range(map_image.size[1]): for x in range(map_image.size[0]): if pixdata[x, y] == grey: pixdata[x, y] = transparent temp = io.BytesIO() map_image.save(temp, format="png") return temp
def createWaterfall(filename, colors, beamCount, shadeScale=1, zoom=1.0, annotate=True, xResolution=1, yResolution=1, rotate=False, gray=False, leftExtent=-100, rightExtent=100, distanceTravelled=0, navigation=[]): print("Processing file: ", filename) r = pyall.ALLReader(filename) totalrecords = r.getRecordCount() start_time = time.time() # time the process recCount = 0 waterfall = [] minDepth = 9999.0 maxDepth = -minDepth outputResolution = beamCount * zoom isoStretchFactor = (yResolution / xResolution) * zoom print("xRes %.2f yRes %.2f isoStretchFactor %.2f outputResolution %.2f" % (xResolution, yResolution, isoStretchFactor, outputResolution)) while r.moreData(): TypeOfDatagram, datagram = r.readDatagram() if (TypeOfDatagram == 0): continue if (TypeOfDatagram == 'X') or (TypeOfDatagram == 'D'): datagram.read() if datagram.NBeams == 0: continue # if datagram.SerialNumber == 275: for d in range(len(datagram.Depth)): datagram.Depth[ d] = datagram.Depth[d] + datagram.TransducerDepth # we need to remember the actual data extents so we can set the color palette mappings to the same limits. minDepth = min(minDepth, min(datagram.Depth)) maxDepth = max(maxDepth, max(datagram.Depth)) waterfall.insert(0, np.asarray(datagram.Depth)) # we need to stretch the data to make it isometric, so lets use numpy interp routing to do that for Us # datagram.AcrossTrackDistance.reverse() xp = np.array( datagram.AcrossTrackDistance ) #the x distance for the beams of a ping. we could possibly use the real values here instead todo # datagram.Depth.reverse() fp = np.array(datagram.Depth) #the depth list as a numpy array # fp = geodetic.medfilt(fp,31) x = np.linspace( leftExtent, rightExtent, outputResolution ) #the required samples needs to be about the same as the original number of samples, spread across the across track range # newDepths = np.interp(x, xp, fp, left=0.0, right=0.0) # run a median filter to remove crazy noise # newDepths = geodetic.medfilt(newDepths,7) # waterfall.insert(0, np.asarray(newDepths)) recCount += 1 if r.currentRecordDateTime().timestamp() % 30 == 0: percentageRead = (recCount / totalrecords) update_progress("Decoding .all file", percentageRead) update_progress("Decoding .all file", 1) r.close() # we have all data loaded, so now lets make a waterfall image... 
#--------------------------------------------------------------- print("Correcting for vessel speed...") # we now need to interpolate in the along track direction so we have apprximate isometry npGrid = np.array(waterfall) stretchedGrid = np.empty((0, int(len(npGrid) * isoStretchFactor))) for column in npGrid.T: y = np.linspace(0, len(column), len(column) * isoStretchFactor) #the required samples yp = np.arange(len(column)) w2 = np.interp(y, yp, column, left=0.0, right=0.0) # w2 = geodetic.medfilt(w2,7) stretchedGrid = np.append(stretchedGrid, [w2], axis=0) npGrid = stretchedGrid npGrid = np.ma.masked_values(npGrid, 0.0) if gray: print("Hillshading...") #Create hillshade a little brighter and invert so hills look like hills colorMap = None npGrid = npGrid.T * shadeScale * -1.0 hs = sr.calcHillshade(npGrid, 1, 45, 30) img = Image.fromarray(hs).convert('RGBA') else: print("Color mapping...") npGrid = npGrid.T # calculate color height map cmrgb = cm.colors.ListedColormap(colors, name='from_list', N=None) colorMap = cm.ScalarMappable(cmap=cmrgb) colorMap.set_clim(vmin=minDepth, vmax=maxDepth) colorArray = colorMap.to_rgba(npGrid, alpha=None, bytes=True) colorImage = Image.frombuffer( 'RGBA', (colorArray.shape[1], colorArray.shape[0]), colorArray, 'raw', 'RGBA', 0, 1) #Create hillshade a little darker as we are blending it. we do not need to invert as we are subtracting the shade from the color image npGrid = npGrid * shadeScale hs = sr.calcHillshade(npGrid, 1, 45, 5) img = Image.fromarray(hs).convert('RGBA') # now blend the two images img = ImageChops.subtract(colorImage, img).convert('RGB') if annotate: #rotate the image if the user requests this. It is a little better for viewing in a browser annotateWaterfall(img, navigation, isoStretchFactor) meanDepth = np.average(waterfall) waterfallPixelSize = (abs(rightExtent) + abs(rightExtent)) / img.width # print ("Mean Depth %.2f" % meanDepth) imgLegend = createLegend(filename, img.width, (abs(leftExtent) + abs(rightExtent)), distanceTravelled, waterfallPixelSize, minDepth, maxDepth, meanDepth, colorMap) img = spliceImages(img, imgLegend) if rotate: img = img.rotate(-90, expand=True) img.save(os.path.splitext(filename)[0] + '.png') print("Saved to: ", os.path.splitext(filename)[0] + '.png')
def main(): try: print('Inside of main') ip = TCP_IP.text # converting ip to text if ip == '': # making sure the ip isnt blank print('IP is blank') return else: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((ip, TCP_PORT)) s.listen(1) (conn, addr) = s.accept() #testing #put all in a while loop, always wait for a signal to take another photo and process again while (1): recv_data = 5 print('Infinite while') recv_data = ord(conn.recv(1)) print (recv_data) if (recv_data == 0): print ("Received Signal") camera.capture('dump.jpg') time.sleep(3) camera.capture('orig.jpg') time.sleep(3) camera.capture('update.jpg') #capture new image whenever there is a change img1 = Image.open('orig.jpg') img2 = Image.open('update.jpg') print ("Captured Images") toSend = img2.resize((400, 400), Image.ANTIALIAS) toSend.save('latest.png') img1 = img1.filter(ImageFilter.FIND_EDGES) img1.save('orig.jpg') img2 = img2.filter(ImageFilter.FIND_EDGES) img2.save('update.jpg') diff = ImageChops.subtract(img2, img1) diff.save('diff.jpg') diff = Image.open("diff.jpg").convert('1') black, white = diff.getcolors() print (black[0]) #number of black pixels) print (white[0]) #number of white pixels) status = "" if (white[0] < 3000): status = "relatively empty" elif (white[0] <5000): status = "kind of full" else: status = "full" print status files = {'file': open('latest.png','rb')} headers = {'Auth':'8spWsLd38ji08Tpc'} myData = {'status': status} rsp = requests.post('http://trambel.us/rooms/upload', files=files, data=myData, headers=headers) conn.send(chr(0)) print("sent data back") except KeyboardInterrupt: s.close() # Close socket connection
#val = int(abs((result[i]+8)) %256)
val = int(math.ceil(result[i]))
#print i,val
image.putpixel((i, val), (255, 255, 255))
for j in range(val):
    image.putpixel((i, j), (255, 255, 255, 128))

# The fun of the fractal is zooming in / repeating it is also a fractal,
# Just reusing the created square to fill a rectangle,
# Could be smarter by picking continuity or mirroring from an offset.
idx = len(result) - 1
for i in range(idx, h):
    val = int(math.ceil(result[idx]))
    idx = idx - 1
    #print i,val
    image.putpixel((i, val), (255, 255, 255))
    for j in range(val):
        image.putpixel((i, j), (255, 255, 255))

# The compositing for a simple bg
bg = Image.new('RGBA', (1920, 1080))
bg.paste((255, 120, 0, 255))
terrain = ImageChops.subtract(image, bg)
terrain.save('/tmp/terrain.jpg')
print 'Terrain saved as.. /tmp/terrain.jpg with resolution', image.size
# https://forum.omz-software.com/topic/4504/image-processing-using-objc
from PIL import Image, ImageOps, ImageFilter, ImageChops
import photos, clipboard, webbrowser

imo = clipboard.get_image(idx=0)
# make sure the working image is RGB (and always assigned)
img = imo if imo.mode == 'RGB' else imo.convert('RGB')
im1 = img.filter(ImageFilter.MaxFilter(size=9))
im2 = ImageChops.subtract(im1, img)
im3 = ImageOps.invert(im2)
im4 = im3.filter(ImageFilter.SHARPEN)
im5 = ImageOps.autocontrast(im4, cutoff=1)
clipboard.set_image(im5, jpeg_quality=1.0)
webbrowser.open('workflow://')
if __name__ == '__main__':
    image = Image.open('../lenna.png')
    image.show('Original')

    # Array size is inverted image size
    shape = (image.size[1], image.size[0], 3)

    # Create an image using numpy
    # Generate an array of ones and multiply all values by 100
    # This creates a grey-coloured image (all pixel values are (100, 100, 100))
    second_image_array = np.ones(shape, dtype=np.uint8) * 100
    second_image = Image.fromarray(second_image_array)

    # ImageChops.subtract clips the pixel intensity if it is below 0
    # E.g. image 1 has a pixel with intensity 100
    #      image 2 has a pixel with intensity 200
    #      100 - 200 = -100
    # However images are represented by 8-bit unsigned integers with numbers
    # ranging [0-255], so the values are clipped at 0
    # and 100 - 200 = 0, not -100
    subtract_image = ImageChops.subtract(image, second_image)
    subtract_image.show('Subtract')

    # ImageChops.subtract_modulo, on the other hand, does not clip the result
    # Using the example above it does the following:
    # (100 - 200) = x mod 256 or (100 - 200) % 256
    # So in our case the new value will be: (100 - 200) % 256 = 156
    subtract_modulo_image = ImageChops.subtract_modulo(image, second_image)
    subtract_modulo_image.show('Subtract modulo')
def celldim(im,thres,chan,thresw,chanw,thresn,chann,ima5x): # adjust raw image size to be multiple of four imc=im.crop([0,0,4*(im.size[0]/4),4*(im.size[1]/4)]) ima5xc=ima5x.crop([0,0,4*(ima5x.size[0]/4),4*(ima5x.size[1]/4)]) imr,img,imb=imc.split() # create thresholded image for strong expression detection if (chan[0:3]=='10x'): ima=ima5xc.point(lambda i: i-thres,"1") else: if chan=="red": ima=imr.point(lambda i: i-thres, "1") elif chan=="green": ima=img.point(lambda i: i-thres, "1") elif chan=="blue": ima=imb.point(lambda i: i-thres, "1") else: ima=imc.convert("L").point(lambda i: i-thres, "1") # create thresholded image for weak expression detection if (chanw[0:3]=='10x'): imaw=ima5xc.point(lambda i: i-240,"1") else: if chanw=="red": imaw=imr.point(lambda i: i-thresw, "1") elif chanw=="green": imaw=img.point(lambda i: i-thresw, "1") elif chanw=="blue": imaw=imb.point(lambda i: i-thresw, "1") else: imaw=imc.convert("L").point(lambda i: i-thresw, "1") # create thresholded image for non-expression detection if chann=="red": iman=imr.point(lambda i: i-thresn, "1") elif chann=="green": iman=img.point(lambda i: i-thresn, "1") elif chann=="blue": iman=imb.point(lambda i: i-thresn, "1") else: iman=imc.convert("L").point(lambda i: i-thresn, "1") del ima5xc ######################################################### # create masking array ggmask=ones((ima.size[1],ima.size[0])) # create mask image imamask=Image.new("1",ima.size,1) # prepare RGBA at 25% scale for geneatlas.org imcsmallr,imcsmallg,imcsmallb=imc.resize((imc.size[0]/4,imc.size[1]/4),Image.ANTIALIAS).split() imcsmalla=Image.new("L",imcsmallr.size,255) # create A-channel array for small rgba ggsmalla=ones((imcsmalla.size[1],imcsmalla.size[0]))+254 # create red,blue,green channels for big visual image ggbigr=zeros((im.size[1],im.size[0])) ggbigb=zeros((im.size[1],im.size[0])) ggbigg=zeros((im.size[1],im.size[0])) ######################################################## # set up array for dust detection #g=imaw.getdata() #gg=resize(g,(ima.size[1],ima.size[0])) #print "Step 0, Removing embedded dust particles" #dustg=img.getdata() #dustgg=resize(dustg,(img.size[1],img.size[0])) #dustb=imb.getdata() #dustbb=resize(dustb,(imb.size[1],imb.size[0])) #for x in range(3,ima.size[1]-3): # for y in range(3,ima.size[0]-3): # if (gg[x][y]==0): # if dustbb[x][y]<(dustgg[x][y]+9): # ggmask=maskspot(ggmask,x,y) #del dustg,dustgg,dustb,dustbb del imr,img,imb ###################################################### # mask out image and get new array #imamask.putdata(resize(ggmask,(1,ima.size[0]*ima.size[1]))[0]) #ima=ImageChops.lighter(ima,ImageChops.invert(imamask).point(lambda i: i-254)) g=ima.getdata() gg=resize(g,(ima.size[1],ima.size[0])) print "Step 1, Detecting Cells filled with Gene Expression" for x in range(3,ima.size[1]-3): for y in range(3,ima.size[0]-3): if (gg[x][y]==0): if (gg[x+1][y]==0): if (gg[x-1][y]==0): if (gg[x][y+1]==0): if (gg[x+1][y+1]==0): if (gg[x-1][y+1]==0): if (gg[x][y-1]==0)&(gg[x+1][y-1]==0)&(gg[x-1][y-1]==0): ggmask=maskspot(ggmask,x,y) ggsmalla[(x+2)/4][(y+2)/4]=253 ggbigr=markspot(ggbigr,x,y) ####################################################### # mask out image and get new array imamask.putdata(resize(ggmask,(1,ima.size[0]*ima.size[1]))[0]) ima=ImageChops.lighter(ima,ImageChops.invert(imamask).point(lambda i: i-254)) g=ima.getdata() gg=resize(g,(ima.size[1],ima.size[0])) print "Step 2, Detecting Cells partially filled with Gene Expression" for x in range(3,ima.size[1]-3): for y in range(3,ima.size[0]-3): if (gg[x][y]==0): 
if (gg[x+1][y]==0): if (gg[x][y+1]==0): if (gg[x+1][y+1]==0): ggmask=maskspot(ggmask,x,y) if ggsmalla[(x+2)/4][(y+2)/4]==255: ggsmalla[(x+2)/4][(y+2)/4]=252 ggbigb=markspot(ggbigb,x,y) ######################################################## # mask out image and get new array imamask.putdata(resize(ggmask,(1,ima.size[0]*ima.size[1]))[0]) imaw=ImageChops.lighter(imaw,ImageChops.invert(imamask).point(lambda i: i-254)) g=imaw.getdata() gg=resize(g,(ima.size[1],ima.size[0])) print "Step 3, Detecting Cells with scattered Gene Expression" for x in range(3,ima.size[1]-4): for y in range(3,ima.size[0]-4): if (gg[x][y]==0): if (gg[x+2][y+2]==0): ggmask=maskspot(ggmask,x+1,y+1) if ggsmalla[(x+2+1)/4][(y+2+1)/4]==255: ggsmalla[(x+2+1)/4][(y+2+1)/4]=251 ggbigg=markspot(ggbigg,x+1,y+1) gg=maskspot2(gg,x+1,y+1) elif (gg[x+2][y]==0)|(gg[x+2][y+1]==0): ggmask=maskspot(ggmask,x+1,y) if ggsmalla[(x+2+1)/4][(y+2)/4]==255: ggsmalla[(x+2+1)/4][(y+2)/4]=251 ggbigg=markspot(ggbigg,x+1,y) gg=maskspot2(gg,x+1,y) elif (gg[x][y+2]==0)|(gg[x+1][y+2]==0): ggmask=maskspot(ggmask,x,y+1) if ggsmalla[(x+2)/4][(y+2+1)/4]==255: ggsmalla[(x+2)/4][(y+2+1)/4]=251 ggbigg=markspot(ggbigg,x,y+1) gg=maskspot2(gg,x,y+1) else: ggmask=maskspot(ggmask,x,y) if ggsmalla[(x+2)/4][(y+2)/4]==255: ggsmalla[(x+2)/4][(y+2)/4]=251 ggbigg=markspot(ggbigg,x,y) gg=maskspot2(gg,x,y) ######################################################## # compute display picture imr,img,imb=im.split() imr.putdata(resize(ggbigr,(1,im.size[0]*im.size[1]))[0]) img.putdata(resize(ggbigg,(1,im.size[0]*im.size[1]))[0]) imb.putdata(resize(ggbigb,(1,im.size[0]*im.size[1]))[0]) del ggbigr,ggbigg,ggbigb immask=ImageChops.invert(ImageChops.lighter(imb,ImageChops.lighter(imr,img))) im2=ImageChops.darker(im,Image.merge("RGB",(immask,immask,immask))) imbnew=ImageChops.subtract(imb,imr) imgnew=ImageChops.subtract(ImageChops.subtract(img,imr),imbnew) imrnew=ImageChops.add(imgnew,imr) imview=ImageChops.lighter(im2,Image.merge("RGB",(imrnew,imgnew,imbnew))) del imrnew,imgnew,imbnew,im2,immask, imr, img, imb ######################################################## imamask.putdata(resize(ggmask,(1,ima.size[0]*ima.size[1]))[0]) iman=ImageChops.lighter(iman,ImageChops.invert(imamask).point(lambda i: i-254)) g=iman.getdata() gg=resize(g,(ima.size[1],ima.size[0])) print "Step 4, Detecting Cells without Gene Expression" for x in range(3,ima.size[1]-4): for y in range(3,ima.size[0]-4): if (gg[x][y]==0): if (gg[x+2][y+2]==0): ggmask=maskspot(ggmask,x+1,y+1) if ggsmalla[(x+2+1)/4][(y+2+1)/4]==255: ggsmalla[(x+2+1)/4][(y+2+1)/4]=250 gg=maskspot2(gg,x+1,y+1) elif (gg[x+2][y]==0)|(gg[x+2][y+1]==0): ggmask=maskspot(ggmask,x+1,y) if ggsmalla[(x+2+1)/4][(y+2)/4]==255: ggsmalla[(x+2+1)/4][(y+2)/4]=250 gg=maskspot2(gg,x+1,y) elif (gg[x][y+2]==0)|(gg[x+1][y+2]==0): ggmask=maskspot(ggmask,x,y+1) if ggsmalla[(x+2)/4][(y+2+1)/4]==255: ggsmalla[(x+2)/4][(y+2+1)/4]=250 gg=maskspot2(gg,x,y+1) else: ggmask=maskspot(ggmask,x,y) if ggsmalla[(x+2)/4][(y+2)/4]==255: ggsmalla[(x+2)/4][(y+2)/4]=250 gg=maskspot2(gg,x,y) ######################################################## # assemble RGBA imcsmalla.putdata(resize(ggsmalla,(1,imcsmalla.size[0]*imcsmalla.size[1]))[0]) imcrgba=Image.merge("RGBA",(imcsmallr,imcsmallg,imcsmallb,imcsmalla)) # insert header imcrgba.putpixel((0,0),(0,0,0,1)) return imview,imcrgba