def test_1(self, num):
    """ Randomly selects as many points as the parameter num to be bright
    pixels and tests whether they can be identified or not. """
    test_list = []
    for r in range(num):
        test_list.append((int(np.random.randint(0, 64, size=1) / 2) * 2,
                          int(np.random.randint(0, 128, size=1) / 2) * 2))
    RST = 24
    # 128x64 display with hardware I2C:
    disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST)
    # Initialize library.
    disp.begin()
    # Clear display.
    disp.clear()
    disp.display()
    # Create blank image for drawing.
    # Make sure to create image with mode '1' for 1-bit color.
    self.width = disp.width
    self.height = disp.height
    image_1 = Image.new('1', (self.width, self.height))
    # Get drawing object to draw on image.
    draw = ImageDraw.Draw(image_1)
    # Draw a black filled box to clear the image.
    draw.rectangle((0, 0, self.width, self.height), outline=0, fill=0)
    for point in test_list:
        draw.point([(point[1], point[0])], fill=255)
    disp.image(image_1)
    disp.display()
    # Now get the pixel data
    camera = picamera.PiCamera()
    camera.resolution = (2592, 1944)
    camera.start_preview()
    camera.led = False
    time.sleep(1)
    camera.capture('test.jpg')
    camera.stop_preview()
    image = 'test.jpg'
    self.crop(image, (1020, 620, 1800, 1050), 'test_crop.jpg')
    image = 'test_crop.jpg'
    img = Image.open(image)
    pixels = list(img.getdata())
    pixels2 = []
    for pixel in pixels:
        total = 0
        for x in pixel:
            total += x
        total = int(total / 3)
        pixels2.append((total, total, total))
    filtered_list = self.filter(150, pixels2)
    img = Image.new('RGB', Image.open('test_crop.jpg').size)
    img.putdata(filtered_list)
    img.save('test_filter.jpg')
    img = cv2.imread('test_filter.jpg')
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.erode(gray, None, iterations=1)
    result_list = []
    for i in range(64):
        for j in range(128):
            if not (gray[int(self.all_points_array[i][j][0])][int(
                    self.all_points_array[i][j][1])] == 0):
                result_list.append((i, j))
    print('test_value', test_list, 'result_value', result_list)
    camera.close()
def single_avi(args):
    if args.timestamp:
        print(os.path.join(script_dir, 'Helvetica.ttf'))
        font = ImageFont.truetype(args.font, args.ts)
    for i, fn in enumerate(args.cines):
        fn = fn.strip()
        frame_slice = slice(None)
        if '[' in fn:
            if fn[-1] == ']':
                fn, s = fn.split('[')
                try:
                    frame_slice = slice(*map(noneint, s[:-1].split(':')))
                except:
                    raise ValueError(
                        "Couldn't convert '[%s' to slice notation" % s)
            else:
                print "Warning, found '[' in input, but it didn't end with ']', so I'll assume you didn't mean to give a frame range."
        base, ext = os.path.splitext(fn)
        ext = ext.lower()
        if not os.path.exists(fn):
            print "File %s not found, ignoring." % fn
            continue
        output = args.output
        if '%s' in args.output:
            output = output % base
        elif '%' in args.output:
            output = output % i
        bpp = None
        if ext in ('.cin', '.cine'):
            inp = cine.Cine(fn)
            bpp = inp.real_bpp
            if bpp < 8 or bpp > 16:
                bpp = None  # Just in case
            td = args.td if args.td else int(ceil(log10(inp.frame_rate)))
            # frame_text = lambda i: 't: %%.%df s' % td % (i/float(input.frame_rate))
            # frame_text = lambda i: 't: %%.%d s' % (inp.get_time(i))
            t0 = 0.
            frame_text = lambda i: 't: %%.%df s, ' % td % (inp.get_time(i) - t0) + \
                'Dt: %f ms' % (round(
                    ((inp.get_time(i) - inp.get_time(i - 1)) * 1000) * 10) / 10)  # 't: %f s \n Dt : ms' %
        elif ext in ('.tif', '.tiff'):
            inp = tiff.Tiff(fn)
            frame_text = lambda i: str(i)
        bpps = inp[0].dtype.itemsize * 8
        if bpp is None:
            bpp = bpps
        frames = range(*frame_slice.indices(len(inp)))
        if args.clip == 0:
            map = linspace(0., 2.**(bpps - bpp), 2**bpps)
        else:
            counts = 0
            bins = arange(2**bpps + 1)
            for i in frames[::args.hist_skip]:
                c, b = histogram(inp[i], bins)
                counts += c
            counts = counts.astype('d') / counts.sum()
            counts = counts.cumsum()
            bottom_clip = where(counts > args.clip)[0]
            if not len(bottom_clip):
                bottom_clip = 0
            else:
                bottom_clip = bottom_clip[0]
            top_clip = where(counts < (1 - args.clip))[0]
            if not len(top_clip):
                top_clip = 2**bpps
            else:
                top_clip = top_clip[-1]
            #print bottom_clip, top_clip
            #import pylab
            #pylab.plot(counts)
            #pylab.show()
            #sys.exit()
            m = 1. / (top_clip - bottom_clip)
            map = clip(-m * bottom_clip + m * arange(2**bpps, dtype='f'), 0, 1)
        map = map**(1. / args.gamma)
        map = clip(map * 255, 0, 255).astype('u1')
        #print '%s -> %s' % (fn, output)
        ofn = output
        output = mjpeg.Avi(output, framerate=args.framerate, quality=args.quality)
        if args.rect is not None:
            rect = [int(i) for i in args.rect[1:-1].split(':')]
            print(rect)
        #print frames
        for i in StatusPrinter(frames, os.path.basename(ofn)):
            frame = inp[i]
            if args.rotate:
                frame = rot90(frame, (args.rotate % 360) // 90)
            frame = map[frame]
            if args.rect == None:
                frame = asarray(frame)
            else:
                frame = asarray(frame[rect[0]:rect[1], rect[2]:rect[3]])
            if args.timestamp:
                frame = Image.fromarray(frame)
                draw = ImageDraw.Draw(frame)
                draw.text((args.tx, args.ty), frame_text(i),
                          font=font, fill=args.tb)
                frame = asarray(frame)
                # print(type(frame))
            output.add_frame(frame)
        output.close()
def clear(self, color=255):
    self.image = Image.new('1', (self.width, self.height), color)
    self.draw = ImageDraw.Draw(self.image)
    self.lastY = 0
def addLine(self, acolor, x1, y1, x2, y2):
    d = ImageDraw.Draw(self.image)
    d.line([(x1, y1), (x2, y2)], fill=acolor.getRGB())
def print_bitmap(self, pixels, w, h, output_png=False):
    """ Best to use images that have a pixel width of 384, as this corresponds
    to the printer row width.

    pixels = a pixel array. RGBA, RGB, or one-channel plain list of values (ranging from 0-255).
    w = width of image
    h = height of image

    If "output_png" is set, a "print-output.png" is written to the same folder
    using the same thresholds as the actual printing commands. Useful for
    seeing if there are problems with the original image (this requires PIL).

    Example code with PIL:
        import Image, ImageDraw
        i = Image.open("lammas_grayscale-bw.png")
        data = list(i.getdata())
        w, h = i.size
        p.print_bitmap(data, w, h)
    """
    counter = 0
    if output_png:
        import Image, ImageDraw
        test_img = Image.new('RGB', (384, h))
        draw = ImageDraw.Draw(test_img)

    self.linefeed()

    black_and_white_pixels = self.convert_pixel_array_to_binary(pixels, w, h)
    print_bytes = []

    # read the bytes into an array
    for rowStart in xrange(0, h, 256):
        chunkHeight = 255 if (h - rowStart) > 255 else h - rowStart
        print_bytes += (18, 42, chunkHeight, 48)

        for i in xrange(0, 48 * chunkHeight, 1):
            # read one byte in
            byt = 0
            for xx in xrange(8):
                pixel_value = black_and_white_pixels[counter]
                counter += 1
                # check if this is black
                if pixel_value == 0:
                    byt += 1 << (7 - xx)
                    if output_png:
                        draw.point((counter % 384, round(counter / 384)),
                                   fill=(0, 0, 0))
                # it's white
                else:
                    if output_png:
                        draw.point((counter % 384, round(counter / 384)),
                                   fill=(255, 255, 255))

            print_bytes.append(byt)

    # output the array all at once to the printer
    # might be better to send while printing when dealing with
    # very large arrays...
    for b in print_bytes:
        self.printer.write(chr(b))

    if output_png:
        test_print = open('print-output.png', 'wb')
        test_img.save(test_print, 'PNG')
        print "output saved to %s" % test_print.name
        test_print.close()
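# Quick standalone illustration of the bit-packing used in print_bitmap above:
# eight binarised pixels are packed MSB-first into one byte, a 0 (black) pixel
# setting its bit. The sample values are invented for illustration and this
# sketch is independent of the printer class.
pixels8 = [0, 255, 0, 0, 255, 255, 0, 255]
byt = 0
for xx in range(8):
    if pixels8[xx] == 0:
        byt += 1 << (7 - xx)
print(format(byt, '08b'))  # -> '10110010'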
def randLine(self, num):
    draw = ImageDraw.Draw(self.image)
    for i in range(0, num):
        draw.line([self.randPoint(), self.randPoint()], self.randRGB())
    del draw
def addOval(self, acolor, x, y, w, h):
    d = ImageDraw.Draw(self.image)
    d.ellipse([(x, y), (x + w, y + h)], outline=acolor.getRGB())
# PIL Image module (create or load images) is explained here:
# http://effbot.org/imagingbook/image.htm
# PIL ImageDraw module (draw shapes to images) explained here:
# http://effbot.org/imagingbook/imagedraw.htm
import Image
import ImageDraw
import time
from rgbmatrix import Adafruit_RGBmatrix

# Rows and chain length are both required parameters:
matrix = Adafruit_RGBmatrix(32, 1)

# Bitmap example w/graphics prims
image = Image.new("1", (32, 32))  # Can be larger than matrix if wanted!!
draw = ImageDraw.Draw(image)  # Declare Draw instance before prims
# Draw some shapes into image (no immediate effect on matrix)...
draw.rectangle((0, 0, 31, 31), fill=0, outline=1)
draw.line((0, 0, 31, 31), fill=1)
draw.line((0, 31, 31, 0), fill=1)
# Then scroll image across matrix...
for n in range(-32, 33):  # Start off top-left, move off bottom-right
    matrix.Clear()
    # IMPORTANT: *MUST* pass image ID, *NOT* image object!
    matrix.SetImage(image.im.id, n, n)
    time.sleep(0.05)

# 8-bit paletted GIF scrolling example
image = Image.open("cloud.gif")
image.load()  # Must do this before SetImage() calls
matrix.Fill(0x6F85FF)  # Fill screen to sky color
    Field('tg_descr', length=17),
    Field('tg_type', length=3, default='sip'),
    Field('color', length=16, default='black'),
    Field('alarm', 'integer', default=90),
    Field('alarm_email', length=20),
    Field('alarm_call_number', length=32),
)

h = height - 20
for g in range(1, 7):
    if os.path.isfile(
            '/home/www-data/web2py/applications/tgmonitor/static/group' +
            str(g) + '.png') == False:
        im = Image.new('RGB', (width, height), (255, 255, 255))
        draw = ImageDraw.Draw(im)
        for i in range(11):
            draw.line((20, h / 10 * i, width - 20, h / 10 * i), fill='black')
            draw.text((4, h / 10 * i), str(abs(i - 10)) + '0%', fill='black')
        for i in range(24):
            draw.text((30 + i * 60 / 2, h + 5), str(i), fill='black')
        im.save('/home/www-data/web2py/applications/tgmonitor/static/group' +
                str(g) + '.png')

#im = Image.open('/home/www-data/web2py/applications/tgmonitor/static/diagramm.png')
#draw = ImageDraw.Draw(im)

host = '10.200.66.70'
port = '6000'
tn = telnetlib.Telnet(host, port)
tn.write('LGI:op="monitor",PWD ="dspoftk",SER="10.200.11.20---O&M System";')
def __init__(self, clust_data, labels=None, bsize=10, tree_space=200):
    self.space = tree_space
    colours = ['blue', 'green', 'red', 'cyan', 'magenta', 'brown', 'orange']
    self.colour_map = self._init_colours(
        colours, [x.cluster_id for x in clust_data.datapoints])
    if labels is None:
        labels = [
            clust_data.datapoints[x].sample_id
            for x in clust_data.reorder_indices
        ]
    try:
        # Copyright (c) 1987 Adobe Systems, Inc., Portions Copyright 1988 Digital Equipment Corp.
        self.font = ImageFont.load('courR08.pil')
    except IOError:
        self.font = None
    if len(clust_data.consensus_matrix) != len(labels):
        raise ValueError, "Number of columns and column label arrays have different lengths!"
    Hmap.__init__(self, clust_data.consensus_matrix, bsize=bsize)  # Creates image in self.im if HMAP_ENABLED
    if self.im is not None:
        old_draw = ImageDraw.Draw(self.im)
        self.max_textsize = 0
        for label in labels:
            self.max_textsize = max(
                self.max_textsize,
                old_draw.textsize(label, font=self.font)[0])
        del old_draw  # Keep GC from keeping the old image around
        if clust_data.tree is None:
            self.space = self.max_textsize + 5
        # Prepare
        newsize = (self.im.size[1] + self.space, self.im.size[0])  # To hold our rotated copy and some text
        im = Image.new('RGBA', newsize, 'white')  # Trick to make vertical text when we're done, and add tree space
        im.paste(self.im.rotate(-90), (0, 0, self.im.size[1], self.im.size[0]))
        self.im = im
        self.draw = ImageDraw.Draw(self.im)
        # Actual work
        self._add_cluster_labels(labels)
        if clust_data.tree is not None:
            self._draw_dendogram(clust_data.tree)
        # Finish
        self.im = self.im.rotate(90)
def generate(self, height=None, width=None, text=None, label=None):
    """ initialize the height and width of the user text block """
    if not height:
        HEIGHT = self.HEIGHT
    else:
        HEIGHT = height
    if not width:
        WIDTH = self.WIDTH
    else:
        WIDTH = width
    # calculate the square of the user block in pixels
    self.BLOCK_SQUARE = WIDTH * HEIGHT
    BOX_W = self.BOX_W
    BORDER = self.BORDER
    qr = qrcode.QRCode(
        version=1,
        error_correction=qrcode.constants.ERROR_CORRECT_H,
        box_size=BOX_W,
        border=BORDER,
    )
    # check the passed text
    if not text:
        text = self.TEXT
    # add the text to qr code
    qr.add_data(text)
    qr.make(fit=True)
    img = qr.make_image()
    pil_img = img._img
    img_w, img_h = pil_img.size
    # calculate the square of the whole image in pixels
    self.IMAGE_SQUARE = (img_h - BORDER * BOX_W) * (img_w - BORDER * BOX_W)
    # PIL Draw initial.
    draw = ImageDraw.Draw(pil_img)
    # quantity of all cubes (8*8px) in one line
    cubes = img_w / BOX_W
    left = (cubes / 2 - WIDTH / 2) * BOX_W
    top = (cubes / 2 - HEIGHT / 2) * BOX_W
    right = (cubes / 2 + WIDTH / 2) * BOX_W
    bottom = (cubes / 2 + HEIGHT / 2) * BOX_W
    draw.rectangle([left, top, right, bottom], fill=255)
    draw.rectangle([left, top, right, bottom])
    if label:
        left_text = left + len(label) / 2 * BOX_W  # text left offset
        top_text = top + HEIGHT / 3 * BOX_W  # text top offset
        f = ImageFont.load_default()  # font settings
        draw.text((left_text, top_text), label, font=f)  # draw text
    pil_img.show()
    self.check_text(text)
    self.decode(pil_img)
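# Hypothetical usage of generate() above: `qr_gen` is assumed to be an instance
# of the class this method belongs to, and the block size, text and label values
# are illustrative only.
qr_gen.generate(height=4, width=10, text='http://example.com', label='SCAN ME')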
def read_input_stream(self, input, weights):
    """this method is there to read the input and led display data passed onto the neuron
    input - whether led is on or off
    weights - led display data
    """
    camera = picamera.PiCamera()
    camera.resolution = (2592, 1944)
    led = LED(21)
    red_avg = []
    image = 'input.jpg'
    RST = 24
    disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST)
    # Initialize library.
    disp.begin()
    # Clear display.
    disp.clear()
    disp.display()
    self.width = disp.width
    self.height = disp.height
    read_weights = np.array(np.zeros(len(input) * 64 * 128))
    read_weights.shape = (len(input), 64, 128)
    # done initialising
    for i in range(len(input)):
        if input[i] == 1:
            led.on()
        else:
            led.off()
        image_1 = Image.new('1', (self.width, self.height))
        # Get drawing object to draw on image.
        draw = ImageDraw.Draw(image_1)
        # Draw a black filled box to clear the image.
        draw.rectangle((0, 0, self.width, self.height), outline=0, fill=0)
        for j in range(0, self.height):
            for k in range(0, self.width):
                if int(weights[i][j][k]) == 1:
                    draw.point([(k, j)], fill=255)  # x,y
        disp.image(image_1)
        disp.display()
        # drawing and display done for one neuron
        print('cheese')
        camera.capture(image)
        # Now get the pixel data
        image_obj = Image.open(image)
        img = image_obj.crop((1020, 620, 1800, 1050))
        pixels = list(img.getdata())
        blue = []
        for point in pixels:
            blue.append((point[2], point[2], point[2]))
        # blue filter
        filtered_list = self.filter(120, blue)
        # use the cropped size so the filtered pixels line up with all_points_array
        img = Image.new('RGB', img.size)
        img.putdata(filtered_list)
        img.save('input_filter.jpg')
        img = cv2.imread('input_filter.jpg')
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.erode(gray, None, iterations=1)
        for j in range(64):
            for k in range(128):
                if gray[int(self.all_points_array[j][k][0])][int(
                        self.all_points_array[j][k][1])] == 0:
                    read_weights[i][j][k] = 0
                else:
                    read_weights[i][j][k] = 1
    errors = 0
    for i in range(len(input)):
        for j in range(64):
            for k in range(128):
                if abs(read_weights[i][j][k] - weights[i][j][k]) > .1:
                    errors += 1
    print('num of errors ' + str(errors))
    camera.close()
def read_with_led(self, num):
    """ checks whether the display can be read or not when the led is on """
    led = LED(21)
    camera = picamera.PiCamera()
    camera.resolution = (2592, 1944)
    camera.start_preview()
    led.on()
    test_list = []
    for r in range(num):
        test_list.append((int(np.random.randint(0, 64, size=1) / 2) * 2,
                          int(np.random.randint(0, 128, size=1) / 2) * 2))
    # randomly lighting up a point
    RST = 24
    # 128x64 display with hardware I2C:
    disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST)
    # Initialize library.
    disp.begin()
    # Clear display.
    disp.clear()
    disp.display()
    self.width = disp.width
    self.height = disp.height
    image_1 = Image.new('1', (self.width, self.height))
    # Get drawing object to draw on image.
    draw = ImageDraw.Draw(image_1)
    # Draw a black filled box to clear the image.
    draw.rectangle((0, 0, self.width, self.height), outline=0, fill=0)
    for point in test_list:
        draw.point([(point[1], point[0])], fill=255)
    disp.image(image_1)
    disp.display()
    # Now get the pixel data
    image = 'led.jpg'
    camera.capture(image)
    camera.stop_preview()
    self.crop(image, (1020, 620, 1800, 1050), 'led_crop.jpg')
    image = 'led_crop.jpg'
    img = Image.open(image)
    pixels = list(img.getdata())
    red = []
    for point in pixels:
        red.append((point[0], point[0], point[0]))
    img = Image.new('RGB', Image.open('led_crop.jpg').size)
    img.putdata(red)
    img.save('led_crop_red.jpg')
    blue = []
    for point in pixels:
        blue.append((point[2], point[2], point[2]))
    img = Image.new('RGB', Image.open('led_crop.jpg').size)
    img.putdata(blue)
    img.save('led_crop_blue.jpg')
    image = 'led_crop_blue.jpg'
    img = Image.open(image)
    pixels = list(img.getdata())
    filtered_list = self.filter(120, pixels)
    img = Image.new('RGB', Image.open(image).size)
    img.putdata(filtered_list)
    img.save('led_blue_filter.jpg')
    img = cv2.imread('led_blue_filter.jpg')
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.erode(gray, None, iterations=1)
    result_list = []
    for i in range(64):
        for j in range(128):
            if not (gray[int(self.all_points_array[i][j][0])][int(
                    self.all_points_array[i][j][1])] == 0):
                result_list.append((i, j))
    print('test_value', test_list, 'result_value', result_list)
    camera.close()
def start(self):
    """
    To initialise self.all_points_array this method follows a general
    strategy: first light a grid of alternating pixels in the centre, then the
    last row and the last column, find the camera-space centre of each lit
    pixel, and interpolate to find the centre of every remaining pixel.
    """
    # Raspberry Pi pin configuration:
    RST = 24
    # 128x64 display with hardware I2C:
    disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST)
    disp.begin()
    disp.clear()
    disp.display()
    self.width = disp.width
    self.height = disp.height
    image_1 = Image.new('1', (self.width, self.height))
    # Get drawing object to draw on image.
    draw = ImageDraw.Draw(image_1)
    # Draw a black filled box to clear the image.
    draw.rectangle((0, 0, self.width, self.height), outline=0, fill=0)
    # drawing a grid that is separated by 4 units in the vertical and 2 units in the horizontal
    for i in range(0, self.height, 4):
        for j in range(0, self.width, 2):
            draw.point([(j, i)], fill=255)  # x,y
    disp.image(image_1)
    disp.display()
    # made the display needed to find the points
    # Now get the pixel data
    camera = picamera.PiCamera()
    camera.resolution = (2592, 1944)
    camera.start_preview()
    camera.led = False
    time.sleep(2)
    camera.capture('full_multi_point_2.jpg')
    camera.stop_preview()
    # taking a picture of the display
    image = 'full_multi_point_2.jpg'
    self.crop(image, (1020, 620, 1800, 1050), 'full_multi_crop_2.jpg')
    image = 'full_multi_crop_2.jpg'
    img = Image.open(image)
    pixels = list(img.getdata())
    pixels2 = []
    # this part is kinda redundant now
    for pixel in pixels:
        total = 0
        for x in pixel:
            total += x
        total = int(total / 3)
        pixels2.append((total, total, total))
    filtered_list = self.filter(140, pixels2)
    # cropped and filtered that image
    img = Image.new('RGB', Image.open('full_multi_crop_2.jpg').size)
    img.putdata(filtered_list)
    img.save('full_multi_filter_2.jpg')
    image = cv2.imread('full_multi_filter_2.jpg')
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.erode(gray, None, iterations=1)
    new_image = Image.fromarray(gray)
    new_image.save('cv_proc_mult.png')
    labels = measure.label(gray, neighbors=8, background=0)
    mask = np.zeros(gray.shape, dtype="uint8")
    # loop over the unique components
    for label in np.unique(labels):
        # if this is the background label, ignore it
        if label == 0:
            continue
        labelMask = np.zeros(gray.shape, dtype="uint8")
        labelMask[labels == label] = 255
        mask = cv2.add(mask, labelMask)
    pixel_point = []
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    cnts = contours.sort_contours(cnts)[0]
    # loop over the contours
    # pixel_point is going to contain the coords of all the bright pixels
    for (i, c) in enumerate(cnts):
        ((cX, cY), radius) = cv2.minEnclosingCircle(c)
        pixel_point.append((int(cY), int(cX)))
    # found the center points of the pixels
    # going to arrange all the bright pixels according to their vertical coords
    pixel_point_sort = sorted(pixel_point, key=lambda y: y[0])
    new_pixel_point = []
    for i in range(16):
        new_pixel_point.append(
            sorted(pixel_point_sort[64 * i:64 * (i + 1)], key=lambda z: z[1]))
    new_pixel_array = np.array([y for x in new_pixel_point for y in x])
    new_pixel_array.shape = (16, 64, 2)
    # new_pixel_array contains all the bright pixels in an arranged way
    self.all_points_array = np.array(np.zeros(64 * 128 * 2))
    self.all_points_array.shape = (64, 128, 2)
    for i in range(0, 16):
        for j in range(0, 64):
            self.all_points_array[60 - 4 * i][126 - 2 * j][0] = new_pixel_array[i][j][0]
            self.all_points_array[60 - 4 * i][126 - 2 * j][1] = new_pixel_array[i][j][1]
    # finding the coords of the bright pixels
    for i in range(0, 61, 4):
        for j in range(1, 127, 2):
            if j % 2 == 1:
                self.all_points_array[i][j][0] = int(
                    (self.all_points_array[i][j - 1][0] +
                     self.all_points_array[i][j + 1][0]) / 2)
                self.all_points_array[i][j][1] = int(
                    (self.all_points_array[i][j - 1][1] +
                     self.all_points_array[i][j + 1][1]) / 2)
    # interpolation
    for i in range(61):
        for j in range(127):
            if i % 4 == 1:
                self.all_points_array[i][j][0] = int(
                    (3 * self.all_points_array[i - 1][j][0] +
                     self.all_points_array[i + 3][j][0]) / 4)
                self.all_points_array[i][j][1] = int(
                    (3 * self.all_points_array[i - 1][j][1] +
                     self.all_points_array[i + 3][j][1]) / 4)
            elif i % 4 == 2:
                self.all_points_array[i][j][0] = int(
                    (2 * self.all_points_array[i - 2][j][0] +
                     2 * self.all_points_array[i + 2][j][0]) / 4)
                self.all_points_array[i][j][1] = int(
                    (2 * self.all_points_array[i - 2][j][1] +
                     2 * self.all_points_array[i + 2][j][1]) / 4)
            elif i % 4 == 3:
                self.all_points_array[i][j][0] = int(
                    (self.all_points_array[i - 3][j][0] +
                     3 * self.all_points_array[i + 1][j][0]) / 4)
                self.all_points_array[i][j][1] = int(
                    (self.all_points_array[i - 3][j][1] +
                     3 * self.all_points_array[i + 1][j][1]) / 4)
    # Draw a black filled box to clear the image.
    draw.rectangle((0, 0, self.width, self.height), outline=0, fill=0)
    # now drawing the last row
    i = 63
    for j in range(0, self.width, 2):
        draw.point([(j, i)], fill=255)  # x,y
    disp.image(image_1)
    disp.display()
    # Now get the pixel data
    camera.start_preview()
    camera.led = False
    time.sleep(2)
    camera.capture('last_row_disp.jpg')
    camera.stop_preview()
    image = 'last_row_disp.jpg'
    self.crop(image, (1020, 620, 1800, 1050), 'last_row_crop.jpg')
    image = 'last_row_crop.jpg'
    img = Image.open(image)
    pixels = list(img.getdata())
    pixels2 = []
    for pixel in pixels:
        total = 0
        for x in pixel:
            total += x
        total = int(total / 3)
        pixels2.append((total, total, total))
    filtered_list = self.filter(140, pixels2)
    img = Image.new('RGB', Image.open(image).size)
    img.putdata(filtered_list)
    img.save('last_row_filter.jpg')
    image = cv2.imread('last_row_filter.jpg')
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.erode(gray, None, iterations=1)
    labels = measure.label(gray, neighbors=8, background=0)
    mask = np.zeros(gray.shape, dtype="uint8")
    # loop over the unique components
    for label in np.unique(labels):
        # if this is the background label, ignore it
        if label == 0:
            continue
        labelMask = np.zeros(gray.shape, dtype="uint8")
        labelMask[labels == label] = 255
        mask = cv2.add(mask, labelMask)
    pixel_point = []
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    cnts = contours.sort_contours(cnts)[0]
    # loop over the contours
    for (i, c) in enumerate(cnts):
        ((cX, cY), radius) = cv2.minEnclosingCircle(c)
        pixel_point.append((int(cY), int(cX)))
    # found the center points
    pixel_point_sort = sorted(pixel_point, key=lambda z: z[1])
    # arranging and interpolation
    for j in range(64):
        self.all_points_array[63][126 - 2 * j][0] = pixel_point_sort[j][0]
        self.all_points_array[63][126 - 2 * j][1] = pixel_point_sort[j][1]
        self.all_points_array[62][126 - 2 * j][0] = (
            2 * pixel_point_sort[j][0] +
            self.all_points_array[60][126 - 2 * j][0]) / 3
        self.all_points_array[62][126 - 2 * j][1] = (
            2 * pixel_point_sort[j][1] +
            self.all_points_array[60][126 - 2 * j][1]) / 3
        self.all_points_array[61][126 - 2 * j][0] = (
            pixel_point_sort[j][0] +
            2 * self.all_points_array[60][126 - 2 * j][0]) / 3
        self.all_points_array[61][126 - 2 * j][1] = (
            pixel_point_sort[j][1] +
            2 * self.all_points_array[60][126 - 2 * j][1]) / 3
    draw.rectangle((0, 0, self.width, self.height), outline=0, fill=0)
    for i in [61, 62, 63]:
        for j in range(126):
            if j % 2 == 1:
                self.all_points_array[i][j][1] = (
                    self.all_points_array[i][j - 1][1] +
                    self.all_points_array[i][j + 1][1]) / 2
                self.all_points_array[i][j][0] = (
                    self.all_points_array[i][j - 1][0] +
                    self.all_points_array[i][j + 1][0]) / 2
    # now drawing the last column
    j = 127
    for i in range(0, self.width, 3):
        draw.point([(j, i)], fill=255)  # x,y
    disp.image(image_1)
    disp.display()
    # Now get the pixel data
    camera.start_preview()
    camera.led = False
    time.sleep(2)
    camera.capture('last_col_disp.jpg')
    camera.stop_preview()
    image = 'last_col_disp.jpg'
    self.crop(image, (1020, 620, 1800, 1050), 'last_col_crop.jpg')
    image = 'last_col_crop.jpg'
    img = Image.open(image)
    pixels = list(img.getdata())
    pixels2 = []
    for pixel in pixels:
        total = 0
        for x in pixel:
            total += x
        total = int(total / 3)
        pixels2.append((total, total, total))
    filtered_list = self.filter(140, pixels2)
    img = Image.new('RGB', Image.open(image).size)
    img.putdata(filtered_list)
    img.save('last_col_filter.jpg')
    image = cv2.imread('last_col_filter.jpg')
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.erode(gray, None, iterations=1)
    labels = measure.label(gray, neighbors=8, background=0)
    mask = np.zeros(gray.shape, dtype="uint8")
    # loop over the unique components
    for label in np.unique(labels):
        # if this is the background label, ignore it
        if label == 0:
            continue
        labelMask = np.zeros(gray.shape, dtype="uint8")
        labelMask[labels == label] = 255
        mask = cv2.add(mask, labelMask)
    pixel_point = []
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    cnts = contours.sort_contours(cnts)[0]
    # loop over the contours
    for (i, c) in enumerate(cnts):
        ((cX, cY), radius) = cv2.minEnclosingCircle(c)
        pixel_point.append((int(cY), int(cX)))
    # found the center points and then arranging and interpolating
    pixel_point_sort = sorted(pixel_point, key=lambda z: z[0])
    for j in range(64):
        if j % 3 == 0:
            self.all_points_array[j][127][0] = pixel_point_sort[int(21 - j / 3)][0]
            self.all_points_array[j][127][1] = pixel_point_sort[int(21 - j / 3)][1]
        elif j % 3 == 1:
            self.all_points_array[j][127][0] = (
                2 * pixel_point_sort[21 - int((j - 1) / 3)][0] +
                pixel_point_sort[21 - int((j + 2) / 3)][0]) / 3
            self.all_points_array[j][127][1] = (
                2 * pixel_point_sort[21 - int((j - 1) / 3)][1] +
                pixel_point_sort[21 - int((j + 2) / 3)][1]) / 3
        elif j % 3 == 2:
            self.all_points_array[j][127][0] = (
                2 * pixel_point_sort[21 - int((j + 1) / 3)][0] +
                pixel_point_sort[21 - int((j - 2) / 3)][0]) / 3
            self.all_points_array[j][127][1] = (
                2 * pixel_point_sort[21 - int((j + 1) / 3)][1] +
                pixel_point_sort[21 - int((j - 2) / 3)][1]) / 3
    img = Image.new('RGB', Image.open('last_col_filter.jpg').size)
    test_image_array = np.zeros(gray.shape)
    print(test_image_array.shape)
    for i in range(64):
        for j in range(128):
            test_image_array[int(self.all_points_array[i][j][0])][int(
                self.all_points_array[i][j][1])] = 1
    test_image_data = []
    for i in range(test_image_array.shape[0]):
        for j in range(test_image_array.shape[1]):
            if test_image_array[i][j] == 1:
                test_image_data.append((255, 255, 255))
            else:
                test_image_data.append((0, 0, 0))
    img.putdata(test_image_data)
    img.save('verify.jpg')
    # print(time.time() - start_time)
    camera.close()
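# Small standalone sketch of the row interpolation used in start() above: the
# three display rows between two calibrated rows (4 apart) are 3:1, 2:2 and 1:3
# weighted averages of their camera-space centres. The sample coordinates are
# invented for illustration; this is independent of the class.
import numpy as np

def interpolate_rows(row_a, row_b):
    # row_a, row_b: (N, 2) arrays of (y, x) centres for display rows i and i + 4
    return [((4 - k) * row_a + k * row_b) / 4.0 for k in (1, 2, 3)]

print(interpolate_rows(np.array([[100.0, 50.0]]), np.array([[112.0, 52.0]])))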
def drawMatrix(matrix, matrix_value2color, left_label_ls=[], top_label_ls=[],
               right_label_ls=[], bottom_label_ls=[], with_grid=0, font=None):
    """
    2008-08-21
        shift the left_label to the right to stick next to the matrix
    2008-08-21
        matrix_value2color could be a dictionary or a function; if it's a dictionary, turn it into a lambda function.
        use real font-rendition length as maximum label length
    2007-10-23
        matrix is either a Numeric, numarray or numpy array.
        use PIL to draw a matrix
    2007-11-02
        add line to get default font
    """
    sys.stderr.write("Drawing matrix ...")
    import Image, ImageDraw
    if not font:
        font = get_font()
    if type(matrix_value2color) == dict:
        matrix_value2color_func = lambda x: matrix_value2color[x]
    else:
        matrix_value2color_func = matrix_value2color
    char_dimension = font.getsize('W')  # W is the biggest (widest) character
    char_width, char_height = char_dimension
    word_font_length = lambda word: font.getsize(word)[0]
    if left_label_ls:  # not empty
        max_left_label_length = max(map(word_font_length, left_label_ls))
    else:
        max_left_label_length = 0
    if top_label_ls:
        max_top_label_length = max(map(word_font_length, top_label_ls))
    else:
        max_top_label_length = 0
    if right_label_ls:
        max_right_label_length = max(map(word_font_length, right_label_ls))
    else:
        max_right_label_length = 0
    if bottom_label_ls:
        max_bottom_label_length = max(map(word_font_length, bottom_label_ls))
    else:
        max_bottom_label_length = 0
    left_label_dimension = (max_left_label_length + char_width, char_height)
    top_label_dimension = (max_top_label_length + char_width, char_height)  # needs rotation
    right_label_dimension = (max_right_label_length + char_width, char_height)
    bottom_label_dimension = (max_bottom_label_length + char_width, char_height)  # needs rotation

    x_offset0 = char_width
    x_offset1 = left_label_dimension[0]
    x_offset2 = x_offset1 + matrix.shape[1] * char_height
    y_offset0 = 0
    y_offset1 = top_label_dimension[0]
    y_offset2 = y_offset1 + matrix.shape[0] * char_height
    whole_dimension = (x_offset2 + right_label_dimension[0],
                       y_offset2 + bottom_label_dimension[0])

    im = Image.new('RGB', (whole_dimension[0], whole_dimension[1]), (255, 255, 255))
    draw = ImageDraw.Draw(im)
    # left label
    if left_label_ls:
        for i in range(len(left_label_ls)):
            left_label = left_label_ls[i]
            _left_label_dimension = font.getsize(left_label)
            text_region = get_text_region(left_label, _left_label_dimension,
                                          rotate=0, font=font)  # no rotate
            # shift the left_label to the right to stick next to the matrix
            label_shift = max_left_label_length - _left_label_dimension[0]
            box = (x_offset0 + label_shift,
                   y_offset1 + i * left_label_dimension[1], x_offset1,
                   y_offset1 + (i + 1) * left_label_dimension[1])
            im.paste(text_region, box)
    # draw matrix and top_label_ls and bottom_label_ls
    for i in range(matrix.shape[1]):  # x-axis
        x_offset_left = x_offset1 + i * top_label_dimension[1]
        x_offset_right = x_offset1 + (i + 1) * top_label_dimension[1]
        # draw top_label_ls
        if top_label_ls:
            top_label = top_label_ls[i]
            text_region = get_text_region(top_label, top_label_dimension,
                                          rotate=1, font=font)
            box = (x_offset_left, y_offset0, x_offset_right, y_offset1)
            im.paste(text_region, box)
        for j in range(matrix.shape[0]):  # y-axis
            draw.rectangle(
                (x_offset_left, y_offset1 + j * left_label_dimension[1],
                 x_offset_right, y_offset1 + (j + 1) * left_label_dimension[1]),
                fill=matrix_value2color_func(matrix[j, i]))
        # draw bottom_label_ls
        if bottom_label_ls:
            bottom_label = bottom_label_ls[i]
            text_region = get_text_region(bottom_label, bottom_label_dimension,
                                          rotate=1, font=font)
            box = (x_offset_left, y_offset2, x_offset_right, whole_dimension[1])
            im.paste(text_region, box)
    # right label
    if right_label_ls:
        for i in range(len(right_label_ls)):
            right_label = right_label_ls[i]
            text_region = get_text_region(right_label, right_label_dimension,
                                          rotate=0, font=font)
            box = (x_offset2, y_offset2, x_offset_right, whole_dimension[0])
            im.paste(text_region, box)
    if with_grid:
        draw_grid(im, draw, [x_offset1, y_offset1, x_offset2, y_offset2],
                  char_height, char_height)
    #im = im.rotate(270)
    sys.stderr.write("Done.\n")
    return im
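# A minimal usage sketch for drawMatrix above (hedged: the array and the
# value-to-color mapping are invented for illustration; get_font and
# get_text_region are assumed to be available from the same module).
import numpy
example_matrix = numpy.array([[0, 1, 2], [2, 1, 0]])
value2color = {0: (255, 255, 255), 1: (127, 127, 127), 2: (0, 0, 0)}
im = drawMatrix(example_matrix, value2color,
                left_label_ls=['row1', 'row2'],
                top_label_ls=['c1', 'c2', 'c3'],
                with_grid=1)
im.save('matrix_example.png')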
# -*- coding: utf-8 -*-
'''
Generate a random CAPTCHA image.
'''
import string
strings = string.letters
ascii_letters = string.ascii_letters
import random
import Image, ImageDraw, ImageFilter, ImageFont

letter = ''
myImg = Image.new('RGB', (100, 50), 'white')
draw = ImageDraw.Draw(myImg)
font = ImageFont.truetype("/usr/share/fonts/truetype/AbyssinicaSIL-R.ttf", 20)

# for x in range(20):
#     for y in range(20):
#         draw.point((x*5, y*5), fill=(0, 10, 0))
#         draw.line((x, y) + (myImg.size[1], myImg.size[0]), fill=20)

for i in range(4):
    random_letter = random.choice(strings)
    print random_letter
    # letter += random_letter
    print draw.text((10 + i * 25, 18), str(random_letter), fill='red', font=font)

myImg = myImg.filter(ImageFilter.MinFilter)
myImg.save('hello.jpg', 'JPEG')
def drawText(self, pos, txt, fill):
    draw = ImageDraw.Draw(self.image)
    draw.text(pos, txt, font=self.font, fill=fill)
    del draw
def convert_data(self, filename):
    verbose = 'HEADER of ' + filename + ':\n'
    file_code = unpack('>i', ''.join(self.data[0:4]))[0]  # 4-byte integer, big endian
    if file_code != 9994:
        print 'sorry, no shape file - ' + filename
        sys.exit()
    file_length = unpack('>i', ''.join(self.data[24:28]))[0] * 2  # 4-byte integer, big endian
    verbose += 'file length = ' + str(file_length) + ' Bytes\n'
    version = unpack('<i', ''.join(self.data[28:32]))[0]  # 4-byte integer, little endian
    verbose += 'version = ' + str(version) + '\n'
    shape_type = self.shape_type_def[unpack('<i', ''.join(self.data[32:36]))[0]]  # 4-byte integer, little endian
    verbose += 'shape type = ' + str(shape_type) + '\n'
    verbose += 'bounding box = '
    for i in range(8):
        n = unpack('<d', ''.join(self.data[i * 8 + 36:i * 8 + 44]))[0]  # 8-byte double, little endian
        if i == 0:
            xmin = n
        elif i == 1:
            ymin = n
        elif i == 2:
            xmax = n
        elif i == 3:
            ymax = n
    verbose += str(xmin) + ', ' + str(ymin) + ', ' + str(xmax) + ', ' + str(ymax) + '\n'
    #print verbose
    if self.xdelta == -1.0 or self.ydelta == -1.0:
        self.xdelta = abs(xmin) + abs(xmax)
        self.ydelta = abs(ymin) + abs(ymax)
        self.pixel = self.scr_width / self.xdelta
        img_height = self.scr_width / (self.xdelta / self.ydelta)
        self.imagebuffer = Image.new('RGBA',
                                     (int(self.scr_width), int(img_height)),
                                     self.bgcolor)
        self.drawbuffer = ImageDraw.Draw(self.imagebuffer)
    elif self.imagebuffer == None:
        self.pixel = self.scr_width / self.xdelta
        img_height = self.scr_width / (self.xdelta / self.ydelta)
        self.imagebuffer = Image.new('RGBA',
                                     (int(self.scr_width), int(img_height)),
                                     self.bgcolor)
        self.drawbuffer = ImageDraw.Draw(self.imagebuffer)
    i = 100  # index of records = 100
    while i < file_length:
        record_nr = unpack('>i', ''.join(self.data[i:i + 4]))[0]  # big endian
        content_length = unpack('>i', ''.join(self.data[i + 4:i + 8]))[0] * 2
        shape_type = unpack('<i', ''.join(self.data[i + 8:i + 12]))[0]  # little endian
        if shape_type == 3:
            self.get_polyline(self.data[i + 8:i + 8 + content_length])
        elif shape_type == 5:
            self.get_polygon(self.data[i + 8:i + 8 + content_length])
        else:
            print '--- ' + self.shape_type_def[shape_type] + ' ---'
        i += content_length + 8
def addRect(self, acolor, x, y, w, h):
    d = ImageDraw.Draw(self.image)
    d.rectangle([(x, y), (x + w, y + h)], outline=acolor.getRGB())
        allColors[0] += labColor[0]
        allColors[1] += labColor[1]
        allColors[2] += labColor[2]
        # allColors.append(labColor)

    # Average colors
    averageLab = (allColors[0] / numColors, allColors[1] / numColors,
                  allColors[2] / numColors)
    averageRGB = lab2rgb(averageLab)
    averageColors[metric] = averageRGB

print averageColors

swatchsize = 20
numcolors = 17
pal = Image.new('RGB', (swatchsize * numcolors, swatchsize))
draw = ImageDraw.Draw(pal)

posx = 0
for metric in averageColors:
    col = averageColors[metric]
    col = (int(col[0]), int(col[1]), int(col[2]))
    draw.rectangle([posx, 0, posx + swatchsize, swatchsize], fill=col)
    posx = posx + swatchsize

del draw
pal.save('averageOutput.png', "PNG")

# When averaging colors, we may need to use the Lab colorspace: http://en.wikipedia.org/wiki/Lab_color_space
# As recommended here: http://stackoverflow.com/questions/398224/how-to-mix-colors-naturally-with-c
def addArc(self, acolor, x, y, w, h, start, angle):
    d = ImageDraw.Draw(self.image)
    d.arc([(x, y), (x + w, y + h)], start, start + angle,
          outline=acolor.getRGB())
    frame_id += 1

    # write input (angle)
    str = "{},{},{}\n".format(int(ts * 1000), frame_id, angle)
    keyfile.write(str)

    # write input (button: left, center, stop, speed)
    str = "{},{},{},{}\n".format(int(ts * 1000), frame_id, btn, cfg_throttle)
    keyfile_btn.write(str)

    if use_dnn and fpv_video:
        textColor = (255, 255, 255)
        bgColor = (0, 0, 0)
        newImage = Image.new('RGBA', (100, 20), bgColor)
        drawer = ImageDraw.Draw(newImage)
        drawer.text((0, 0), "Frame #{}".format(frame_id), fill=textColor)
        drawer.text((0, 10), "Angle:{}".format(car_angle), fill=textColor)
        newImage = cv2.cvtColor(np.array(newImage), cv2.COLOR_BGR2RGBA)
        frame = cm.overlay_image(frame, newImage, x_offset=0, y_offset=0)

    # write video stream
    vidfile.write(frame)
    if frame_id >= 1000:
        print("recorded 1000 frames")
        break

    print("%.3f %d %.3f %d %d(ms)" %
          (ts, frame_id, angle, btn, int((time.time() - ts) * 1000)))

print("Finish..")
turn_off()
def addText(self, acolor, x, y, string):
    global defaultFont
    d = ImageDraw.Draw(self.image)
    d.text((x, y), string, font=defaultFont, fill=acolor.getRGB())
    cluster_b = {}
    for i in range(0, num):
        # Python 2.7x round() returns a float but 3.3x returns an int
        cluster_r[i], cluster_g[i], cluster_b[i] = km.cluster_centers_[
            i].round().astype(int)

    clust_r = []
    clust_g = []
    clust_b = []
    for row in temp_imdf.iterrows():
        cluster_num = row[1][5]
        clust_r.append(cluster_r[cluster_num])
        clust_g.append(cluster_g[cluster_num])
        clust_b.append(cluster_b[cluster_num])

    temp_imdf['cluster_r'] = clust_r
    temp_imdf['cluster_g'] = clust_g
    temp_imdf['cluster_b'] = clust_b

    ## Generate posterize image
    subset = temp_imdf[['cluster_r', 'cluster_g', 'cluster_b']]
    rgb_tuples = [tuple(x) for x in subset.values]
    temp_image = Image.new("RGB", [imageW, imageH])
    temp_image.putdata(rgb_tuples)
    draw = ImageDraw.Draw(temp_image)
    placeText(temp_image, str(num), font)
    temp_image.save(output_path + "cluster_out_" + str(num) + ".png")

    dt_loop_end = datetime.now()
    loop_duration = dt_loop_end - dt_loop_start
    print(str(num) + " cluster loop completed in: " + str(loop_duration))
data = band.ReadAsArray(xOffset, yOffset, ncols, nrows)

geoMatrix[0] = minXgeom
geoMatrix[3] = maxYgeom
geoMatrix[1] = pixelWidth
geoMatrix[5] = pixelHeight
geoMatrix[2] = 0.0
geoMatrix[4] = 0.0

# transforming between pixel/line (P,L) raster space and projection coordinates (Xp,Yp) space.
pixel, line = world2Pixel(geoMatrix, pnts[0], pnts[1])
#pixel, line = world2Pixel(geo_t,pnts[0],pnts[1])

# Create a new image with the raster dimension
#rasterPoly = Image.new("L", (cols, rows),1)
rasterPoly = Image.new("L", (ncols, nrows), 1)
rasterize = ImageDraw.Draw(rasterPoly)
listdata = [(pixel[i], line[i]) for i in xrange(len(pixel))]
rasterize.polygon(listdata, 0)
mask = 1 - imageToArray(rasterPoly)
ncell = np.sum(mask)

dataInPoly = data * mask
dime = dataInPoly.shape
dataInPolyFreq = Counter(np.reshape(dataInPoly, dime[0] * dime[1]))
for k in dataInPolyFreq.keys():
    dataInPolyFreq[k] = 100. * dataInPolyFreq[k] / ncell

#dataInPoly[np.where(dataInPoly == 0)] = -99999
#maxZ = np.max(dataInPoly[np.nonzero(dataInPoly)])
#minZ = np.min(dataInPoly[np.nonzero(dataInPoly)])
import Image, ImageDraw, math, colorsys

# Constant variables
dimensions = (800, 800)
# scale image dimension down to dimension of Mandelbrot Set ~(3,3) in loop
scale = 1.0 / (dimensions[0] / 3)
# centers the image in the given dimensions
# (otherwise only the real-real quadrant would be visible)
center = (1.5, 1.5)  # Use this for Julia set
iterate_max = 100
colors_max = 50

# Create image and object d which can be used to draw in the given image
img = Image.new("RGB", dimensions)
d = ImageDraw.Draw(img)

# Calculate a tolerable palette.
# HSV is easier to sample from (different hues with the same sat and value);
# use colorsys to convert HSV to RGB.
# Alternatively, download available palettes.
palette = [0] * colors_max
for i in xrange(colors_max):
    f = 1 - abs((float(i) / colors_max - 1)**15)
    r, g, b = colorsys.hsv_to_rgb(.66 + f / 3, 1 - f / 2, f)
    palette[i] = (int(r * 255), int(g * 255), int(b * 255))


# Calculate the mandelbrot sequence for the point c with start value z
def iterate_mandelbrot(c, z=0):
    for n in xrange(iterate_max + 1):
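# The snippet above is cut off inside iterate_mandelbrot. A hedged sketch of how
# the standard escape-time iteration usually continues (an assumption, not the
# original code): iterate z -> z*z + c until |z| exceeds 2 or iterate_max is
# reached, returning the escape count that indexes the palette.
def iterate_mandelbrot_sketch(c, z=0):
    for n in xrange(iterate_max + 1):
        z = z * z + c
        if abs(z) > 2:
            return n
    return None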
def multiple_avi(args):
    """ Generate an avi from multiple cine files.
    INPUT
    -----
    args : Namespace object
        contains each of the arguments used to generate an avi.
        See the top of this file for details, or run cine2avi --help from a terminal.
    OUTPUT
    -----
    None
    """
    if args.timestamp:
        print(os.path.join(script_dir, 'Helvetica.ttf'))
        font = ImageFont.truetype(args.font, args.ts)
    files = args.cines
    #for i, fn in enumerate(files):
    fn = files[0].strip()
    frame_slice = slice(None)
    if '[' in fn:
        if fn[-1] == ']':
            fn, s = fn.split('[')
            try:
                frame_slice = slice(*map(noneint, s[:-1].split(':')))
            except:
                raise ValueError("Couldn't convert '[%s' to slice notation" % s)
        else:
            print "Warning, found '[' in input, but it didn't end with ']', so I'll assume you didn't mean to give a frame range."
    base, ext = os.path.splitext(fn)
    ext = ext.lower()
    if not os.path.exists(fn):
        print "File %s not found, ignoring." % fn
        # continue
    output = args.output
    if '%s' in args.output:
        output = output % base
    elif '%' in args.output:
        output = output % i
    base, extout = os.path.splitext(output)
    output = output[:-6] + '_multiple' + extout
    print(output)
    bpp = None
    inp = [None for fn in files]
    if ext in ('.cin', '.cine'):
        inp_ref = cine.Cine(fn)
        for i, fn in enumerate(files):
            print(fn)
            fn = fn.strip()
            inp[i] = cine.Cine(fn)
        bpp = inp_ref.real_bpp
        if bpp < 8 or bpp > 16:
            bpp = None  # Just in case
        td = args.td if args.td else int(ceil(log10(inp_ref.frame_rate)))
        t0 = 0.
        frame_text = lambda i: 't: %%.%df s, ' % td % (inp_ref.get_time(i) - t0) + \
            'Dt: %f ms' % (round(
                ((inp_ref.get_time(i) - inp_ref.get_time(i - 1)) * 1000) * 10) / 10)  # 't: %f s \n Dt : ms' %
    elif ext in ('.tif', '.tiff'):
        inp = tiff.Tiff(fn)
        frame_text = lambda i: str(i)
    bpps = inp_ref[0].dtype.itemsize * 8
    if bpp is None:
        bpp = bpps
    lengths = [len(i) for i in inp]
    print('Movie lengths :' + str(lengths))
    Nmax = min(lengths)
    frames = range(*frame_slice.indices(Nmax))
    if args.clip == 0:
        map = linspace(0., 2.**(bpps - bpp), 2**bpps)
    else:
        counts = 0
        bins = arange(2**bpps + 1)
        for i in frames[::args.hist_skip]:
            c, b = histogram(inp_ref[i], bins)
            counts += c
        counts = counts.astype('d') / counts.sum()
        counts = counts.cumsum()
        bottom_clip = where(counts > args.clip)[0]
        if not len(bottom_clip):
            bottom_clip = 0
        else:
            bottom_clip = bottom_clip[0]
        top_clip = where(counts < (1 - args.clip))[0]
        if not len(top_clip):
            top_clip = 2**bpps
        else:
            top_clip = top_clip[-1]
        #print bottom_clip, top_clip
        #import pylab
        #pylab.plot(counts)
        #pylab.show()
        #sys.exit()
        m = 1. / (top_clip - bottom_clip)
        map = clip(-m * bottom_clip + m * arange(2**bpps, dtype='f'), 0, 1)
    map = map**(1. / args.gamma)
    map = clip(map * 255, 0, 255).astype('u1')
    #print '%s -> %s' % (fn, output)
    ofn = output
    output = mjpeg.Avi(output, framerate=args.framerate, quality=args.quality)
    if args.rect is not None:
        rect = [int(i) for i in args.rect[1:-1].split(':')]
        print(rect)
    #print frames
    for i in StatusPrinter(frames, os.path.basename(ofn)):
        frame = [None for p in inp]
        for p, inpp in enumerate(inp):
            framep = inpp[i]
            if args.rotate:
                framep = rot90(framep, (args.rotate % 360) // 90)
            framep = map[framep]
            # print(type(frame))
            if args.rect == None:
                frame[p] = asarray(framep)
            else:
                frame[p] = asarray(framep[rect[0]:rect[1], rect[2]:rect[3]])
        fra = concatenate(tuple(frame), axis=0)
        if args.timestamp:
            fra = Image.fromarray(fra)
            draw = ImageDraw.Draw(fra)
            draw.text((args.tx, args.ty), frame_text(i), font=font, fill=args.tb)
            fra = asarray(fra)
            # print(type(frame))
        # plt.imshow(fra)
        # plt.show()
        # input()
        output.add_frame(fra)
    output.close()
def drawContinousLegend(min_value, max_value, no_of_ticks, value2color,
                        font=None, no_of_bands_per_char_height=5):
    """
    2008-09-30
        fix a bug when min_value and max_value are all integers and band_value_step=0 if no_of_bands > (max_value - min_value).
    2008-08-21
        deal with the case that tick_index goes out of bounds
    2008-08-21
        draw a legend for continuous values
    """
    sys.stderr.write("Drawing continous legend ...")
    import Image, ImageDraw
    if type(value2color) == dict:
        value2color_func = lambda x: value2color[x]
    else:
        value2color_func = value2color
    if not font:
        font = get_font()
    char_dimension = font.getsize('W')  # W is the biggest (widest) character
    char_width, char_height = char_dimension
    band_height = int(char_height / no_of_bands_per_char_height)
    # this is the number of bands to draw
    no_of_bands = 2 * (no_of_ticks - 1) * no_of_bands_per_char_height
    # 2008-09-30 convert to float in case all-integer input causes band_value_step=0
    # when no_of_bands > (max_value - min_value); python gives integer output if
    # numerator and denominator are both integers.
    min_value = float(min_value)
    max_value = float(max_value)
    band_value_step = (max_value - min_value) / no_of_bands
    band_value_ls = []
    band_value = max_value
    while band_value >= min_value:
        band_value_ls.append(band_value)
        band_value -= band_value_step
    tick_step = (max_value - min_value) / (no_of_ticks - 1)
    tick_value_ls = []
    tick_value = max_value
    while tick_value >= min_value:
        tick_value_ls.append(tick_value)
        tick_value -= tick_step
    value_label_ls = []
    tick_index = 0
    max_label_len = 0
    for i in range(len(band_value_ls)):
        band_value = band_value_ls[i]
        label = None
        if tick_index < len(tick_value_ls):  # 2008-08-21
            tick_value = tick_value_ls[tick_index]
            if abs(band_value - tick_value) < band_value_step:
                # if the tick_value and band_value are close enough, bind them together
                label = '%.2f' % tick_value
                if len(label) > max_label_len:
                    max_label_len = len(label)
                tick_index += 1
        value_label_ls.append((band_value, label))

    label_dimension = (char_width * max_label_len, char_height)
    x_offset0 = 0
    x_offset1 = char_height  # sample color starts here
    x_offset2 = x_offset1 + 2 * char_height  # label starts here
    x_offset3 = x_offset2 + label_dimension[0]  # the margin to the right starts here
    y_offset0 = 0
    y_offset1 = y_offset0 + char_height  # sample color starts here
    y_offset2 = y_offset1 + len(value_label_ls) * band_height  # len(label_ls)-1 gaps among char_height's
    whole_dimension = (x_offset3 + char_height,
                       y_offset2 + char_height)
    im = Image.new('RGB', (whole_dimension[0], whole_dimension[1]), (255, 255, 255))
    draw = ImageDraw.Draw(im)
    for i in range(len(value_label_ls)):
        band_value, label = value_label_ls[i]
        y_offset_upper = y_offset1 + i * band_height
        y_offset_lower = y_offset1 + (i + 1) * band_height
        # draw a sample color for this label
        draw.rectangle((x_offset1, y_offset_upper, x_offset1 + char_height,
                        y_offset_lower),
                       fill=value2color_func(band_value))
        if label != None:
            # draw a line here
            draw.line((x_offset1, y_offset_upper, x_offset1 + char_height,
                       y_offset_upper), fill='black')
            # draw the label
            text_region = get_text_region(label, label_dimension, rotate=0,
                                          font=font)  # no rotate
            box = (x_offset2, y_offset_upper, x_offset3,
                   y_offset_upper + label_dimension[1])
            im.paste(text_region, box)
    sys.stderr.write("Done.\n")
    return im
#!/usr/bin/python
import epd7in5
import Image
import ImageDraw
import ImageFont
from netifaces import interfaces, ifaddresses, AF_INET, AF_INET6

EPD_WIDTH = 640
EPD_HEIGHT = 384

epd = epd7in5.EPD()
epd.init()

image = Image.new('1', (EPD_WIDTH, EPD_HEIGHT), 1)
draw = ImageDraw.Draw(image)
head_text = ImageFont.truetype(
    '/usr/share/fonts/truetype/freefont/FreeSansBold.ttf', 28)
body_text = ImageFont.truetype(
    '/usr/share/fonts/truetype/freefont/FreeMonoBold.ttf', 24)

text_y = 10
head_buffer = 10
line_height = 20


def print_line(text, style):
    global text_y
    y_pos = text_y
    text_y = text_y + line_height
    font = body_text
    if style == "heading":
def test(self):
    """ Tests whether the data input has been taken properly by assigning 1 or
    0 to each pixel at random and then checking whether the computer can read
    the display accurately or not. """
    # test_array contains the randomly selected led display data (currently only alternate pixels)
    test_array = np.array(np.zeros(64 * 128))
    test_array.shape = (64, 128)
    for i in range(0, 64, 2):
        for j in range(0, 128, 2):
            test_array[i][j] = int(np.random.randint(0, 2, size=1))
    RST = 24
    # 128x64 display with hardware I2C:
    disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST)
    # Initialize library.
    disp.begin()
    # Clear display.
    disp.clear()
    disp.display()
    # Create blank image for drawing.
    # Make sure to create image with mode '1' for 1-bit color.
    self.width = disp.width
    self.height = disp.height
    image_1 = Image.new('1', (self.width, self.height))
    # Get drawing object to draw on image.
    draw = ImageDraw.Draw(image_1)
    # Draw a black filled box to clear the image.
    draw.rectangle((0, 0, self.width, self.height), outline=0, fill=0)
    for i in range(0, self.height):
        for j in range(0, self.width):
            if int(test_array[i][j]) == 1:
                draw.point([(j, i)], fill=255)  # x,y
    disp.image(image_1)
    disp.display()
    # Now get the pixel data
    camera = picamera.PiCamera()
    camera.resolution = (2592, 1944)
    camera.start_preview()
    camera.led = False
    time.sleep(2)
    camera.capture('test.jpg')
    camera.stop_preview()
    image = 'test.jpg'
    self.crop(image, (1020, 620, 1800, 1050), 'test_crop.jpg')
    image = 'test_crop.jpg'
    img = Image.open(image)
    pixels = list(img.getdata())
    pixels2 = []
    for pixel in pixels:
        # using only blue pixels for reading
        pixels2.append((pixel[2], pixel[2], pixel[2]))
    filtered_list = self.filter(120, pixels2)
    img = Image.new('RGB', Image.open('test_crop.jpg').size)
    img.putdata(filtered_list)
    img.save('test_filter.jpg')
    img = cv2.imread('test_filter.jpg')
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.erode(gray, None, iterations=1)
    # result_array will contain the data of the pixels that are being read
    result_array = np.array(np.zeros(64 * 128))
    result_array.shape = (64, 128)
    for i in range(64):
        for j in range(128):
            if gray[int(self.all_points_array[i][j][0])][int(
                    self.all_points_array[i][j][1])] == 0:
                result_array[i][j] = 0
            else:
                result_array[i][j] = 1
    # checking whether result_array and test_array are in fact the same or not
    errors_list = []
    errors = 0
    for i in range(0, 64, 4):
        for j in range(0, 128, 2):
            if (abs(result_array[i][j] - test_array[i][j])) > 0.1:
                errors += 1
                errors_list.append((i, j))
    print('errors', errors)
    print(len(errors_list))
    camera.close()