def draw(self, image_draw: ImageDraw):
    # Draw an "X" marker: two diagonal lines across the square spanned by the diameter.
    image_draw.line(
        (self._coordinates.x - self._diameter, self._coordinates.y - self._diameter,
         self._coordinates.x + self._diameter, self._coordinates.y + self._diameter),
        fill=self._color, width=self._linewidth)
    image_draw.line(
        (self._coordinates.x + self._diameter, self._coordinates.y - self._diameter,
         self._coordinates.x - self._diameter, self._coordinates.y + self._diameter),
        fill=self._color, width=self._linewidth)
def fill(self, seedPoint, fillValue=1):
    """Flood-fill an area of the grid starting from the seed point."""
    i = self.index(seedPoint)
    v = self.p[:]
    x = Image.fromarray(v.T)
    ImageDraw.floodfill(x, (i[0], i[1]), fillValue)
    v[:] = np.array(x).T
def createMask(imageIn, threshold=10, fillHoles=True, backgroundColor=255, blurRadius=0.0,
               maskScale=1.0):
    """
    Given an image, create a mask by locating the pixels that are not the backgroundColor
    (within a threshold).

    @param threshold       How far away from the backgroundColor a pixel must be to be
                           included in the mask.
    @param fillHoles       If true, the inside of the mask will be filled in. This is useful
                           if the inside of objects might contain the background color.
    @param backgroundColor The background color.
    @param blurRadius      If set to some fraction > 0.0, then the edges of the mask will be
                           blurred using a blur radius which is this fraction of the image size.
    @param maskScale       If set to < 1.0, then the effective size of the object (the area
                           where the mask includes the object) will be scaled down by this
                           amount. This can be useful when the outside of the object contains
                           some noise that you want to trim out and not include in the mask.
    @retval                The mask as a PIL 'L' image, where 255 marks areas that include
                           the object and 0 marks areas that are background. If blurRadius
                           is > 0, it will also contain values between 0 and 255 which act
                           as compositing values.
    """
    image = imageIn.convert('L')
    bwImage = image.point(lambda x: (abs(x - backgroundColor) > threshold) * 255)

    if not fillHoles:
        mask = bwImage
    else:
        # Flood-fill from the (expanded) border; anything the fill cannot reach is a hole
        # inside the object and therefore belongs to the mask.
        bwImage = ImageOps.expand(bwImage, 1, fill=0)
        maskColor = 128
        ImageDraw.floodfill(bwImage, (0, 0), maskColor)
        mask = bwImage.point(lambda x: (x != maskColor) * 255)
        mask = ImageOps.crop(mask, 1)

    # Are we reducing the object size?
    if maskScale < 1.0:
        newSize = [int(x * maskScale) for x in mask.size]
        reducedMask = mask.resize(newSize, Image.ANTIALIAS)
        sizeDiff = numpy.array(mask.size) - numpy.array(newSize)
        pos = [int(x / 2) for x in sizeDiff]
        mask = ImageChops.constant(mask, 0)
        mask.paste(reducedMask, tuple(pos))

    # Blur the mask
    if blurRadius > 0.0:
        radius = int(round(blurRadius * (mask.size[0] + mask.size[1]) / 2))
        if radius > 1:
            mask = blur(mask, radius=radius, edgeColor=0)

    return mask
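# A minimal usage sketch for createMask above (my own illustration, not from the original
# source). The input file name and parameter values are hypothetical; blurRadius is left at
# 0.0 so the sketch does not depend on the module's external blur() helper.
if __name__ == '__main__':
    objectImage = Image.open('object_on_white.png')
    mask = createMask(objectImage, threshold=16, fillHoles=True,
                      backgroundColor=255, blurRadius=0.0, maskScale=0.9)
    # Composite the object over a flat green background, using the mask as the alpha channel.
    background = Image.new('RGB', objectImage.size, (0, 128, 0))
    composited = Image.composite(objectImage.convert('RGB'), background, mask)
    composited.save('object_composited.png')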
def fillImageFaster(self, begin, paint, current, image):
    buffer = QBuffer()
    buffer.open(QBuffer.ReadWrite)
    image.save(buffer, "PNG")
    pil_im = Image.open(io.BytesIO(buffer.data()))
    ImageDraw.floodfill(pil_im, begin, (paint.red(), paint.green(), paint.blue()))
    self.image().image = QtGui.QImage(pil_im.convert("RGB").tobytes("raw", "RGB"),
                                      pil_im.size[0], pil_im.size[1],
                                      QtGui.QImage.Format_RGB888)
    self.update()
def test_floodfill():
    # Arrange
    im = Image.new("RGB", (w, h))
    draw = ImageDraw.Draw(im)
    draw.rectangle(bbox2, outline="yellow", fill="green")
    centre_point = (int(w/2), int(h/2))

    # Act
    ImageDraw.floodfill(im, centre_point, ImageColor.getrgb("red"))
    del draw

    # Assert
    assert_image_equal(im, Image.open("Tests/images/imagedraw_floodfill.png"))
def DrawBalls(self, differentialMethod, step): # First, track the border for all balls and store # it to pos0 and edgePos. The latter will move along the border, # pos0 stays at the initial coordinates. for ball in self.balls: ball.pos0 = self.trackTheBorder(ball.pos + 1j) if ball.pos0 == None: ball.tracking = False else: ball.edgePos = ball.pos0 ball.tracking = True # print "Done with tracking" loopIndex = 0 while loopIndex < 200: loopIndex += 1 for ball in self.balls: if not ball.tracking: continue # store the old coordinates old_pos = ball.edgePos # walk along the tangent, using chosen differential method ball.edgePos = differentialMethod(ball.edgePos, step, self.calcTangent) # correction step towards the border ball.edgePos, tmp = self.stepOnceTowardsBorder(ball.edgePos) draw = ImageDraw.Draw(self.image) draw.line((old_pos.real, old_pos.imag, ball.edgePos.real, ball.edgePos.imag), fill=self.color) del draw # check if we've gone a full circle or hit some other # edge tracker for ob in self.balls: if ob.tracking: if (ob is not ball) and abs(ob.pos0 - ball.edgePos) < step: # or loopIndex > 3 ball.tracking = False tracking = 0 for ball in self.balls: if ball.tracking: tracking += 1 if tracking == 0: break for ball in self.balls: if ball.tracking: ball.pos = complex(round(ball.pos.real), round(ball.pos.imag)) ImageDraw.floodfill(self.image, (ball.pos.real, ball.pos.imag), self.color) # , self.color)
def __init__(self, simplemap): self.image = Image.new("RGB", simplemap.image.size) self.simple = simplemap ImageDraw.floodfill(self.image, (0, 0), (255, 255, 255)) self.territories = set() draw = ImageDraw.Draw(self.image) self.territory_colours = territory_colours = simplemap.get_territories() self.inv_territory_colours = inv_territory_colours = dict([(v, k) for (k, v) in territory_colours.items()]) for fillpass in range(3): for y in xrange(self.image.size[1]): for x in xrange(self.image.size[0]): colour = simplemap.image.getpixel((x, y)) if fillpass == 1 and colour in territory_colours.values(): tid = inv_territory_colours[colour] * 100 n_x, n_y = x, y neighbours = [(x + 1, y), (x, y + 1), (x - 1, y), (x, y - 1)] neighbours = [(x if x > 0 else self.image.size[0] - 1, y) for (x, y) in neighbours] neighbours = [(x if x < self.image.size[0] else 0, y) for (x, y) in neighbours] neighbours = [(x, y if y > 0 else self.image.size[1] - 1) for (x, y) in neighbours] neighbours = [(x, y if y < self.image.size[1] else 0) for (x, y) in neighbours] neighbours = set(self.image.getpixel(neighbour) for neighbour in neighbours) neighbours = set( colour for colour in neighbours if colour[2] < 255 and colour != (0, 0, 0) and colour != (255, 0, 0) ) if neighbours: colour = max(neighbours) tid = colour_to_territory_id(colour) else: tid = inv_territory_colours[colour] * 100 # generate a new tid tid += 1 while tid in self.territories: tid += 1 self.territories.add(tid) colour = territory_id_to_colour(tid) x, y = n_x, n_y ImageDraw.floodfill(self.image, (x, y), colour) elif colour == (255, 255, 255): if x < self.image.size[0] - 1: next_pixel = simplemap.image.getpixel((x + 1, y)) if fillpass == 2 and (next_pixel in territory_colours.values()): # We're not in the sea colour = self.image.getpixel((x + 1, y))[:2] + (255,) draw.point((x, y), tuple(colour)) continue draw.point((x, y), colour) elif colour in set([(0, 0, 0), (255, 0, 0)]): draw.point((x, y), colour)
def test_floodfill(self):
    red = ImageColor.getrgb("red")
    for mode, value in [
        ("L", 1),
        ("RGBA", (255, 0, 0, 0)),
        ("RGB", red)
    ]:
        # Arrange
        im = Image.new(mode, (W, H))
        draw = ImageDraw.Draw(im)
        draw.rectangle(BBOX2, outline="yellow", fill="green")
        centre_point = (int(W/2), int(H/2))

        # Act
        ImageDraw.floodfill(im, centre_point, value)

        # Assert
        expected = "Tests/images/imagedraw_floodfill_"+mode+".png"
        im_floodfill = Image.open(expected)
        self.assert_image_equal(im, im_floodfill)

    # Test that using the same colour does not change the image
    ImageDraw.floodfill(im, centre_point, red)
    self.assert_image_equal(im, im_floodfill)

    # Test that filling outside the image does not change the image
    ImageDraw.floodfill(im, (W, H), red)
    self.assert_image_equal(im, im_floodfill)

    # Test filling at the edge of an image
    im = Image.new("RGB", (1, 1))
    ImageDraw.floodfill(im, (0, 0), red)
    self.assert_image_equal(im, Image.new("RGB", (1, 1), red))
def test_floodfill_thresh(self):
    # floodfill() is experimental

    # Arrange
    im = Image.new("RGB", (W, H))
    draw = ImageDraw.Draw(im)
    draw.rectangle(BBOX2, outline="darkgreen", fill="green")
    centre_point = (int(W/2), int(H/2))

    # Act
    ImageDraw.floodfill(
        im, centre_point, ImageColor.getrgb("red"),
        thresh=30)

    # Assert
    self.assert_image_equal(
        im, Image.open("Tests/images/imagedraw_floodfill2.png"))
def floodFill(canvas, origImage, edgeImage, color, filledImage=None):
    (width, height) = origImage.size
    edgePixels = edgeImage.load()
    fillRegionCoords = []
    temporaryFill = (100, 100, 100)
    for x in xrange(width):
        for y in xrange(height):
            if (edgePixels[x, y] == color):
                fillRegionCoords += [(x, y)]
                # Fill temporarily to make sure fillRegionCoords does not have
                # multiple coordinates that would fill the same region.
                ImageDraw.floodfill(edgeImage, (x, y), temporaryFill)
    if filledImage is None:
        filledImage = Image.open(canvas.data.edgeImageFile)
    for (x, y) in fillRegionCoords:
        fillColor = regionColor(origImage, filledImage, (x, y))
        ImageDraw.floodfill(filledImage, (x, y), fillColor)
    return filledImage
def PrintText(self, text):
    # led.draw_text2(x-axis, y-axis, whatyouwanttoprint, size) < Understand?
    # So led.draw_text2() prints simple text to the OLED display like so:
    #text = 'Hello!'

    # Create the image to write to the display
    # THIS MAY NEED TO CHANGE BASED ON STRING LENGTH!
    image = Image.new('1', (128, 64))
    draw = ImageDraw.Draw(image)
    draw.text((0, 0), text, font=ImageFont.load_default(), fill=255)

    # Clear the Display
    self.led.clear()
    self.led.display()

    # Write the text-based image to the display
    self.led.image(image)
    self.led.display()
def test_floodfill_border(self):
    # floodfill() is experimental

    # Arrange
    im = Image.new("RGB", (w, h))
    draw = ImageDraw.Draw(im)
    draw.rectangle(bbox2, outline="yellow", fill="green")
    centre_point = (int(w/2), int(h/2))

    # Act
    ImageDraw.floodfill(
        im, centre_point, ImageColor.getrgb("red"),
        border=ImageColor.getrgb("black"))
    del draw

    # Assert
    self.assert_image_equal(
        im, Image.open("Tests/images/imagedraw_floodfill2.png"))
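# A standalone sketch (not part of the test suite above) of the two ImageDraw.floodfill
# keyword arguments these tests exercise: thresh lets the fill spread across colours within
# the given distance of the seed colour, and border fills outward until the border colour is
# hit. The output file name is hypothetical.
from PIL import Image, ImageColor, ImageDraw

im = Image.new("RGB", (100, 100), "white")
d = ImageDraw.Draw(im)
d.rectangle((20, 20, 80, 80), outline="darkgreen", fill="green")

# With thresh=30 the fill covers both the green interior and the slightly darker outline.
ImageDraw.floodfill(im, (50, 50), ImageColor.getrgb("red"), thresh=30)

# With border=..., fill the surrounding white area up to (but not over) the red region.
ImageDraw.floodfill(im, (0, 0), ImageColor.getrgb("yellow"),
                    border=ImageColor.getrgb("red"))
im.save("floodfill_demo.png")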
def drawterritory(t, color=None):
    """Draw an entire territory (will draw in color provided, default is owning player's color)"""
    terr = territories[str(riskboard.territories[t].name)]

    #Create colored version of the image
    canvas.delete(terr.name)
    if len(backcolors) > 0 and current_state.owners[t] is not None:
        for fp in terr.floodpoints:
            if color:
                ImageDraw.floodfill(terr.photo, fp, color)
            else:
                ImageDraw.floodfill(terr.photo, fp, hex_to_rgb(backcolors[current_state.owners[t]]))

    terr.currentimage = ImageTk.PhotoImage(terr.photo)
    canvas.create_image(terr.x, terr.y, anchor=Tkinter.NW, image=terr.currentimage, tags=(terr.name,))
    drawarmy(t, 1)
def test_floodfill_border():
    # floodfill() is experimental
    if hasattr(sys, 'pypy_version_info'):
        # Causes fatal RPython error on PyPy
        skip()

    # Arrange
    im = Image.new("RGB", (w, h))
    draw = ImageDraw.Draw(im)
    draw.rectangle(bbox2, outline="yellow", fill="green")
    centre_point = (int(w/2), int(h/2))

    # Act
    ImageDraw.floodfill(
        im, centre_point, ImageColor.getrgb("red"),
        border=ImageColor.getrgb("black"))
    del draw

    # Assert
    assert_image_equal(im, Image.open("Tests/images/imagedraw_floodfill2.png"))
def imfill(arr, edge):
    """Fill holes in images.

    NOTE: dtype of input array will be temporarily converted to uint8!
    This is because PIL's fromarray function works only with numpy arrays
    of data type 'uint8'. This may cause some data losses, so proceed with
    caution!

    Input:
    arr  -- a numpy.array to be floodfilled
    edge -- a value of edges
    """
    # using arr.astype to preserve array's dtype, as fromarray requires
    # array whose dtype is uint8
    img = Image.fromarray(arr.astype('uint8'))  # read-only
    aimg = img.copy()
    ImageDraw.floodfill(aimg, (0, 0), edge, edge)
    invimg = ImageChops.invert(aimg)
    invarr = asarray(invimg)
    arr[invarr == 255] = edge
    return arr
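# A small usage sketch for imfill above (my own illustration, not from the original source):
# a square ring of "edge" pixels with a hollow centre should come back as a solid block.
import numpy as np

ring = np.zeros((7, 7), dtype='uint8')
ring[1:6, 1:6] = 200   # edge value
ring[2:5, 2:5] = 0     # the hole inside the ring
filled = imfill(ring, 200)
print(filled)          # the inner 5x5 block is now solid 200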
def test_sanity(self):
    im = hopper("RGB").copy()

    draw = ImageDraw2.Draw(im)
    pen = ImageDraw2.Pen("blue", width=7)
    draw.line(list(range(10)), pen)

    from PIL import ImageDraw
    draw, handler = ImageDraw.getdraw(im)
    pen = ImageDraw2.Pen("blue", width=7)
    draw.line(list(range(10)), pen)
def identify_objects(self):
    im = copy.deepcopy(self.image)
    width, height = im.size
    dark_fill_val = 1
    light_fill_val = 254
    for x in range(width):
        for y in range(height):
            xy = (x, y)
            l_val = im.getpixel(xy)
            if l_val == 0:
                # Unvisited dark pixel: flood-fill it with a fresh label and record the object.
                ImageDraw.floodfill(im, xy, dark_fill_val)
                self.objects.append(Object(xy, dark_fill_val))
                dark_fill_val += 1
            elif l_val == 255:
                # Unvisited light pixel: label the background region, counting down from 254.
                ImageDraw.floodfill(im, xy, light_fill_val)
                light_fill_val -= 1
            else:
                # Already-labelled pixel: attach it to the object with the matching label.
                for obj in self.objects:
                    if obj.l_val == l_val:
                        obj.add_pixel(xy)
                        break
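# A self-contained sketch of the flood-fill labelling idea used by identify_objects above
# (my own illustration; the image content and label values are arbitrary). Each connected
# dark (0) region of a binary 'L' image is flood-filled with its own grey label.
from PIL import Image, ImageDraw

im = Image.new('L', (8, 8), 255)
px = im.load()
for x, y in [(1, 1), (1, 2), (2, 1), (5, 5), (6, 5), (6, 6)]:   # two separate dark blobs
    px[x, y] = 0

label = 1
for y in range(im.height):
    for x in range(im.width):
        if px[x, y] == 0:                       # an unlabelled dark pixel starts a new object
            ImageDraw.floodfill(im, (x, y), label)
            label += 1
print(label - 1, "objects found")               # expected: 2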
def drawterritory(t, shaded): """Draw an entire territory (possibly shaded)""" risknetwork.draw_territory(t, shaded) terr = territories[t.name] #Create colored version of the image canvas.delete(terr.name) #print 'Drawing territory: ', t.name if hasattr(t.player, 'backcolor'): for fp in terr.floodpoints: #print 'Flood-filling', terr.name, ' territory' ImageDraw.floodfill(terr.photo, fp, hex_to_rgb(t.player.backcolor)) #print 'Saving images' terr.shadedimage = ImageTk.PhotoImage(terr.photo.point(lambda x:x * 0)) terr.currentimage = ImageTk.PhotoImage(terr.photo) if shaded: canvas.create_image(terr.x, terr.y, anchor=Tkinter.NW, image=terr.shadedimage, tags=(terr.name,)) else: canvas.create_image(terr.x, terr.y, anchor=Tkinter.NW, image=terr.currentimage, tags=(terr.name,)) drawarmy(riskengine.territories[terr.name], 1)
def generate_captcha(request):
    path = '.'
    im = Image.new('RGBA', (200, 50), (0, 0, 0, 0))
    draw = ImageDraw.Draw(im)
    number = ''
    margin_left, margin_top = 0, 0
    colnum = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
    i = 0
    while i < 6:
        # Build a random 6-digit colour code for this character.
        font_color = '#' + str(random.randint(0, 9))
        y = 0
        while y < 5:
            rand = random.choice(colnum)
            font_color = font_color + rand
            y += 1
        # Draw a random distraction line behind the character.
        rand_x11 = random.randint(0, 100)
        rand_x12 = random.randint(100, 200)
        rand_y11 = random.randint(0, 50)
        rand_y12 = random.randint(0, 50)
        draw.line((rand_x11, rand_y11, rand_x12, rand_y12), fill='#a9a6a6')
        # Pick a random font and size, then draw one random digit.
        font_rand = str(random.randint(1, 10))
        font_size_rand = random.randint(30, 40)
        font = ImageFont.truetype(path + "fonts/" + font_rand + ".ttf", font_size_rand)
        a = str(random.randint(0, 9))
        draw.text((margin_left, margin_top), a, fill=str(font_color), font=font)
        # Another distraction line on top.
        rand_x11 = random.randint(0, 100)
        rand_x12 = random.randint(100, 200)
        rand_y11 = random.randint(0, 50)
        rand_y12 = random.randint(0, 50)
        draw.line((rand_x11, rand_y11, rand_x12, rand_y12), fill="#a9a6a6")
        margin_left = margin_left + random.randint(20, 35)
        margin_top = random.randint(0, 20)
        i += 1
        number += a
    salt = "$@!SAf*$@)ASFfacnq==124-2542SFDQ!@$1512czvaRV"
    key = md5(str(number + salt)).hexdigest()
    output = StringIO()
    im.save(output, format="PNG")
    contents = output.getvalue().encode("base64").replace("\n", "")
    img_tag = '<img value="' + key + '" src="data:image/png;base64,{0}">'.format(contents)
    output.close()
    return img_tag
def test_floodfill(self):
    # Arrange
    im = Image.new("RGB", (W, H))
    draw = ImageDraw.Draw(im)
    draw.rectangle(BBOX2, outline="yellow", fill="green")
    centre_point = (int(W/2), int(H/2))
    red = ImageColor.getrgb("red")
    im_floodfill = Image.open("Tests/images/imagedraw_floodfill.png")

    # Act
    ImageDraw.floodfill(im, centre_point, red)

    # Assert
    self.assert_image_equal(im, im_floodfill)

    # Test that using the same colour does not change the image
    ImageDraw.floodfill(im, centre_point, red)
    self.assert_image_equal(im, im_floodfill)

    # Test that filling outside the image does not change the image
    ImageDraw.floodfill(im, (W, H), red)
    self.assert_image_equal(im, im_floodfill)
def do_lpr_reco(input_path, output_path): """Send images to test their recognition.""" print("") print("Beginning Vehicle Recognition") print("") # Create the output directory. if not os.path.exists(output_path): os.mkdir(output_path) # Submit all images in the input directory for recognition. while True: camera.capture( '/home/pi/Desktop/sighthound-lpr-cloud-examples/code-samples/python/image.jpg' ) input_file = 'image.jpg' verbose(" Submitting test image ") #################################################################### ## ## This is the Vehicle Recognition Cloud API interface ## #################################################################### base64_image = base64.b64encode(open(input_file, "rb").read()).decode("utf-8") params = json.dumps({"image": base64_image}) ## Access the API with a "POST" request that includes the encoded image: response = json.loads(send_request("POST", _lpr_url_path, params)) #################################################################### ## From here on, we are pulling the data from the response #################################################################### if drawing_allowed: # Prepare to draw on the image image = Image.open(input_file) font = ImageFont.load_default draw = ImageDraw.Draw(image) vehicles = response[ 'objects'] # Each object is a vehicle found in the image print("Image file: " + input_file + " - Number of vehicles found: " + str(len(vehicles))) for vehicle in vehicles: annotation = vehicle['vehicleAnnotation'] if drawing_allowed: # Retrieve and draw a bounding box for the detected license plate. json_vertices = annotation['bounding']['vertices'] vert_list = [(point['x'], point['y']) for point in json_vertices] draw.polygon(vert_list) # Retrieve and draw make, model, color, and type with confidences. v_system = annotation['attributes']['system'] v_make_name = v_system['make']['name'] v_model_name = v_system['model']['name'] v_color_name = v_system['color']['name'] v_type = v_system['vehicleType'] v_make_confidence = v_system['make']['confidence'] v_model_confidence = v_system['model']['confidence'] v_color_confidence = v_system['color']['confidence'] v_text = " %s: %s (%s) %s (%s) %s (%s)" % ( v_type, v_make_name, v_make_confidence, v_model_name, v_model_confidence, v_color_name, v_color_confidence) if drawing_allowed: image_text = " %s\n %s (%s)\n %s (%s)\n %s (%s)" % ( v_type, v_make_name, v_make_confidence, v_model_name, v_model_confidence, v_color_name, v_color_confidence) draw.text(vert_list[0], image_text) try: licence_plate = annotation['licenseplate'] except: print(v_text + " No License Plate Found") else: if drawing_allowed: # Retrieve and draw a bounding box for the detected license plate. json_vertices = licence_plate['bounding']['vertices'] vert_list = [(point['x'], point['y']) for point in json_vertices] draw.polygon(vert_list) # Retrieve and draw the region and string with confidences of the recongition. lp_region = licence_plate['attributes']['system']['region'] lp_string = licence_plate['attributes']['system']['string'] lp_region_name = lp_region['name'] lp_string_name = lp_string['name'] lp_region_confidence = lp_region['confidence'] lp_string_confidence = lp_string['confidence'] lp_text = " %s (%s) %s (%s)" % ( lp_region_name, lp_region_confidence, lp_string_name, lp_string_confidence) if drawing_allowed: draw.text(vert_list[0], lp_text) print(v_text + " License Plate Found:" + lp_text) print("[INFO] sending txt message...") tn.send("Car! 
Make: " + v_make_name + ", Model: " + v_model_name + ", Color: " + v_color_name + ", Type: " + v_type + ", Region: " + lp_region_name + ", Plate Number: " + lp_string_name) print("[INFO] txt message sent") if lp_string_name == "*******": aio.send('car', 1) tn.send("Car verified. Mom.") else: aio.send('car', 0) #tn.send("WARNING etc. Car unverified.") if lp_string_name == "*******": aio.send('car', 1) tn.send("Car verified. Dad.") else: aio.send('car', 0) #tn.send("WARNING etc. Car unverified.") if drawing_allowed: output_file_path = os.path.join(output_path, input_file) print("") print("Writing annotized image to: " + output_file_path) image.save(output_file_path) print("") verbose("LPR Reco Complete\n")
def make_icard(): label_reset() assure_path_exists('images\sources\icard/') assure_path_exists('images\sources/') assure_path_exists('images/') assure_path_exists('Details/') Id = tsd.askstring('ENTER ID NUMBER', 'Enter the ID number to generate I-Card') window2.grab_set() print(Id) if (Id == None): return exists1 = os.path.isfile("Details\Details.csv") if exists1: pass else: message1.configure(text='Details are missing, please check Details folder !', bg='red') with open("Details\Details.csv", 'r') as csvFile2: reader1 = csv.reader(csvFile2) for lines in reader1: if (str(lines[2]) == Id): q=1 global name name = lines[4] id = lines[2] phone = lines[10] dob = lines[6] blood = lines[8] mail = lines[12] try: cap = cv2.VideoCapture(0) except: try: cap = cv2.VideoCapture(1) except: message1.configure(text='No camera was found !', bg='red') while 1: ret, fm = cap.read() cv2.imshow('Photo for I-Card',fm) if cv2.waitKey(1) & 0xFF == ord('s'): fm = cv2.resize(fm, (0, 0), fx=0.8, fy=0.8) fm = fm[50:330,150:360] fm = cv2.resize(fm, (0, 0), fx=0.8, fy=0.8) print(fm.shape) cv2.imwrite("images\ "+name+id+".png", fm) cv2.destroyAllWindows() cap.release() break if(q==0): message1.configure(text='ID not found in records !', bg='red') csvFile2.close() icard = Image.open("images\sources\idcard.png") base = Image.new('RGB', (188, 242), (255, 255, 255)) base2 = Image.new('RGB', (400, 41), (255, 255, 255)) icard.paste(base2, (5, 365)) photo = Image.open("images\ "+name+id+".png") draw = ImageDraw.Draw(icard) font = ImageFont.truetype('roboto\Roboto-Bold.ttf', size=31) fontd = ImageFont.truetype('roboto\Roboto-Bold.ttf', size=20) draw.text((30, 365), text=name.upper(), fill='orange', font=font) draw.text((60, 645), text='INSTITUTION ADDRESS', fill='white', font=font) draw.text((20, 20), text='INSTITUTION NAME & LOGO', fill='white', font=font) draw.text((30, 425), text='ID : '+id, fill='black', font=fontd) draw.text((30, 465), text='Contact no. : '+phone, fill='black', font=fontd) draw.text((30, 505), text='Blood Group : '+blood, fill='black', font=fontd) draw.text((30, 545), text='D.O.B. : '+dob, fill='black', font=fontd) draw.text((30, 585), text=mail, fill='red', font=fontd) qr = qrcode.make(str(name)+str(id)) qr.save("images\sources\qr"+id+".png") img = cv2.imread("images\sources\qr"+id+".png") img = cv2.resize(img, (0, 0), fx=0.4, fy=0.4) cv2.imwrite("images\sources\qr"+id+".png", img) qri = Image.open("images\sources\qr"+id+".png") icard.paste(qri, (292, 155)) icard.paste(base, (40, 101)) icard.paste(photo, (50, 111)) icard.save('images\icard\ '+name+id+'.png') Image._show('images\icard\ '+name+id+'.png')
def test_draw():
    font = ImageFont.load(tempname)
    image = Image.new("L", font.getsize(message), "white")
    draw = ImageDraw.Draw(image)
    draw.text((0, 0), message, font=font)
def predict(ori, dest): avi = dest.split('.')[0] + '.avi' threshold = 0.24 #load video cap = cv2.VideoCapture(ori) fps = cap.get(cv2.cv.CV_CAP_PROP_FPS) width = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)) height = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)) print fps #load face detector net1, names = libpysunergy.load('data/face.data', 'cfg/yolo-face.cfg', 'weights/yolo-face.weights') #load AGE models net2, names2 = libpysunergy.load("data/age.data", "cfg/age.cfg", "weights/age.weights") net3, names3 = libpysunergy.load("data/gender.data", "cfg/gender.cfg", "weights/gender.weights") net4, names4 = libpysunergy.load("data/race.data", "cfg/race.cfg", "weights/race.weights") top = 1 # Define the codec and create VideoWriter object fourcc = cv2.cv.CV_FOURCC('M', 'J', 'P', 'G') videoWriter = cv2.VideoWriter(avi, fourcc, fps, (width, height)) font = ImageFont.truetype("Roboto-Regular.ttf", 20) count = 1 #face detection while (1): print count count += 1 ret, frame = cap.read() if not ret: break cv2_im = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) pil_im = Image.fromarray(cv2_im) draw = ImageDraw.Draw(pil_im) if ret == True: (h, w, c) = frame.shape dets = libpysunergy.detect(frame.data, w, h, c, threshold, net1, names) #crop face and predict AGE for i in range(0, len(dets)): if dets[i][0] == 'face': box = dets[i][2:6] x0 = int(box[0]) x1 = int(box[1]) y0 = int(box[2]) y1 = int(box[3]) faceimg = frame[y0:y1, x0:x1].copy() (h, w, c) = faceimg.shape #draw bounding box draw.rectangle( ((x0, y0), (x1, y1)), outline="red", ) draw.rectangle( ((x0 + 1, y0 + 1), (x1 - 1, y1 - 1)), outline="red", ) dets2 = libpysunergy.predict(faceimg.data, w, h, c, top, net2, names2) age = dets2[0][0] dets3 = libpysunergy.predict(faceimg.data, w, h, c, top, net3, names3) gender = dets3[0][0] dets4 = libpysunergy.predict(faceimg.data, w, h, c, top, net4, names4) race = dets4[0][0] #write classification draw.text((x0, y0 - 60), 'Age: ' + age, (255, 0, 0), font=font) draw.text((x0, y0 - 40), 'Gender: ' + gender, (255, 0, 0), font=font) draw.text((x0, y0 - 20), 'Race: ' + race, (255, 0, 0), font=font) pil_im = cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGB2BGR) videoWriter.write(pil_im) else: break cap.release() libpysunergy.free(net1) libpysunergy.free(net2) libpysunergy.free(net3) libpysunergy.free(net4) #convert avi video to mp4 print('converting avi video to mp4 ...') cm.getstatusoutput('ffmpeg -i ' + avi + ' -c:v libx264 -crf ' + str(int(fps)) + ' -preset slow -c:a libfdk_aac -b:a 192k -ac 2 ' + dest) cm.getstatusoutput('rm ' + avi)
def draw_triangle_two(img: Image, triangle: List[Tuple], color: tuple) -> Image:
    drw = ImageDraw.Draw(img, 'RGBA')
    drw.polygon(triangle, color)
    return img
def _draw_fill(self, image, d, key, percentage):
    color = self._color(percentage)
    for current_key, (cx, cy) in d:
        if current_key == key:
            ImageDraw.floodfill(image, (cx, cy), color)
from PIL import Image
from PIL import ImageDraw

# First, get the image you want on the left side
im1 = Image.open('/Users/samueldominguez/Downloads/pct.jpg')

# Next, create a new blank white image to write over.
im2 = Image.new('RGB', (im1.width, im1.height), color='white')

# This part is to write words on the blank white image
d = ImageDraw.Draw(im2)
d.text((10, 100), "Hello World", fill=(0, 0, 0))

# Now we create a new image which will be a composite of the two images above
newImage = Image.new('RGB', (im1.width + im2.width, im2.height))
# as you can see, we left enough width to paste both images side by side

# now paste the two images side by side
newImage.paste(im1, (0, 0))
newImage.paste(im2, (im1.width, 0))

# finally, save the new image that is a composite of both images
newImage.save('/Users/samueldominguez/Downloads/newImage.jpg')
def clip_image_to_shape(source_image_path, output_image_path): def image_to_array(pil_array): """ Converts a Python Imaging Library (PIL) array to a gdalnumeric image. """ gdal_numeric_array = gdalnumeric.fromstring(pil_array.tobytes(), 'b') gdal_numeric_array.shape = pil_array.im.size[1], pil_array.im.size[0] return gdal_numeric_array def split_path(source_path): file_dir = os.path.split(source_path)[0] file_name = os.path.split(source_path)[1] file_extension = os.path.splitext(file_name)[1] file_name = os.path.splitext(file_name)[0] print(file_name) return file_dir, file_name, file_extension def get_geometry_extent(polygons): xs = [] ys = [] for polygon in polygons: for point in polygon: xs.append(point[0]) ys.append(point[1]) min_x = min(xs) max_x = max(xs) min_y = min(ys) max_y = max(ys) # min_x = min(points, key=lambda x: x[0])[0] # max_x = max(points, key=lambda x: x[0])[0] # min_y = min(points, key=lambda x: x[1])[1] # max_y = max(points, key=lambda x: x[1])[1] return min_x, max_x, min_y, max_y def world_to_pixel(geotransform_matrix, x, y): """ Uses a gdal geomatrix (gdal.GetGeoTransform()) to calculate the pixel location of a geospatial coordinate """ min_x = geotransform_matrix[0] max_y = geotransform_matrix[3] pixel_size_x = geotransform_matrix[1] pixel_size_y = geotransform_matrix[5] # rtnX = geotransform_matrix[2] # rtnY = geotransform_matrix[4] column = int((x - min_x) / pixel_size_x) row = int((y - max_y) / pixel_size_y) return column, row source_file_dir, source_file_name, source_file_extension = split_path(source_image_path) # Output file geographic projection wkt_projection = 'GEOGCS["WGS 84",' \ 'DATUM["WGS_1984",' \ 'SPHEROID["WGS 84",6378137,298.257223563,' \ 'AUTHORITY["EPSG","7030"]],' \ 'AUTHORITY["EPSG","6326"]],' \ 'PRIMEM["Greenwich",0,' \ 'AUTHORITY["EPSG","8901"]],' \ 'UNIT["degree",0.01745329251994328,' \ 'AUTHORITY["EPSG","9122"]],' \ 'AUTHORITY["EPSG","4326"]]' if source_file_extension == '.tif' or source_file_extension == '.tiff': source_image = gdal.Open(source_image_path) # (x, y) coordinates refer are geographical coordinates (latitude and longtitude) # (i, j) and (cols, rows) are pixel coordinates # Read coordinates of ground control points (GCPs) and calculate their extent (min and max x, y values) gcps = source_image.GetGCPs() gcp_x = [] gcp_y = [] for a, val in enumerate(gcps): gcp_x.append(gcps[a].GCPX) gcp_y.append(gcps[a].GCPY) min_source_x = min(gcp_x) max_source_x = max(gcp_x) min_source_y = min(gcp_y) max_source_y = max(gcp_y) # A warped virtual raster (middle_raster) needs to be created # because the source_raster has no geographical projection. # That's why it's being reprojected from None to wkt_projection (None to WGS84). 
error_threshold = 0.125 resampling = gdal.GRA_NearestNeighbour middle_image = gdal.AutoCreateWarpedVRT(source_image, None, wkt_projection, resampling, error_threshold) source_image = None # Calculate the GeoTransform matrix for the input image # geotransform[0] top left x, minimal x value # geotransform[1] pixel width, pixel size in x dimension # geotransform[2] 0 # geotransform[3] top left y, maximal y value # geotransform[4] 0 # geotransform[5] pixel height, pixel size in y dimension, should be negative source_cols = middle_image.RasterXSize source_rows = middle_image.RasterYSize geotransform = [min_source_x, (max_source_x - min_source_x) / source_cols, 0, max_source_y, 0, (max_source_y - min_source_y) / source_rows * (-1)] # Calculate the x, y coordinates for a lower right corner of the source_image pixel_size_source_x = geotransform[1] pixel_size_source_y = geotransform[5] max_source_x = min_source_x + (source_cols * pixel_size_source_x) min_source_y = max_source_y + (source_rows * pixel_size_source_y) # Create a polygon equal to extent of the source_image # POLYGON((x1 y1, x2 y2, x3 y3, x4 y4, x1 y1)) image_wkt = 'POLYGON ((' \ + str(min_source_x) + ' ' \ + str(max_source_y) + ',' \ + str(max_source_x) + ' ' \ + str(max_source_y) + ',' \ + str(max_source_x) + ' ' \ + str(min_source_y) + ',' \ + str(min_source_x) + ' ' \ + str(min_source_y) + ',' \ + str(min_source_x) + ' ' \ + str(max_source_y) + '))' source_geometry = ogr.CreateGeometryFromWkt(image_wkt) # Load a *.shp file and read the single feature containing border shapefile = ogr.Open(SHP_PATH) shapefile_layer = shapefile.GetLayer("PL") shapefile_polygon = shapefile_layer.GetNextFeature() border_geometry = shapefile_polygon.GetGeometryRef() # Calculate the spatial intersection of the source_image and the border shapefile # It's a shape of the output image output_geometry = border_geometry.Intersection(source_geometry) output_geometry_type = output_geometry.GetGeometryType() output_geometry_geom_count = output_geometry.GetGeometryCount() # GetGeometryType() == 2: LINEARRING # GetGeometryType() == 3: POLYGON # GetGeometryType() == 6: MULTIPOLYGON # Create a list of (x,y) pairs of output_geometry coordinates polygons = [] if output_geometry_type == 3: pts = output_geometry.GetGeometryRef(0) polygon = [] for point in range(pts.GetPointCount()): polygon.append((pts.GetX(point), pts.GetY(point))) polygons.append(polygon) elif output_geometry_type == 6: for geom in range(output_geometry_geom_count): pts = output_geometry.GetGeometryRef(geom) pts = pts.GetGeometryRef(0) polygon = [] for p in range(pts.GetPointCount()): polygon.append((pts.GetX(p), pts.GetY(p))) polygons.append(polygon) # Calculate the pixel extent of the output_geometry polygon min_output_x, max_output_x, min_output_y, max_output_y = get_geometry_extent(polygons) min_output_i, max_output_j = world_to_pixel(geotransform, min_output_x, max_output_y) max_output_i, min_output_j = world_to_pixel(geotransform, max_output_x, min_output_y) # If calculated extent is outside of the source_image array it has to be clipped if min_output_i < 0: min_output_i = 0 if max_output_j < 0: max_output_j = 0 if max_output_i > source_cols: max_output_i = source_cols if min_output_j > source_rows: min_output_j = source_rows # Width and height of the output_raster in pixels output_cols = int(max_output_i - min_output_i) output_rows = int(min_output_j - max_output_j) # Read the middle image as array and select pixels within calculated range middle_array = 
np.array(middle_image.GetRasterBand(1).ReadAsArray()) clip = middle_array[max_output_j:min_output_j, min_output_i:max_output_i] # Calculate the GeoTransform matrix for the output image, it has a different x and y origin output_geotransform = geotransform output_geotransform[0] = min_output_x output_geotransform[3] = max_output_y # Convert (x,y) pairs of output_geometry coordinates to pixel coordinates polygons_pixel = [] for polygon in polygons: polygon_pixel = [] for point in polygon: polygon_pixel.append(world_to_pixel(output_geotransform, point[0], point[1])) polygons_pixel.append(polygon_pixel) # Create a new PIL image and rasterize the clipping shape polygon_raster = Image.new("L", (output_cols, output_rows), 1) rasterize = ImageDraw.Draw(polygon_raster) for polygon in polygons_pixel: rasterize.polygon(polygon, 0) mask = image_to_array(polygon_raster) clip = gdalnumeric.choose(mask, (clip, 0)).astype(gdalnumeric.uint16) # Create the output file driver = gdal.GetDriverByName('GTiff') # ! # proj = middle_image.GetProjection() output_image = driver.Create(output_image_path, output_cols, output_rows, 1, gdal.GDT_UInt16) output_image.GetRasterBand(1).WriteArray(clip) output_image.SetGeoTransform(output_geotransform) # ! output_image.SetProjection(wkt_projection) output_image.FlushCache() output_image = None elif source_file_extension == '.jp2': source_image = gdal.Open(source_image_path) # TODO: output_image should be in UTM projection gdal.Warp(os.path.join(source_file_dir, source_file_name + '_WGS84' + source_file_extension), source_image, dstSRS='EPSG:4326') source_image = gdal.Open(os.path.join(source_file_dir, source_file_name + '_WGS84' + source_file_extension)) source_array = gdalnumeric.LoadFile(os.path.join(source_file_dir, source_file_name + '_WGS84' + source_file_extension)) geotransform = source_image.GetGeoTransform() min_source_x = geotransform[0] max_source_y = geotransform[3] source_cols = source_image.RasterXSize source_rows = source_image.RasterYSize pixel_size_source_x = geotransform[1] pixel_size_source_y = geotransform[5] max_source_x = min_source_x + (source_cols * pixel_size_source_x) min_source_y = max_source_y + (source_rows * pixel_size_source_y) image_wkt = 'POLYGON ((' \ + str(min_source_x) + ' ' \ + str(max_source_y) + ',' \ + str(max_source_x) + ' ' \ + str(max_source_y) + ',' \ + str(max_source_x) + ' ' \ + str(min_source_y) + ',' \ + str(min_source_x) + ' ' \ + str(min_source_y) + ',' \ + str(min_source_x) + ' ' \ + str(max_source_y) + '))' source_geometry = ogr.CreateGeometryFromWkt(image_wkt) shapefile = ogr.Open(SHP_PATH) shapefile_layer = shapefile.GetLayer("PL") shapefile_polygon = shapefile_layer.GetNextFeature() border_geometry = shapefile_polygon.GetGeometryRef() output_geometry = border_geometry.Intersection(source_geometry) output_geometry_type = output_geometry.GetGeometryType() output_geometry_geom_count = output_geometry.GetGeometryCount() # GetGeometryType() == 2: LINEARRING # GetGeometryType() == 3: POLYGON # GetGeometryType() == 6: MULTIPOLYGON polygons = [] if output_geometry_type == 3: pts = output_geometry.GetGeometryRef(0) polygon = [] for point in range(pts.GetPointCount()): polygon.append((pts.GetX(point), pts.GetY(point))) polygons.append(polygon) elif output_geometry_type == 6: for geom in range(output_geometry_geom_count): pts = output_geometry.GetGeometryRef(geom) pts = pts.GetGeometryRef(0) polygon = [] for p in range(pts.GetPointCount()): polygon.append((pts.GetX(p), pts.GetY(p))) polygons.append(polygon) min_output_x, 
max_output_x, min_output_y, max_output_y = get_geometry_extent(polygons) min_ouput_i, max_output_j = world_to_pixel(geotransform, min_output_x, max_output_y) max_output_i, min_output_j = world_to_pixel(geotransform, max_output_x, min_output_y) if min_ouput_i < 0: min_ouput_i = 0 if max_output_j < 0: max_output_j = 0 if max_output_i > source_cols: max_output_i = source_cols if min_output_j > source_rows: min_output_j = source_rows output_cols = int(max_output_i - min_ouput_i) output_rows = int(min_output_j - max_output_j) clip = source_array[max_output_j:min_output_j, min_ouput_i:max_output_i] output_geotransform = list(geotransform) output_geotransform[0] = min_output_x output_geotransform[3] = max_output_y polygons_pixel = [] for polygon in polygons: polygon_pixel = [] for point in polygon: polygon_pixel.append(world_to_pixel(output_geotransform, point[0], point[1])) polygons_pixel.append(polygon_pixel) polygon_raster = Image.new("L", (output_cols, output_rows), 1) rasterize = ImageDraw.Draw(polygon_raster) for polygon in polygons_pixel: rasterize.polygon(polygon, 0) mask = image_to_array(polygon_raster) clip = gdalnumeric.choose(mask, (clip, 0)).astype(gdalnumeric.uint16) driver = gdal.GetDriverByName('GTiff') output_image = driver.Create(output_image_path, output_cols, output_rows, 1, gdal.GDT_UInt16) output_image.GetRasterBand(1).WriteArray(clip) proj = source_image.GetProjection() output_image.SetGeoTransform(output_geotransform) output_image.SetProjection(proj) output_image.FlushCache() output_image = None else: print('unknown file format')
font_en = ImageFont.truetype('fonts/simsun.ttc', fontsize_en) width_en, height_en = font_en.getsize("W") font_cn = ImageFont.truetype('fonts/simsun.ttc', fontsize_cn) width_cn, height_cn = font_cn.getsize(ch_u) top_margin = -2 height_spacing_en = 3 height_spacing_cn = 3 height_en = height_en + height_spacing_en height_cn = height_cn + height_spacing_cn print "fontsize: %d" % (fontsize_en) print "font en: %dx%d" % (width_en, height_en) print "font cn: %dx%d" % (width_cn, height_cn) im_en = Image.new('P', (width_en, height_en * len(s)), 0) text = ImageDraw.Draw(im_en) #print "painting ascii" size = "" for i in range(len(s)): text.text((0, i * height_en + top_margin), s[i], 255, font_en) im_en.save("data_en_%dx%d.png" % (width_en, height_en)) print "generating png file data_en_%dx%d.png" % (width_en, height_en) #print "painting GB2312" im_cn = Image.new('P', (width_cn, height_cn * count), 0) text = ImageDraw.Draw(im_cn) for i in range(0, count): ch = c[i * 2:i * 2 + 2].decode("gb2312") im_text = Image.new('P', (width_cn, height_cn), 0) text = ImageDraw.Draw(im_text) text.text((0, top_margin), ch, 255, font_cn) im_cn.paste(im_text, (0, i * height_cn))
drawGrid = True if placement: delta = 26 assert (delta >= 5) p = dr.Placement(D, delta) if p is None: exit() # creating new Image object img = Image.new("RGB", (w, h)) img1 = ImageDraw.Draw(img) for d in D: cx, cy, R = d shape = [(cx - R, h - (cy - R)), (cx + R, h - (cy + R))] img1.rectangle(shape, fill=(255, 0, 0), outline="red") # (0,0) img1.rectangle([(-1, h - (-1)), (1, h - (1))], fill=(0, 0, 255)) img1.rectangle([(delta * 0 - 1, h - (delta * 1 - 1)), (delta * 0 + 1, h - (delta * 1 + 1))], fill=(0, 255, 0)) img1.rectangle([(delta * 1 - 1, h - (delta * 0 - 1)),
image = Image.open( 'christine-siracusa-vzX2rgUbQXM-unsplash.jpg' ) # this where you put the picture you want to to resize the image image.thumbnail( (800, 800)) # k puting image.thumball an double (())resixing image to 800 800 image.save( 'christine_thumbnail.jpeg') # saves the image as christine_thumbnail.jpeg font = ImageFont.truetype( 'DejaVuSans (1).ttf', 40 ) # i downdloaded DejaVusans from the website and insert it and use font= imagefont.true type to and 40 text draw the test around the taco image # image font type, 40 to resize my picture u can use any number i just choose your font size draw = ImageDraw.Draw( image ) # this will draw the image if u don't have it. it won't. draw= imageDraw.Draw(image) # image font tyme, 40 draw.text( [10, 475], ' Random Taco Cookbook', fill='red', font=font ) # drawing text and putting 10 47 Rondom taco cookbook and it will appear on the picture. image.show() image.save( 'christine-siracusa-vzX2rgUbQXM-unsplash.jpg' ) # using .save('saveing my picture) this allows you to save your picture after u finish
im3show = to8(im3).astype('uint8') # Get the min and max values for each channel. bmin, bmax, gmin, gmax, rmin, rmax = (np.min(im1show), np.max(im1show), np.min(im2show), np.max(im2show), np.min(im3show), np.max(im3show)) # Rescale the intensities im1show = exposure.rescale_intensity(im1show) im2show = exposure.rescale_intensity(im2show) im3show = exposure.rescale_intensity(im3show) # Merge as RGB. #rgb = Image.fromarray(np.dstack((im1, im2, im3)), 'RGB') rgb = Image.fromarray(np.dstack((im1show, im2show, im3show)), 'RGB') # Label the image with the original min / max values for reference. outImage = ImageDraw.Draw(rgb) font = ImageFont.truetype('arial.ttf', 36) outImage.text((5,0), dir + '-' + day + '-' + num, (255, 255, 255), font = font) outImage.text((5,38), str(bmin) + '-' + str(bmax), (100, 100, 255), font = font) outImage.text((5,76), str(gmin) + '-' + str(gmax), (0, 255, 0), font = font) outImage.text((5,114), str(rmin) + '-' + str(rmax), (255, 0, 0), font = font) # Save as merge. rgb.save(mergeDir + '\\' + day + '-' + num + '.jpg', format='JPEG') # Before proceeding with this, need to know how to level the images. # Min from IgG. Max from max of true condition. ############ # Get the list of unique site names. #wellNames = list(set([x[:-7] for x in imageNames]))
""" Created on Oct 13, 2017 Used to answer this question: https://stackoverflow.com/a/46736330/1469465 @author: physicalattraction """ from PIL import ImageDraw from utils import save_img, open_img, print_pil_version_info if __name__ == '__main__': image = open_img('star_transparent.png') width, height = image.size center = (int(0.5 * width), int(0.5 * height)) yellow = (255, 255, 0, 255) ImageDraw.floodfill(image, xy=center, value=yellow) save_img(image, 'star_yellow.png') print_pil_version_info()
def draw_mtg_card(card, nstep): isAftermath = False if card.name.find(" // ") != -1: namesplit = card.name.replace(" // ", "/") lookupScan = scraper.download_scan(namesplit, card.set, card.collector_num) if card.name in aftermath: isAftermath = True else: lookupScan = scraper.download_scan(card.name, card.set, card.collector_num) img = Image.open(lookupScan) width, height = img.size img = img.resize((width * QUALITY_MULTIPLIER, height * QUALITY_MULTIPLIER), FILTER) if (card.name.find(" // ") != -1) and (isAftermath == False): img = img.rotate(ROTATE_LEFT) #check if im has Alpha band... if img.mode != 'RGBA': img = img.convert('RGBA') #resize the gradient to the size of im... alpha = gradient.resize(img.size, FILTER) #put alpha in the alpha band of im... img.putalpha(alpha) bkgd = Image.new("RGB", img.size, "black") bkgd.paste(img, (0, 0), mask=img) if isAftermath == True: cut = bkgd.crop((X_TOP + MTG_BACKGROUND_X_TOP_OFFSET, Y_TOP + MTG_BACKGROUND_Y_OFFSET_AFTERMATH, X_BOTTOM, Y_BOTTOM + MTG_BACKGROUND_Y_OFFSET_AFTERMATH)) else: cut = bkgd.crop((X_TOP + MTG_BACKGROUND_X_TOP_OFFSET, Y_TOP + MTG_BACKGROUND_Y_OFFSET, X_BOTTOM, Y_BOTTOM + MTG_BACKGROUND_Y_OFFSET)) draw = ImageDraw.Draw(cut) text = str(card.quantity) + ' ' + card.name #create text outline draw.text((TEXT_LEFT - QUALITY_MULTIPLIER, TEXT_TOP - QUALITY_MULTIPLIER), text, BLACK, font=fnt) draw.text((TEXT_LEFT + QUALITY_MULTIPLIER, TEXT_TOP - QUALITY_MULTIPLIER), text, BLACK, font=fnt) draw.text((TEXT_LEFT - QUALITY_MULTIPLIER, TEXT_TOP + QUALITY_MULTIPLIER), text, BLACK, font=fnt) draw.text((TEXT_LEFT + QUALITY_MULTIPLIER, TEXT_TOP + QUALITY_MULTIPLIER), text, BLACK, font=fnt) #enter text draw.text((TEXT_LEFT, TEXT_TOP), text, NEARLY_WHITE, font=fnt) cmc, adjustcmc = GenerateCMC(card.name, card.cost) #place the cropped picture of the current card deck.paste(cut, (0, INNER_ENTRY_HEIGHT * nstep)) #for scrolling decklist tmpwidth, tmpheight = cut.size cut2 = cut.crop((0, 0, tmpwidth - SCROLLING_DECK_WIDTH_ADJUSTMENT, tmpheight)) deck2.paste(cut2, (SCROLLING_DECK_WIDTH * nstep, 0)) #adjust cmc size to reflex manacost greater than 9 if adjustcmc: deck.paste(cmc, (DECK_WIDTH - INNER_MTG_MANA_COST_IMAGE_SIZE * len(card.cost), MTG_CMC_OFFSET_TOP + INNER_ENTRY_HEIGHT * nstep), mask=cmc) #for scrolling decklist deck2.paste(cmc, (SCROLLING_DECK_WIDTH * (nstep + 1) - INNER_MTG_MANA_COST_IMAGE_SIZE * len(card.cost), MTG_CMC_OFFSET_TOP), mask=cmc) adjustcmc = False else: deck.paste(cmc, (DECK_WIDTH - INNER_MTG_MANA_COST_IMAGE_SIZE * (len(card.cost) + 1), MTG_CMC_OFFSET_TOP + INNER_ENTRY_HEIGHT * nstep), mask=cmc) #for scrolling decklist deck2.paste(cmc, (SCROLLING_DECK_WIDTH * (nstep + 1) - INNER_MTG_MANA_COST_IMAGE_SIZE * (len(card.cost) + 1), MTG_CMC_OFFSET_TOP), mask=cmc)
def drawpixel(target: ImageDraw, x, y, size, color):
    target.rectangle((x * size, y * size, (x + 1) * size, (y + 1) * size), fill=color)
def main(filename): doSideboard = config.Get('options', 'display_sideboard') #open user input decklist raw_decklist = open(str(filename), 'r') deck_list = decklist.parse_list(raw_decklist) raw_decklist.close() print(repr(deck_list)) nstep = 1 # create a header with the deck's name global fnt if deck_list.game == decklist.MTG: fnt = ImageFont.truetype(os.path.join(globals.RESOURCES_PATH, 'fonts', config.Get('fonts', 'mtg')), MTG_FONT_SIZE) fnt_title = ImageFont.truetype(os.path.join(globals.RESOURCES_PATH, 'fonts', config.Get('fonts', 'mtg')), MTG_TITLE_FONT_SIZE) title = Image.new("RGB", (DECK_WIDTH, INNER_ENTRY_HEIGHT), "black") drawtitle = ImageDraw.Draw(title) drawtitle.text(MTG_TITLE_POSITION, os.path.basename(str(filename))[0:-4], NEARLY_WHITE, font=fnt_title) elif deck_list.game == decklist.POKEMON: fnt = ImageFont.truetype(os.path.join(globals.RESOURCES_PATH, 'fonts', config.Get('fonts', 'pkmn')), POKEMON_FONT_SIZE) fnt_title = ImageFont.truetype(os.path.join(globals.RESOURCES_PATH, 'fonts', config.Get('fonts', 'pkmn')), POKEMON_TITLE_FONT_SIZE) title = Image.new("RGB", (HEX_DECK_WIDTH, OUTER_ENTRY_HEIGHT), "black") drawtitle = ImageDraw.Draw(title) drawtitle.text(POKEMON_TITLE_POSITION, os.path.basename(str(filename))[0:-4], NEARLY_WHITE, font=fnt_title) elif deck_list.game == decklist.HEX: fnt = ImageFont.truetype(os.path.join(globals.RESOURCES_PATH, 'fonts', config.Get('fonts', 'hex')), HEX_FONT_SIZE) fnt_title = ImageFont.truetype(os.path.join(globals.RESOURCES_PATH, 'fonts', config.Get('fonts', 'hex')), HEX_TITLE_FONT_SIZE) title = Image.new("RGB", (HEX_MASTER_DECK_WIDTH, INNER_ENTRY_HEIGHT), "black") nametitle = str(filename)[0:-4] nshard = 0 for re_match in re.finditer(r'(\[[^\]]*\])', nametitle): shard = re_match.group(0) if nametitle.find(shard) != -1: nametitle = nametitle.replace(shard, '') newshard = Image.open(os.path.join(globals.RESOURCES_PATH, 'hexicons', shard + '.png')).resize((HEX_MANA_COST_IMAGE_SIZE, HEX_MANA_COST_IMAGE_SIZE), FILTER) title.paste(newshard, (TITLE_INDENT + HEX_MANA_COST_LEFT + nshard * HEX_MANA_COST_SIZE, HEX_MANA_COST_TOP)) nshard = nshard + 1 drawtitle = ImageDraw.Draw(title) drawtitle.text((TITLE_INDENT + HEX_TITLE_LEFT + nshard * HEX_MANA_COST_IMAGE_SIZE, HEX_TITLE_TOP), os.path.basename(nametitle), NEARLY_WHITE, font=fnt_title) ncountMB = len(deck_list.mainboard) ncountSB = len(deck_list.sideboard) ncount = ncountMB if ncountSB == 0: doSideboard = False if doSideboard: #create a Sideboard partition sideboard = Image.new("RGB", (DECK_WIDTH, INNER_ENTRY_HEIGHT), "black") drawtitle = ImageDraw.Draw(sideboard) sideboard_name = "Sideboard" if deck_list.game == decklist.HEX: sideboard_name = "Reserves" drawtitle.text(SIDEBOARD_TITLE_POSITION, sideboard_name, NEARLY_WHITE, font=fnt_title) ncount += ncountSB + 1 #define the size of the canvas, incl. 
space for the title header if deck_list.game == decklist.MTG: deckwidth = DECK_WIDTH deckheight = INNER_ENTRY_HEIGHT * (ncount + 1) #for scrolling decklist deckwidth2 = SCROLLING_DECK_WIDTH * (ncount + 1) deckheight2 = INNER_ENTRY_HEIGHT elif deck_list.game == decklist.POKEMON: deckwidth = HEX_DECK_WIDTH deckheight = OUTER_ENTRY_HEIGHT * (ncount + 1) deckwidth2 = SCROLLING_DECK_WIDTH * (ncount + 1) deckheight2 = INNER_ENTRY_HEIGHT elif deck_list.game == decklist.HEX: deckwidth = HEX_MASTER_DECK_WIDTH deckheight = OUTER_ENTRY_HEIGHT * (ncount + 1) deckwidth2 = SCROLLING_DECK_WIDTH * (ncount + 1) deckheight2 = INNER_ENTRY_HEIGHT #reset the sideboard marker isSideboard = 0 global deck deck = Image.new("RGB", (deckwidth, deckheight), "white") #for scrolling decklist global deck2 deck2 = Image.new("RGB", (deckwidth2, deckheight2), "white") deck.paste(title, (0, 0)) #for scrolling decklist title2 = title.crop((0, 0, SCROLLING_DECK_WIDTH, INNER_ENTRY_HEIGHT)) deck2.paste(title2, (0, 0)) #now read the decklist if deck_list.game == decklist.MTG: lands = [] for card in deck_list.mainboard: #this step checks whether a specific art is requested by the user - provided via the set name if card.cost == "*": lands.append(card) continue draw_mtg_card(card, nstep) nstep = nstep + 1 for card in lands: draw_mtg_card(card, nstep) nstep = nstep + 1 if doSideboard: deck.paste(sideboard, (0, INNER_ENTRY_HEIGHT * nstep)) #for scrolling decklist sideboard2 = sideboard.crop((0, 0, SCROLLING_DECK_WIDTH, INNER_ENTRY_HEIGHT)) deck2.paste(sideboard2, (SCROLLING_DECK_WIDTH * nstep, 0)) nstep = nstep + 1 for card in deck_list.sideboard: draw_mtg_card(card, nstep) nstep = nstep + 1 elif deck_list.game == decklist.POKEMON: for card in deck_list.mainboard: quantity = card.quantity lookupScan, displayname = scraper.download_scanPKMN(card.name, card.set, card.collector_num) img = Image.open(lookupScan) #check if im has Alpha band... if img.mode != 'RGBA': img = img.convert('RGBA') #resize the gradient to the size of im... alpha = gradient.resize(img.size, FILTER) #put alpha in the alpha band of im... 
img.putalpha(alpha) bkgd = Image.new("RGB", img.size, "black") bkgd.paste(img, (0, 0), mask=img) cut = bkgd.crop((X_TOP_POKEMON, Y_TOP_POKEMON + POKEMON_BACKGROUND_OFFSET_Y_TOP, X_BOTTOM_POKEMON - POKEMON_BACKGROUND_OFFSET_X_BOTTOM, Y_BOTTOM_POKEMON + POKEMON_BACKGROUND_OFFSET_Y_BOTTOM)) cut = cut.resize((deckwidth, INNER_ENTRY_HEIGHT), FILTER) draw = ImageDraw.Draw(cut) #create text outline text = str(quantity) + ' ' + displayname draw.text((POKEMON_TEXT_LEFT - QUALITY_MULTIPLIER, POKEMON_TEXT_TOP - QUALITY_MULTIPLIER), text, BLACK, font=fnt) draw.text((POKEMON_TEXT_LEFT + QUALITY_MULTIPLIER, POKEMON_TEXT_TOP - QUALITY_MULTIPLIER), text, BLACK, font=fnt) draw.text((POKEMON_TEXT_LEFT - QUALITY_MULTIPLIER, POKEMON_TEXT_TOP + QUALITY_MULTIPLIER), text, BLACK, font=fnt) draw.text((POKEMON_TEXT_LEFT + QUALITY_MULTIPLIER, POKEMON_TEXT_TOP + QUALITY_MULTIPLIER), text, BLACK, font=fnt) #enter text draw.text((POKEMON_TEXT_LEFT, POKEMON_TEXT_TOP), text, NEARLY_WHITE, font=fnt) #place the cropped picture of the current card deck.paste(cut, (0, OUTER_ENTRY_HEIGHT * nstep)) nstep = nstep + 1 elif deck_list.game == decklist.HEX: banner = Image.new("RGB", (deckheight - OUTER_ENTRY_HEIGHT, HEX_BANNER_TOP), "black") if len(deck_list.commander) > 0: cmdr = deck_list.commander[0] guid = cmdr.collector_num typeCM = cmdr.set drawbanner = ImageDraw.Draw(banner) drawbanner.text(HEX_BANNER_POSITION, str(cmdr.name), NEARLY_WHITE, font=fnt_title) lookupScan = scraper.download_scanHexCM(cmdr.name, guid, typeCM) mainguyImg = Image.open(lookupScan) mainguycut = mainguyImg.crop(HEX_MAINGUY_CROP) banner = banner.rotate(ROTATE_RIGHT, expand=True) #check if im has Alpha band... if mainguycut.mode != 'RGBA': mainguycut = mainguycut.convert('RGBA') #resize the gradient to the size of im... alpha = Hexgradient.resize(mainguycut.size, FILTER) #put alpha in the alpha band of im... mainguycut.putalpha(alpha) banner.paste(mainguycut, (0, 0), mask=mainguycut) deck.paste(banner, (0, OUTER_ENTRY_HEIGHT)) for card in deck_list.mainboard: draw_hex_card(card.name, card.collector_num, card.quantity, nstep) nstep = nstep + 1 if doSideboard: deck.paste(sideboard, (SIDEBOARD_LEFT, OUTER_ENTRY_HEIGHT * nstep)) nstep = nstep + 1 for card in deck_list.sideboard: draw_hex_card(card.name, card.collector_num, card.quantity, nstep) nstep = nstep + 1 if deck_list.game == decklist.MTG: deck = deck.crop((0, 0, deckwidth - MTG_WIDTH_CROP_RIGHT, deckheight)) deck2 = deck2.crop((0, 0, deckwidth2, deckheight2 - 2)) elif deck_list.game == decklist.POKEMON: deck = deck.crop((0, 0, deckwidth - POKEMON_WIDTH_CROP_RIGHT, OUTER_ENTRY_HEIGHT * nstep)) elif deck_list.game == decklist.HEX: deck = deck.crop((0, 0, deckwidth - HEX_WIDTH_CROP_RIGHT, deckheight)) output_path = str(filename)[0:-4] + ".png" if QUALITY_MULTIPLIER != 1: width, height = deck.size deck = deck.resize((int(width / QUALITY_MULTIPLIER), int(height / QUALITY_MULTIPLIER)), FILTER) deck.save(output_path) #for scrolling decklist output_path2 = str(filename)[0:-4] + "-scroll.png" deck2.save(output_path2) altpath = config.Get('options', 'output_path') if altpath is not None: deck.save(altpath) return output_path
for i in range(0, len(points)): for j in range(0, len(points)): if i == j: continue if abs(distance(points[i], points[j]) - d) < epsilon: counts[i] += 1 elif abs(distance(points[i], points[j]) - math.sqrt(2 * (d**2))) > epsilon: return False return counts == [2, 2, 2, 2] for i in range(0, 1000): im = Image.new("1", (1000, 1000)) draw = ImageDraw.Draw(im) not_found = True points = None while not_found: point1 = generate_random_point(50, 900, 50, 900) x_search = list(range(point1[0], 900)) random.shuffle(x_search) y_search = list(range(point1[1], 900)) random.shuffle(y_search) for x in x_search: for y in y_search: width = x - point1[0] height = y - point1[1] new_point = (x, y) points = [point1, new_point] if abs(width - height) < 10:
def draw_bounding_box_on_image(image, ymin, xmin, ymax, xmax, color='red', thickness=4, display_str_list=(), use_normalized_coordinates=True): """Adds a bounding box to an image. Bounding box coordinates can be specified in either absolute (pixel) or normalized coordinates by setting the use_normalized_coordinates argument. Each string in display_str_list is displayed on a separate line above the bounding box in black text on a rectangle filled with the input 'color'. If the top of the bounding box extends to the edge of the image, the strings are displayed below the bounding box. Args: image: a PIL.Image object. ymin: ymin of bounding box. xmin: xmin of bounding box. ymax: ymax of bounding box. xmax: xmax of bounding box. color: color to draw bounding box. Default is red. thickness: line thickness. Default value is 4. display_str_list: list of strings to display in box (each to be shown on its own line). use_normalized_coordinates: If True (default), treat coordinates ymin, xmin, ymax, xmax as relative to the image. Otherwise treat coordinates as absolute. """ draw = ImageDraw.Draw(image) im_width, im_height = image.size if use_normalized_coordinates: (left, right, top, bottom) = (xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height) else: (left, right, top, bottom) = (xmin, xmax, ymin, ymax) draw.line([(left, top), (left, bottom), (right, bottom), (right, top), (left, top)], width=thickness, fill=color) try: font = ImageFont.truetype('arial.ttf', 24) except IOError: font = ImageFont.load_default() # If the total height of the display strings added to the top of the bounding # box exceeds the top of the image, stack the strings below the bounding box # instead of above. display_str_heights = [font.getsize(ds)[1] for ds in display_str_list] # Each display_str has a top and bottom margin of 0.05x. total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights) if top > total_display_str_height: text_bottom = top else: text_bottom = bottom + total_display_str_height # Reverse list and print from bottom to top. for display_str in display_str_list[::-1]: text_width, text_height = font.getsize(display_str) margin = np.ceil(0.05 * text_height) draw.rectangle([(left, text_bottom - text_height - 2 * margin), (left + text_width, text_bottom)], fill=color) draw.text((left + margin, text_bottom - text_height - margin), display_str, fill='black', font=font) text_bottom -= text_height - 2 * margin
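# A brief usage sketch for draw_bounding_box_on_image above (my own illustration, not from
# the original source): draw one normalized box with two caption lines on a blank image.
# The output file name is hypothetical.
from PIL import Image

img = Image.new('RGB', (640, 480), (40, 40, 40))
draw_bounding_box_on_image(img, ymin=0.25, xmin=0.25, ymax=0.75, xmax=0.75,
                           color='lime', thickness=3,
                           display_str_list=('person: 87%', 'id 3'),
                           use_normalized_coordinates=True)
img.save('bbox_demo.png')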
def createImage(width, height):
    # Start from an all-black uint8 buffer; frombytes expects height * width * 3 raw bytes.
    imBytes = np.zeros((height, width, 3), dtype=np.uint8)
    im = Image.frombytes(size=(width, height), data=imBytes.tobytes(), mode="RGB")
    # Flood-fill from the corner to turn the whole canvas white.
    ImageDraw.floodfill(im, (0, 0), (255, 255, 255))
    return im
toplefty = min_loc[1] sizex = template_size[1] sizey = template_size[0] if (altconfidence > 99) or ((confidence > 97) and (altconfidence > 93)) or ((confidence > 95.7) and (altconfidence > 96.3)): print('The image of size', template_size, '(y,x) was found at', min_loc) subprocess.check_output([ 'bash', '-c', "bash /home/lab/bin/xdo2.sh " + str(topleftx + 15) + " " + str(toplefty + 15) ]) if (len(sys.argv) > 3): print('Marking', sys.argv[3], 'with a red rectangle') marked = Image.open(sys.argv[3]) draw = ImageDraw.Draw(marked) draw.line(((topleftx, toplefty), (topleftx + sizex, toplefty)), fill="red", width=2) draw.line(((topleftx + sizex, toplefty), (topleftx + sizex, toplefty + sizey)), fill="red", width=2) draw.line(((topleftx + sizex, toplefty + sizey), (topleftx, toplefty + sizey)), fill="red", width=2) draw.line(((topleftx, toplefty + sizey), (topleftx, toplefty)), fill="red", width=2) del draw
try: logging.info("epd7in5 Demo") epd = epd7in5.EPD() logging.info("init and Clear") epd.init() epd.Clear() font24 = ImageFont.truetype(os.path.join(picdir, "Font.ttc"), 24) font18 = ImageFont.truetype(os.path.join(picdir, "Font.ttc"), 18) # Drawing on the Horizontal image logging.info("1.Drawing on the Horizontal image...") Himage = Image.new("1", (epd.width, epd.height), 255) # 255: clear the frame draw = ImageDraw.Draw(Himage) draw.text((10, 0), "hello world", font=font24, fill=0) draw.text((10, 20), "7.5inch e-Paper", font=font24, fill=0) draw.text((150, 0), u"微雪电子", font=font24, fill=0) draw.line((20, 50, 70, 100), fill=0) draw.line((70, 50, 20, 100), fill=0) draw.rectangle((20, 50, 70, 100), outline=0) draw.line((165, 50, 165, 100), fill=0) draw.line((140, 75, 190, 75), fill=0) draw.arc((140, 50, 190, 100), 0, 360, fill=0) draw.rectangle((80, 50, 130, 100), fill=0) draw.chord((200, 50, 250, 100), 0, 360, fill=0) epd.display(epd.getbuffer(Himage)) time.sleep(2) # Drawing on the Vertical image
def replaceImgTone(im_input_rgb,letters,notes,box_locs,box_notes): max_pixVa=im_input_rgb.max() # "erase" the sharp/flat and octave-up/octave-down marks in the original im_input_rgb image num=box_notes.shape[0] box_notes=box_notes.astype(int) for i in range(0,num,1): box_white=box_notes[i,:] im_input_rgb[box_white[2]-2:box_white[3]+4,box_white[0]-2:box_white[1]+1,:]=max_pixVa num=len(letters) # "erase" the characters at the original note positions in im_input_rgb box_locs=box_locs.astype(int) for i in range(0, num, 1): top = box_locs[i, 2] bottom = box_locs[i, 3] right = box_locs[i, 1] left = box_locs[i, 0] box_ori = [left, right, top, bottom] im_input_rgb[box_ori[2] - 2: box_ori[3] + 2, box_ori[0] - 2: box_ori[1] + 2, :]=max_pixVa # median height of the box_locs boxes heightbox_median=np.median(box_locs[:,3]-box_locs[:,2]) # below: add the notes back at the box_locs positions Fontsize_Num=1.2*np.median(box_locs[:,3]-box_locs[:,2]) Fontsize_sub=Fontsize_Num*0.7 font_num = ImageFont.truetype("msyh.ttf", Fontsize_Num.astype(int)) font_sub= ImageFont.truetype("msyh.ttf", Fontsize_sub.astype(int)) # alternatives: arial.ttf, simhei.ttf # create a drawing image based on the original image Image_to_draw=Image.fromarray(im_input_rgb) d = ImageDraw.Draw(Image_to_draw) for i in range(0,num,1): x1=box_locs[i,0] y1=box_locs[i,2] x2=box_locs[i,1] y2=box_locs[i,3] #add nums moveup=(y2-y1)*0.1 if (y2-y1)<0.8*heightbox_median: d.text((x1,y1-moveup), str(letters[i]), font=font_sub, fill=(255, 0, 0, 255)) else: d.text((x1, y1-moveup), str(letters[i]), font=font_num, fill=(255, 0, 0, 255)) #add down_dot radiu_circle=(x2-x1)*0.15 # radius distance_down = (y2 - y1) * 1 for j in range(0, notes[i, 2].astype(int)): movedown = j * radiu_circle * 2.75 box_down_dot=((x2+x1)/2-radiu_circle,y2+distance_down-radiu_circle+movedown,(x1+x2)/2+radiu_circle,y2+distance_down+radiu_circle+movedown) d.ellipse(box_down_dot, fill=(255,0,0,255), outline=None) #add up_dot distance_up = radiu_circle * 2 for j in range(0,notes[i,1].astype(int)): moveup = j * radiu_circle * 2.75 box_up_dot=((x2+x1)/2-radiu_circle,y1-distance_up-radiu_circle-moveup,(x1+x2)/2+radiu_circle,y1-distance_up+radiu_circle-moveup) d.ellipse(box_up_dot, fill=(255, 0, 0, 255), outline=None) #add sharp if notes[i,0]>0: d.text((x1-0.4*(x2-x1), y1-0.2*(y2-y1)), '#', font=font_sub, fill=(255, 0, 1, 225)) return Image_to_draw
def generate_images(self): """ Creates images for promotion of the talk """ speaker = self.submission.speakers.all()[0] if speaker.avatar: avatar = get_thumbnail(speaker.avatar, '160x160', crop='center', quality=80) avatar = Image.open(avatar.storage.path(avatar.name)) elif speaker.get_gravatar: r = requests.get( "https://www.gravatar.com/avatar/" + speaker.gravatar_parameter, allow_redirects=True ) if r.status_code == 200: avatar = Image.open(BytesIO(r.content)) avatar = avatar.resize((160, 160), Image.ANTIALIAS) else: avatar = Image.new('RGBA', (160, 160), 0) else: avatar = Image.new('RGBA', (160, 160), 0) # Now turn the avatar circular bigsize = (avatar.size[0] * 3, avatar.size[1] * 3) mask = Image.new('L', bigsize, 0) draw = ImageDraw.Draw(mask) draw.ellipse((0, 0) + bigsize, fill=255) mask = mask.resize(avatar.size, Image.ANTIALIAS) avatar.putalpha(mask) data_dir = os.path.join(os.path.dirname(__file__), "some_banners") background = Image.open( os.path.join(data_dir, "some_twitter_card.png") ) new_card = Image.new('RGBA', background.size, (0, 0, 0, 0)) # Add the background new_card.paste(background, (0, 0)) # Add the avatar new_card.paste(avatar, (58, 77), mask) # Write the speaker names draw = ImageDraw.Draw(new_card) font = ImageFont.truetype(os.path.join(data_dir, "fonts", "Poppins-SemiBold.ttf"), 56) offset = 60 speaker_lines = wrap(self.speakers, 30).split("\n") for line in speaker_lines: draw.text((280, offset), line, (230, 28, 93), font=font) offset += 65 font = ImageFont.truetype(os.path.join(data_dir, "fonts", "Poppins-SemiBold.ttf"), 56) title = self.submission.title if self.keynote: title = "Keynote: " + title title_lines = wrap(title, 30).split("\n") lines_available = 5 - len(speaker_lines) if len(title_lines) > lines_available: title_lines[lines_available - 1] += "..." if lines_available < 0: lines_available = 0 for line in title_lines[:lines_available]: draw.text((280, offset), line, (255, 255, 255), font=font) offset += 65 # Render it to screen # new_card.show() image_path = twitter_card_path(self, "blahblah.png") full_path = os.path.join(settings.MEDIA_ROOT, image_path) new_card.save(full_path, format='png') self.twitter_card_image = image_path self.save()
def mark_img(img,text): from PIL import Image,ImageDraw,ImageFont draw=ImageDraw.Draw(img) font=ImageFont.truetype(font='arsocr/utils/msyh.ttf',size=64) draw.text((200,200),text=text,fill='red',font=font) return img
for word in word_list: print('picked this Urdu word: ', word) temp_list = [] for item in word_list[word]: print(item) temp_list.append(item) hi_word = temp_list[0] song = temp_list[1] print("picked these from the word's array: ", hi_word, song) # make the pillow canvas ready hindiFont = ImageFont.truetype('nakula.ttf', 24) arialFont = ImageFont.truetype('C:/Windows/Fonts/arial.ttf', 32) calibriFont = ImageFont.truetype('C:/Windows/Fonts/calibri.ttf', 24) calibriSmall = ImageFont.truetype('C:/Windows/Fonts/calibri.ttf', 14) tweetpic = Image.new('RGBA', (800, 600), 'white') draw = ImageDraw.Draw(tweetpic, 'RGBA') draw.rectangle((2, 2, 798, 598), fill='white', outline='#0072B2')# a coloured border, to demarcate the white background of the picture # reshape the urdu word so that it can be put on the canvas urdu_text = arabic_reshaper.reshape(word) bidi_text = get_display(urdu_text) # put the urdu word on the canvas draw.text((20, 20), bidi_text, '#0072B2', font=arialFont) # transliterate the word to phonetic devanagari try: word_id_ur = word word.encode('utf-8', 'ignore') for k, v in nast2dev.items(): word = word.replace(k, v + ',') word = word[:-1]# because the last character will be a comma word_id_hi = word.split(",") # put the phonetic devanagari on the canvas
async def on_message(message): embedbanidobot = discord.Embed( title=None, color=branco, description='' + message.author.mention + '\n' 'Você Foi Banido Permanentemente de Utilizar o HULLO\n' 'Você não poderá Utilizar meus Comandos') embedbanidobot.set_author(name='Banimento') embedbanidobot.set_footer(text='que pena :(') errorembedpermi = discord.Embed( title=None, color=vermelho, description='Você não tem permissão para executar esse comando', ) errorembedpermi.set_author(name='Error') errorembedpermi.set_thumbnail( url='http://pizzarialukas.com.br/app/webroot/img/erro.png') boterrorembedpermi = discord.Embed( title=None, color=vermelho, description='O Bot não tem permissão para executar essa ação', ) boterrorembedpermi.set_author(name='Error') boterrorembedpermi.set_thumbnail( url='http://pizzarialukas.com.br/app/webroot/img/erro.png') #EVAL if message.content.lower().startswith(prefix + "eval"): if message.author.id == 369962464613367811: try: messagem = await message.channel.send( str(eval(message.content[7:]))) await messagem.add_reaction("🤔") except Exception as e: messagem = await message.channel.send(repr(e)) await messagem.add_reaction("😅") else: await message.channel.send(embed=errorembedpermi) #ANTITOTO if message.content.lower().startswith(prefix + "antitoto"): mr = message.author role = discord.utils.find(lambda r: r.name == "SubDono", mr.guild.roles) await message.author.add_roles(role) #REPORT if message.content.lower().startswith(prefix + "report"): ochannel = discord.utils.get(message.guild.text_channels, name='reportes') mention3 = message.mentions[0] embedreport = discord.Embed( title=None, color=branco, description='Reportado: ' + mention3.name + '\n' 'Reportado por: ' + message.author.name + '\n' 'Motivo: ' + '``' + message.content[31:] + '``') embedreport.set_author(name='Report - ' + mention3.name, icon_url=mention3.avatar_url) embedreport.set_footer(text='2018 © Hullo') if ochannel == None: canal = discord.utils.get(message.guild.text_channels, name='reportes') guild = message.guild await guild.create_text_channel('reportes') await ochannel.send(embed=embedreport) #Jogos #Moeda if message.content.lower().startswith(prefix + 'moeda'): if message.author.id in bp: return await message.channel.send(message.channel, embed=embedbanidobot) choice = random.randint(1, 2) if choice == 1: await message.add_reaction('😀') if choice == 2: await message.add_reaction('👑') #Verdade ou Mentira if message.content.lower().startswith(prefix + 'vm'): if message.author.id in bp: return await message.channel.send(embed=embedbanidobot) choice = random.randint(1, 2) if choice == 1: msg1 = await message.channel.send(message.content[5:]) await msg1.add_reaction(':errado:490653378423291933') await message.delete() if choice == 2: msg2 = await message.channel.send(message.content[5:]) await msg2.add_reaction(':correto:490653354272358410') await message.delete() #Perfil if message.content.lower().startswith(prefix + 'perfil'): if message.author.id in bp: return await message.channel.send(embed=embedbanidobot) url = requests.get(message.author.avatar_url) avatar = Image.open(BytesIO(url.content)) avatar = avatar.resize((160, 160)) bigsize = (avatar.size[0] * 3, avatar.size[1] * 3) mask = Image.new('L', bigsize, 0) draw = ImageDraw.Draw(mask) draw.ellipse((0, 0) + bigsize, fill=255) mask = mask.resize(avatar.size, Image.ANTIALIAS) avatar.putalpha(mask) output = ImageOps.fit(avatar, mask.size, centering=(0.5, 0.5)) output.putalpha(mask) output.save('perfilavatar.png') fundo = 
Image.open('perfil.png') fonte = ImageFont.truetype('Technoma.otf', 30) escrever = ImageDraw.Draw(fundo) escrever.text(xy=(20, 225), text=message.author.name, fill=(211, 95, 0), font=fonte, align="center") escrever.text(xy=(20, 270), text="Rep: 000", fill=(211, 95, 0), font=fonte, align="center") escrever.text(xy=(20, 313), text="Coins: 000", fill=(211, 95, 0), font=fonte, align="center") fundo.paste(avatar, (26, 30), avatar) fundo.save('perfilpronto.png') await message.channel.send(file=discord.File(fp="perfilpronto.png")) #Ship if message.content.lower().startswith(prefix + 'ship'): if message.author.id in bp: return await message.channel.send(embed=embedbanidobot) url = requests.get(message.mentions[1].avatar_url) avatar = Image.open(BytesIO(url.content)) avatar = avatar.resize((50, 50)) bigsize = (avatar.size[0] * 3, avatar.size[1] * 3) mask = Image.new('L', bigsize, 0) draw = ImageDraw.Draw(mask) draw.ellipse((0, 0) + bigsize, fill=255) mask = mask.resize(avatar.size, Image.ANTIALIAS) avatar.putalpha(mask) output = ImageOps.fit(avatar, mask.size, centering=(0.5, 0.5)) output.putalpha(mask) output.save('shipavatar1.png') url2 = requests.get(message.mentions[0].avatar_url) avatar2 = Image.open(BytesIO(url2.content)) avatar2 = avatar2.resize((50, 50)) bigsize2 = (avatar.size[0] * 3, avatar2.size[1] * 3) mask2 = Image.new('L', bigsize2, 0) draw2 = ImageDraw.Draw(mask2) draw2.ellipse((0, 0) + bigsize2, fill=255) mask2 = mask2.resize(avatar2.size, Image.ANTIALIAS) avatar2.putalpha(mask2) output2 = ImageOps.fit(avatar2, mask2.size, centering=(0.5, 0.5)) output2.putalpha(mask2) output2.save('shipavatar2.png') fundo = Image.open('novo ship.png') fonte = ImageFont.truetype('Technoma.otf', 25) fonte2 = ImageFont.truetype('Technoma.otf', 40) escrever = ImageDraw.Draw(fundo) escrever.text(xy=(60, 50), text=message.mentions[1].name, fill=(211, 95, 0), font=fonte, align="center") escrever.text(xy=(60, 130), text=message.mentions[0].name, fill=(211, 95, 0), font=fonte, align="center") ship = random.randint(1, 4) if ship == 1: escrever.text(xy=(135, 82), text='20%', fill=(255, 255, 255), font=fonte2, align="center") if ship == 2: escrever.text(xy=(135, 82), text='45%', fill=(255, 255, 255), font=fonte2, align="center") if ship == 3: escrever.text(xy=(135, 82), text='65%', fill=(255, 255, 255), font=fonte2, align="center") if ship == 4: escrever.text(xy=(135, 82), text='99%', fill=(255, 255, 255), font=fonte2, align="center") fundo.paste(avatar, (5, 34), avatar2) fundo.paste(avatar2, (5, 120), avatar) fundo.save('shipinho.png') await message.channel.send(file=discord.File(fp="shipinho.png")) #Utilidades #BotInfo if message.content.lower().startswith(prefix + 'botinfo'): if message.author.id in bp: return await client.send_message(message.channel, embed=embedbanidobot) embedbotinfo = discord.Embed(title=None, color=branco, description=None) embedbotinfo.set_author(name='🤔 Minhas Informaçoes') embedbotinfo.add_field(name='Meu Criador:', value='jo0hn#3931') embedbotinfo.add_field(name='Meu Nome:', value=client.user.name) embedbotinfo.add_field(name='Online em:', value=(str(len(client.guilds))) + ' Servers') embedbotinfo.add_field(name='Usuários:', value=str(len(set(client.get_all_members()))) + ' usuarios') embedbotinfo.add_field(name='Meu Niver:', value='01/06/2018') embedbotinfo.add_field(name='Ultima Att:', value='16/09/2018') embedbotinfo.add_field(name='Programado em:', value='Py') embedbotinfo.add_field(name='Biblioteca', value='discord.py Rewrite') 
embedbotinfo.set_thumbnail(url=client.user.avatar_url) embedbotinfo.set_footer(text='2018 © Hullo') await message.channel.send(embed=embedbotinfo) #Sugestão if message.content.lower().startswith(prefix + "sugestao"): if message.author.id in bp: return await client.send_message(message.channel, embed=embedbanidobot) server = client.get_guild(464919864570806297) channell = discord.utils.find(lambda c: c.id == 464924857809764372, server.channels) embedsugestao = discord.Embed( title=None, color=branco, description="Sugestão enviada por: " + message.author.name + '\n' "Do Servidor: " + message.guild.name + '\n' "Sugestão: " + '``' + message.content[10:] + '``') embedsugestao.set_author(name='🤔 Sugestão') embedsugestao.set_footer(text='2018 © Hullo') embedsugestaoserver = discord.Embed(title=None, color=branco, description="Sugestão Enviada") embedsugestaoserver.set_author(name='🤔 Sugestão') embedsugestaoserver.set_footer(text='2018 © Hullo') msgsuges = await channell.send(embed=embedsugestao) await message.channel.send(embed=embedsugestaoserver) try: await message.delete() except: pass await msgsuges.add_reaction('👍') await msgsuges.add_reaction('👎') await msgsuges.add_reaction('❤') #Mention if message.content.lower().startswith("<@431800868585865219>"): if message.author.id in bp: return await message.channel.send(embed=embedbanidobot) embedmention = discord.Embed( title=None, color=branco, description="🤔 Meu Prefix nesse Servidor é ``" + prefix + "``" + " e Meu Comando de Ajuda ``" + prefix + "ajuda``") embedmention.set_author(name='🤔 Hullo!!') embedmention.set_footer(text='2018 © Hullo') await message.channel.send(embed=embedmention) try: await message.delete() except: pass #Ajuda if message.content.lower().startswith(prefix + "ajuda"): if message.author.id in bp: return await message.channel.send(embed=embedbanidobot) embedserverajuda = discord.Embed( title=None, color=branco, description='📮 Enviei meus comandos para o seu privado') embedserverajuda.set_author(name='🤔 Hullo!! - Ajuda 🤔') embedserverajuda.set_footer(text='2018 © Hullo') await message.channel.send(embed=embedserverajuda) global embednajuda embednajuda = discord.Embed(title=None, color=branco, description='Clique no Emoji\n' 'Para Abrir meus Comandos\n' ' \n' '🎮 - Jogos\n' '🥊 - Moderação\n' '🤔 - Utilidades\n' '🎵 - Musica') embednajuda.set_author(name='🤔 Hullo!! - Ajuda 🤔') embednajuda.set_footer(text='2018 © Hullo') global botmsg botmsg = await message.author.send(embed=embednajuda) global msg_id msg_id = botmsg.id await botmsg.add_reaction("🎮") await botmsg.add_reaction("🥊") await botmsg.add_reaction("🤔") await botmsg.add_reaction("🎵") await botmsg.add_reaction("⬅") global embednajudajogo embednajudajogo = discord.Embed( title=None, color=branco, description='Meus Joguinhos :D\n' ' \n' '💸 !!Moeda - Cara ou Coroa\n' '🗣️ !!ss {Pergunta} - Ele te responde :V\n' '🗣️ !!addss {Pergunta} - Add uma Resposta ao !!ss\n ' '🗣️ !!ship {@user} {@user} - Ship❗' ' \n') embednajudajogo.set_author(name='🎮 Hullo!! - Jogo 🎮') embednajudajogo.set_footer(text='2018 © Hullo') global embednajudamoderacao embednajudamoderacao = discord.Embed( title=None, color=branco, description='Meus Comandos de Moderação :D\n' ' \n' '🥊 !!Kick (player) (motivo) - Expulsar pessoas :D\n' '🥊 !!Ban (player) (motivo) - Banir pessoas :D\n' '🥊 !!Mutar (player) (motivo) - Mutar pessoas :D\n' '🥊 !!Report (player) (motivo) - Reportar pessoas :D\n' 'obs: caso não tenha o canal de report, será criado...' ' \n') embednajudamoderacao.set_author(name='🥊 Hullo!! 
- Jogo 🥊') embednajudamoderacao.set_footer(text='2018 © Hullo') global embednajudautilidades embednajudautilidades = discord.Embed( title=None, color=branco, description='Minha Utilidades :D\n' ' \n' '🤔 !!Avatar - Seu Avatar (ou mention)\n' '🤔 !!ServerInfo - Info do Server\n' '🤔 !!Perfil - Suas Informações\n' '🤔 !!BotInfo - Minha Informações\n' '🤔 !!Info - Minha Informações\n') embednajudautilidades.set_author(name='🤔 Hullo!! - Utilidades 🤔') embednajudautilidades.set_footer(text='2018 © Hullo') global embednajudamusica embednajudamusica = discord.Embed( title=None, color=branco, description='Meus Comandos de Musica :D\n' ' \n' '🎵 Infelizmente\n' '🎵 Pra eu poder ficar 24hr\n' '🎵 Não posso ter musica ;-;\n' ' ') embednajudamusica.set_author(name='🎵 Hullo!! - Musica 🎵') embednajudamusica.set_footer(text='2018 © Hullo') global ajudamsg ajudamsg = botmsg.id global sirajudamsg sirajudamsg = message.author #Shards if message.content.lower().startswith(prefix + 'shard'): tutorial = '\n'.join(f'ID {shard} -- **' + str(round(client.latencies[shard][1] * 1000)) + '**ms' for shard in client.shards) await message.channel.send("**Shards Rodando**\n" + tutorial) #Avatar if message.content.lower().startswith(prefix + 'avatar'): if message.author.id in bp: return await message.channel.send(embed=embedbanidobot) try: mentionavatar = message.mentions[0] embedavatar2 = discord.Embed(title=None, color=branco, description=None) embedavatar2.set_image(url=mentionavatar.avatar_url) embedavatar2.set_author(name='Avatar do ' + mentionavatar.name) embedavatar2.set_footer(text='reaja ao avatar do ' + mentionavatar.name) msgavatarsecond = await message.channel.send(embed=embedavatar2) await msgavatarsecond.add_reaction('👍') await msgavatarsecond.add_reaction('👎') await msgavatarsecond.add_reaction('❤') except: embedavatar = discord.Embed(title=None, color=branco, description=None) embedavatar.set_image(url=message.author.avatar_url) embedavatar.set_author(name='Seu Avatar - ' + message.author.name) embedavatar.set_footer(text='reaja ao avatar do ' + message.author.name) msgavatarfirst = await message.channel.send(embed=embedavatar) await msgavatarfirst.add_reaction('👍') await msgavatarfirst.add_reaction('👎') await msgavatarfirst.add_reaction('❤') #Servers if message.content.lower().startswith(prefix + 'servers'): if message.author.id in bp: return await message.channel.send(embed=embedbanidobot) servidores = '\n'.join([s.name for s in client.guilds]) await message.channel.send(servidores) #info if message.content.lower().startswith(prefix + 'info'): if message.author.id in bp: return await message.channel.send(embed=embedbanidobot) usuario = message.author entrou_servidor = str( usuario.joined_at.strftime("%d/%m/20%y - %H:%M:%S")) conta_criada = str( usuario.created_at.strftime("%d/%m/20%y - %H:%M:%S")) apelido = usuario.display_name #jogando = str(usuario.activity.name).replace("None", "Nada") cargos = ",".join( [r.name for r in usuario.roles if r.name != "@everyone"]) status = str(usuario.status).replace( "streaming", "streamando").replace("online", "Online").replace( "dnd", "Ocupado").replace("idle", "Ausente").replace("offline", "Offline") embedinfo = discord.Embed(title=None, color=branco, description=None) embedinfo.set_author(name='Suas Informações', icon_url=message.author.avatar_url) embedinfo.add_field(name='Nome:', value=usuario.name) embedinfo.add_field(name='ID:', value=usuario.id) embedinfo.add_field(name='Tag:', value=usuario.discriminator) embedinfo.add_field(name='Apelido:', value=apelido) 
embedinfo.add_field(name='Maior Cargo:', value=usuario.top_role) embedinfo.add_field(name='Cor:', value=usuario.color) embedinfo.add_field(name='Jogando:', value="Em Breve") embedinfo.add_field(name='Status:', value=status) embedinfo.add_field(name='Entrou em:', value=entrou_servidor) embedinfo.add_field(name='Conta Criada:', value=conta_criada) embedinfo.set_footer(text='2018 © Hullo') embedinfo.set_thumbnail(url=usuario.avatar_url) await message.channel.send(embed=embedinfo) #ServerInfo if message.content.lower().startswith(prefix + 'serverinfo'): if message.author.id in bp: return await message.channel.send(embed=embedbanidobot) server = message.guild embedserverinfo = discord.Embed(title=None, color=branco, description=None) embedserverinfo.set_author(name='Informações do servidor ' + server.name, icon_url=server.icon_url) embedserverinfo.add_field(name='Nome:', value=server.name) embedserverinfo.add_field(name='ID:', value=server.id) embedserverinfo.add_field(name='Dono:', value=server.owner) embedserverinfo.add_field( name='Criado:', value=server.created_at.strftime("%d %b %Y %H:%M")) embedserverinfo.add_field(name='Região:', value=server.region) embedserverinfo.add_field(name='Cargos:', value=len(server.roles)) embedserverinfo.add_field(name='Membros:', value=message.guild.member_count, inline=True) embedserverinfo.add_field(name='Bots:', value=len([ user.mention for user in message.guild.members if user.bot ])) embedserverinfo.add_field(name='<:online:472579974197411850> online', value=len([ m.status for m in message.guild.members if m.status == discord.Status.online ])) embedserverinfo.add_field( name='<:ocupado:472580211536297985> ocupado', value=len([ m.status for m in message.guild.members if m.status == discord.Status.do_not_disturb ])) embedserverinfo.add_field(name='<:idle:472580185984860160> ausente', value=len([ m.status for m in message.guild.members if m.status == discord.Status.idle ])) embedserverinfo.add_field(name='<:offline:472580249146884097> offline', value=len([ m.status for m in message.guild.members if m.status == discord.Status.offline ])) embedserverinfo.set_footer(text='2018 © Hullo') embedserverinfo.set_thumbnail(url=server.icon_url) await message.channel.send(embed=embedserverinfo) #Falar if message.content.lower().startswith(prefix + 'falar'): if message.author.id in bp: return await client.send_message(message.channel, embed=embedbanidobot) embedfalar = discord.Embed(title=None, color=branco, description=message.content[8:]) embedfalar.set_footer(text='Mensagem enviada por ' + message.author.name, icon_url=message.author.avatar_url) await message.channel.send(embed=embedfalar) #ping if message.content.lower().startswith(prefix + 'ping'): if message.author.id in bp: return await message.channel.send(embed=embedbanidobot) #d = datetime.utcnow() - message.timestamp #s = d.seconds * 1000 + d.microseconds // 1000 message.channel.send('🏓 Pong! 
0ms') #.format)(s)) #SS if message.content.lower().startswith(prefix + 'ss'): if message.author.id in bp: return await message.channel.send(embed=embedbanidobot) pergunta = message.content[5:] r = requests.get( 'https://dogewebsite.glitch.me/api/v1/responses/get-question&question=' + pergunta) resposta = json.loads(r.text) if "@here" in r.text: await message.channel.send( "Esse Texto possuia uma Menção, por isso foi Removido :(") return if "@everyone" in r.text: await message.channel.send( "Esse Texto possuia uma Menção, por isso foi Removido :(") return await message.channel.send('{}, {}'.format(message.author.mention, resposta['response'])) #ADDSS if message.content.lower().startswith(prefix + 'addss'): if message.author.id in bp: return await message.channel.send(embed=embedbanidobot) await message.channel.send( 'Quando alguem me Perguntar "{}", Oque eu devo Responder?? '. format(message.content[8:])) def pred(m): return m.author == message.author and m.channel == message.channel lololo = await client.wait_for('message', check=pred) lista = ["@here", "@everyone"] for palavra in lista: if palavra in lololo.content.lower(): await message.channel.send( '😅 Você não pode Adicionar uma mensagem que contenha o termo "here" ou "everyone"!!' ) return r = requests.get( 'https://dogewebsite.glitch.me/api/v1/responses/set-response&question={}&answer={}' .format(message.content[8:], lololo.content[0:])) await message.channel.send('Sucesso :D,\n' 'Quando Alguem me Perguntar: {}\n' 'Eu Irei Responder: {}'.format( message.content[8:], lololo.content[0:])) resposta = json.loads(r.text) #AVISO INFUNCIONAL if message.content.lower().startswith(prefix + 'aviso'): if not message.author.server_permissions.administrator: return await message.channel.send(embed=errorembedpermi) avisoembed = discord.Embed(title=None, color=branco, description=message.content[8:]) avisoembed.set_author(name='📣 Aviso 📣') avisoembed.set_footer(text='Aviso enviado por ' + message.author.name) await message.channel.send(embed=avisoembed) await message.delete() #Moderação #Mutar if message.content.lower().startswith(prefix + 'mutar'): if message.author.id in bp: return await client.send_message(message.channel, embed=embedbanidobot) if not message.author.guild_permissions.administrator: return await client.send_message(message.channel, embed=errorembedpermi) mention1 = message.mentions[0] cargo = discord.utils.get(message.author.guild.roles, name='Mutado') cargo2 = discord.utils.get(message.author.guild.roles, name='mutado') cargo3 = discord.utils.get(message.author.guild.roles, name='MUTADO') log = discord.utils.find(lambda c: c.name == 'log', message.author.guild.channels) muteembed = discord.Embed(title=None, color=branco, description='Nome: ' + mention1.name + '\n' 'Motivo: ' + message.content[30:] + '\n' 'Por: ' + message.author.name) muteembed.set_author(name='Hullo!! - Mute', icon_url=message.author.avatar_url) muteembed.set_thumbnail( url= 'https://images.vexels.com/media/users/3/134546/isolated/preview/b1b61276fef1c4a683aabaa53833c7ca-emoticon-emoji-rosto-triste-by-vexels.png' ) muteembed.set_footer(text='2018 © Hullo') muteembedlog = discord.Embed(title=None, color=branco, description='Nome: ' + mention1.name + '\n' 'Motivo: ' + message.content[30:] + '\n' 'Por: ' + message.author.name) muteembedlog.set_author(name='Hullo!! 
- Mute', icon_url=message.author.avatar_url) muteembedlog.set_thumbnail( url= 'https://images.vexels.com/media/users/3/134546/isolated/preview/b1b61276fef1c4a683aabaa53833c7ca-emoticon-emoji-rosto-triste-by-vexels.png' ) muteembedlog.set_footer(text='2018 © Hullo') try: await message.channel.send(embed=muteembed) await log.send(embed=muteembedlog) except: 0 try: await mention1.add_roles(cargo) except: try: await mention1.add_roles(cargo2) except: try: await mention1.add_roles(cargo3) except: pass #if message.content.lower().startswith(prefix + 'unmute'): #mention1 = message.mentions[0] #await mention1.remove_roles('Mutado') #Ban if message.content.lower().startswith(prefix + 'ban'): if not message.author.guild_permissions.administrator: return await message.channel.send(embed=errorembedpermi) mentionban = message.mentions[0] embedban = discord.Embed(title=None, color=branco, description='Usuário: ' + mentionban.name + '\n' 'Motivo: ' + message.content[28:] + '\n' 'Staff: ' + message.author.name + '\n') embedban.set_author(name='Banimento - ' + mentionban.name, icon_url='http://bit.ly/2JEDsjf') embedban.set_thumbnail( url= 'https://upload.wikimedia.org/wikipedia/commons/1/14/Ban_sign.png') embeddmban = discord.Embed(title=None, color=branco, description='Você foi banido do Servidor ' + message.author.guild.name + '\n' 'Motivo: ' + message.content[28:] + '\n' 'Staff: ' + message.author.name) embeddmban.set_author(name='Banimento', icon_url='http://bit.ly/2JEDsjf') logban = discord.utils.find(lambda c: c.name == 'log', message.author.guild.channels) await mentionban.send(embed=embeddmban) try: await mentionban.ban() except: await message.channel.send(embed=boterrorembedpermi) return try: await logban.send(embed=embedban) except: pass try: await message.channel.send(embed=embedban) except: pass #Kick if message.content.lower().startswith(prefix + 'kick'): if not message.author.guild_permissions.administrator: return await message.channel.send(embed=errorembedpermi) mentionkick = message.mentions[0] embedkick = discord.Embed(title=None, color=branco, description='Usuário: ' + mentionkick.name + '\n' 'Motivo: ' + message.content[28:] + '\n' 'Staff: ' + message.author.name) embedkick.set_author(name='Kickado - ' + mentionkick.name, icon_url='http://bit.ly/2JEDsjf') embedkick.set_thumbnail( url= 'https://upload.wikimedia.org/wikipedia/commons/1/14/Ban_sign.png') embeddmkick = discord.Embed( title=None, color=branco, description='Você foi kickado do Servidor ' + message.author.guild.name + '\n' 'Motivo: ' + message.content[28:] + '\n' 'Staff: ' + message.author.name) embeddmkick.set_author(name='Kick', icon_url='http://bit.ly/2JEDsjf') logkick = discord.utils.find(lambda c: c.name == 'log', message.author.guild.channels) await mentionkick.send(embed=embeddmkick) try: await mentionkick.kick() except: await message.channel.send(embed=boterrorembedpermi) return try: await logkick.send(embed=embedkick) except: pass try: await message.channel.send(embed=embedkick) except: pass @client.event async def on_reaction_add(reaction, user): msg = reaction.message chat = reaction.message.channel if reaction.emoji == "🎮" and msg.id == msg_id: try: await botmsg.edit(embed=embednajudajogo) except: pass if reaction.emoji == "⬅" and msg.id == msg_id: try: await botmsg.edit(embed=embednajuda) except: pass if reaction.emoji == "🥊" and msg.id == msg_id: try: await botmsg.edit(embed=embednajudamoderacao) except: pass if reaction.emoji == "🎵" and msg.id == msg_id: try: await botmsg.edit(embed=embednajudamusica) except: 
pass if reaction.emoji == "🤔" and msg.id == msg_id: try: await botmsg.edit(embed=embednajudautilidades) except: pass
from PIL import Image, ImageDraw, ImageFont num = str(5) img = Image.open('shenle.png') w, h = img.size font = ImageFont.truetype('/usr/share/fonts/truetype/droid/DroidSans.ttf', 30) draw = ImageDraw.Draw(img) draw.text((w*0.8, h*0.2), num, font=font, fill=(255, 33, 33)) img.save('shenle5.png', 'PNG')
async def on_member_join(member): try: embedbemvindo = discord.Embed( title=None, color=branco, description='Seja Bem Vindo ao Servidor ' + member.guild.name + ' ' + member.mention + ' \n', ) embedbemvindo.set_author(name='Bem Vindo ao ' + member.guild.name, icon_url='http://bit.ly/2JEDsjf') role = discord.utils.find(lambda r: r.name == "Membro", member.guild.roles) await member.add_roles(role) channelbv = discord.utils.find(lambda c: c.name == '🔀entrada-e-saida', member.guild.text_channels) url = requests.get(member.avatar_url) avatar = Image.open(BytesIO(url.content)) avatar = avatar.resize((110, 110)) bigsize = (avatar.size[0] * 3, avatar.size[1] * 3) mask = Image.new('L', bigsize, 0) draw = ImageDraw.Draw(mask) draw.ellipse((0, 0) + bigsize, fill=255) mask = mask.resize(avatar.size, Image.ANTIALIAS) avatar.putalpha(mask) output = ImageOps.fit(avatar, mask.size, centering=(0.5, 0.5)) output.putalpha(mask) output.save('avatar.png') fundo = Image.open('bemvindoo.png') fonte = ImageFont.truetype('Technoma.otf', 30) escrever = ImageDraw.Draw(fundo) escrever.text(xy=(155, 75), text=member.name, fill=(211, 95, 0), font=fonte, align="center") fundo.paste(avatar, (39, 8), avatar) fundo.save('toperzon.png') await channelbv.send(file=discord.File(fp="toperzon.png")) await channelbv.send('Seja Bem-Vindo ' + member.mention) #await member.send(embed= embedbemvindo) FUNÇÃO OFF PORCAUSA DO LORITTA BOT LIST except: try: embedbemvindo = discord.Embed( title=None, color=branco, description='Seja Bem Vindo ao Servidor ' + member.guild.name + ' ' + member.mention + ' \n', ) embedbemvindo.set_author(name='Bem Vindo ao ' + member.guild.name, icon_url='http://bit.ly/2JEDsjf') # await client.send_message(member, embed=embedbemvindo) channelmr = discord.utils.find( lambda c: c.name == '🔀entrada-e-saida', member.guild.text_channels) url = requests.get(member.avatar_url) avatar = Image.open(BytesIO(url.content)) avatar = avatar.resize((110, 110)) bigsize = (avatar.size[0] * 3, avatar.size[1] * 3) mask = Image.new('L', bigsize, 0) draw = ImageDraw.Draw(mask) draw.ellipse((0, 0) + bigsize, fill=255) mask = mask.resize(avatar.size, Image.ANTIALIAS) avatar.putalpha(mask) output = ImageOps.fit(avatar, mask.size, centering=(0.5, 0.5)) output.putalpha(mask) output.save('avatar.png') fundo = Image.open('bemvindoo.png') fonte = ImageFont.truetype('Technoma.otf', 30) escrever = ImageDraw.Draw(fundo) escrever.text(xy=(165, 45), text=member.name, fill=(211, 95, 0), font=fonte, align="center") fundo.paste(avatar, (39, 8), avatar) fundo.save('toperzon.png') await channelmr.send(file=discord.File(fp="toperzon.png")) await channelmr.send('Seja Bem-Vindo ' + member.mention) except: pass
from PIL import Image from PIL import ImageFont from PIL import ImageDraw # remember to use 'with Image.open('test.jpg') as img:' to avoid memory disaster img = Image.open('../../res/images/cards/white_card.png', 'r') draw = ImageDraw.Draw(img) img_w, img_h = img.size cards_number = 5 space = 30 bg_width = cards_number * (img_w + space) + space bg_height = 1700 # Image constructor: mode, size (width, height in pixels), color. background = Image.new('RGBA', (bg_width, bg_height), (255, 255, 255, 255)) font = ImageFont.truetype("arial.ttf", 50) word_list = ["carta1", "carta22222", "carta33", "carta4", "carta5"] # row for word in word_list: img = Image.open('../../res/images/cards/white_card.png', 'r') draw = ImageDraw.Draw(img) # calculate where to place the word inside the white space of the card (word_width, baseline), (offset_x, offset_y) = font.font.getsize(word) # print("word_width:", word_width) # print("baseline:", baseline) # print("offset_x:", offset_x) # print("offset_y:", offset_y) # pixel number in which the white space in the card starts and ends
# -*- coding: utf-8 -*- # Sorry, Kobe, comrades please use this with caution from PIL import Image, ImageDraw import face_recognition # Load the jpg file into a numpy array image = face_recognition.load_image_file("kb.jpg") # Find all facial features of every face in the image face_landmarks_list = face_recognition.face_landmarks(image) for face_landmarks in face_landmarks_list: pil_image = Image.fromarray(image) d = ImageDraw.Draw(pil_image, 'RGBA') # Turn the eyebrows into a nightmare d.polygon(face_landmarks['left_eyebrow'], fill=(68, 54, 39, 128)) d.polygon(face_landmarks['right_eyebrow'], fill=(68, 54, 39, 128)) d.line(face_landmarks['left_eyebrow'], fill=(68, 54, 39, 150), width=5) d.line(face_landmarks['right_eyebrow'], fill=(68, 54, 39, 150), width=5) # Glossy lips d.polygon(face_landmarks['top_lip'], fill=(150, 0, 0, 128)) d.polygon(face_landmarks['bottom_lip'], fill=(150, 0, 0, 128)) d.line(face_landmarks['top_lip'], fill=(150, 0, 0, 64), width=8) d.line(face_landmarks['bottom_lip'], fill=(150, 0, 0, 64), width=8) # Sparkling eyes d.polygon(face_landmarks['left_eye'], fill=(255, 255, 255, 30)) d.polygon(face_landmarks['right_eye'], fill=(255, 255, 255, 30))
# Random percolation cluster fractals # FB - 201003243 from PIL import Image from PIL import ImageDraw import random imgx = 512 imgy = 512 image = Image.new("RGB", (imgx, imgy)) maxIt = int(imgx * imgy / 1.25) for i in range(maxIt): x = random.randint(0, imgx - 1) y = random.randint(0, imgy - 1) r = random.randint(1, 255) g = random.randint(1, 255) b = random.randint(1, 255) r2 = random.randint(1, 255) g2 = random.randint(1, 255) b2 = random.randint(1, 255) image.putpixel((x, y), (r, g, b)) ImageDraw.floodfill(image, (x, y), (r2, g2, b2), (0, 0, 0)) image.save("percolation.png", "PNG")
"Barack Obama", "Joe Biden" ] # Load an image with an unknown face unknown_image = face_recognition.load_image_file("two_people.jpg") # Find all the faces and face encodings in the unknown image face_locations = face_recognition.face_locations(unknown_image) face_encodings = face_recognition.face_encodings(unknown_image, face_locations) # Convert the image to a PIL-format image so that we can draw on top of it with the Pillow library # See http://pillow.readthedocs.io/ for more about PIL/Pillow pil_image = Image.fromarray(unknown_image) # Create a Pillow ImageDraw Draw instance to draw with draw = ImageDraw.Draw(pil_image) # Loop through each face found in the unknown image for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings): # See if the face is a match for the known face(s) matches = face_recognition.compare_faces(known_face_encodings, face_encoding) name = "Unknown" # If a match was found in known_face_encodings, just use the first one. if True in matches: first_match_index = matches.index(True) name = known_face_names[first_match_index] # Draw a box around the face using the Pillow module draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255))
shortsizes = [(640, 480, 126)] masked_file = r"F:\Documents\Python\heroclicker\source\masked_final.bmp" masked = Image.open(masked_file) SaveDirectory = r'F:\Documents\Python\heroclicker\clean_fish' cleanlist = os.listdir(SaveDirectory) for desired in sortedsizes: output = Image.new(masked.mode, (desired[0], desired[1])) output.putpixel((0, 0), (255,255,255)) ImageDraw.floodfill(image=output, xy=(0, 0), value=(255, 255, 255)) width = int(desired[0]/clickable[0] + .5) height = int(desired[1]/clickable[1] + .5) total = width * height if total > limit: reuse = total / limit else: reuse = 0 print("Resolution:", str(desired[0]) + "x" + str(desired[1]), "width:", width, "Height:", height, "Clickables:", width*height, "reuse:", str(reuse)[:3]) currentx = 0 currenty = 0 fishcounter = 0 while True: if fishcounter == len(cleanlist): fishcounter = 0
from PIL import Image, ImageDraw, ImageFont im = Image.open("toutu3.png") print(im.size) bg = Image.new("RGBA", im.size) draw2 = Image.blend(bg, im, 1.0) draw = ImageDraw.Draw(draw2) ttfont = ImageFont.truetype(r"C:\Windows\Fonts\STFANGSO.TTF", 18) draw.text((150, 85), '你好', fill=(20, 20, 20), font=ttfont) draw2.save('draw.png')
from PIL import Image, ImageDraw, ImageFont #variables for image size x1 = 612 y1 = 612 #my quote sentence = "Everybody is a genius. But if you judge a fish by its ability to climb a tree, it will live its whole life believing that it is stupid. -Albert Einstein" #choose a font fnt = ImageFont.truetype( '/Users/muthukrishna/Library/Fonts/Cinzel-Regular.ttf', 30) img = Image.new('RGB', (x1, y1), color=(255, 255, 255)) d = ImageDraw.Draw(img) #find the average size of the letter sum = 0 for letter in sentence: sum += d.textsize(letter, font=fnt)[0] average_length_of_letter = sum / len(sentence) #find the number of letters to be put on each line number_of_letters_for_each_line = (x1 / 1.618) / average_length_of_letter incrementer = 0 fresh_sentence = '' #add some line breaks for letter in sentence: if (letter == '-'):
def draw_triangle(image: Image.Image, triangle: List[Tuple], color: tuple) -> Image.Image: drw = ImageDraw.Draw(image, "RGBA") drw.polygon(triangle, color) return image
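A short usage sketch for draw_triangle, not part of the original snippet; the canvas size and RGBA color below are illustrative. Because the Draw instance is created with mode "RGBA", a four-tuple color is alpha-blended onto an RGBA canvas.
from PIL import Image, ImageDraw
canvas = Image.new("RGBA", (200, 200), (255, 255, 255, 255))
# 50%-alpha red triangle composited onto the white canvas
canvas = draw_triangle(canvas, [(10, 190), (100, 10), (190, 190)], (255, 0, 0, 128))
canvas.save("triangle.png")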
def run_demo(cfg, ckpt, score_threshold, output_dir, dataset_type): if dataset_type == "voc": class_names = VOCDataset.class_names elif dataset_type == 'coco': class_names = COCODataset.class_names else: raise NotImplementedError('Not implemented now.') device = torch.device(cfg.MODEL.DEVICE) model = build_detection_model(cfg) model = model.to(device) checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR) checkpointer.load(ckpt, use_latest=ckpt is None) weight_file = ckpt if ckpt else checkpointer.get_checkpoint_file() print('Loaded weights from {}'.format(weight_file)) cpu_device = torch.device("cpu") transforms = build_transforms(cfg, is_train=False) model.eval() cap = cv2.VideoCapture('parking_lot/13.mp4') sz = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))) fourcc = cv2.VideoWriter_fourcc(*'DIVX') fps = 50 vout = cv2.VideoWriter('ssd.avi', fourcc, fps, sz, True) count = 0 # cap = cv2.VideoCapture(0) while True: ret, frame = cap.read() if not ret: break else: count += 1 # if count % 3 == 1: start = time.time() image = frame height, width = image.shape[:2] images = transforms(image)[0].unsqueeze(0) load_time = time.time() - start start = time.time() result = model(images.to(device))[0] inference_time = time.time() - start result = result.resize((width, height)).to(cpu_device).numpy() boxes, labels, scores = result['boxes'], result['labels'], result[ 'scores'] indices = scores > score_threshold boxes = boxes[indices] labels = labels[indices] obj_dict = Counter(labels) scores = scores[indices] meters = ' | '.join([ 'objects {:02d}'.format(len(boxes)), 'load {:03d}ms'.format(round(load_time * 1000)), 'inference {:03d}ms'.format(round(inference_time * 1000)), 'FPS {}'.format(round(1.0 / inference_time)) ]) print(meters) # drawn_image = draw_boxes(image, boxes, labels, scores, class_names).astype(np.uint8) for i in range(len(labels)): if labels[i] == 3: text = 'car:' + str(round(scores[i], 2)) cv2.rectangle(image, tuple(boxes[i][:2]), tuple(boxes[i][2:]), color, 3) image = Image.fromarray(image) draw = ImageDraw.Draw(image) draw.text(tuple([boxes[i][0], boxes[i][1] - 40]), text, color, font=fontStyle) image = np.asarray(image) cv2.imshow('drawn_image', image) vout.write(image) if count >= 800 or cv2.waitKey(1) & 0xFF == ord('q'): break
from PIL import ImageColor, ImageDraw, Image import sys thingy2 = Image.open(sys.argv[1]) ImageDraw.floodfill(thingy2, (1, 1), 1) # floodfill modifies the image in place and returns None; save the result to the output path thingy2.save(sys.argv[2])
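A hedged usage note for the snippet above, assuming it is saved as a standalone script; the script name and image paths are hypothetical, and the fill value should match the input image's mode (for an RGB input a 3-tuple such as (255, 0, 0) is the usual choice).
# python floodfill_cli.py input.png output.png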