def up_image():
    """Handle a POSTed captcha image: recognize it, archive a copy, and
    return the prediction as JSON.

    Expects a multipart form upload under the key ``image_file``.  Returns
    a JSON payload with the timestamp, predicted value and elapsed time in
    milliseconds, or an ``error_code`` "1001" response for any other request.
    """
    if request.method == 'POST' and request.files.get('image_file'):
        # Timestamp with the dot stripped, used as a unique file-name part.
        timec = str(time.time()).replace(".", "")
        file = request.files.get('image_file')
        img = file.read()
        img = BytesIO(img)
        img = Image.open(img, mode="r")
        # username = request.form.get("name")
        print("接收图片尺寸: {}".format(img.size))
        s = time.time()
        value = R.rec_image(img)
        e = time.time()
        print("识别结果: {}".format(value))
        # Archive the upload as "<predicted value>_<timestamp>.<suffix>".
        print("保存图片: {}{}_{}.{}".format(api_image_dir, value, timec, image_suffix))
        file_name = "{}_{}.{}".format(value, timec, image_suffix)
        # BUG FIX: pass directory and file name as separate arguments; the
        # original concatenated them inside a single-argument join(), which
        # skips os.path.join's separator handling entirely.
        file_path = os.path.join(api_image_dir, file_name)
        # Convert to RGB first so the image can be saved as JPEG.
        rgb_im = img.convert('RGB')
        rgb_im.save(file_path)
        result = {
            'time': timec,                           # timestamp
            'value': value,                          # predicted value
            'speed_time(ms)': int((e - s) * 1000)    # recognition time (ms)
        }
        img.close()
        return jsonify(result)
    else:
        content = json.dumps({"error_code": "1001"})
        resp = response_headers(content)
        return resp
def load_images(path):
    """Load pickled profile images and return ``(ids, images)`` arrays.

    The pickle at *path* maps user id -> raw image bytes.  Each image is
    decoded, converted to RGB and center-cropped to 48x48 when it is not
    already that size; pixel values are scaled to [0, 1].

    Returns:
        ids: 1-D numpy array of user ids.
        images: float32 array of shape (n, 48, 48, 3).
    """
    # NOTE(review): pickle.load can execute arbitrary code -- make sure
    # `path` only ever comes from a trusted source.
    with open(path, 'rb') as f:
        profile_images = pickle.load(f)
    ids = []
    images = []
    target_width = 48
    target_height = 48
    for uid, image in profile_images.items():
        image = Image.open(BytesIO(image)).convert("RGB")
        if image.size != (target_width, target_height):
            # Center-crop to the target size.
            left = (image.width - target_width) / 2
            top = (image.height - target_height) / 2
            right = (image.width + target_width) / 2
            bottom = (image.height + target_height) / 2
            # BUG FIX: crop() returns a new image; the original discarded
            # the return value, so oversized images were never cropped and
            # np.stack() below would fail on mixed shapes.  The debug-only
            # image.show() call (which opens a viewer window per image) is
            # also removed.
            image = image.crop((left, top, right, bottom))
        image_np = np.array(image, dtype=np.float32) / 255
        ids.append(uid)
        images.append(image_np)
    ids = np.array(ids)
    images = np.stack(images)
    return ids, images
def decode_img():
    """Decode the base64 data-URL image posted in the request form and
    return it as a normalized (1, 28, 28, 1) float32 array."""
    encoded = request.form['img'].split("base64,")[1]
    pil_img = Image.open(BytesIO(base64.b64decode(encoded)))  # read image
    # Flatten any transparency onto a white background.
    white_bg = Image.new('RGB', pil_img.size, 'white')
    pil_img = Image.composite(pil_img, white_bg, pil_img)
    gray = pil_img.convert('L')  # grayscale
    gray = gray.resize((28, 28), Image.ANTIALIAS)  # shrink to 28x28
    arr = 1 - np.array(gray, dtype=np.float32) / 255.0
    return arr.reshape(1, 28, 28, 1)
def runCommentSemantic():
    """Classify a handwritten-digit image supplied either as a file upload
    (``request.files["image"]``) or as a base64 data-URL
    (``request.form["image"]``).

    Returns a JSON response with keys:
        success    -- True when a prediction was made
        output     -- predicted class index (int)
        confidence -- max predicted probability (float)
        error      -- error message when something went wrong
        run_time   -- total handling time in seconds, formatted "%.2f"
    """
    start_time = time.time()
    data = {"success": False}
    if request.method == "POST":
        image = request.files.get("image", None)
        if image is not None:
            try:
                # Read image by Opencv
                image = ultis.read_image(image)
                # classify the input image
                temp = Handwritten_model.predict_image(image)
                data["output"] = int(np.argmax(temp))
                data["confidence"] = float(np.max(temp))
                # indicate that the request was a success
                data["success"] = True
            except Exception as ex:
                # BUG FIX: store the message, not the exception object --
                # raw exceptions are not JSON serializable, so jsonify()
                # would itself fail while reporting the error.
                data['error'] = str(ex)
                print(str(ex))
        else:
            image = request.form.get("image", None)
            if image is not None:
                try:
                    # Decode the base64 payload and normalize it: white
                    # background, grayscale, 28x28, inverted and scaled
                    # to [0, 1].
                    image = image.split("base64,")[1]
                    image = BytesIO(base64.b64decode(image))
                    image = Image.open(image)
                    image = Image.composite(
                        image, Image.new('RGB', image.size, 'white'), image)
                    image = image.convert('L')
                    image = image.resize((28, 28), Image.ANTIALIAS)
                    image = 1 - np.array(image, dtype=np.float32) / 255.0
                    # classify the input image
                    temp = Handwritten_model.predict_image(image)
                    data["output"] = int(np.argmax(temp))
                    data["confidence"] = float(np.max(temp))
                    # indicate that the request was a success
                    data["success"] = True
                except Exception as ex:
                    # Same fix as above: keep the payload serializable.
                    data['error'] = str(ex)
                    print(str(ex))
    data['run_time'] = "%.2f" % (time.time() - start_time)
    # return the data dictionary as a JSON response
    return jsonify(data)
def predict():
    """Predict the digit drawn on the canvas (posted as a base64 data-URL
    in the request body) and return the predicted class as a string."""
    # get data from drawing canvas and save as image
    payload = request.get_data()
    encoded = payload.split(b"base64,")[1]
    canvas = Image.open(BytesIO(base64.b64decode(encoded)))
    # Flatten transparency onto white, then grayscale and shrink.
    white_bg = Image.new('RGB', canvas.size, 'white')
    canvas = Image.composite(canvas, white_bg, canvas)
    canvas = canvas.convert('L').resize((28, 28), Image.ANTIALIAS)
    pixels = 1 - np.array(canvas, dtype=np.uint8) / 255.0
    # reshape image data for use in neural network
    pixels = pixels.reshape(1, 28, 28, 1)
    out = model.predict(pixels)
    return np.array_str(np.argmax(out, axis=1))
def handler(request, context):
    """Prediction handler: read an image from the request body, run it
    through the model and return the sorted predictions as JSON."""
    print('Start predict handler.')
    # Reject requests from runtimes that do not supply http_method.
    if 'http_method' not in request:
        message = 'Error: Support only "abeja/all-cpu:19.04" or "abeja/all-gpu:19.04".'
        print(message)
        return {
            'status_code': http.HTTPStatus.BAD_REQUEST,
            'content_type': 'application/json; charset=utf8',
            'content': {'message': message}
        }
    try:
        body = request.read()
        img = Image.open(BytesIO(body)).convert('RGB')
        img = img.resize((IMG_ROWS, IMG_COLS))
        # Preprocess and add the batch dimension the model expects.
        batch = np.expand_dims(preprocessor.transform(img), axis=0)
        scores = model.predict(batch)[0]
        sorted_result = decode_predictions(scores.tolist())
        return {
            'status_code': http.HTTPStatus.OK,
            'content_type': 'application/json; charset=utf8',
            'content': {'result': sorted_result}
        }
    except Exception as e:
        # Log the failure and surface it as a 500 instead of crashing.
        print(str(e))
        print(traceback.format_exc())
        return {
            'status_code': http.HTTPStatus.INTERNAL_SERVER_ERROR,
            'content_type': 'application/json; charset=utf8',
            'content': {'message': str(e)}
        }
def blend_img(img_qr, head_path=None):
    """Compose the red-packet share image: background + finger + user
    avatar + QR code.

    Args:
        img_qr: path or file object for the QR-code image.
        head_path: optional URL of the user's avatar; falls back to the
            configured default avatar when missing or on download failure.

    Returns:
        Tuple of (composed PIL image, processed QR PIL image).
    """
    # Paste boxes (left, upper, right, lower) on the 1080x1350 canvas.
    fingerxxyy = (450, 1042, 630, 1222)
    headxxyy = (440, 218, 640, 418)
    qrxxyy = (365, 617, 715, 967)
    redpacket_path = os.path.join('.', 'static', 'server', 'redpacket')
    background_path = os.path.join(redpacket_path, 'background.jpg')
    img_background = Image.open(background_path).convert('RGB')
    finger_background = img_background.crop(fingerxxyy)
    finger_path = os.path.join(redpacket_path, 'finger.png')
    img_finger = Image.open(finger_path).convert('RGB')
    img_finger = make_background(img_finger, finger_background)
    if head_path:
        try:
            # FIX: bound the download and catch only real errors -- the
            # original bare `except:` also swallowed KeyboardInterrupt /
            # SystemExit, and requests.get() without a timeout can hang
            # the request forever.
            resp = requests.get(head_path, timeout=10)
            img_head = Image.open(BytesIO(resp.content))
        except Exception:
            img_head = Image.open(config.DEFAULT_USER_HEAD)
    else:
        img_head = Image.open(config.DEFAULT_USER_HEAD)
    img_head = img_head.convert('RGB')
    head_background = img_background.crop(headxxyy)
    img_head = add_border(img_head, (231, 63, 92), 8, 16, head_background)
    img_qr = Image.open(img_qr).convert('RGB')
    qr_background = img_background.crop(qrxxyy)
    img_qr = make_qr(img_qr, 350, 310, qr_background, (233, 205, 158))
    # Assemble the final canvas layer by layer.
    newimg = Image.new("RGB", (1080, 1350), (255, 255, 255))
    newimg.paste(img_background)
    newimg.paste(img_finger, fingerxxyy[:2])
    newimg.paste(img_head, headxxyy[:2])
    newimg.paste(img_qr, qrxxyy[0:2])
    return newimg, img_qr
def make_template(self, user: Union[discord.User, discord.Member], badge: Badge, template: Image) -> Image:
    """Build the base template before determining animated or not"""
    # Members carry role/status info; plain Users get generic defaults.
    if hasattr(user, "roles"):
        department = ("GENERAL SUPPORT" if user.top_role.name == "@everyone"
                      else user.top_role.name.upper())
        status = user.status
        level = str(len(user.roles))
    else:
        department = "GENERAL SUPPORT"
        status = "online"
        level = "1"
    # Map Discord presence to the badge's flavour text.
    if str(status) == "online":
        status = "ACTIVE"
    if str(status) == "offline":
        status = "COMPLETING TASK"
    if str(status) == "idle":
        status = "AWAITING INSTRUCTIONS"
    if str(status) == "dnd":
        status = "MIA"
    # Render the user id as a code39 barcode, in memory.
    barcode = BytesIO()
    generate("code39", str(user.id), writer=ImageWriter(self), output=barcode)
    barcode = Image.open(barcode)
    barcode = self.remove_white_barcode(barcode)
    fill = (0, 0, 0)  # text colour fill
    if badge.is_inverted:
        fill = (255, 255, 255)
        barcode = self.invert_barcode(barcode)
    template = Image.open(template)
    template = template.convert("RGBA")
    barcode = barcode.convert("RGBA")
    barcode = barcode.resize((555, 125), Image.ANTIALIAS)
    template.paste(barcode, (400, 520), barcode)
    # font for user information
    font_loc = str(BASE_DIR / os.path.join("lib", "data", "arial.ttf"))
    try:
        font1 = ImageFont.truetype(font_loc, 30)
        font2 = ImageFont.truetype(font_loc, 24)
    except Exception:
        # Fall back to PIL's default font when arial.ttf is unavailable.
        font1 = None
        font2 = None
    # font for extra information
    draw = ImageDraw.Draw(template)
    # adds username
    draw.text((225, 330), str(user.display_name), fill=fill, font=font1)
    # adds ID Class
    draw.text(
        (225, 400),
        badge.code + "-" + str(user).split("#")[1],
        fill=fill,
        font=font1,
    )
    # adds user id
    draw.text((250, 115), str(user.id), fill=fill, font=font2)
    # adds user status
    draw.text((250, 175), status, fill=fill, font=font2)
    # adds department from top role
    draw.text((250, 235), department, fill=fill, font=font2)
    # adds user level
    draw.text((420, 475), "LEVEL " + level, fill="red", font=font1)
    # adds join/creation date
    # BUG FIX: `user is discord.Member` compared the instance against the
    # class object itself and was therefore always False, so the joined_at
    # branch could never run; isinstance() is the correct membership check.
    if badge.badge_name != "discord" and isinstance(user, discord.Member):
        draw.text((60, 585), str(user.joined_at), fill=fill, font=font2)
    else:
        draw.text((60, 585), str(user.created_at), fill=fill, font=font2)
    barcode.close()
    return template
def invert_image(image: BytesIO) -> BytesIO:
    """Return *image* colour-inverted and re-encoded as WEBP.

    Args:
        image: buffer containing any PIL-readable image.

    Returns:
        A ``BytesIO`` positioned at the start of the WEBP data.
    """
    pil_image = PIL.Image.open(image)
    inverted_image = ImageOps.invert(pil_image.convert(mode="RGB"))
    bytes_io = BytesIO()
    inverted_image.save(bytes_io, "WEBP")
    # FIX: rewind before returning -- save() leaves the cursor at the end
    # of the buffer, so callers reading the returned stream (e.g. to send
    # it as a file) would otherwise get zero bytes.
    bytes_io.seek(0)
    return bytes_io
def up_image():
    """OCR endpoint: accept an uploaded image, run it through the OCR
    model, archive both the original and the annotated result, and return
    the recognized text regions as JSON.

    Example:
        curl -v http://127.0.0.1:9436/ocr -F "image_file=@./test.jpg"
    """
    if request.method == 'POST' and request.files.get('image_file'):
        # Timestamp with the dot stripped, used as a unique id.
        timec = str(time.time()).replace(".", "")
        file = request.files.get('image_file')
        print(file)
        img_data = file.read()
        img = Image.open(BytesIO(img_data), mode="r")
        # username = request.form.get("name")
        size = img.size
        print("接收图片尺寸: {}".format(size))
        # Reject images too small for the OCR model to handle.
        if size[0] < 20 or size[1] < 14:
            content = json.dumps({
                "error_code": 1003,
                'error_message': 'file to small'
            })
            resp = response_headers(content)
            return resp
        # Save the raw upload.
        file_name = "{}_{}.{}".format('temp', timec, 'jpg')
        print("保存图片: {}".format(file_name))
        # BUG FIX: pass directory and file name as separate join() args;
        # the original concatenated them inside a single-argument join().
        file_path = os.path.join(api_image_dir, file_name)
        # `with` closes the file; the original also called a redundant
        # f.close() inside the block.
        with open(file_path, 'wb') as f:
            f.write(img_data)
        s = time.time()
        value = []
        try:
            image = np.array(img.convert('RGB'))
            t = time.time()
            result, image_framed, scale = ocr.model(image)
            # Save the annotated (framed) result image.
            file_name = "{}_{}.{}".format('ocr_', timec, 'jpg')
            dest_path = os.path.join(api_done_dir, file_name)
            Image.fromarray(image_framed).save(dest_path)
            print("Mission complete, it took {:.3f}s".format(time.time() - t))
            print("\nRecognition Result:\n")
            texts = []
            for key in result:
                print(result[key][1])
                texts.append(result[key][1])
                value.append({
                    'loc': [int(x) for x in result[key][0]],
                    'rate': result[key][2],
                    'text': result[key][1]
                })
            e = time.time()
            # BUG FIX: removed the stray traceback.print_exc() that ran on
            # the success path -- with no active exception it just printed
            # "NoneType: None" noise for every successful request.
            print("识别结果: {}".format(json.dumps(value, ensure_ascii=False)))
            result = {
                'error_code': 0,
                'time': timec,                          # timestamp
                'value': value,                         # recognized regions
                'text': u'\n'.join(texts),
                'scale': scale,
                'speed_time(ms)': int((e - s) * 1000)   # elapsed time (ms)
            }
            return json.dumps(result, ensure_ascii=False)
        except Exception as ex:
            e = time.time()
            print('识别错误', ex)
            result = {
                'error_code': 1004,
                'time': timec,                          # timestamp
                'value': [],                            # no result on error
                'scale': 1,
                'speed_time(ms)': int((e - s) * 1000)   # elapsed time (ms)
            }
            return json.dumps(result, ensure_ascii=False)
    else:
        content = json.dumps({
            "error_code": 1001,
            'error_message': 'only file via form post support'
        })
        resp = response_headers(content)
        return resp
def make_template(self, user, badge, template):
    """Build the base template before determining animated or not"""
    # Members carry role/status info; plain Users get generic defaults.
    if hasattr(user, "roles"):
        department = (_("GENERAL SUPPORT") if user.top_role.name == "@everyone"
                      else user.top_role.name.upper())
        status = user.status
        level = str(len(user.roles))
    else:
        department = _("GENERAL SUPPORT")
        status = "online"
        level = "1"
    # Map Discord presence to the badge's flavour text.
    if str(status) == "online":
        status = _("ACTIVE")
    if str(status) == "offline":
        status = _("COMPLETING TASK")
    if str(status) == "idle":
        status = _("AWAITING INSTRUCTIONS")
    if str(status) == "dnd":
        status = _("MIA")
    # Render the user id as a code39 barcode, in memory.
    # FIX: generate()'s return value was bound to an unused local
    # (temp_barcode); only its side effect of writing into `barcode`
    # matters, so the dead binding is dropped.
    barcode = BytesIO()
    generate("code39", str(user.id), writer=ImageWriter(self), output=barcode)
    barcode = Image.open(barcode)
    barcode = self.remove_white_barcode(barcode)
    fill = (0, 0, 0)  # text colour fill
    if badge.is_inverted:
        fill = (255, 255, 255)
        barcode = self.invert_barcode(barcode)
    template = Image.open(template)
    template = template.convert("RGBA")
    barcode = barcode.convert("RGBA")
    barcode = barcode.resize((555, 125), Image.ANTIALIAS)
    # pastes the barcode, unused in this instance
    # template.paste(barcode, (400, 520), barcode)
    # font for user information
    font_loc = str(bundled_data_path(self) / "arial.ttf")
    try:
        font1 = ImageFont.truetype(font_loc, 30)
        font2 = ImageFont.truetype(font_loc, 24)
    except Exception as e:
        # Fall back to PIL's default font when arial.ttf is unavailable.
        print(e)
        font1 = None
        font2 = None
    # font for extra information
    draw = ImageDraw.Draw(template)
    # unused, meant for another cog
    # adds username
    # draw.text((225, 330), str(user.display_name), fill=fill, font=font1)
    # adds ID Class
    # draw.text((225, 400), badge.code + "-" + str(user).split("#")[1], fill=fill, font=font1)
    # adds user id
    # draw.text((250, 115), str(user.id), fill=fill, font=font2)
    # adds user status
    # draw.text((250, 175), status, fill=fill, font=font2)
    # adds department from top role
    # draw.text((250, 235), department, fill=fill, font=font2)
    # adds user level
    # draw.text((420, 475), _("LEVEL ") + level, fill="red", font=font1)
    # adds join/creation date
    # if badge.badge_name != "discord" and user is discord.Member:
    #     draw.text((60, 585), str(user.joined_at), fill=fill, font=font2)
    # else:
    #     draw.text((60, 585), str(user.created_at), fill=fill, font=font2)
    return template
def scaleImage(image, width=None, height=None, mode="contain", quality=88,
               result=None, direction=None):
    """Scale the given image data to another size and return the result as a
    string or optionally write in to the file-like `result` object.

    The `image` parameter can either be the raw image data (ie a `str`
    instance) or an open file.

    The `quality` parameter can be used to set the quality of the resulting
    image scales.

    The return value is a tuple with the new image, the image format and a
    size-tuple. Optionally a file-like object can be given as the `result`
    parameter, in which the generated image scale will be stored.

    The `width`, `height`, `mode` parameters will be passed to
    :meth:`scalePILImage`, which performs the actual scaling.

    The generated image is a JPEG image, unless the original is a PNG or GIF
    image. This is needed to make sure alpha channel information is not lost,
    which JPEG does not support.
    """
    if isinstance(image, (bytes, str)):
        # NOTE(review): BytesIO(str) raises TypeError on Python 3 -- the
        # `str` branch looks like a Python 2 leftover; confirm callers only
        # ever pass bytes or an open file.
        image = BytesIO(image)
    image = PIL.Image.open(image)
    # When we create a new image during scaling we loose the format
    # information, so remember it here.
    format_ = image.format
    if format_ not in ("PNG", "GIF"):
        # Always generate JPEG, except if format is PNG or GIF.
        format_ = "JPEG"
    elif format_ == "GIF":
        # GIF scaled looks better if we have 8-bit alpha and no palette
        format_ = "PNG"
    # Preserve any embedded colour profile across the rescale.
    icc_profile = image.info.get("icc_profile")
    image = scalePILImage(image, width, height, mode, direction=direction)
    # convert to simpler mode if possible
    # getcolors() returns None when the image has more than 256 colours,
    # in which case no simplification is attempted.
    colors = image.getcolors(maxcolors=256)
    if image.mode not in ("P", "L") and colors:
        if format_ == "JPEG":
            # check if it's all grey
            if all(rgb[0] == rgb[1] == rgb[2] for c, rgb in colors):
                image = image.convert("L")
        elif format_ == "PNG":
            image = image.convert("P")
    if image.mode == "RGBA" and format_ == "JPEG":
        # Inspect the per-band min/max to see whether alpha is actually used.
        extrema = dict(zip(image.getbands(), image.getextrema()))
        if extrema.get("A") == (255, 255):
            # no alpha used, just change the mode, which causes the alpha band
            # to be dropped on save
            # NOTE(review): assigning Image.mode directly relies on older
            # Pillow internals; newer Pillow versions treat `mode` as
            # read-only -- verify against the pinned Pillow version.
            image.mode = "RGB"
        else:
            # switch to PNG, which supports alpha
            format_ = "PNG"
    new_result = False
    if result is None:
        # No target stream supplied: buffer in memory and return the bytes.
        result = BytesIO()
        new_result = True
    image.save(result, format_, quality=quality, optimize=True,
               progressive=True, icc_profile=icc_profile)
    if new_result:
        result = result.getvalue()
    else:
        # Leave caller-supplied streams rewound and ready to read.
        result.seek(0)
    return result, format_, image.size
def add_watermark(self, image, watermark, position='scale', opacity=1, format=None):
    # pylint: disable=redefined-builtin,too-many-arguments,too-many-branches
    """Adds a watermark to an image and return a new image

    `image` may be an IImageFile, raw bytes, a filesystem path or an open
    file; `watermark` may be a path, an IImageFile or an open file.
    `position` is 'tile', 'scale' or an explicit paste position.  Returns
    a (bytes, format-lowercase) tuple.
    """
    # init image
    if IImageFile.providedBy(image):
        image = image.data
    if isinstance(image, bytes):
        image = BytesIO(image)
    elif isinstance(image, str) and not os.path.exists(image):
        # NOTE(review): wrapping a str in StringIO and handing it to
        # Image.open() fails on Python 3 -- this branch looks like a
        # Python 2 leftover; confirm callers only pass bytes or paths.
        image = StringIO(image)
    image = Image.open(image)
    # check format
    if not format:
        # Default to the source image's own format.
        format = image.format
    format = format.upper()
    if format not in ('GIF', 'JPEG', 'PNG'):
        format = 'JPEG'
    # check RGBA mode
    if image.mode != 'RGBA':
        image = image.convert('RGBA')
    # init watermark
    if isinstance(watermark, str) and os.path.exists(watermark):
        # A filesystem path to the watermark image.
        watermark = Image.open(watermark)
    else:
        if IImageFile.providedBy(watermark):
            # NOTE(review): StringIO(watermark.data) has the same Python 2
            # flavour as above if `data` is bytes -- verify.
            watermark = Image.open(StringIO(watermark.data))
        else:
            watermark = Image.open(watermark)
    if opacity < 1:
        watermark = self._reduce_opacity(watermark, opacity)
    # create a transparent layer the size of the image and draw the
    # watermark in that layer.
    layer = Image.new('RGBA', image.size, (0, 0, 0, 0))
    if position == 'tile':
        # Repeat the watermark across the whole image.
        for y in range(0, image.size[1], watermark.size[1]):  # pylint: disable=invalid-name
            for x in range(0, image.size[0], watermark.size[0]):  # pylint: disable=invalid-name
                layer.paste(watermark, (x, y))
    elif position == 'scale':
        # scale, but preserve the aspect ratio
        ratio = min(
            float(image.size[0]) / watermark.size[0],
            float(image.size[1]) / watermark.size[1])
        w = int(watermark.size[0] * ratio)  # pylint: disable=invalid-name
        h = int(watermark.size[1] * ratio)  # pylint: disable=invalid-name
        watermark = watermark.resize((w, h))
        # Center the scaled watermark on the image.
        layer.paste(watermark, (int(
            (image.size[0] - w) / 2), int((image.size[1] - h) / 2)))
    else:
        # `position` is assumed to be an explicit paste position/box.
        layer.paste(watermark, position)
    # composite the watermark with the layer
    new = BytesIO()
    composite = Image.composite(layer, image, layer)
    if format == 'JPEG':
        # JPEG cannot carry an alpha channel.
        composite = composite.convert('RGB')
    composite.save(new, format)
    return new.getvalue(), format.lower()