def merge(self, image_root='test_merge', step=500, set_label=''):
    self.delete_invalid_files(image_root)
    file_list = os.listdir(image_root)
    shuffle(file_list)
    count = 0
    for i in range(len(file_list) // step + 1):
        if i * step >= len(file_list):
            break  # avoid indexing past the end when len(file_list) is an exact multiple of step
        full_name = image_root + "/" + file_list[i * step]
        temp_image = cv2.imdecode(np.fromfile(full_name, np.uint8), -1)
        temp_label = [self.get_class_label(file_list[i * step])[1]]
        img = None
        for file_path in file_list[i * step + 1: (i + 1) * step]:
            try:
                full_name = image_root + "/" + file_path
                img = cv2.imdecode(np.fromfile(full_name, np.uint8), -1)
                temp_image = np.hstack((temp_image, img))
                temp_label.append(self.get_class_label(file_path)[1])
                print("\rProcessed %5d images, %20s" % (count, file_path), end='')
                count += 1
            except Exception as e:
                print(img.shape if img is not None else None, file_path)
                raise e
        print()
        cv2.imencode('.png', temp_image)[1].tofile(set_label + "_images_%d.png" % i)
        np.save(set_label + '_labels_%d.npy' % i, np.array(temp_label))
        print(len(temp_label), temp_image.shape)
def predict(update: Update, context: CallbackContext) -> None:
    """Echo the user message and reply with the model output for any attached image."""
    if update.message.text:
        update.message.reply_text(update.message.text)
    message: Message = update.message
    photo: List[PhotoSize] = message.photo
    document: Document = message.document
    context.bot.send_chat_action(chat_id=message.chat.id,
                                 action=ChatAction.UPLOAD_PHOTO, timeout=60000)
    # The photo and document branches were identical, so the file lookup is shared here.
    if photo:
        file: File = photo[-1].get_file(timeout=10000)  # largest available photo size
    elif document:
        file: File = document.get_file(timeout=10000)
    else:
        return
    arr: bytearray = file.download_as_bytearray()
    nparr = np.frombuffer(arr, np.uint8)
    inp_img = cv2.imdecode(nparr, cv2.IMREAD_UNCHANGED)
    decode_an_image_file(inp_img)
    output = cv2.imread('h.png')
    _, outputBuffer = cv2.imencode('.jpg', output)
    OutputBase64String = base64.b64encode(outputBuffer).decode('utf-8')  # currently unused
    message.reply_photo(photo=open('h.png', 'rb'))
def getImageArrayFromUrl(url: str, kind: str) -> np.ndarray:
    logging.info(f"Opening {url} as {kind}")
    if kind == "Image":
        resp = requests.get(url)
        image = np.asarray(bytearray(resp.content), dtype="uint8")
        return cv2.imdecode(image, -1)
    if kind == "Stream":
        # Grab a single frame from the stream with ffmpeg and decode it.
        out, _ = (
            ffmpeg
            .input(url)
            .output('pipe:', format='image2', vcodec='mjpeg', vframes=1)
            .global_args('-loglevel', 'warning')
            .global_args('-hide_banner')
            .run(capture_stdout=True)
        )
        image = np.asarray(bytearray(out), dtype=np.uint8)
        return cv2.imdecode(image, -1)
def compare_similarity(img1_path, img2_path, threshold=0.8):
    img1 = cv2.imdecode(np.fromfile(img1_path, dtype=np.uint8), -1)
    img2 = cv2.imdecode(np.fromfile(img2_path, dtype=np.uint8), -1)
    if img1.shape != img2.shape:
        return False, 0
    else:
        gray_1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
        gray_2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
        score, diff = structural_similarity(gray_1, gray_2, full=True)
        if score > threshold:
            return True, score
        else:
            return False, score
def handle(conn):
    global img, imbyt
    lock.acquire()
    if imbyt is None:
        imorg = np.asarray(ImageGrab.grab())  # full-screen screenshot
        # JPEG-compress the frame
        _, imbyt = cv2.imencode(".jpg", imorg, [cv2.IMWRITE_JPEG_QUALITY, IMQUALITY])
        imnp = np.asarray(imbyt, np.uint8)
        img = cv2.imdecode(imnp, cv2.IMREAD_COLOR)
    lock.release()
    lenb = struct.pack(">BI", 1, len(imbyt))
    conn.sendall(lenb)
    conn.sendall(imbyt)
    while True:
        cv2.waitKey(100)  # wait 100 ms
        gb = ImageGrab.grab()  # full-screen screenshot
        imgnpn = np.asarray(gb)
        _, timbyt = cv2.imencode(".jpg", imgnpn, [cv2.IMWRITE_JPEG_QUALITY, IMQUALITY])
        imnp = np.asarray(timbyt, np.uint8)
        imgnew = cv2.imdecode(imnp, cv2.IMREAD_COLOR)
        # compute the frame difference
        imgs = imgnew - img
        if (imgs != 0).any():
            pass  # the frame changed
        else:
            continue
        imbyt = timbyt
        img = imgnew
        # lossless compression of the difference
        _, imb = cv2.imencode(".png", imgs)
        l1 = len(imbyt)  # size of the full encoded frame
        l2 = len(imb)    # size of the difference image
        if l1 > l2:
            # send the difference image
            lenb = struct.pack(">BI", 0, l2)
            conn.sendall(lenb)
            conn.sendall(imb)
        else:
            # send the full encoded frame
            lenb = struct.pack(">BI", 1, l1)
            conn.sendall(lenb)
            conn.sendall(imbyt)
def build_test_image(self):
    _, img = Capture().capture_test_image()
    img.seek(0)
    img_array = np.asarray(bytearray(img.read()), dtype=np.uint8)
    cv2_img = cv2.imdecode(img_array, 0)  # decode as grayscale
    # NOTE: assuming self.coords holds ((x1, y1), (x2, y2)); cv2.rectangle also
    # needs a colour, so white (255) is used here as a placeholder.
    cv2.rectangle(cv2_img, self.coords[0], self.coords[1], 255, 2)
    cv2.imwrite(self.save_path + 'test_img_box.jpg', cv2_img)
def debug_get(self):
    t1 = time.time()
    pipe = subprocess.Popen("{0}/adb/adb.exe shell screencap -p".format(self.path),
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
    image_bytes = pipe.stdout.read()
    print(image_bytes[0:10])
    image_bytes = image_bytes.replace(b'\r\n', b'\n')
    print(image_bytes[0:10])
    image = cv2.imdecode(np.frombuffer(image_bytes, dtype='uint8'), cv2.IMREAD_COLOR)
    t2 = time.time()
    print(t2 - t1)
    try:
        if int(image.shape[0]) % 16 != 0 or int(image.shape[1]) % 9 != 0:
            print("Check the readme: the emulator resolution must be 9:16 (portrait), not 16:9.")
            print("Your screen resolution is {0} x {1}".format(int(image.shape[1]), int(image.shape[0])))
        elif image.shape[0] != 1920 and image.shape[1] != 1080:
            print("Original emulator resolution: {0} x {1}".format(image.shape[1], image.shape[0]))
            image = self.reimage(image)
    except Exception:
        print("The screenshot captured from the emulator is broken; reinstall the emulator or fix the setup.")
        raw = input("Press Enter to close the window")
def img_in_thumbnail(filename):
    img = read_img(filename)
    cvimg = cv.imdecode(np.frombuffer(img, np.uint8), cv.IMREAD_COLOR)
    cvimg = cv.cvtColor(cvimg, cv.COLOR_BGR2RGB)
    # corrupting huffman tables
    index = 2
    marker = 0
    while marker != 0xDA:
        marker = img[index + 1]
        index += 2
        marker_length = img[index] * 256 + img[index + 1]
        if marker == 0xC4:  # huffman table
            img[index + 2:index + marker_length] = bytearray([255]) * (marker_length - 2)
        index += marker_length
    # moving img to thumbnail
    res_multiplier = random.randint(1, 25)
    resolution = (4 * res_multiplier, 3 * res_multiplier)
    cvimg = cv.resize(cvimg, resolution)
    img = (img[:0x4]
           + (resolution[0] * resolution[1] * 3 + 0x10).to_bytes(2, "big")
           + img[0x6:0x12]
           + resolution[0].to_bytes(1, "big")
           + resolution[1].to_bytes(1, "big")
           + bytearray(cvimg.flatten())
           + img[0x14:])
    write_img(filename, img)
def preprocess_image(filestr):
    # np.frombuffer replaces the deprecated np.fromstring for raw byte input
    image = cv2.imdecode(np.frombuffer(filestr, np.uint8), cv2.IMREAD_COLOR)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image = cv2.resize(image, (200, 200))
    image = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    return quantify_image(image)
def __init__(self, fileobj):
    # Image as CV2 array
    buff = fileobj.read()
    nparr = np.frombuffer(buff, np.uint8)  # np.fromstring is deprecated for binary data
    self.cvimg = cv2.imdecode(nparr, 1)
    self.PRINT_PROCESS = False
    self.PRINT_RESULT = False
def canny_edge_detection_preview():
    """
    edge detection preview
    ---
    tags:
      - image
    parameters:
      - in: formData
        name: image
        type: file
        required: true
        description: The image to upload.
    responses:
      200:
        description: the edge detection preview image
        content:
          image/png:
            schema:
              type: string
              format: binary
    """
    if 'image' not in request.files:
        raise ParameterLostError("image")
    img = cv2.imdecode(
        numpy.frombuffer(request.files['image'].read(), numpy.uint8),
        cv2.IMREAD_UNCHANGED)
    edges = cv2.Canny(img, 100, 200)
    _, f = cv2.imencode(".png", edges)
    return send_file(io.BytesIO(f.tobytes()), "image/png")
def imread(img_or_path, flag='color'):
    """Read an image.

    Args:
        img_or_path (ndarray or str): Either a numpy array or image path.
            If it is a numpy array (loaded image), then it will be returned as is.
        flag (str): Flags specifying the color type of a loaded image,
            candidates are `color`, `grayscale` and `unchanged`.

    Returns:
        ndarray: Loaded image array.
    """
    if isinstance(img_or_path, np.ndarray):
        return img_or_path
    elif isinstance(img_or_path, str):
        flag = imread_flags[flag] if isinstance(flag, str) else flag
        cvtools.check_file_exist(
            img_or_path, 'img file does not exist: {}'.format(img_or_path))
        try:
            # PIL: open the image file without loading the raster data,
            # just to validate that it can be decoded.
            Image.open(img_or_path)
            # im = cv2.imdecode(np.fromfile(image_name, dtype=np.uint8), cv2.IMREAD_COLOR)
        except (FileNotFoundError, Image.DecompressionBombError) as e:
            print(e)
            return None
        return cv.imdecode(np.fromfile(img_or_path, dtype=np.uint8), flag)
    else:
        raise TypeError('"img" must be a numpy array or a filename')
def main():
    if len(sys.argv) != 2:
        exit('Incorrect amount of parameters. Call: python predict.py image.png')
    file = sys.argv[1]
    if 'http' in file:
        with urllib.request.urlopen(file) as req:
            nparray = np.asarray(bytearray(req.read()), dtype=np.uint8)
    else:
        with open(file, 'rb') as stream:
            bytes = bytearray(stream.read())
            nparray = np.array(bytes, dtype=np.uint8)
    data = cv2.imdecode(nparray, cv2.IMREAD_GRAYSCALE)
    if data is None:
        exit('Image is None.')
    model = load_model(os.path.dirname(os.path.abspath(__file__)) + '/model.h5')
    ret, threshed = cv2.threshold(data, 180, 255, cv2.THRESH_BINARY)
    chars_data = [
        threshed[:, 6:21], threshed[:, 21:36], threshed[:, 36:51], threshed[:, 51:66]
    ]
    output = ''
    for char_data in chars_data:
        char_data = np.reshape(char_data, (24, 15, 1))
        char_data = np.array([char_data], dtype=float) / 255.0
        output += chars[np.argmax(np.round(model.predict(char_data), 3))]
    print(output)
def generate(chars):
    # Initialise empty list of lists
    chars_data = []
    chars_labels = []
    # Start at -1 since I add one when a new character begins
    i = -1
    for filename in os.listdir("really-simple-captcha/images"):
        # For each image: read it, resize and append it to the dataset, and add the appropriate label.
        # Also use this workaround instead of imread() because of UTF-8 filename handling in Windows.
        stream = open(os.path.join("really-simple-captcha/images", filename), 'rb')
        bytes = bytearray(stream.read())
        nparray = np.array(bytes, dtype=np.uint8)
        data = cv2.imdecode(nparray, cv2.IMREAD_GRAYSCALE)
        if data is not None:
            # If first occurrence, add category.
            i += 1 if '00001' == filename[1:6] else 0
            # Apply threshold
            ret, threshed = cv2.threshold(data, 180, 255, cv2.THRESH_BINARY)
            # Already 24x15, so just add channel dimension, and append
            threshed = np.reshape(threshed, (24, 15, 1))
            chars_data.append(threshed)
            chars_labels.append(i)
    # Create an array of character data; divide by 255.0 so each pixel value is within 0 and 1
    data = np.array(chars_data) / 255.0
    # Make a binary (one-hot) matrix from the labels
    labels = to_categorical(chars_labels)
    return train_test_split(data, labels, test_size=0.1)
def handle(self):
    msg = self.data
    img = cv2.imdecode(
        np.frombuffer(base64.b64decode(msg.split(',')[1]), np.uint8),
        cv2.IMREAD_COLOR)
    cv2.imshow('image', img)
    cv2.waitKey(1)
def output(data):
    img = open_image(BytesIO(data))
    img.resize(torch.Size([img.shape[0], 500, 500]))
    pred_1 = learn.predict(img)[1]
    mask_1 = pred_1.numpy()[0]
    pred_2 = learn_2.predict(img)[1]
    mask_2 = pred_2.numpy()[0]
    pred_3 = learn_3.predict(img)[1]
    mask_3 = pred_3.numpy()[0]
    image = np.asarray(bytearray(data), dtype="uint8")
    img_cv = cv2.imdecode(image, cv2.IMREAD_COLOR)
    img_cv = cv2.resize(img_cv, (500, 500))
    mask_dark_circles = np.copy(mask_1)
    mask_dark_circles[mask_dark_circles == 3] = 2
    contours = find_boundaries(mask_dark_circles, connectivity=1, mode='thick', background=2)
    img_cv[contours == True] = 255
    img_cv[mask_1 == 3] = (255, 0, 0)
    img_cv[mask_1 == 4] = 150
    img_cv[mask_2 == 5] = (112, 72, 242)
    img_cv[mask_3 == 6] = 255
    t = time.time()
    cv2.imwrite('./output/' + str(t) + '.png', img_cv, [int(cv2.IMWRITE_PNG_COMPRESSION), 9])
    filename = './output/' + str(t) + '.png'
    with open(filename, "rb") as img_file:
        my_string = base64.b64encode(img_file.read()).decode('ascii')
    # return send_file(filename, attachment_filename="output.png")
    return {"image": my_string}
async def ocr(url):
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as r:
            size = int(r.headers['Content-length'])
            if size > 1e6:
                img = np.asarray(bytearray(await r.read()), dtype="uint8")
                flag = cv2.IMREAD_GRAYSCALE
                if size > 2e6:
                    flag = cv2.IMREAD_REDUCED_GRAYSCALE_2
                img = cv2.imdecode(img, flag)
                _, img = cv2.imencode(os.path.splitext(url)[1], img)
                data = aiohttp.FormData()
                data.add_field('apikey', API_KEY)
                data.add_field('OCREngine', '2')
                data.add_field('file', img.tobytes(), content_type='image/png',
                               filename='image.png')
                ocr_url = 'https://api.ocr.space/parse/image'
                async with session.post(ocr_url, data=data) as r:
                    json = await r.json()
            else:
                ocr_url = f'https://api.ocr.space/parse/imageurl?apikey={API_KEY}&OCREngine=2&url={url}'
                async with session.get(ocr_url) as r:
                    json = await r.json()
    if json['OCRExitCode'] != 1:
        return False, '.'.join(json['ErrorMessage'])
    return True, json['ParsedResults'][0]['ParsedText']
def download_images(self):
    self.download_progress_bar.config(maximum=len(self.queried_images))
    for img_name, img_dict in sorted(self.queried_images.items()):
        image_name, tmp_img = lungmap_utils.client.get_image_from_lungmap(
            img_dict['url'])
        cv_img = cv2.imdecode(np.frombuffer(tmp_img, dtype=np.uint8),
                              cv2.IMREAD_COLOR)
        rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
        hsv_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2HSV)
        self.images[image_name] = {
            'rgb_img': rgb_image,
            'hsv_img': hsv_image,
            'corr_rgb_img': None,
            'dev_stage': img_dict['dev_stage'],
            'mag': img_dict['mag'],
            'probes': img_dict['probes'],
            'probe_colors': img_dict['probe_colors'],
            'probe_structure_map': img_dict['probe_structure_map']
        }
        self.file_list_box.insert(tk.END, image_name)
        # update progress bar
        self.download_progress_bar.step()
        self.download_progress_bar.update()
def callback(self, ros_data):
    '''Callback function of subscribed topic.
    Here images get converted and features detected'''
    if VERBOSE:
        print('received image of type: "%s"' % ros_data.format)
    np_arr = np.frombuffer(ros_data.data, np.uint8)  # direct conversion to CV2
    frame = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)  # OpenCV >= 3.0
    # Get frame dimensions
    frame_height = frame.shape[0]
    frame_width = frame.shape[1]
    frame_channels = frame.shape[2]
    # Image processing
    res = self.blue_filtering(frame)
    # Detect centroid
    cX, cY = self.detect_centrode(res)
    # Draw the point (cX, cY) on the "res" image
    cv2.circle(res, (int(cX), int(cY)), 5, (255, 0, 0), -1)
    # Normalizing w.r.t. the center
    cX = int(cX - frame_width / 2)
    cY = int(cY - frame_height / 2)
    self.greenX_pub.publish(cX)
    # Print the center-of-mass coordinates w.r.t. the center of the image and display them
    if VERBOSE:
        print(cX, cY)
        cmd = self.extraction(cX)
        print(cmd)
    # Display the result
    if DISPLAY:
        cv2.imshow('frame', frame)
        cv2.imshow("res_center", res)
        cv2.waitKey(2)
def open_img(fp):
    if isinstance(fp, BytesIO):
        file_bytes = np.asarray(bytearray(fp.read()), dtype=np.uint8)
        img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
    else:
        img = cv2.imread(fp)
    return img
def listfilels(rootDir):
    list_dirs = os.walk(rootDir)
    # print("list_dirs", list_dirs)
    for root, dirs, files in list_dirs:
        # print("walk", root, dirs, files)
        # for d in dirs:
        #     print('first', os.path.join(root, d))
        # print(type(files), len(files))
        for f in range(len(files)):
            # print("files", files[0])
            # fileId = files[f].split('.')[0]
            # print("fileid", fileId)
            filepath = os.path.join(root, files[f])
            try:
                # src = cv2.imread(filepath, 1)
                src = cv2.imdecode(np.fromfile(filepath, dtype=np.uint8), -1)
                # print("src=", filepath, src.shape)
                os.remove(filepath)
                # print("root", root)
                # print(filepath)
                gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
                cv2.imwrite(root + "\\" + str(f) + ".jpg", gray)
                # e.g. r"C:\Users\29272\Desktop\111\1.jpg"
            except:
                # decoding failed: drop the file
                os.remove(filepath)
                continue
def delete_invalid_files(image_root):
    for name in os.listdir(image_root):
        full_name = image_root + "/" + name
        img = cv2.imdecode(np.fromfile(full_name, np.uint8), -1)
        # remove files that fail to decode or are not 3-channel colour images
        if img is None or len(img.shape) != 3 or img.shape[2] != 3:
            print("Deleted", name, img.shape if img is not None else "unreadable")
            os.remove(full_name)
async def on_message(self, message):
    msg_author = str(message.author)
    print(msg_author + ': ' + message.content)  # might as well do some creepy logging
    if msg_author == POKECORD_ID and message.embeds:
        try:
            url = message.embeds[0].image.url
            split_url = url.replace('/', '.').split('.')
            if 'PokecordSpawn' in split_url:
                response = await http_client.get(url)
                byte_stream = await response.content.read()
                img_array = np.asarray(bytearray(byte_stream), dtype='uint8')
                img = cv2.imdecode(img_array, cv2.IMREAD_ANYCOLOR)
                img = cv2.resize(img, dsize=(100, 100), interpolation=cv2.INTER_CUBIC)
                img = np.array([img])
                img = img.reshape(img.shape[0], -1)
                prediction = clf.predict(img)[0]
                if HINTS_ENABLED and DELAY > 0:
                    hidden_chars = '* ' * (len(prediction) - 2)
                    hint_name = str(prediction[0] + ' ' + hidden_chars + prediction[-1])
                    await message.channel.send(
                        'I think this might be ' + hint_name + '. '
                        + str(DELAY) + ' seconds until I try to catch it.')
                await asyncio.sleep(DELAY)
                await message.channel.send('p!catch ' + prediction)
        except Exception:
            return
def screenshots(self):
    pipe = subprocess.Popen("/adb/adb.exe shell screencap -p",
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
    image_bytes = pipe.stdout.read().replace(b'\r\n', b'\n')
    target_img = cv2.imdecode(np.frombuffer(image_bytes, np.uint8), cv2.IMREAD_COLOR)
    return target_img
def _check(self):
    print(self.class_name, "self-check started:")
    count, not_ok = 1, 0
    file_list = os.listdir(self.folder_path)
    for file_name in file_list:
        full_name = os.path.join(self.folder_path, file_name)
        print('\r', count, " / ", len(file_list), end='')
        count += 1
        try:
            if cv2.imdecode(np.fromfile(full_name, dtype=np.uint8), -1) is None:
                os.remove(full_name)
                not_ok += 1
                print(" Deleted:", full_name)
            else:
                # rename the file so its extension matches the actual image format
                temp = imghdr.what(full_name)
                if temp is not None:
                    tmp_pos = file_name.rfind(".")
                    if tmp_pos != -1:
                        os.rename(full_name,
                                  os.path.join(self.folder_path, file_name[:tmp_pos + 1] + temp))
                else:
                    os.remove(full_name)
        except Exception as e:
            os.remove(full_name)
            not_ok += 1
            print(" Deleted:", full_name)
            print(e.__class__, str(e))
            continue
    print("\nDeleted %d images that could not be opened" % not_ok)
    print(self.class_name, "self-check finished")
def upload_file():
    if request.method == "POST":
        if request.files:
            image = []
            image.append(request.files["image0"])
            image.append(request.files["image1"])
            image.append(request.files["image2"])
            form = request.form
            location = request.form["location"]
            field = request.form["field"]
            pond = request.form["pond"]
            # file_1 = open("location.txt", "a")
            # file_1.write(image_0.filename + " " + image_1.filename + " " + image_2.filename
            #              + " " + location + " " + field + " " + " " + pond)
            # file_1.write("\n")
            # file_1.close()
            img_cv2 = []
            for img in image:
                npimg = np.fromfile(img, np.uint8)
                img_cv2.append(cv2.imdecode(npimg, cv2.IMREAD_COLOR))
                print(img.filename)
            print(location)
            print(field)
            print(pond)
            return render_template("interface.html",
                                   location=location, pond=pond, field=field)
            # return redirect(request.url, form=form)
        else:
            # No files were uploaded; location/pond/field are undefined in this branch,
            # so just re-render the empty form.
            return render_template("interface.html")
def wichBtn(self):
    global path
    global lab1img, lab2img, lab3img, lab4img
    path = Qt.QFileDialog.getOpenFileNames()
    # Clear and hide one preview label per selected file (up to four),
    # replacing the original repeated per-count blocks.
    preview_labels = [self.ui.lab11, self.ui.lab22, self.ui.lab33, self.ui.lab44]
    for lab in preview_labels[:len(path[0])]:
        if lab.isVisible():
            lab.clear()
            lab.hide()
    for i in range(0, len(path[0])):
        img1 = cv2.imdecode(np.fromfile(path[0][i], dtype=np.uint8), -1)
        sp = img1.shape
        img11 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
        _image = QtGui.QImage(img11[:], img11.shape[1], img11.shape[0],
                              img11.shape[1] * 3, QtGui.QImage.Format_RGB888)
        # int() because QPixmap.scaled expects integer sizes
        outimg = QtGui.QPixmap(_image).scaled(
            int(sp[1] * (self.ui.lab1.height() / sp[0])),
            int(sp[0] * (self.ui.lab1.height() / sp[0])))  # set the displayed image size
        if i == 0:
            self.ui.lab1.setPixmap(outimg)
            lab1img = img1
        elif i == 1:
            self.ui.lab2.setPixmap(outimg)
            lab2img = img1
        elif i == 2:
            self.ui.lab3.setPixmap(outimg)
            lab3img = img1
        else:
            self.ui.lab4.setPixmap(outimg)
            lab4img = img1
def index():
    if request.method == 'GET':
        # User is requesting the form
        return render_template('form.html')
    elif request.method == 'POST':
        # User has sent us data
        image = request.files['image']
        city = request.form.get('cities')
        if image.filename == '' or not allowed_file(image.filename):
            # Bad input
            return render_template('error.html')
        filestr = image.read()
        # convert the raw bytes to a numpy array
        npimg = np.frombuffer(filestr, np.uint8)
        # convert the numpy array to an image
        img = cv2.imdecode(npimg, cv2.IMREAD_UNCHANGED)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        output = inference(img, show_result=False, target_shape=(260, 260))
        # Loop through faces from output and check for masks
        mask_count = 0
        for face in output:
            if face[0] == 0:
                mask_count += 1
        face_count = len(output)
        if face_count > 0:
            non_mask_count = face_count - mask_count
            score = non_mask_count / face_count
            json_data = edict({'id': str(uuid.uuid4()),
                               'city': city,
                               'face_count': face_count,
                               'mask_count': mask_count,
                               'non_mask_count': non_mask_count,
                               'score': score})
            container.create_item(body=json_data)
        return render_template('result.html', city=get_city_name(city),
                               face_count=len(output), mask_count=mask_count)
def run(self):
    print("Starting camera stream...")
    try:
        stream = requests.get(self.url, stream=True)
    except Exception:
        print("Couldn't connect to camera")
        return
    self.stopThread = False
    streamBytes = bytes()
    try:
        while not self.stopThread:
            streamBytes += stream.raw.read(256)
            # look for the JPEG start (FFD8) and end (FFD9) markers in the MJPEG stream
            a = streamBytes.find(b'\xff\xd8')
            b = streamBytes.find(b'\xff\xd9')
            if a != -1 and b != -1:
                jpg = streamBytes[a:b + 2]
                streamBytes = streamBytes[b + 2:]
                image = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
                image = Image.fromarray(image)
                # OpenCV decodes to BGR; swap channels to get RGB for display
                b, g, r = image.split()
                image = Image.merge("RGB", (r, g, b))
                image = image.resize((image.width * 2, image.height * 2))
                image = ImageTk.PhotoImage(image)
                self.label.config(image=image)
                self.label.image = image
    finally:
        stream.close()
        print("Camera stream stopped")
def extract_pdf_image(pdf_content, trace_name, page=0):
    """
    ImageMagick (and ghostscript) is used to generate the image.

    :param pdf_content: as binary string object.
    :param trace_name: the filename on the client, for traceability
    :param page: page number (from 0)
    :return: array of floats with shape (299, 299, 3); None is returned if no good image is produced.
    """
    jpg_content = None
    pageSpec = "[" + str(page) + "]"
    # start subprocess
    # Use pipes so that stderr can be collected (and not just mixed into main process stderr,
    # hiding other errors). The parameters here must match training to maximize accuracy.
    convert_cmd = ['convert', "pdf:-" + pageSpec, '-background', 'white', '-alpha', 'remove',
                   '-equalize', '-quality', '95', '-thumbnail', '156x', '-gravity', 'north',
                   '-extent', '224x224', "jpg:-"]
    if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
        log.debug("ImageMagick Command=" + " ".join(convert_cmd))
    t0 = time.time()
    pp = subprocess.Popen(convert_cmd, bufsize=262144, stdin=subprocess.PIPE,
                          stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    try:
        pp.stdin.write(pdf_content)
    except IOError:
        pass
    try:
        outs, errs = pp.communicate(timeout=30)
        # get jpg bytes
        jpg_content = outs
        # check if the jpg has sufficient size
        if len(jpg_content) <= 3000:
            jpg_content = None  # probably a blank image, so ignore it
            log.warning("ignoring blank jpg produced by imagemagick for %s" % trace_name)
    except subprocess.TimeoutExpired:
        pp.kill()
        # drain residue so the subprocess can really finish
        outs, errs = pp.communicate()
        log.warning("convert command (imagemagick) on %s did not terminate in %.2f seconds, terminating."
                    % (trace_name, time.time() - t0))
    if jpg_content is None:
        return None
    # convert jpg_content to an image array
    img_stream = BytesIO(jpg_content)
    img_array = cv2.imdecode(np.frombuffer(img_stream.read(), np.uint8), cv2.IMREAD_COLOR)
    if img_array is None:
        log.warning("imdecode failed for %s" % trace_name)
        return None
    img_array = img_array.astype(np.float32)
    # img_array = cv2.imdecode(np.fromstring(img_stream.read(), np.uint8), cv2.IMREAD_COLOR).astype(np.float32)
    # ImageMagick produced 224x224; resize to 299x299 for shape (299, 299, 3)
    # ToDo: target size could vary, depending on the pre-trained model, should auto-adjust
    img299 = cv2.resize(img_array, dsize=(299, 299), interpolation=cv2.INTER_LINEAR)
    return img299