def run(self, video_path):
    self.imageCount = 0
    self.capture = cv2.VideoCapture(video_path)
    while True:
        ret, frame = self.capture.read()
        if not ret:
            print("video parsing finished")
            break
        im = darknet.nparray_to_image(frame)
        boxes = darknet.detect(self.net, self.meta, im)
        for i in range(len(boxes)):
            score = boxes[i][1]
            label = boxes[i][0]
            # detect() returns boxes as (center_x, center_y, width, height);
            # convert to corner coordinates for cv2.rectangle
            xmin = boxes[i][2][0] - boxes[i][2][2] / 2
            ymin = boxes[i][2][1] - boxes[i][2][3] / 2
            xmax = boxes[i][2][0] + boxes[i][2][2] / 2
            ymax = boxes[i][2][1] + boxes[i][2][3] / 2
            cv2.rectangle(frame, (int(xmin), int(ymin)), (int(xmax), int(ymax)),
                          (0, 255, 0), 2)
            cv2.putText(frame, str(label) + str(round(score, 3)),
                        (int(xmin), int(ymin)),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.8,
                        color=(0, 255, 255), thickness=2)
        frame = cv2.resize(frame, (1024, 900), interpolation=cv2.INTER_CUBIC)
        cv2.imwrite("./temps/temp" + str(self.imageCount) + ".jpg", frame)
        self.imageCount += 1
        print(self.imageCount)
    print("finish")
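The center-to-corner arithmetic above recurs in nearly every snippet in this section. A small helper can consolidate it; this is a minimal sketch assuming detect() returns (label, score, (center_x, center_y, width, height)) tuples, as the loops here index them. The name yolo_box_to_corners is ours, not part of darknet.py.

def yolo_box_to_corners(det):
    # det is assumed to be (label, score, (cx, cy, w, h)) as returned by detect()
    cx, cy, w, h = det[2]
    xmin = int(cx - w / 2)
    ymin = int(cy - h / 2)
    xmax = int(cx + w / 2)
    ymax = int(cy + h / 2)
    return xmin, ymin, xmax, ymax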
def run(self):
    self.capture = cv2.VideoCapture(0)
    self.imageCount = 0
    while True:
        ret, frame = self.capture.read()
        if not ret:
            break
        im = darknet.nparray_to_image(frame)
        boxes = darknet.detect(self.net, self.meta, im)
        for i in range(len(boxes)):
            score = boxes[i][1]
            label = boxes[i][0]
            xmin = boxes[i][2][0] - boxes[i][2][2] / 2
            ymin = boxes[i][2][1] - boxes[i][2][3] / 2
            xmax = boxes[i][2][0] + boxes[i][2][2] / 2
            ymax = boxes[i][2][1] + boxes[i][2][3] / 2
            cv2.rectangle(frame, (int(xmin), int(ymin)), (int(xmax), int(ymax)),
                          (0, 255, 0), 2)
            # str(label)[2:-1] strips the b'...' wrapper from the bytes label
            cv2.putText(frame, str(label)[2:-1] + str(round(score, 3)),
                        (int(xmin), int(ymin)),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.8,
                        color=(0, 255, 255), thickness=2)
        frame = cv2.resize(frame, (1024, 900), interpolation=cv2.INTER_CUBIC)
        cv2.imwrite("./temps/temp" + str(self.imageCount) + ".jpg", frame)  # save the annotated frame
        self.capture_image.emit("./temps/temp" + str(self.imageCount) + ".jpg")  # signal that the image was saved
        time.sleep(0.05)
        self.imageCount += 1
        if cv2.waitKey(1) & 0xFF == ord('q'):  # press 'q' to stop capturing
            break
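str(label)[2:-1] above works because detect() hands back class names as bytes, so str() yields "b'...'" and the slice removes the wrapper. Decoding is the more direct route; a minimal sketch:

label = b'person'             # detect() returns class names as bytes
text = label.decode('utf-8')  # 'person', without the b'...' wrapper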
def run(self):
    self.imageCount = 0
    self.capture = cv2.VideoCapture(self.video_path)
    while True:
        ret, frame = self.capture.read()
        if not ret:
            break
        im = darknet.nparray_to_image(frame)
        boxes = darknet.detect(self.net, self.meta, im)
        for i in range(len(boxes)):
            score = boxes[i][1]
            label = boxes[i][0]
            xmin = boxes[i][2][0] - boxes[i][2][2] / 2
            ymin = boxes[i][2][1] - boxes[i][2][3] / 2
            xmax = boxes[i][2][0] + boxes[i][2][2] / 2
            ymax = boxes[i][2][1] + boxes[i][2][3] / 2
            cv2.rectangle(frame, (int(xmin), int(ymin)), (int(xmax), int(ymax)),
                          (0, 255, 0), 2)
            cv2.putText(frame, str(label), (int(xmin), int(ymin)),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.8,
                        color=(0, 255, 255), thickness=2)
        cv2.imwrite("./temps/temp" + str(self.imageCount) + ".jpg", frame)
        self.add_image.emit("./temps/temp" + str(self.imageCount) + ".jpg")
        time.sleep(0.05)
        self.imageCount += 1
def videoDetection(self):
    answer = QMessageBox.warning(
        self, "Warning",
        "If you ran camera or video detection last time without exporting, "
        "continuing will discard that data. Continue?",
        QMessageBox.No | QMessageBox.Yes, QMessageBox.Yes)
    if answer == QMessageBox.No:
        return
    if os.path.exists("./temps"):
        shutil.rmtree("./temps")
    os.mkdir("./temps")
    fname, _ = QFileDialog.getOpenFileName(self, 'Select a video file', ".", "(*.mp4)")
    typeSuffix = fname.split('.')[-1]  # filename extension
    if typeSuffix != "mp4":
        QMessageBox.critical(self, "Wrong file type",
                             "No video selected, or the selected file is not an mp4 file. Please check!",
                             QMessageBox.Yes, QMessageBox.Yes)
    else:  # valid video path
        cap = cv2.VideoCapture(fname)
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            im = darknet.nparray_to_image(frame)
            r = darknet.detect(self.net, self.meta, im)
        cap.release()
        cv2.destroyAllWindows()
def classify(self, img):
    self.log("Preparing image...")
    # Convert to c image
    c_img = darknet.nparray_to_image(img)
    self.log("Done, classifying...")
    result = darknet.detect(self.network, self.metadata, c_img)
    self.log("Done, drawing boxes...")
    new_image = self.draw_boxes(img, result)
    self.log("Done")
    return new_image
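draw_boxes is not shown in the snippet above; this is our sketch of what it might look like, assuming result holds (label, score, (cx, cy, w, h)) tuples as in the other snippets here, not the author's actual implementation:

def draw_boxes(self, img, result):
    # Annotate a copy of the frame with one rectangle and caption per detection.
    out = img.copy()
    for label, score, (cx, cy, w, h) in result:
        xmin, ymin = int(cx - w / 2), int(cy - h / 2)
        xmax, ymax = int(cx + w / 2), int(cy + h / 2)
        cv2.rectangle(out, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)
        name = label.decode('utf-8') if isinstance(label, bytes) else str(label)
        cv2.putText(out, "{} {:.3f}".format(name, score), (xmin, ymin),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)
    return out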
def imageDetection(self):
    answer = QMessageBox.warning(
        self, "Warning",
        "If you ran camera or video detection last time without exporting, "
        "continuing will discard that data. Continue?",
        QMessageBox.No | QMessageBox.Yes, QMessageBox.Yes)
    if answer == QMessageBox.No:
        return
    if os.path.exists("./temps"):
        shutil.rmtree("./temps")
    os.mkdir("./temps")
    detection_image, _ = QFileDialog.getOpenFileName(
        self, 'Select an image file', ".", "(*.jpg)")
    typeSuffix = detection_image.split('.')[-1]  # filename extension
    if typeSuffix != "jpg":
        QMessageBox.critical(self, "Wrong file type",
                             "No image selected, or the selected file is not a jpg file. Please check!",
                             QMessageBox.Yes, QMessageBox.Yes)
    else:  # valid image path
        frame = cv2.imread(detection_image)
        im = darknet.nparray_to_image(frame)
        boxes = darknet.detect(self.net, self.meta, im)
        for i in range(len(boxes)):
            score = boxes[i][1]
            label = boxes[i][0]
            xmin = max(5, boxes[i][2][0] - boxes[i][2][2] / 2)  # the text position can be adjusted based on the coordinates
            ymin = max(5, boxes[i][2][1] - boxes[i][2][3] / 2)
            xmax = boxes[i][2][0] + boxes[i][2][2] / 2
            ymax = boxes[i][2][1] + boxes[i][2][3] / 2
            cv2.rectangle(frame, (int(xmin), int(ymin)), (int(xmax), int(ymax)),
                          (0, 255, 0), 2)
            cv2.putText(frame, str(label)[2:-1] + str(round(score, 3)),
                        (int(xmin), int(ymin)),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.8,
                        color=(255, 0, 0), thickness=2)
        frame = cv2.resize(frame, (1024, 900), interpolation=cv2.INTER_CUBIC)
        cv2.imwrite("./temps/temp" + ".jpg", frame)  # save the annotated image
        self.showImage("./temps/temp" + ".jpg")
thrs = [float(el) for el in args.threshold.split(',')]
if len(thrs) == 1:
    thrs = thrs * len(classnames)
assert len(thrs) == len(classnames), \
    "Provide either a single threshold or one threshold per class"
thrs = {cls: thrs[i] for i, cls in enumerate(classnames)}

print(cfg, met, wgh)
dn.set_gpu(0)
net = dn.load_net(cfg.encode('utf-8'), wgh.encode('utf-8'), 0)
meta = dn.load_names(met.encode('utf-8'))

for i, sample in enumerate(dataset):
    print("{} ({} from {})".format(sample['imgFn'], i + 1, len(dataset)))
    img = cv2.imread(sample['imgFn'])
    dets = dn.detect(net, meta, dn.nparray_to_image(img))
    dets = convertWithThreshold(dets, thrs)
    if verbose:
        print('\t{}'.format(sample['objs']))
        print('\t{}'.format(dets))
    estimateDets(dets, sample['objs'], classnames, generalEstimation)
    for classname, estimation in perClassEstimations.items():
        estimateDets(dets, sample['objs'], (classname,), estimation)
    estimationTable.analyseDetections(dets, sample['objs'])

print("General estimation results:")
generalEstimation.printStats()
print("Per class estimation results:")
for classname, estimation in perClassEstimations.items():
    print("----- " + classname + " -----")
    estimation.printStats()
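convertWithThreshold is called above but not defined in the snippet. A minimal sketch consistent with how it is invoked (dets from dn.detect, thrs as a class-name-to-threshold dict); the body is our assumption, not the script's actual code:

def convertWithThreshold(dets, thrs):
    # Keep only detections whose score clears the threshold for their class.
    kept = []
    for label, score, bbox in dets:
        name = label.decode('utf-8') if isinstance(label, bytes) else label
        if name in thrs and score >= thrs[name]:
            kept.append((name, score, bbox))
    return kept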
origin_img = "data/dog.jpg"
out_img = "data/dog_test.jpg"


def showPicResult(image, r):
    img = cv2.imread(image)
    cv2.imwrite(out_img, img)
    for i in range(len(r)):
        x1 = r[i][2][0] - r[i][2][2] / 2
        y1 = r[i][2][1] - r[i][2][3] / 2
        x2 = r[i][2][0] + r[i][2][2] / 2
        y2 = r[i][2][1] + r[i][2][3] / 2
        im = cv2.imread(out_img)
        cv2.rectangle(im, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 3)
        cv2.imwrite(out_img, im)  # re-read/re-write so each box accumulates in the output file
    cv2.imshow('yolo_image_detector', cv2.imread(out_img))
    cv2.waitKey(0)
    cv2.destroyAllWindows()


net = dn.load_net(str.encode("cfg/yolov3.cfg"), str.encode("yolov3.weights"), 0)
meta = dn.load_meta(str.encode("cfg/coco.data"))
image = dn.nparray_to_image(cv2.imread(origin_img))
r = dn.detect(net, meta, image)
#r = dn.detect(net, meta, "data/dog.jpg")
print(r)
showPicResult(origin_img, r)
cv2.imshow('yolo_image_detector', image)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
net = dn.load_net(str.encode("cfg/yolov3.cfg"), str.encode("yolov3.weights"), 0)
meta = dn.load_meta(str.encode("cfg/coco.data"))
cap = cv2.VideoCapture(video_path)
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:  # stop when no frame is returned
        break
    start = time.time()
    image = dn.nparray_to_image(frame)
    r = dn.detect(net, meta, image)
    showPicResult(frame)
    end = time.time()
    print(r)
    #fps = cap.get(cv2.CAP_PROP_FPS)
    seconds = end - start
    fps = 1 / seconds  # effective detection throughput for this frame
    print("fps: ", fps)
    for i in range(len(r)):
        if r[i][0] == b'person':  # labels come back as bytes
            print("ALERT!")
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
def __call__(self, img, threshold):
    im = dn.nparray_to_image(img)
    print("Created np image")
    dets = dn.detect(self.net, self.meta, im, threshold)
    return self._fromYoloFormat(dets)
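_fromYoloFormat is referenced above but not shown. A minimal sketch of the conversion it presumably performs, turning darknet's (label, score, (cx, cy, w, h)) tuples into corner-style boxes; the dict keys and output shape are our assumptions:

def _fromYoloFormat(self, dets):
    # Convert darknet detections to dicts with corner coordinates.
    results = []
    for label, score, (cx, cy, w, h) in dets:
        results.append({
            'label': label.decode('utf-8') if isinstance(label, bytes) else label,
            'score': score,
            'box': (int(cx - w / 2), int(cy - h / 2),
                    int(cx + w / 2), int(cy + h / 2)),
        })
    return results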
meta = darknet.load_meta(
    b"C:/Users/user/anaconda3/Lib/site-packages/darknet/data/1117/obj.data")
cap = cv2.VideoCapture(
    "C:/Users/user/anaconda3/Lib/site-packages/darknet/data/1117/suwon_test.mp4")
print(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
print(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
while cap.isOpened():
    ret, image = cap.read()
    if not ret:  # check before resizing; resizing a None frame would crash
        break
    image = cv2.resize(image, dsize=(480, 640), interpolation=cv2.INTER_AREA)
    print(image.shape)
    frame = darknet.nparray_to_image(image)
    r = darknet.detect_image(net, meta, frame)
    boxes = []
    for k in range(len(r)):
        width = r[k][2][2]
        height = r[k][2][3]
        center_x = r[k][2][0]
        center_y = r[k][2][1]
        bottomLeft_x = center_x - (width / 2)
        bottomLeft_y = center_y - (height / 2)
        x, y, w, h = bottomLeft_x, bottomLeft_y, width, height
        boxes.append((x, y, w, h))
    for k in range(len(boxes)):
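nparray_to_image allocates a C-side image on every iteration in the loops above, and none of the snippets releases it, so long videos will leak memory. A sketch of the fix, assuming the binding in use exposes free_image as the stock darknet.py ctypes wrapper does:

# Release the C image once detection is done (free_image is assumed
# to be exported by the darknet binding, as in the stock darknet.py).
im = darknet.nparray_to_image(frame)
try:
    r = darknet.detect_image(net, meta, im)
finally:
    darknet.free_image(im)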