def detection():
    """Flask view: run person detection on an uploaded image and render the result page.

    Expects a POST with form field ``image_file`` naming the image to process.
    Renders ``result.html``; ``image_file`` is None when no file name was
    supplied or the request was not a POST.
    """
    import logging

    # Initialize up front so the final render_template never sees an unbound
    # local (the original raised UnboundLocalError on GET or an empty field).
    filename = None
    if request.method == 'POST':
        image_file = request.form['image_file']
        if image_file:
            filename = image_file
            try:
                detect_people(filename)
            except Exception:
                # Best-effort: still serve the page, but record the failure
                # instead of silently swallowing it.
                logging.exception("detect_people failed for %s", filename)
    return render_template('./result.html', image_file=filename)
def _pred_class(image, net, ln, idx, color=(0, 0, 255)):
    """Draw a labelled bounding box on *image* for every detection.

    Runs detect_people over the frame, annotates each hit with a rectangle
    and a confidence caption, and returns the (mutated) image.
    """
    # Run detection for the requested class index.
    detections = detect_people(image, net, ln, personIdx=idx)
    for prob, bbox, _centroid in detections:
        x0, y0, x1, y1 = bbox
        # Bounding box around the detected object.
        cv2.rectangle(image, (x0, y0), (x1, y1), color, 2)
        caption = f'{coco_label_name}: {round(prob*100, 1)}%'
        # Keep the caption inside the frame when the box touches the top edge.
        if y0 - 10 > 10:
            label_y = y0 - 10
        else:
            label_y = y0 + 10
        cv2.putText(image, caption, (x0, label_y),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
    return image
def _pred_frame(frame, idx):
    """Annotate *frame* with person boxes and social-distancing violations.

    Detections whose centroids lie closer than config.MIN_DISTANCE pixels to
    another detection are drawn in red, all others in green, and the total
    violation count is stamped near the bottom-left.  Returns the frame.
    """
    results = detect_people(frame, net, ln, personIdx=idx)
    # Indices of detections that break the minimum-distance rule.
    violations = set()
    # Pairwise distances only make sense with at least two detections.
    if len(results) >= 2:
        pts = np.array([r[2] for r in results])
        dmat = dist.cdist(pts, pts, metric="euclidean")
        # Walk only the upper triangle of the symmetric distance matrix.
        for a in range(dmat.shape[0]):
            for b in range(a + 1, dmat.shape[1]):
                if dmat[a, b] < config.MIN_DISTANCE:
                    violations.update((a, b))
    for index, (prob, bbox, centroid) in enumerate(results):
        x0, y0, x1, y1 = bbox
        cx, cy = centroid
        # Red for violators, green otherwise.
        box_color = (0, 0, 255) if index in violations else (0, 255, 0)
        # Box around the person plus a dot at the centroid.
        cv2.rectangle(frame, (x0, y0), (x1, y1), box_color, 2)
        cv2.circle(frame, (cx, cy), 5, box_color, 1)
    # Overlay the running violation count.
    text = "Social Distancing Violations: {}".format(len(violations))
    cv2.putText(frame, text, (10, frame.shape[0] - 25),
                cv2.FONT_HERSHEY_SIMPLEX, 0.85, (0, 0, 255), 3)
    return frame
else:
    # NOTE(review): this `else:` belongs to an `if` outside this chunk, and
    # the inner loop appears truncated here (`violate.add(j)` and the drawing
    # code never run) — confirm against the full file before editing.
    # Fall back to the default webcam (device 0).
    vs = cv2.VideoCapture(0)
    writer = None
    # Streamlit placeholder that will be updated with live frames.
    image_placeholder = st.empty()
    while True:
        (grabbed, frame) = vs.read()
        # Stop when the stream yields no more frames.
        if not grabbed:
            break
        frame = imutils.resize(frame, width=700)
        # Detect only the "person" class.
        results = detect_people(frame, net, ln, personIdx=LABELS.index("person"))
        # Indices of detections violating the minimum-distance rule.
        violate = set()
        # Need at least two people to measure pairwise distances.
        if len(results) >= 2:
            centroids = np.array([r[2] for r in results])
            D = dist.cdist(centroids, centroids, metric="euclidean")
            # Upper triangle of the symmetric distance matrix.
            for i in range(0, D.shape[0]):
                for j in range(i + 1, D.shape[1]):
                    # Closer than the configured pixel threshold -> violation.
                    if D[i, j] < MIN_DISTANCE:
                        violate.add(i)
# NOTE(review): top-level script fragment; it starts mid-stream (`model`,
# `args`, `Labels` are defined outside this chunk) and the drawing loop is
# cut off at the end — confirm against the full file before editing.
last_layer = model.getLayerNames()
# Map the indices of the unconnected (output) layers to their names.
# NOTE(review): `i[0] - 1` assumes the pre-4.5.4 OpenCV return shape
# (nested 1-element arrays); newer builds return a flat array — confirm
# the cv2 version this runs against.
last_layer = [last_layer[i[0] - 1] for i in model.getUnconnectedOutLayers()]
print("INPUT LOADED")
# Read from the given input file, or the default webcam when empty.
cap = cv2.VideoCapture(args["input"] if args["input"] != "" else 0)
writer = None
while True:
    (access, frame) = cap.read()
    # End of stream / camera failure.
    if not access:
        break
    count = 0
    frame = imutils.resize(frame, width=900)
    results = detect_people(frame, model, last_layer, Labels)
    # Indices of detections violating the distance threshold.
    a = set()
    if len(results) >= 2:
        # Presumably r = (prob, bbox, classes, centroid), centroid at index 3
        # — verify against detect_people's return shape.
        centroids = np.array([r[3] for r in results])
        # Pairwise Euclidean distances; 125 px is a hard-coded threshold.
        for i in range(0, len(centroids)):
            for j in range(i + 1, len(centroids)):
                D = np.linalg.norm(centroids[i] - centroids[j])
                if D < 125:
                    a.add(i)
                    a.add(j)
    for (i, (prob, bounding_box, classes, centroid)) in enumerate(results):
        # Draw red boxes around violators only.
        if i in a:
            X_start, Y_start, X_end, Y_end = bounding_box
            cv2.rectangle(frame, (X_start, Y_start), (X_end, Y_end), (0, 0, 255), 1)
def main(filename=""):
    """Run YOLOv3 person detection on a video and display distancing violations.

    Loads the COCO labels plus YOLOv3 weights/config from ./yolo-coco, then
    reads frames from *filename* (webcam 0 when empty), marks every person
    closer than config.MIN_DISTANCE pixels to another person in red, and shows
    the annotated stream until 'q' is pressed or the stream ends.

    Args:
        filename: path to the input video; "" selects the default webcam.
    """
    base = os.getcwd().replace("\\", "/")
    labelsPath = os.path.sep.join([base + "/yolo-coco/coco.names"])
    LABELS = open(labelsPath).read().strip().split("\n")
    weightsPath = os.path.sep.join([base + "/yolo-coco/yolov3.weights"])
    configPath = os.path.sep.join([base + "/yolo-coco/yolov3.cfg"])
    print("[INFO] loading YOLO from disk...")
    net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
    if config.USE_GPU:
        print("[INFO] setting preferable backend and target to CUDA...")
        net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
        net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
    ln = net.getLayerNames()
    # getUnconnectedOutLayers() returns nested 1-element arrays on OpenCV
    # < 4.5.4 and a flat array on newer builds; flattening handles both
    # (the original `i[0] - 1` crashes with an IndexError on newer cv2).
    ln = [ln[int(i) - 1] for i in np.array(net.getUnconnectedOutLayers()).flatten()]
    print("[INFO] accessing video stream...")
    vs = cv2.VideoCapture(filename.replace("\\", "/") if filename != "" else 0)
    writer = None
    while True:
        (grabbed, frame) = vs.read()
        # End of stream / camera failure.
        if not grabbed:
            break
        frame = imutils.resize(frame, width=700)
        # Detect only the "person" class.
        results = detect_people(frame, net, ln,
                                personIdx=LABELS.index("person"))
        # Indices of detections violating the minimum-distance rule.
        violate = set()
        # Pairwise distances need at least two detections.
        if len(results) >= 2:
            centroids = np.array([r[2] for r in results])
            D = dist.cdist(centroids, centroids, metric="euclidean")
            # Upper triangle of the symmetric distance matrix.
            for i in range(0, D.shape[0]):
                for j in range(i + 1, D.shape[1]):
                    if D[i, j] < config.MIN_DISTANCE:
                        violate.add(i)
                        violate.add(j)
        for (i, (prob, bbox, centroid)) in enumerate(results):
            (startX, startY, endX, endY) = bbox
            (cX, cY) = centroid
            # Red for violators, green otherwise.
            color = (0, 255, 0)
            if i in violate:
                color = (0, 0, 255)
            cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
            cv2.circle(frame, (cX, cY), 5, color, 1)
        # Overlay the running violation count.
        text = "Social Distancing Violations: {}".format(len(violate))
        cv2.putText(frame, text, (10, frame.shape[0] - 25),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.85, (0, 0, 255), 3)
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break