def findObject():
    """Read the requested object name from disk, run YOLO detection on a
    fresh camera frame, and reply with a Korean status string.

    Returns a plain-text Response: "전방에 <name> 찾았습니다" when the object
    was detected, otherwise "0" so the Artik client can tell it was not found.
    """
    # Fix: use a context manager so the file handle is released even if
    # reading raises; strip the trailing newline that readlines() keeps,
    # which previously broke both the substring test and the reply text.
    with open('/home/ubuntu/object_find.txt', 'r') as f:
        object_name = f.readlines()
    print('object_name[0]: ' + object_name[0])
    target = object_name[0].strip()
    get_frame('extra/yolo.jpg', False)  # capture the current frame to disk
    result = detect_image('/home/ubuntu/IOT_WEB_SERVER/static/extra/yolo.jpg')
    print("result : " + str(result))
    # Translate every detected class to Korean; unknown classes map to ''.
    str_result = ''
    for i in result:
        str_result = str_result + translateEtoK.get(i['class'], '') + ' '
    print(str_result)
    if target in str_result:
        print("물건을 찾은 경우입니다 ###########")
        str_result = "전방에 " + target + " 찾았습니다"
    else:
        print("물건을 찾지 못한 경우입니다 ################")
        print(target + " " + str_result)
        # send "0" when nothing matched so the Artik client can recognize
        # the not-found case (translated from the original Korean comment)
        str_result = "0"
    return Response(str_result, status=200, mimetype='text/plain')
def detect_img(yolo, length, image, path):
    """Run YOLO detection on the first *length* images, then close the session.

    Args:
        yolo: wrapper whose .detect_image(img, name) runs detection and whose
              .yolo attribute owns the underlying session.
        length: number of entries of *path*/*image* to process.
        image: list of output image names forwarded to detect_image.
        path: list of input image file paths.
    """
    for i in range(length):
        try:
            img = Image.open(path[i])
        except OSError:
            # Narrowed from a bare except: Image.open reports unreadable or
            # non-image files via OSError (FileNotFoundError and Pillow's
            # UnidentifiedImageError are both subclasses).
            print('Open Error! Try again!')
            continue
        else:
            r_image = yolo.detect_image(img, image[i])
            # cv2.imwrite("images/"+image[i], np.asarray(r_image)[..., ::-1])
    # release the underlying session once all images are processed
    yolo.yolo.close_session()
def detect_img(yolo, images_path):
    """Detect objects in each image path and display the annotated result.

    Blocks on cv2.waitKey(0) for every image until a key is pressed; the
    window is destroyed after the last image.
    """
    for image_path in images_path:
        try:
            image = Image.open(image_path)
        except OSError:
            # Narrowed from a bare except: only file-open / decode failures
            # are expected here.
            print('Open Error! Try again!')
        else:
            r_image = yolo.detect_image(image)
            result = np.asarray(r_image)
            cv2.namedWindow("result", cv2.WINDOW_NORMAL)
            cv2.imshow("result", result)
            cv2.waitKey(0)  # wait for any key before the next image
    cv2.destroyAllWindows()
def image():
    """Capture a frame, run YOLO on it, and answer with a Korean summary of
    the detected object classes as a plain-text Response."""
    get_frame('extra/yolo.jpg', False)
    result = detect_image('/home/ubuntu/IOT_WEB_SERVER/static/extra/yolo.jpg')
    if not result:
        str_result = '인식된 주요 물체가 없습니다'
    else:
        print("result" + str(result))
        # map every detection to its Korean class name ('' when unknown),
        # keeping the trailing space after each entry
        pieces = [translateEtoK.get(det['class'], '') + ' ' for det in result]
        str_result = ''.join(pieces)
        print(str_result)
        str_result = removeDuplicates(str_result)
        str_result = str_result + '있습니다'
    print(str_result)
    return Response(str_result, status=200, mimetype='text/plain')
def detect_img(yolo, images_path):
    """Loop YOLO detection over *images_path* indefinitely, showing each
    annotated frame with an FPS overlay until Esc is pressed.

    Any key other than Esc is echoed to stdout; cv2.waitKey(33) caps the
    display rate at roughly 30 fps.
    """
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    stop_flag = False
    while True:
        for image_path in images_path:
            try:
                image = Image.open(image_path)
            except OSError:
                # Narrowed from a bare except: only unreadable / non-image
                # files are expected to fail here.
                print('Open Error! Try again!')
            else:
                image = yolo.detect_image(image)
                result = np.asarray(image)
                # running FPS estimate: count frames finished in the last second
                curr_time = timer()
                exec_time = curr_time - prev_time
                prev_time = curr_time
                accum_time = accum_time + exec_time
                curr_fps = curr_fps + 1
                if accum_time > 1:
                    accum_time = accum_time - 1
                    fps = "FPS: " + str(curr_fps)
                    curr_fps = 0
                cv2.putText(result, text=fps, org=(3, 15),
                            fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                            fontScale=0.50, color=(255, 0, 0), thickness=2)
                cv2.namedWindow("result", cv2.WINDOW_NORMAL)
                cv2.imshow("result", result)
                k = cv2.waitKey(33)
                if k == 27:  # Esc key to stop
                    stop_flag = True
                    break
                elif k == -1:  # normally -1 returned, so don't print it
                    continue
                else:
                    print(k)  # else print its value
        if stop_flag:  # idiom fix: was `if (stop_flag is True)`
            break
    cv2.destroyAllWindows()
def main(yolo): start = time.time() #Definition of the parameters max_cosine_distance = 0.5 #余弦距离的控制阈值 nn_budget = None nms_max_overlap = 0.3 #非极大抑制的阈值 counter = [] #deep_sort # model_filename = 'model_data/market1501.pb' model_filename = 'model_data/mars-small128.pb' encoder = gdet.create_box_encoder(model_filename, batch_size=1) metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget) tracker = Tracker(metric) video_capture = cv2.VideoCapture(args["input"]) fps = 0.0 while True: ret, frame = video_capture.read() # frame shape 640*480*3 if ret != True: break t1 = time.time() boxs, class_names = yolo.detect_image(frame) features = encoder(frame, boxs) detections = [ Detection(bbox, 1.0, feature) for bbox, feature in zip(boxs, features) ] # Run non-maxima suppression. # boxes = np.array([d.tlwh for d in detections]) # scores = np.array([d.confidence for d in detections]) # indices = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores) # detections = [detections[i] for i in indices] # detections = detections[:] # Call the tracker tracker.predict() tracker.update(detections) i = int(0) indexIDs = [] # for det in detections: # bbox = det.to_tlbr() # cv2.rectangle(frame,(int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(255,255,255), 2) # print(tracker.tracks) for track in tracker.tracks: if not track.is_confirmed() or track.time_since_update > 1: continue #boxes.append([track[0], track[1], track[2], track[3]]) indexIDs.append(int(track.track_id)) counter.append(int(track.track_id)) bbox = track.to_tlbr() color = [int(c) for c in COLORS[indexIDs[i] % len(COLORS)]] cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (color), 3) cv2.putText(frame, str(track.track_id), (int(bbox[0]), int(bbox[1] - 50)), 0, 5e-3 * 150, (color), 2) if len(class_names) > 0: class_name = class_names[0] cv2.putText(frame, str(class_names[0]), (int(bbox[0]), int(bbox[1] - 20)), 0, 5e-3 * 150, (color), 2) i += 
1 #bbox_center_point(x,y) center = (int( ((bbox[0]) + (bbox[2])) / 2), int(((bbox[1]) + (bbox[3])) / 2)) #track_id[center] pts[track.track_id].append(center) thickness = 5 #center point cv2.circle(frame, (center), 1, color, thickness) #draw motion path for j in range(1, len(pts[track.track_id])): if pts[track.track_id][j - 1] is None or pts[ track.track_id][j] is None: continue thickness = int(np.sqrt(64 / float(j + 1)) * 2) cv2.line(frame, (pts[track.track_id][j - 1]), (pts[track.track_id][j]), (color), thickness) #cv2.putText(frame, str(class_names[j]),(int(bbox[0]), int(bbox[1] -20)),0, 5e-3 * 150, (255,255,255),2) count = len(set(counter)) fps = (fps + (1. / (time.time() - t1))) / 2 cv2.putText(frame, "Total Object Counter: " + str(count), (int(20), int(120)), 0, 5e-3 * 200, (0, 255, 0), 2) cv2.putText(frame, "Current Object Counter: " + str(i), (int(20), int(80)), 0, 5e-3 * 200, (0, 255, 0), 2) cv2.putText(frame, "FPS: %f" % (fps), (int(20), int(40)), 0, 5e-3 * 200, (0, 255, 0), 3) cv2.namedWindow("YOLOv4_Deep_SORT", 0) cv2.resizeWindow('YOLOv4_Deep_SORT', 1024, 768) cv2.imshow('YOLOv4_Deep_SORT', frame) # Press Q to stop! if cv2.waitKey(1) & 0xFF == ord('q'): break print(" ") print("[Finish]") end = time.time() if len(pts[track.track_id]) != None: print(args["input"][43:57] + ": " + str(count) + " " + str(class_name) + ' Found') else: print("[No Found]") video_capture.release() cv2.destroyAllWindows()
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author : kly time:2019/4/14
# Batch-detect every matching .jpg with YOLO, then close the session.
import yolo
import os
from PIL import Image
import glob

if __name__ == '__main__':
    # Fix: bind the instance to its own name instead of shadowing the
    # imported `yolo` module.
    detector = yolo.YOLO()
    path = "D:\work//new\Test\INA-T//*.jpg"
    outdir = "D:\work//new\Test\out"
    for jpgfile in glob.glob(path):
        img = Image.open(jpgfile)
        # the six characters before ".jpg" identify the frame number
        num = jpgfile[-10:-4]
        print(num)
        img = detector.detect_image(img, num)
        #img.save(os.path.join(outdir, os.path.basename(jpgfile)))
        print('ok')
    detector.close_session()
# --- tail of a detect_image method; its `def` starts outside this view ---
        # place the label above the box when it fits, otherwise just inside it
        if top - label_size[1] >= 0:
            text_origin = np.array([left, top - label_size[1]])
        else:
            text_origin = np.array([left, top + 1])
        # My kingdom for a good redistributable image drawing library.
        for i in range(thickness):
            # draw nested 1px rectangles to fake a thick outline
            draw.rectangle([left + i, top + i, right - i, bottom - i],
                           outline=self.colors[c])
        draw.rectangle([tuple(text_origin), tuple(text_origin + label_size)],
                       fill=self.colors[c])
        draw.text(text_origin, label, fill=(0, 0, 0), font=font)
        del draw
        end = timer()
        print(end - start)  # elapsed detection time in seconds
        return image

    def close_session(self):
        # Release the TensorFlow session held by this YOLO instance.
        self.sess.close()


if __name__ == "__main__":
    path = "d:/pics/test.jpg"
    fra = cv2.imread(path)
    img = Image.fromarray(fra)
    # NOTE(review): `yolo` is used here without a visible instance being
    # constructed — presumably created elsewhere; confirm before running.
    image = yolo.detect_image(img)
    cv2.imwrite("d:/pics/result.jpg", image)
# Minimal demo: run YOLO on a single image and display the annotated result.
import yolo
from PIL import Image

if __name__ == '__main__':
    image_path = "./1.jpg"
    image = Image.open(image_path)
    # Fix: don't shadow the imported `yolo` module with the instance.
    detector = yolo.YOLO()
    r_image = detector.detect_image(image=image)
    r_image.show()