def handle_frames(frame):
    """Detect people in *frame*, re-identify each crop, and draw labelled boxes.

    Each detection is cropped out of the frame, passed to the re-id comparator,
    mapped to a canonical identity name, and finally drawn back onto the frame
    with its confidence value. Returns the annotated frame.
    """
    detections = api.get_person_bbox(frame, thr=0.6)
    annotations = []
    for det in detections:
        logger.info('coordinates: {} {}. '.format(det[0], det[1]))
        left, top = int(det[0][0]), int(det[0][1])
        right, bottom = int(det[1][0]), int(det[1][1])
        crop = frame[top:bottom, left:right, :]
        name, score = compare.run(crop, origin_f, origin_name)
        # Collapse per-image gallery labels into one identity per person.
        if name in ("MJ2", "MJ3", "MJ4"):
            name = "MJ"
        elif name in ("QY1", "QY2"):
            name = "QY"
        # compare.run returns a distance; 1 - score is reported as confidence.
        confidence = round(1 - score, 2)
        print("identify name:{}, score:{}".format(name, confidence))
        annotations.append([(left, top), (right, bottom), name + ' ' + str(confidence)])
    for box in annotations:
        print(box)
        cv2.putText(frame, box[2], (box[0][0], box[0][1] - 5), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 2)
        frame = cv2.rectangle(frame, box[0], box[1], (0, 255, 0), 2)
    return frame
def detect_person(frame):
    """Run the SSD person detector on *frame*.

    Parameters:
        frame: BGR image array to run detection on.

    Returns:
        tuple: ``(detection_results, frame)`` — the detector's bounding-box
        list (score threshold 0.5) and the input frame, unchanged.
    """
    # NOTE: a commented-out variant that also returned per-person crops was
    # removed; callers crop from `frame` using the returned boxes instead.
    detection_results = api.get_person_bbox(frame, thr=0.5)
    return detection_results, frame
def handle_frames(frame):
    """Detect and re-identify people in *frame*; notify the edge node and exit
    the process once exactly two people are seen in a frame.

    Returns the frame with labelled bounding boxes drawn on it (unless the
    two-person condition triggered and the process exited).
    """
    detection_results = api.get_person_bbox(frame, thr=0.6)
    if len(detection_results) == 2:
        # Notify the coordinator node, then terminate this worker.
        # Security/idiom fix: pass an argument list with shell=False instead of
        # a shell command string, so no shell parsing is involved.
        subprocess.Popen(["curl", "-v", "-d", "nodeName=edge",
                          "http://192.168.1.105:5001/listen"])
        sys.exit(0)
    bounding_boxs = []
    for bbox in detection_results:
        logger.info('coordinates: {} {}. '.format(bbox[0], bbox[1]))
        x1 = int(bbox[0][0])
        y1 = int(bbox[0][1])
        x2 = int(bbox[1][0])
        y2 = int(bbox[1][1])
        person = frame[y1:y2, x1:x2, :]
        identify_name, score = compare.run(person, origin_f, origin_name)
        # Collapse per-image gallery labels into a single person identity.
        if identify_name in ["MJ1", "MJ2", "MJ3", "MJ4", "MJ5"]:
            identify_name = "Person_1"
        elif identify_name in ["QY1", "QY2", "QY3", "QY4", "QY5"]:
            identify_name = "Person_2"
        print("identify name:{}, score:{}".format(identify_name, round(1-score, 2)))
        bounding_boxs.append([(x1, y1), (x2, y2), identify_name + ' ' + str(round(1-score, 2))])
    # Draw labels first, then rectangles, exactly as collected above.
    for obj in bounding_boxs:
        print(obj)
        cv2.putText(frame, obj[2], (obj[0][0], obj[0][1] - 5), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 2)
        frame = cv2.rectangle(frame, obj[0], obj[1], (0, 255, 0), 2)
    return frame
def handle_frames(frame):
    """Track and re-identify people in *frame* (SORT-style pipeline).

    Steps: detect person boxes, associate detections with live trackers by
    IoU, Kalman-update matched trackers, re-identify and spawn trackers for
    unmatched detections, age unmatched trackers, draw boxes for confirmed
    tracks, and recycle the IDs of expired trackers.

    Uses module globals tracker_list, max_age, min_hits, track_id_list plus
    the module-level detector/re-id objects (api, compare, origin_f,
    origin_name). Returns the annotated frame.
    """
    global tracker_list
    global max_age
    global min_hits
    global track_id_list
    detection_results = api.get_person_bbox(frame, thr=0.5)
    # Current tracker boxes, reshaped to the detector's [(x1,y1),(x2,y2)]
    # format so they can be matched against the new detections.
    x_box =[]
    if len(tracker_list) > 0:
        for trk in tracker_list:
            x_box.append([(trk.box[0],trk.box[1]),(trk.box[2],trk.box[3])])
            #should be changed into the right format instead of the .box format
    matched, unmatched_dets, unmatched_trks = assign_detections_to_trackers(x_box, detection_results, iou_thrd = 0.2)
    # Deal with matched detections: feed the measurement into the Kalman
    # filter, refresh the tracker's box, and reset its miss counter.
    if matched.size >0:
        for trk_idx, det_idx in matched:
            z = detection_results[det_idx]
            # Flatten [(x1,y1),(x2,y2)] into a 4x1 measurement column vector.
            z = np.expand_dims([n for a in z for n in a], axis=0).T
            tmp_trk= tracker_list[trk_idx]
            tmp_trk.kalman_filter(z)
            # State interleaves positions and velocities (see the state
            # construction below); keep only positions 0, 2, 4, 6.
            xx = tmp_trk.x_state.T[0].tolist()
            xx =[xx[0], xx[2], xx[4], xx[6]]
            x_box[trk_idx] = xx
            tmp_trk.box =xx
            tmp_trk.hits += 1
            tmp_trk.no_losses = 0
    # Deal with unmatched detections: re-identify the person and create a
    # fresh tracker seeded with the detection box and zero velocities.
    if len(unmatched_dets)>0:
        for idx in unmatched_dets:
            z = detection_results[idx]
            x1 = int(z[0][0])
            y1 = int(z[0][1])
            x2 = int(z[1][0])
            y2 = int(z[1][1])
            person = frame[y1:y2, x1:x2, :]
            identify_name, score = compare.run(person, origin_f, origin_name)
            # Collapse per-image gallery labels into one identity per person.
            if(identify_name in [ "MJ1", "MJ2", "MJ3", "MJ4", "MJ5"]):
                identify_name = "Person_1"
            elif(identify_name in ["QY1", "QY2", "QY3", "QY4", "QY5"]):
                identify_name = "Person_2"
            print("identify name:{}, score:{}".format(identify_name, round(1-score, 2)))
            #generate a new tracker for the person
            z = np.expand_dims([n for a in z for n in a], axis=0).T
            tmp_trk = Tracker() # Create a new tracker
            # State vector [x1, 0, y1, 0, x2, 0, y2, 0]: positions interleaved
            # with (initially zero) velocities.
            x = np.array([[z[0], 0, z[1], 0, z[2], 0, z[3], 0]]).T
            tmp_trk.x_state = x
            tmp_trk.predict_only()
            xx = tmp_trk.x_state
            xx = xx.T[0].tolist()
            xx =[xx[0], xx[2], xx[4], xx[6]]
            tmp_trk.box = xx
            tmp_trk.id = track_id_list.popleft() # assign an ID for the tracker
            tmp_trk.personReID_info['personID'] = identify_name #assign the reidentified personID for the tracker
            tracker_list.append(tmp_trk)
            x_box.append(xx)
    # Deal with unmatched tracks: no measurement this frame, so count a miss
    # and coast on the Kalman prediction alone.
    if len(unmatched_trks)>0:
        for trk_idx in unmatched_trks:
            tmp_trk = tracker_list[trk_idx]
            tmp_trk.no_losses += 1
            tmp_trk.predict_only()
            xx = tmp_trk.x_state
            xx = xx.T[0].tolist()
            xx =[xx[0], xx[2], xx[4], xx[6]]
            tmp_trk.box =xx
            x_box[trk_idx] = xx
    # The list of tracks to be annotated and draw the figure: a track is
    # confirmed once it has enough hits and few enough consecutive misses.
    good_tracker_list =[]
    for trk in tracker_list:
        if ((trk.hits >= min_hits) and (trk.no_losses <=max_age)):
            good_tracker_list.append(trk)
            x_cv2 = trk.box
            trackerID_str="Unknown Person:"+str(trk.id)
            if trk.personReID_info['personID'] == "Unknown":
                frame= draw_box_label(frame, x_cv2,personReID_info={'personID':trackerID_str}) # Draw the bounding boxes for unknown person
            else:
                frame= draw_box_label(frame, x_cv2,personReID_info=trk.personReID_info) # Draw the bounding boxes for re-identified person
    #book keeping: recycle the IDs of expired trackers and drop them.
    deleted_tracks = filter(lambda x: x.no_losses > max_age, tracker_list)
    for trk in deleted_tracks:
        track_id_list.append(trk.id)
    tracker_list = [x for x in tracker_list if x.no_losses<=max_age]
    # #the original codes
    # for bbox in detection_results:
    #     logger.info('coordinates: {} {}. '.
    #         format(bbox[0], bbox[1]))
    #     x1 = int(bbox[0][0])
    #     y1 = int(bbox[0][1])
    #     x2 = int(bbox[1][0])
    #     y2 = int(bbox[1][1])
    #     person = frame[y1:y2, x1:x2, :]
    #     identify_name, score = compare.run(person, origin_f, origin_name)
    #     if(identify_name in [ "MJ1", "MJ2", "MJ3", "MJ4", "MJ5"]):
    #         identify_name = "Person_1"
    #     elif(identify_name in ["QY1", "QY2", "QY3", "QY4", "QY5"]):
    #         identify_name = "Person_2"
    #
    #     print("identify name:{}, score:{}".format(identify_name, round(1-score, 2)))
    #
    #     bounding_boxs.append([(x1,y1), (x2,y2), identify_name+' '+str(round(1-score, 2))])
    #     #img = cam_detection.draw_rectangle(img, (x1,y1,x2,y2), identify_name+' '+str(round((1-score), 2)))
    # for obj in bounding_boxs:
    #     print(obj)
    #     cv2.putText(frame, obj[2], (obj[0][0], obj[0][1] - 5), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 2)
    #     frame = cv2.rectangle(frame, obj[0], obj[1], (0, 255, 0), 2)
    return frame
        # NOTE(review): tail of a `gen_frames` generator whose `def` lies
        # before this chunk — emits one multipart MJPEG part per frame.
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + bytearray(outputFrame) + b'\r\n')
        # if the `q` key was pressed, break from the loop
        #if key == ord("q"):
        #    break


@app.route('/video_feed')
def video_feed():
    #Video streaming route. Put this in the src attribute of an img tag
    return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')


@app.route('/')
def index():
    """Video streaming home page."""
    return render_template('index.html')


if __name__ == '__main__':
    # Smoke-test detection and annotation on a sample image before serving.
    img = cv2.imread('example.jpg')
    detection_test = api.get_person_bbox(img, thr=0.5)
    print(detection_test)
    img = handle_frames(img)
    #plt.imshow(img[:, :, ::-1])
    print("show frame")
    #plt.show()
    # NOTE(review): port is passed as the string '5000'; Flask accepts this,
    # but an int is the conventional form.
    app.run(host='0.0.0.0', port='5000')
    #gen_frames()
fps = 0 count = 0 total_count = 10 total_time = 0 size = (int(cap.get(3)), int(cap.get(4))) # Size Video dev = serial.Serial("COM17", baudrate=9600) result = cv2.VideoWriter('out.avi', cv2.VideoWriter_fourcc(*'MJPG'),10, size) count_people = 0 while(cap.isOpened()): start = time.time() berhasil, img = cap.read() # img = imutils.resize(img, width=min(400, img.shape[1])) if berhasil: bbox_list = api.get_person_bbox(img, thr=0.6) # print(bbox_list) last_count = count_people count_people = 0 for i in bbox_list: cv2.rectangle(img, i[0], i[1], (125, 255, 51), thickness=2) count_people+=1 #detect People if (count_people > 0): dev.write(b'1') else : dev.write(b'0') text_fps = "FPS =" + str((int)(fps))
def handle_frames(frame):
    """Hand-hygiene monitor: track/re-identify staff in *frame*, detect hands
    at the sink and at the patient, and log/alarm on hygiene-rule violations.

    Pipeline: hand detection (RGB), person detection + SORT-style tracking
    with Kalman filters, re-identification of new tracks (Doctor/Nurse),
    hygiene-state transfer to re-acquired tracks, drawing of labelled boxes,
    SQLite logging, and audio alarms via `play`. Returns the annotated frame.

    Uses module globals tracker_list, max_age, min_hits, track_id_list plus
    many module-level objects (api, compare, detector_utils, detection_graph,
    sess, sink_loc, patient_loc, ...).
    """
    global tracker_list
    global max_age
    global min_hits
    global track_id_list
    #connect to database (isolation_level=None -> autocommit mode)
    conn = sqlite3.connect('handwash.db', isolation_level=None)
    #print("Opened database successfully")
    cur = conn.cursor()
    #detect hand: the hand detector expects RGB input
    try:
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    except:
        print("Error converting to RGB")
    #print(type(frame))
    boxes, scores = detector_utils.detect_objects(frame,detection_graph,sess)
    # draw bounding boxes on frame and split detected hands by region
    # (in the sink area vs. near the patient)
    hand_in_sink, hand_in_patient = detector_utils.draw_box_on_image_washhand( \
        num_hands_detect, score_thresh, scores, boxes, im_width, \
        im_height, frame, sink_loc, patient_loc)
    try:
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    except:
        print("Error converting to BGR")
    #detect person
    detection_results = api.get_person_bbox(frame, thr=0.50)
    # Current tracker boxes in the detector's [(x1,y1),(x2,y2)] format.
    x_box =[]
    if len(tracker_list) > 0:
        for trk in tracker_list:
            x_box.append([(trk.box[0],trk.box[1]),(trk.box[2],trk.box[3])])
            #should be changed into the right format instead of the .box format
    matched, unmatched_dets, unmatched_trks = assign_detections_to_trackers(x_box, detection_results, iou_thrd = 0.2)
    # Deal with matched detections: Kalman-update, refresh box, reset misses.
    if matched.size >0:
        for trk_idx, det_idx in matched:
            z = detection_results[det_idx]
            # Flatten [(x1,y1),(x2,y2)] into a 4x1 measurement column vector.
            z = np.expand_dims([n for a in z for n in a], axis=0).T
            tmp_trk= tracker_list[trk_idx]
            tmp_trk.kalman_filter(z)
            # State interleaves positions and velocities; keep positions only.
            xx = tmp_trk.x_state.T[0].tolist()
            xx =[xx[0], xx[2], xx[4], xx[6]]
            x_box[trk_idx] = xx
            tmp_trk.box =xx
            tmp_trk.hits += 1
            tmp_trk.no_losses = 0
    # Deal with unmatched detections: re-identify and spawn a new tracker.
    if len(unmatched_dets)>0:
        for idx in unmatched_dets:
            z = detection_results[idx]
            x1 = int(z[0][0])
            y1 = int(z[0][1])
            x2 = int(z[1][0])
            y2 = int(z[1][1])
            person = frame[y1:y2, x1:x2, :]
            identify_name, score = compare.run(person, origin_f, origin_name)
            # Collapse per-image gallery labels into role identities.
            if(identify_name in [ "QY1", "QY2", "QY3", "QY4", "QY5", "QY6"]):
                identify_name = "Doctor"
            elif(identify_name in ["YN1", "YN2", "YN3", "YN4", "YN5", "YN6"]):
                identify_name = "Nurse"
            print("identify name:{}, score:{}".format(identify_name, round(1-score, 2)))
            #generate a new tracker for the person
            z = np.expand_dims([n for a in z for n in a], axis=0).T
            tmp_trk = Tracker() # Create a new tracker
            # State vector [x1, 0, y1, 0, x2, 0, y2, 0]: positions interleaved
            # with (initially zero) velocities.
            x = np.array([[z[0], 0, z[1], 0, z[2], 0, z[3], 0]]).T
            tmp_trk.x_state = x
            tmp_trk.predict_only()
            xx = tmp_trk.x_state
            xx = xx.T[0].tolist()
            xx =[xx[0], xx[2], xx[4], xx[6]]
            tmp_trk.box = xx
            tmp_trk.id = track_id_list.popleft() # assign an ID for the tracker
            tmp_trk.personReID_info['personID'] = identify_name #assign the reidentified personID for the tracker
            #assign the tracker attribute to new tracker when loose tracking a person but re_id him
            # (carries hygiene state over so a re-acquired person keeps their
            # washed/touched/violation flags)
            if len(unmatched_trks)>0:
                for trk_idx in unmatched_trks:
                    trk_old = tracker_list[trk_idx]
                    if trk_old.personReID_info['personID'] == identify_name:
                        tmp_trk.have_washed_hand = trk_old.have_washed_hand
                        tmp_trk.hand_clean = trk_old.hand_clean
                        tmp_trk.have_touched_pat = trk_old.have_touched_pat
                        tmp_trk.violate_rule = trk_old.violate_rule
            tracker_list.append(tmp_trk)
            x_box.append(xx)
    # Deal with unmatched tracks: count a miss and coast on the prediction.
    if len(unmatched_trks)>0:
        for trk_idx in unmatched_trks:
            tmp_trk = tracker_list[trk_idx]
            tmp_trk.no_losses += 1
            tmp_trk.predict_only()
            xx = tmp_trk.x_state
            xx = xx.T[0].tolist()
            xx =[xx[0], xx[2], xx[4], xx[6]]
            tmp_trk.box =xx
            x_box[trk_idx] = xx
    # The list of tracks to be annotated and draw the figure: a track is
    # confirmed once it has enough hits and few enough consecutive misses.
    good_tracker_list =[]
    for trk in tracker_list:
        if ((trk.hits >= min_hits) and (trk.no_losses <=max_age)):
            good_tracker_list.append(trk)
            x_cv2 = trk.box
            trackerID_str="Unknown Person:"+str(trk.id)
            if trk.personReID_info['personID'] == "Unknown":
                trk.personReID_info['personID'] = "Unknown Person:"+str(trk.id) # Change the personID for unknown person
            frame= draw_box_label(frame, x_cv2, personReID_info=trk.personReID_info) # Draw the bounding boxes for person
    #book keeping
    deleted_tracks = filter(lambda x: x.no_losses > max_age, tracker_list)
    #judge whether the person has washed hand before leaving and add the deleted tracker into the tracke_id_list
    for trk in deleted_tracks:
        print(trk.box, trk.hits)
        # Person deemed to have left the scene: box reached the right edge
        # (>= 640 px, presumably the frame width — TODO confirm) or went above
        # the top, after a stable track of at least 10 hits.
        if (trk.box[2] >= 640 or trk.box[1]<0) and (trk.hits >= 10):
            # Touched a patient with unclean hands: escalate the violation.
            if trk.have_touched_pat and (not trk.hand_clean):
                if trk.violate_rule == 2:
                    trk.violate_rule = 3
                else:
                    trk.violate_rule = 1
            person_tracker_info = "ctime {}, person_ID {}, sub_ID {}".format(int(time.time()), trk.personReID_info['personID'], str(trk.id))
            alarm = " washed_hand {},touched_patient {},violate_rule {}".format(str(trk.have_washed_hand),str(trk.have_touched_pat),str(trk.violate_rule))
            print(trk.personReID_info['personID']+":"+person_tracker_info+alarm)
            # NOTE(review): SQL built by string formatting — personID is
            # derived from recognition output; parameterized queries would be
            # safer here.
            info = "insert into HANDEMO (PERSON, CTIME, HLOC, PLOC, HAND, PATIENT, JUDGE) \
                values ('{}', {}, '{}', '{}', {}, {}, {})".format(trk.personReID_info['personID'], int(time.time()), '', '', \
                int(trk.have_washed_hand), int(trk.have_touched_pat), int(trk.violate_rule))
            cur.execute(info)
            # Audio alarms: rule 1/3 -> "after" violation, rule 2 -> "before".
            if trk.violate_rule == 1 or trk.violate_rule == 3:
                cmd = "play After.wav"
                subprocess.Popen(cmd, shell=True)
            if trk.violate_rule == 2:
                cmd = "play Before.wav"
                subprocess.Popen(cmd, shell=True)
            #if trk.violate_rule != 0:
            #    cmd1 = "play Beep.wav"
            #    subprocess.Popen(cmd1, shell=True)
        track_id_list.append(trk.id)
    tracker_list = [x for x in tracker_list if x.no_losses<=max_age]
    #judge whether this guy has washed has hands
    #for all detected hand in sink
    if len(hand_in_sink):
        for w_h_box in hand_in_sink:
            for trk in good_tracker_list:
                if wash_hand_detector(trk,w_h_box):
                    person_tracker_info = "ctime {}, person_ID {}, sub_ID {}".format(int(time.time()), trk.personReID_info['personID'] , str(trk.id))
                    location_info = " hand_location {}, person_location {}".format(str(w_h_box), str(trk.box))
                    alarm = " washed_hand {},touched_patient {},violate_rule {}".format(str(trk.have_washed_hand),str(trk.have_touched_pat),str(trk.violate_rule))
                    #alarm = "washed_hand {},touched_patient {},hand_clean {}".format(str(trk.have_washed_hand),str(trk.have_touched_pat),str(trk.hand_clean))
                    cv2.putText(frame,alarm, (w_h_box[0][0],w_h_box[0][1]-10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255,191,0), 1, cv2.LINE_AA)
                    print(trk.personReID_info['personID']+":"+person_tracker_info+location_info+alarm)
                    # NOTE(review): string-formatted SQL (see note above).
                    info = "insert into HANDEMO (PERSON, CTIME, HLOC, PLOC, HAND, PATIENT, JUDGE) \
                        values ('{}', {}, '{}', '{}', {}, {}, {})".format(trk.personReID_info['personID'], int(time.time()), str(w_h_box), str(trk.box), \
                        int(trk.have_washed_hand), int(trk.have_touched_pat), int(trk.violate_rule))
                    cur.execute(info)
                    if trk.violate_rule == 1 or trk.violate_rule == 3:
                        cmd = "play After.wav"
                        subprocess.Popen(cmd, shell=True)
                    if trk.violate_rule == 2:
                        cmd = "play Before.wav"
                        subprocess.Popen(cmd, shell=True)
                    #if trk.violate_rule != 0:
                    #    cmd1 = "play Beep.wav"
                    #    subprocess.Popen(cmd1, shell=True)
    #for all detected hand in patient
    if len(hand_in_patient):
        for t_p_box in hand_in_patient:
            for trk in good_tracker_list:
                if touch_patient_detector(trk,t_p_box):
                    person_tracker_info = "ctime {}, person_ID {}, sub_ID {}".format(int(time.time()), trk.personReID_info['personID'], str(trk.id))
                    location_info = " hand_location {}, person_location {}".format(str(t_p_box), str(trk.box))
                    alarm = " washed_hand {},touched_patient {},violate_rule {}".format(str(trk.have_washed_hand),str(trk.have_touched_pat),str(trk.violate_rule))
                    #alarm = "washed_hand {},touched_patient {},hand_clean {}".format(str(trk.have_washed_hand),str(trk.have_touched_pat),str(trk.hand_clean))
                    cv2.putText(frame,alarm, (t_p_box[0][0],t_p_box[0][1]-10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0,255,255), 1, cv2.LINE_AA)
                    print(trk.personReID_info['personID']+":"+person_tracker_info+location_info+alarm)
                    # NOTE(review): string-formatted SQL (see note above).
                    info = "insert into HANDEMO (PERSON, CTIME, HLOC, PLOC, HAND, PATIENT, JUDGE) \
                        values ('{}', {}, '{}', '{}', {}, {}, {})".format(trk.personReID_info['personID'], int(time.time()), str(t_p_box), str(trk.box), \
                        int(trk.have_washed_hand), int(trk.have_touched_pat), int(trk.violate_rule))
                    cur.execute(info)
                    if trk.violate_rule == 1 or trk.violate_rule == 3:
                        cmd = "play After.wav"
                        subprocess.Popen(cmd, shell=True)
                    if trk.violate_rule == 2:
                        cmd = "play Before.wav"
                        subprocess.Popen(cmd, shell=True)
                    #if trk.violate_rule != 0:
                    #    cmd1 = "play Beep.wav"
                    #    subprocess.Popen(cmd1, shell=True)
    return frame
import cv2
from pedestrian_detection_ssdlite import api

# Demo: run the SSD-lite pedestrian detector on a video file and display
# each frame with labelled bounding boxes until EOF or Q/q is pressed.
video = cv2.VideoCapture('pedestrian_video.mp4')
while True:
    read_successful, frame = video.read()
    if not read_successful:  # end of stream (or decode failure)
        break
    bbox_list = api.get_person_bbox(frame, thr=0.6)
    print(bbox_list)
    for i in bbox_list:
        cv2.rectangle(frame, i[0], i[1], (125, 255, 51), thickness=2)
        cv2.putText(frame, 'Person', (i[0][0], i[0][1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (36, 255, 12), thickness=2)
    cv2.imshow('Pedestrain detector ', frame)
    key = cv2.waitKey(1)
    # 81/113 are ord('Q')/ord('q') — spell them out for clarity.
    if key == ord('Q') or key == ord('q'):
        break
video.release()
cv2.destroyAllWindows()  # close the display window on exit