def main():
    """Read a video file frame by frame, detect object centroids and
    track them with a Kalman-filter based tracker.

    Expects '../video/video4.mp4' to exist relative to the CWD; raises
    FileNotFoundError otherwise.  The loop ends when a cv2.error is
    raised (e.g. when detect() is handed the None frame returned after
    the last frame of the video).
    """
    video = '../video/video4.mp4'
    if not path.exists(video):
        raise FileNotFoundError('Video does not exist')
    cap = cv2.VideoCapture(video)
    detector = Detectors()
    # (60, 30, 100, True) -- presumably dist_thresh, max_frames_to_skip,
    # max_trace_length and a debug/draw flag; TODO confirm against Tracker.
    tracker = Tracker(60, 30, 100, True)
    while True:
        try:
            # Read the video frame by frame
            ret, frame = cap.read()
            # Detect centroids in the image
            centers = detector.detect(frame)
            if len(centers) > 0:
                # Track the objects with the Kalman filter
                tracker.update(centers)
            cv2.waitKey(50)
        except cv2.error:
            break
    cap.release()
    cv2.destroyAllWindows()
def main():
    """Track moving objects in 'project2.avi' with a Kalman-filter
    tracker and save the annotated video to 'Tracking2_wait8.avi'.

    Fix: the original guard ``len(centers) >= 0`` was a tautology, so
    the "If centroids are detected" comment never matched the code.
    The tracker update and trace drawing are now gated on ``> 0`` (as
    in the sibling main() functions in this file); the frame is still
    displayed and written on every iteration, as before.
    """
    # Create opencv video capture object
    cap = cv2.VideoCapture('project2.avi')
    # Create Object Detector
    detector = Detectors()
    # Create Object Tracker
    # (40, 8, 5, 100) -- presumably dist_thresh, max_frames_to_skip,
    # max_trace_length, trackIdCount; TODO confirm against Tracker.
    tracker = Tracker(40, 8, 5, 100)
    # One distinct BGR color per track_id modulo 9
    track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                    (0, 255, 255), (255, 0, 255), (255, 127, 255),
                    (127, 0, 255), (127, 0, 127)]
    # 5 fps DIVX writer; frame size must match the input (200x200)
    out = cv2.VideoWriter('Tracking2_wait8.avi',
                          cv2.VideoWriter_fourcc(*'DIVX'), 5, (200, 200))
    while True:
        ret, frame = cap.read()
        if ret:
            centers = detector.Detect(frame)
            # If centroids are detected then track them
            if len(centers) > 0:
                # Track object using Kalman Filter
                tracker.Update(centers)
                # For identified object tracks draw tracking line,
                # using a different color per track_id
                for i in range(len(tracker.tracks)):
                    if len(tracker.tracks[i].trace) > 1:
                        for j in range(len(tracker.tracks[i].trace) - 1):
                            # Draw trace line segment j -> j+1
                            x1 = tracker.tracks[i].trace[j][0][0]
                            y1 = tracker.tracks[i].trace[j][1][0]
                            x2 = tracker.tracks[i].trace[j + 1][0][0]
                            y2 = tracker.tracks[i].trace[j + 1][1][0]
                            clr = tracker.tracks[i].track_id % 9
                            cv2.line(frame, (int(x1), int(y1)),
                                     (int(x2), int(y2)), track_colors[clr], 2)
            # Display and record the (possibly annotated) frame
            cv2.imshow('Tracking', frame)
            cv2.waitKey(100)
            out.write(frame)
        else:
            break
    cap.release()
    cv2.destroyAllWindows()
    out.release()
def run(config):
    """Run one traffic-light simulation for SIMULATION_DURATION
    simulated seconds, driving the light phases from the configured
    decision system.

    Args:
        config: object providing ``factory`` (builds the light decision
            system from out_dir and detectors) and ``out_dir`` -- TODO
            confirm the full contract against the caller.
    """
    seconds = 0
    phase_controller = Phase_controller(STEP_SIZE, GREEN_LIGHT_DUR,
                                        YELLOW_LIGHT_DUR)
    detectors = Detectors(phase_controller)
    traffic = Traffic_generator(STEP_SIZE)
    light_decision_system = config.factory(config.out_dir, detectors)
    # generators
    average_waiting_time = Average_waiting_time(config.out_dir)
    while seconds < SIMULATION_DURATION:
        # Advance SUMO one step, then all simulation bookkeeping
        traci.simulationStep()
        traffic.next_step()
        phase_controller.simulation_step()
        seconds += STEP_SIZE
        detectors.update()
        if phase_controller.is_yellow() and phase_controller.is_end_of_yellow(
        ):  # transition from yellow to green
            # Record waiting time, let the decision system pick the next
            # green phase, and reset the passed-vehicle counter.
            average_waiting_time.record(seconds)
            light_decision_system.next_step()
            current_phase = light_decision_system.get_predicted_phase()
            phase_controller.set_phase(current_phase)
            detectors.num_passed_light = 0
        elif phase_controller.is_end_of_green():
            # End of green: advance to the following (yellow) phase index.
            current_phase = phase_controller.get_phase()
            current_phase += 1
            phase_controller.set_phase(current_phase)
    traci.close()
    sys.stdout.flush()
def main():
    """Detect object centroids in video/video4.mp4 and feed them to a
    Kalman-filter tracker, frame by frame, until a cv2.error occurs
    (e.g. at end of stream)."""
    source = 'video/video4.mp4'
    if not path.exists(source):
        raise FileNotFoundError('Video does not exist')
    capture = cv2.VideoCapture(source)
    detector = Detectors()
    # dist_thresh=60, max_frames_to_skip=30, max_trace_length=100, flag off
    tracker = Tracker(60, 30, 100, False)
    running = True
    while running:
        try:
            _, frame = capture.read()
            detections = detector.detect(frame)
            if len(detections) > 0:
                tracker.update(detections)
            cv2.waitKey(50)
        except cv2.error:
            running = False
    capture.release()
    cv2.destroyAllWindows()
def main():
    """Multi object tracking demo.

    Reads data/TrackingBugs.mp4 (596x336 @ 30 fps per the original
    notes), detects object centroids per frame and tracks them with a
    Kalman-filter tracker, drawing each track's trace and its
    position-uncertainty circle.

    Keys: Esc quits, 'p' toggles pause.

    Fix: the loop now stops when the video ends (cap.read() yields no
    frame) instead of crashing inside the detector on a None frame.
    """
    # Create opencv video capture object
    cap = cv2.VideoCapture('data/TrackingBugs.mp4')
    # Create Object Detector
    detector = Detectors()
    # Create Object Tracker
    # (30, 30, 10, 100) -- presumably dist_thresh, max_frames_to_skip,
    # max_trace_length, trackIdCount; TODO confirm against Tracker.
    tracker = Tracker(30, 30, 10, 100)
    # Variables initialization
    skip_frame_count = 0
    track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                    (0, 255, 255), (255, 0, 255), (255, 127, 255),
                    (127, 0, 255), (127, 0, 127)]
    pause = False
    # Infinite loop to process video frames
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        # Stop cleanly at end of stream
        if not ret or frame is None:
            break
        # Make copy of original frame
        orig_frame = copy.copy(frame)
        # Skip initial frames that display logo
        if skip_frame_count < 15:
            skip_frame_count += 1
            continue
        # Detect and return centroids of the objects in the frame
        centers = detector.Detect(frame)
        # If centroids are detected then track them
        if len(centers) > 0:
            # Track object using Kalman Filter
            tracker.Update(centers)
            # Draw each track's predicted position (grey uncertainty
            # circle) and its trace, one color per track_id
            for i in range(len(tracker.tracks)):
                position = tracker.tracks[i].position()
                error = tracker.tracks[i].position_error()
                cv2.circle(frame, (position[0], position[1]), error,
                           (200, 200, 200), 2)
                if len(tracker.tracks[i].trace) > 1:
                    for j in range(len(tracker.tracks[i].trace) - 1):
                        # Draw trace line
                        x1 = tracker.tracks[i].trace[j][0][0]
                        y1 = tracker.tracks[i].trace[j][1][0]
                        x2 = tracker.tracks[i].trace[j + 1][0][0]
                        y2 = tracker.tracks[i].trace[j + 1][1][0]
                        clr = tracker.tracks[i].track_id % 9
                        cv2.line(frame, (int(x1), int(y1)),
                                 (int(x2), int(y2)), track_colors[clr], 2)
        # Display the resulting tracking frame and the original
        cv2.imshow('Tracking', frame)
        cv2.imshow('Original', orig_frame)
        # Slower the FPS
        cv2.waitKey(1)
        # Check for key strokes
        k = cv2.waitKey(50) & 0xff
        if k == 27:  # 'esc' key has been pressed, exit program.
            break
        if k == 112:  # 'p' has been pressed. this will pause/resume the code.
            pause = not pause
            if pause is True:
                print("Code is paused. Press 'p' to resume..")
                while pause is True:
                    # stay in this loop until 'p' is pressed again
                    key = cv2.waitKey(30) & 0xff
                    if key == 112:
                        pause = False
                        print("Resume code..!!")
                        break
    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
def main(tiffStep):
    """Main function for multi object tracking.

    Runs detection + Kalman tracking over the preloaded contour
    sequence in the module-level ``flyContours``, draws each track's
    trace onto the frame, and writes the annotated frames to
    multi-page TIFF files in batches of ``tiffStep`` frames.

    Args:
        tiffStep: number of frames per output TIFF batch.

    Relies on module-level names: imFolder, newx, newy, flyContours,
    getBgSubIm, random_color, startNT (presumably "start new thread"
    -- TODO confirm), Detectors, Tracker.  Python 2 code (xrange,
    integer division).
    """
    #tiffStep = 512
    # Create Object Detector
    detector = Detectors()
    # Create Object Tracker
    # (200, 50, 25, 100) -- presumably dist_thresh, max_frames_to_skip,
    # max_trace_length, trackIdCount; TODO confirm against Tracker.
    tracker = Tracker(200, 50, 25, 100)
    # Variables initialization
    pause = False
    # 256 random colors; NOTE(review): indexing below uses the raw
    # track_id, so ids >= 256 would raise IndexError.
    track_colors = [random_color() for x in xrange(256)]
    # Timers: stTmAv for the overall average FPS, stTm per batch
    stTmAv = time.time()
    outFName = imFolder + '_traced_0-' + str(tiffStep) + '.tiff'
    # Frame buffer for one TIFF batch
    imgs = np.zeros((tiffStep, newy, newx, 3), dtype=np.uint8)
    tTm = 0
    stTm = time.time()
    # Loop over all frames
    for fr in xrange(len(flyContours[0])):
        # Background-subtracted frame used for detection
        frame = getBgSubIm((flyContours[0][fr], flyContours[1]))
        frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
        # Original frame used for drawing the annotated output
        outFrame = cv2.cvtColor(flyContours[0][fr], cv2.COLOR_GRAY2BGR)
        # Detect and return centroids of the objects in the frame
        centers = detector.DetectCM(frame)
        # If centroids are detected then track them
        if (len(centers) > 0):
            # Track object using Kalman Filter
            tracker.Update(centers)
            # For identified object tracks draw tracking line,
            # one color per track_id
            for i in range(len(tracker.tracks)):
                if (len(tracker.tracks[i].trace) > 1):
                    for j in range(len(tracker.tracks[i].trace) - 1):
                        # Draw trace line
                        x1 = tracker.tracks[i].trace[j][0][0]
                        y1 = tracker.tracks[i].trace[j][1][0]
                        x2 = tracker.tracks[i].trace[j + 1][0][0]
                        y2 = tracker.tracks[i].trace[j + 1][1][0]
                        clr = tracker.tracks[i].track_id
                        cv2.line(outFrame, (int(x1), int(y1)),
                                 (int(x2), int(y2)), track_colors[clr], 2)
                        cv2.circle(outFrame, (int(x2), int(y2)), 2,
                                   track_colors[clr], 2)
                        cv2.circle(outFrame, (int(x2), int(y2)), 2,
                                   track_colors[clr], 1)
        # Buffer the resized annotated frame
        img = cv2.resize(outFrame, (newx, newy))
        imN = (fr % tiffStep)
        if (imN == 0 and fr > 0):
            # Batch boundary: flush the previous tiffStep frames to a
            # TIFF on a background thread and start a fresh buffer
            outFName = imFolder + '_traced_' + str(
                fr - tiffStep) + '-' + str(fr) + '.tiff'
            startNT(imageio.mimwrite, (outFName, imgs))
            imgs = np.zeros((tiffStep, newy, newx, 3), dtype=np.uint8)
            tm = time.time()
            fps = (tiffStep / (tm - stTm))
            tTm += tm - stTm
            print('FPS: %0.3f (frame# %d)' % (fps, fr))
            stTm = tm
        imgs[imN] = img
    # Write the final partial batch (frames since the last flush);
    # the integer division relies on Python 2 `/` semantics.
    imageio.mimwrite(
        imFolder + '_traced_' + str(
            (fr / tiffStep) * tiffStep) + '-' + str(fr) + '.tiff',
        imgs[:imN])
    print('Tracking average FPS: %0.3f' % (float(fr) /
                                           (time.time() - stTmAv)))
    cv2.destroyAllWindows()
def main():
    """Multi-fish tracking over an .avi recording.

    Detects fish per frame, tracks position and orientation with a
    Kalman-filter tracker, writes annotated / illustration images to
    ./output/<directory>/ and appends per-fish rows to data.csv.

    Fixes relative to the original:
    - C-style ``//`` comments (invalid Python: a SyntaxError on
      ``//pause = False`` and a NameError in the floor divisions)
      replaced with ``#`` comments, translated to English.
    - When the video ends (frame is None) the loop now breaks after
      releasing the capture instead of falling through and crashing
      on the None frame.
    """
    directory = '1'
    # Only frames with count_start < count < count_end are processed
    count_start = 40
    # mm-per-pixel conversion factor -- TODO confirm calibration
    ratio = 2 / 98
    count_end = count_start + 800
    cap = cv.VideoCapture("./video/data/" + directory + ".avi")
    # accumulates one row per fish per frame; written to csv below
    output_data = pd.DataFrame()
    # 0 - background subtraction using image, 1 - background online training
    detector = Detectors(mode=0)
    # Create Object Tracker; ang_mode=1 also tracks/draws orientation
    ang_mode = 1
    tracker = Tracker(dist_thresh=200, ang_thresh=120, max_frames_to_skip=5,
                      max_trace_length=3, trackIdCount=0, ang_mode=ang_mode)
    # Variables initialization: one distinct BGR color per track id
    track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                    (0, 255, 255), (255, 0, 255), (235, 14, 192),
                    (0, 69, 255), (0, 252, 100), (58, 53, 159),
                    (240, 94, 28), (145, 173, 112), (0, 0, 0),
                    (62, 129, 27), (129, 172, 93), (81, 125, 34),
                    (104, 163, 233), (45, 128, 199), (0, 225, 34),
                    (104, 0, 164), (45, 60, 0), (45, 0, 199),
                    (176, 224, 230), (65, 105, 225), (106, 90, 205),
                    (135, 206, 235), (56, 94, 15), (8, 46, 84),
                    (127, 225, 222), (64, 24, 208), (127, 225, 0),
                    (61, 145, 64), (0, 201, 87), (34, 139, 34),
                    (124, 252, 0), (50, 205, 50), (189, 252, 201),
                    (107, 142, 35), (48, 128, 20), (255, 153, 18),
                    (235, 142, 85), (255, 227, 132), (218, 165, 205),
                    (227, 168, 205), (255, 97, 0), (237, 145, 33)] * 2
    count = 0
    pd_index = 0
    # Infinite loop to process video frames
    while True:
        count = count + 1
        # Capture frame-by-frame
        ret, frame = cap.read()
        # When everything done, release the capture and stop
        if frame is None:
            cap.release()
            cv.destroyAllWindows()
            break
        # Make copy of original frame
        orig_frame = copy.copy(frame)
        if count_start < count < count_end:
            # Detect and return centroids of the objects in the frame
            detections, _, _, _, _ = detector.Detect(frame)
            # Track object using Kalman Filter
            tracker.Update(detections)
            font = cv.FONT_HERSHEY_SIMPLEX
            # White canvas for the illustration-only output image
            blank = np.zeros_like(frame)
            blank += 255
            x_margin = blank.shape[1]
            y_margin = blank.shape[0]  # (unused; kept from original)
            # For identified object tracks draw trace lines and labels
            for i in range(len(tracker.tracks)):
                if len(tracker.tracks[i].trace) > 1:
                    for j in range(len(tracker.tracks[i].trace) - 1):
                        # Draw trace line
                        x1 = tracker.tracks[i].trace[j][0][0]
                        y1 = tracker.tracks[i].trace[j][1][0]
                        x2 = tracker.tracks[i].trace[j + 1][0][0]
                        y2 = tracker.tracks[i].trace[j + 1][1][0]
                        clr = i
                        cv.line(frame, (int(x1), int(y1)), (int(x2), int(y2)),
                                track_colors[clr], 2)
                    # Label anchor: latest trace point, clamped so the
                    # text stays inside the frame
                    x3 = tracker.tracks[i].trace[-1][0][0]
                    y3 = tracker.tracks[i].trace[-1][1][0]
                    x3_r = x3
                    y3_r = y3
                    if x3 < 50:
                        x3 = 60
                    if y3 < 50:
                        y3 = 50
                    if x3 > x_margin - 250:
                        x3 = x_margin - 250
                    x3 = int(x3)
                    y3 = int(y3)
                    # Format position (mm) and heading (deg), keeping
                    # two characters past the integer part
                    len_x = len(str(int(x3_r * ratio)))
                    len_y = len(str(int(y3_r * ratio)))
                    len_deg = len(str(int(tracker.tracks[i].angles[-1])))
                    str_x = str(x3_r * ratio)[:len_x + 2]
                    str_y = str(y3_r * ratio)[:len_y + 2]
                    str_deg = str(tracker.tracks[i].angles[-1])[:len_deg + 2]
                    clr = tracker.tracks[i].track_id
                    # Draw the fish marker at the raw detection when one
                    # exists for this frame, otherwise at the predicted
                    # position
                    if tracker.tracks[i].b_detected[-1]:
                        frame = Draw_single(
                            frame, tracker.tracks[i].trace_raw[-1],
                            tracker.tracks[i].angles[-1], track_colors[clr],
                            b_point=tracker.tracks[i].b_detected[-1],
                            b_draw_ang=ang_mode)
                    else:
                        frame = Draw_single(
                            frame, tracker.tracks[i].position,
                            tracker.tracks[i].angles[-1], track_colors[clr],
                            b_point=tracker.tracks[i].b_detected[-1],
                            b_draw_ang=ang_mode)
                    frame = cv.putText(frame, "Fish" + str(clr) + ":",
                                       (int(x3) - 60, int(y3) - 20), font,
                                       0.7, (215, 0, 0), 2)
                    if ang_mode == 1:
                        frame = cv.putText(
                            frame,
                            "(" + str_x + "mm," + str_y + "mm," + str_deg + "deg)",
                            (int(x3) + 20, int(y3) - 20), font, 0.6,
                            (0, 165, 255), 2)
                    else:
                        frame = cv.putText(
                            frame, "(" + str_x + "mm," + str_y + "mm)",
                            (int(x3) + 20, int(y3) - 20), font, 0.6,
                            (0, 165, 255), 2)
                    # Mirror the annotation onto the white illustration
                    blank = Draw_single(blank, tracker.tracks[i].trace[-1],
                                        tracker.tracks[i].angles[-1],
                                        track_colors[clr], dist=60, dist1=15,
                                        b_point=False)
                    blank = cv.putText(blank, "Fish" + str(i) + ":",
                                       (int(x3) - 60, int(y3) - 20), font,
                                       0.7, (215, 0, 0), 2)
                    blank = cv.putText(
                        blank,
                        "(" + str_x + "mm," + str_y + "mm," + str_deg + "deg)",
                        (int(x3) + 20, int(y3) - 20), font, 0.6,
                        (0, 165, 255), 2)
            # write images
            cv.imwrite("./output/" + directory + "/and/" + str(count) + ".jpg",
                       detector.frame_and)
            cv.imwrite("./output/" + directory + "/illu/" + str(count) + ".jpg",
                       blank)
            cv.imwrite("./output/" + directory + "/final/" + str(count) + ".jpg",
                       frame)
            frame = cv.putText(frame, str(count), (50, 50), font, 1,
                               (0, 0, 255), 2)
            # Display the resulting tracking frame
            cv.namedWindow('Tracking', cv.WINDOW_NORMAL)
            cv.imshow('Tracking', frame)
            cv.waitKey(1)
            # write data: one row per live track for this frame
            for i in range(len(tracker.tracks)):
                trace = np.array(tracker.tracks[i].trace_save).reshape(-1, 2)
                trace_x = trace[-1, 0]
                trace_y = trace[-1, 1]
                trace_raw = np.array(tracker.tracks[i].trace_raw).reshape(-1, 2)
                trace_raw_x = trace_raw[-1, 0]
                trace_raw_y = trace_raw[-1, 1]
                angle = np.array(tracker.tracks[i].angles)[-1]
                bendangle = np.array(tracker.tracks[i].bendangles)[-1]
                state = np.array(tracker.tracks[i].states)[-1]
                b_detected = np.array(tracker.tracks[i].b_detected)[-1]
                pd_frame = len(tracker.tracks[i].angles)
                t_id = tracker.tracks[i].track_id
                fish_dict = {"Position_x": trace_x, "Position_y": trace_y,
                             "Detection_x": trace_raw_x,
                             "Detection_y": trace_raw_y,
                             "Orientation": angle, "Bendangles": bendangle,
                             "State": state, "b_detected": b_detected,
                             "Frame": pd_frame, "ID": t_id}
                fish_data = pd.DataFrame(fish_dict, index=[pd_index])
                pd_index += 1
                output_data = pd.concat([output_data, fish_data])
            # Rewrite the csv each frame so interrupted runs keep data
            output_data.to_csv("./output/" + directory + "/data.csv")
        elif count > count_end:
            break
        else:
            pass
# NOTE(review): fragment -- `size`, `fig`, `gs`, `dataDate`,
# `near_detectors`, `start_time`, `end_time` and `anno_opts` are defined
# outside this excerpt; presumably this is the plotting tail of a
# grid-of-detectors figure builder.
for n in range(size):
    for m in range(size):
        # Fetch one detector's time series and plot it in its grid cell
        data = getDataForOneD(dataDate, near_detectors[n, m], start_time,
                              end_time)
        ax = fig.add_subplot(gs[n, m])
        ax.set_xlim([start_time, end_time])
        ax.set_ylim([723, 745])
        # Only the left column keeps y ticks, only the bottom row x ticks
        if m != 0:
            ax.set_yticks([])
        if n != size - 1:
            ax.set_xticks([])
        ax.annotate(near_detectors[n, m], **anno_opts)
        ax.plot(data[0], data[1], lw=1, c='green')
plt.show()


if __name__ == '__main__':
    # For testing purposes only
    date = DataDate('130511', Detectors())
    # NOTE(review): called with 5 args here vs 4 above -- presumably the
    # last datetime is an optional parameter; confirm the signature.
    getDataForOneD(date, '1415',
                   dt.datetime.strptime('10:00:00', '%H:%M:%S'),
                   dt.datetime.strptime('13:00:00', '%H:%M:%S'),
                   dt.datetime.strptime('12:20:00', '%H:%M:%S'))
def task1_1(mogthr, inputpath, dataset):
    """Track vehicles in a jpg frame sequence and estimate their speed.

    Detects moving objects (background subtraction with threshold
    ``mogthr``), tracks them with a Kalman-filter tracker, maps image
    points to a bird's-eye view via a per-dataset homography and
    converts per-frame displacement into a speed value drawn on the
    frame.  Python 2 code (``print M``).

    Args:
        mogthr: threshold passed to the Detectors background model.
        inputpath: directory holding frames named in%06d.jpg.
        dataset: 'highway', 'traffic' or 'ownhighway'; selects tracker
            parameters and the homography source quadrilateral.
    """
    # Create opencv video capture object over the jpg sequence
    path = inputpath + 'in%06d.jpg'
    cap = cv2.VideoCapture(path)
    # Create Object Detector
    detector = Detectors(thr=mogthr, dataset=dataset)
    # Create Object Tracker -- per-dataset parameters (previous values
    # kept in the trailing comments)
    if dataset == 'highway':
        tracker = Tracker(200, 0, 60, 100)  # Tracker(200, 0, 200, 100)
    elif dataset == 'traffic':
        tracker = Tracker(200, 0, 60, 100)  # Tracker(50, 0, 90, 100)
    elif dataset == 'ownhighway':
        tracker = Tracker(45, 0, 60, 100)  # Tracker(50, 0, 90, 100)
    # Variables initialization
    skip_frame_count = 0
    track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                    (0, 255, 255), (255, 0, 255), (255, 127, 255),
                    (127, 0, 255), (127, 0, 127)]
    pause = False
    # Source quadrilateral on the road plane, per dataset
    if dataset == 'highway':
        pts1 = np.float32([[120, 100], [257, 100], [25, 200], [250, 200]])
    elif dataset == 'traffic':
        pts1 = np.float32([[0, 50], [160, 15], [110, 190], [320, 110]])
    elif dataset == 'ownhighway':
        pts1 = np.float32([[190, 100], [290, 100], [60, 200], [250, 200]])
    # Destination rectangle (bird's-eye view, 320x240)
    pts2 = np.float32([[0, 0], [320, 0], [0, 240], [320, 240]])
    M = cv2.getPerspectiveTransform(pts1, pts2)
    print M
    counter = 0
    # Infinite loop to process video frames
    while True:
        counter += 1
        # Capture frame-by-frame
        ret, frame = cap.read()
        # Stop when no frame
        if frame is None:
            break
        # Make copy of original frame
        orig_frame = copy.copy(frame)
        # Detect and return centroids plus per-blob bounding-box data
        centers, xd, yd, wd, hd = detector.Detect(frame, counter)
        vel = []
        # If centroids are detected then track them
        if (len(centers) > 0):
            # Track object using Kalman Filter
            tracker.Update(centers, dataset)
            # For identified object tracks draw tracking line,
            # one color per track_id
            for i in range(len(tracker.tracks)):
                if (len(tracker.tracks[i].trace) > 1):
                    vel = []
                    a = 0
                    # Skip the first 5 trace points (tracker warm-up)
                    for j in range(5, len(tracker.tracks[i].trace) - 1):
                        a = a + 1
                        # Draw trace line
                        x1 = tracker.tracks[i].trace[j][0][0]
                        y1 = tracker.tracks[i].trace[j][1][0]
                        x2 = tracker.tracks[i].trace[j + 1][0][0]
                        y2 = tracker.tracks[i].trace[j + 1][1][0]
                        clr = tracker.tracks[i].track_id % 9
                        cv2.line(frame, (int(x1), int(y1)),
                                 (int(x2), int(y2)), track_colors[clr], 2)
                        if dataset == 'highway':
                            # Only measure inside the calibrated band
                            if y1 > 100 and y2 < 200:
                                # Project both points to bird's-eye view
                                x1r, y1r, z1r = np.dot(M, [x1, y1, 1])
                                x2r, y2r, z2r = np.dot(M, [x2, y2, 1])
                                x1r, y1r = x1r / z1r, y1r / z1r
                                x2r, y2r = x2r / z2r, y2r / z2r
                                # euclidean distance between two points,
                                # scaled by m/pixel and frame-rate factors
                                dist = np.float(
                                    np.sqrt(((int(x2r) - int(x1r)) ** 2) +
                                            ((int(y2r) - int(y1r)) ** 2))
                                ) * np.float(30) / 20 * np.float(24) / 5
                                vel.append(dist)
                        if dataset == 'ownhighway':
                            if y1 > 100 and y2 < 200:
                                x1r, y1r, z1r = np.dot(M, [x1, y1, 1])
                                x2r, y2r, z2r = np.dot(M, [x2, y2, 1])
                                x1r, y1r = x1r / z1r, y1r / z1r
                                x2r, y2r = x2r / z2r, y2r / z2r
                                # euclidean distance between two points
                                dist = np.float(
                                    np.sqrt(((int(x2r) - int(x1r)) ** 2) +
                                            ((int(y2r) - int(y1r)) ** 2))
                                ) * np.float(18) / 20 * np.float(24) / 5
                                vel.append(dist)
                    if not vel == []:
                        # (large commented-out experimentation removed)
                        # Smooth the label by averaging recent samples
                        if dataset == 'ownhighway':
                            if len(vel) < 10:
                                cv2.putText(frame,
                                            ' vel ' + str(int(np.mean(vel))),
                                            (int(xd[0]), int(yd[0] - 4)),
                                            cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                                            (0, 255, 0), 1, cv2.LINE_AA)
                            else:
                                cv2.putText(frame,
                                            ' vel ' + str(int(np.mean(vel[-10:-1]))),
                                            (int(xd[0]), int(yd[0] - 4)),
                                            cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                                            (0, 255, 0), 1, cv2.LINE_AA)
                        if dataset == 'highway':
                            if len(vel) < 20:
                                cv2.putText(frame,
                                            ' vel ' + str(int(np.mean(vel))),
                                            (int(xd[i]), int(yd[i] - 4)),
                                            cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                                            (0, 255, 0), 1, cv2.LINE_AA)
                            else:
                                cv2.putText(frame,
                                            ' vel ' + str(int(np.mean(vel[-20:-1]))),
                                            (int(xd[i]), int(yd[i] - 4)),
                                            cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                                            (0, 255, 0), 1, cv2.LINE_AA)
        # Display homography (bird's-eye) view and save it
        dst = cv2.warpPerspective(frame, M, (320, 240))
        cv2.imshow('Homography', dst)
        cv2.imwrite('../week5/results/hom' + str(counter) + '.png', dst)
        # Display the resulting tracking frame
        cv2.imshow('Tracking', frame)
        cv2.imwrite('../week5/results/out' + str(counter) + '.png', frame)
        # NOTE(review): str(frame) stringifies the whole ndarray into the
        # file name -- probably meant str(counter); left unchanged.
        cv2.imwrite('out' + str(frame) + '.jpg', frame)
        # Display the original frame
        cv2.imshow('Original', orig_frame)
        # Slower the FPS
        cv2.waitKey(1)
        # Check for key strokes
        k = cv2.waitKey(1) & 0xff
        if k == 27:  # 'esc' key has been pressed, exit program.
            break
        if k == 112:  # 'p' has been pressed. this will pause/resume the code.
            pause = not pause
            if (pause is True):
                print("Code is paused. Press 'p' to resume..")
                while (pause is True):
                    # stay in this loop until 'p' is pressed again
                    key = cv2.waitKey(30) & 0xff
                    if key == 112:
                        pause = False
                        print("Resume code..!!")
                        break
    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
11: 'Could not find previous lv0: ', 20: 'Could not make tempature rate: ', 21: 'Could not find previous tempature: ', } warning = switcher.get(id) + comments + '\n' self.__warnings[id].append(warning) if ACTIVE_WARNINGS or id == 1: print(warning) #%% if __name__ == '__main__': # For testing purposes only day = DataDate(input('What date? '), Detectors()) command_completer = WordCompleter(['exit', 'findRates', 'animate', 'loadRaw', 'save', 'clear', 'printl0', 'printNLDN', 'loadRaw TASD', 'loadRaw NLDN', 'findNLDNcoors']) while True: user_input = prompt(u'D> ', history=FileHistory('history.txt'), auto_suggest=AutoSuggestFromHistory(), completer=command_completer ) if user_input == 'exit': break elif user_input == 'findRates': day.findRates() elif user_input == 'findNLDNcoors':
def main():
    """Track biopsy-tool detections in a colonoscopy video.

    Reads the video, crops each frame to the endoscope view, detects
    object centroids and tracks them with a Kalman-filter tracker,
    labelling frames with detections as 'Biopsy'.

    Keys: Esc quits, 'p' toggles pause.

    Fix: the loop now stops when the video ends (cap.read() yields no
    frame) instead of crashing when slicing a None frame.
    """
    # Create opencv video capture object
    cap = cv2.VideoCapture('G:/cmu/colonoscopy/New folder/Cold.mp4')
    # Create Object Detector
    detector = Detectors()
    # Create Object Tracker
    # (160, 1000, 5, 100) -- presumably dist_thresh, max_frames_to_skip,
    # max_trace_length, trackIdCount; TODO confirm against Tracker.
    tracker = Tracker(160, 1000, 5, 100)
    # Variables initialization
    skip_frame_count = 0
    track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                    (0, 255, 255), (255, 0, 255), (255, 127, 255),
                    (127, 0, 255), (127, 0, 127)]
    pause = False
    num = 0
    frame_num = 0
    # Infinite loop to process video frames
    while True:
        frame_num += 1
        print(frame_num)
        # Capture frame-by-frame
        ret, frame = cap.read()
        # Stop cleanly at end of stream (frame would be None)
        if not ret or frame is None:
            break
        # Crop to the endoscope view
        frame = frame[30:550, 400:930]
        # Make copy of original frame
        orig_frame = copy.copy(frame)
        # Skip initial frames that display logo
        if skip_frame_count < 15:
            skip_frame_count += 1
            continue
        # Detect and return centroids of the objects in the frame
        centers = detector.Detect1(orig_frame)
        # If centroids are detected then track them
        if len(centers) > 0:
            text = 'Biopsy'
            cv2.putText(orig_frame, text, (100, 100),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2,
                        lineType=cv2.LINE_AA)
            # Track object using Kalman Filter
            tracker.Update(centers)
            # For identified object tracks draw tracking line,
            # one color per track_id
            for i in range(len(tracker.tracks)):
                if len(tracker.tracks[i].trace) > 1:
                    for j in range(len(tracker.tracks[i].trace) - 1):
                        # Draw trace line
                        x1 = tracker.tracks[i].trace[j][0][0]
                        y1 = tracker.tracks[i].trace[j][1][0]
                        x2 = tracker.tracks[i].trace[j + 1][0][0]
                        y2 = tracker.tracks[i].trace[j + 1][1][0]
                        clr = tracker.tracks[i].track_id % 9
                        cv2.line(frame, (int(x1), int(y1)),
                                 (int(x2), int(y2)), track_colors[clr], 2)
        # Display the tracking and original frames
        cv2.imshow('Tracking', frame)
        cv2.imshow('Original', orig_frame)
        print(num)
        # Slower the FPS
        cv2.waitKey(20)
        # Check for key strokes
        k = cv2.waitKey(50) & 0xff
        if k == 27:  # 'esc' key has been pressed, exit program.
            break
        if k == 112:  # 'p' has been pressed. this will pause/resume the code.
            pause = not pause
            if pause is True:
                print("Code is paused. Press 'p' to resume..")
                while pause is True:
                    # stay in this loop until 'p' is pressed again
                    key = cv2.waitKey(30) & 0xff
                    if key == 112:
                        pause = False
                        print("Resume code..!!")
                        break
    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
def Trace_tracking(images):
    """Track multiple traces through an image sequence and write the
    resulting 3D traces (x, y, z, radius) to a text file.

    Args:
        images: ordered list of image file names (frames); the digits
            in the first name give the Z-axis start index and the
            output file name.

    Relies on module-level names: save_path_track, dir_path,
    dist_thresh, max_frames_to_skip, max_trace_length, trackIdCount,
    radius_min, radius_max, center_radius, Detectors, Tracker.
    """
    # extract first file name (without extension)
    first_filename = os.path.splitext(images[0])[0]
    # extract first file index from the digits in the name
    first_number = int(re.search(r'\d+', first_filename).group(0))
    # define trace result path
    outfile = save_path_track + str('{:04}'.format(first_number)) + '.txt'
    print(outfile)
    image_path = os.path.join(dir_path, images[0])
    # load image from image path
    frame = cv2.imread(image_path)
    # Determine the width and height from the first image
    height, width, channels = frame.shape
    length = len(images)
    print("Image sequence size: {0} {1} {2}\n".format(width, height, length))
    # Create Object Detector
    detector = Detectors()
    # Create Object Tracker, arguments:
    # dist_thresh, max_frames_to_skip, max_trace_length, trackIdCount.
    # When the distance threshold is exceeded the track is deleted and
    # a new one is created.
    tracker = Tracker(dist_thresh, max_frames_to_skip, max_trace_length,
                      trackIdCount)
    # Variables initialization
    skip_frame_count = 0
    # frame ID; NOTE(review): ID is never incremented, so the
    # early-exit check below can only trigger for an empty list and
    # detector.Detect always receives ID == 0 -- confirm intent.
    ID = 0
    # per-frame records of radii and center locations
    radius_track = []
    centers_track = []
    # loop to process the image frames for tracking
    for frame_ID, image in enumerate(images):
        # Capture frame-by-frame
        image_path = os.path.join(dir_path, image)
        # load image frame
        frame = cv2.imread(image_path)
        # exit the loop if reach the end frame
        if ID == len(images):
            print("End of frame sequence!")
            break
        # Make copy of original frame
        orig_frame = copy.copy(frame)
        print("Processing frame {}...".format(frame_ID))
        # Detect and return centroids of the objects in the frame
        (centers, radius_rec) = detector.Detect(frame, ID, radius_min,
                                                radius_max)
        # record radius and center locations
        radius_track.append(radius_rec)
        centers_track.append(centers)
        # If centroids are detected then track them
        if (len(centers) > 0):
            # Track object using Kalman Filter
            tracker.Update(centers)
            print("Tracker size: {}...".format(len(tracker.tracks)))
    # flatten the radius records into one array
    radius_track = np.hstack(radius_track)
    coord_radius = []
    # combine x, y coordinates of every detection across all frames
    for i in range(0, len(centers_track)):
        for j in range(0, len(centers_track[i])):
            coord_radius.append(np.array(centers_track[i][j]))
    coord_radius = np.array(coord_radius)
    # start index value along Z axis
    offset = first_number
    # write output as txt file
    with open(outfile, 'w') as f:
        # loop all tracked objects
        for i in range(len(tracker.tracks)):
            if (len(tracker.tracks[i].trace) > 2):
                # acquire dimension of current tracker
                dim = len(tracker.tracks[i].trace)
                # extract point data from current tracker
                point = np.asarray(tracker.tracks[i].trace)
                # acquire shape of points
                nsamples, nx, ny = point.shape
                # reshape points to (nsamples, nx*ny)
                point = point.reshape((nsamples, nx * ny))
                # extract x, y coordinates; z is the frame index range
                x = np.asarray(point[:, 0]).flatten()
                y = np.asarray(point[:, 1]).flatten()
                z = np.asarray(range(offset, dim + offset)).flatten()
                # compute average radius along this trace
                avg_radius = center_radius(x, y, radius_track, coord_radius)
                # reshape radius to one value per point
                r = np.asarray(avg_radius * np.ones((len(x), 1))).flatten()
                # write out tracing trace result (skip all-zero traces)
                if ((len(x) == len(y) == len(z)) and sum(x) != 0):
                    # save trace points as txt file
                    f.write("#Trace {0} \n".format(i))
                    np.savetxt(f, np.c_[x, y, z, r], fmt='%-7.2f')
        # write end mark and close file (the explicit close is
        # redundant inside the with-block but harmless)
        f.write("#End\n")
        f.close()
def main():
    """Count cars crossing a counting line in a traffic video.

    Detects moving objects, tracks them with a Kalman-filter tracker,
    and when a tracked object crosses the line y=600 within
    200 <= x <= 960 it is counted; a CNN (module-level ``model``)
    reclassifies a crop of the object to move misdetections into the
    non-car count.

    Fixes relative to the original:
    - ``mainlist = [[None, None]] * 1000`` created 1000 references to
      the SAME inner list, so updating one track's last position
      clobbered every slot; each slot is now an independent list.
    - The loop breaks cleanly when the video ends instead of crashing
      in the detector on a None frame.
    - The bare ``except:`` around the CNN crop/classify is narrowed to
      ``except Exception`` so KeyboardInterrupt still works.
    """
    # Create opencv video capture object
    cap = cv2.VideoCapture('/home/deepak/innovation_lab_files/vid1_new.mp4')
    # Create Object Detector
    detector = Detectors()
    # Create Object Tracker
    # (160, 30, 5, 100) -- presumably dist_thresh, max_frames_to_skip,
    # max_trace_length, trackIdCount; TODO confirm against Tracker.
    tracker = Tracker(160, 30, 5, 100)
    # Variables initialization
    skip_frame_count = 0
    track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                    (0, 255, 255), (255, 0, 255), (255, 127, 255),
                    (127, 0, 255), (127, 0, 127)]
    pause = False
    # Last counted position per track_id (indexed by track_id).
    # Each slot must be a DISTINCT list -- see the docstring.
    mainlist = [[None, None] for _ in range(1000)]
    CarCount = 0
    NoneCarCount = 0
    NoneVehicle = 0
    # Infinite loop to process video frames
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        # Stop cleanly at end of stream
        if not ret or frame is None:
            break
        # Make copy of original frame
        orig_frame = copy.copy(frame)
        # Skip initial frames that display logo
        if skip_frame_count < 15:
            skip_frame_count += 1
            continue
        # Detect and return centroids of the objects in the frame
        centers = detector.Detect(frame)
        newcenter = []
        # If centroids are detected then track them
        if len(centers) > 0:
            # Track object using Kalman Filter
            tracker.Update(centers)
            # For identified object tracks draw tracking line,
            # one color per track_id
            for i in range(len(tracker.tracks)):
                if len(tracker.tracks[i].trace) > 4:
                    for j in range(len(tracker.tracks[i].trace) - 1):
                        # Draw trace line
                        x1 = tracker.tracks[i].trace[j][0][0]
                        y1 = tracker.tracks[i].trace[j][1][0]
                        x2 = tracker.tracks[i].trace[j + 1][0][0]
                        y2 = tracker.tracks[i].trace[j + 1][1][0]
                        clr = tracker.tracks[i].track_id % 9
                        cv2.line(frame, (int(x1), int(y1)),
                                 (int(x2), int(y2)), track_colors[clr], 2)
                    # Label the track and remember its latest drawn
                    # point for the counting pass below
                    cv2.putText(frame, str(tracker.tracks[i].track_id),
                                (int(x1), int(y1)),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                                (0, 0, 255), 2)
                    newcenter.append(
                        [int(x1), int(y1), tracker.tracks[i].track_id])
        # Counting line and running totals
        cv2.line(frame, (200, 600), (960, 600), (139, 0, 0), 8)
        cv2.putText(frame, 'Car Count =' + str(CarCount), (30, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (92, 142, 215), 3)
        cv2.putText(frame, 'Non Car Count =' + str(NoneCarCount), (300, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (92, 142, 215), 3)
        print(CarCount + NoneCarCount)
        cv2.imshow('Tracking', frame)
        # Match current detections against tracked points and count
        # the line crossings
        for j in range(len(centers)):
            for i in range(len(newcenter)):
                a = newcenter[i][0]
                b = newcenter[i][1]
                e = newcenter[i][2]
                c = centers[j][0][0]
                d = centers[j][1][0]
                temp_len = np.sqrt((a - c) * (a - c) + (b - d) * (b - d))
                if temp_len < 7:
                    if mainlist[e][0] != None:
                        # Previous recorded position of this track
                        c = mainlist[e][0]
                        d = mainlist[e][1]
                        # Crossed the counting line moving downward?
                        if ((d <= 600) and (c >= 200) and (c <= 960)
                                and (a >= 200) and (a <= 960) and b >= 600):
                            CarCount += 1
                            s1 = orig_frame.shape[0]
                            s2 = orig_frame.shape[1]
                            # Reclassify a crop around the object.
                            # NOTE(review): the crop indexes rows with
                            # the x coordinate (a) -- possibly swapped;
                            # kept as in the original.
                            try:
                                img = orig_frame[a - 80:a + 80, b - 80:b + 80]
                                img = cv2.resize(img, (img_width, img_height))
                                arr = np.array(img).reshape(
                                    (3, img_width, img_height))
                                arr = np.expand_dims(arr, axis=0)
                                prediction = model.predict(arr)[0]
                                bestclass = ''
                                bestconf = -1
                                best = [
                                    'non-vehicle', 'vehicle', 'non-vehicle',
                                    'non-vehicle', 'non-vehicle',
                                    'non-vehicle', 'non-vehicle',
                                    'non-vehicle', 'non-vehicle', 'vehicle'
                                ]
                                for n in [0, 1, 2]:
                                    if prediction[n] > bestconf:
                                        bestclass = n
                                        bestconf = prediction[n]
                                # Classes 1 and 9 are 'vehicle' in best
                                if bestclass != 1 and bestclass != 9:
                                    NoneVehicle += 1
                                    if NoneVehicle % 10 == 2:
                                        CarCount -= 1
                                        NoneCarCount += 1
                            except Exception:
                                print('this is already vehicle')
                        # Update the last seen position for this track
                        mainlist[e][0] = a
                        mainlist[e][1] = b
                    else:
                        # First sighting of this track_id: record it
                        mainlist[e][0] = a
                        mainlist[e][1] = b
                    newcenter.pop(i)
                    break
        # Slower the FPS
        cv2.waitKey(50)
        # Check for key strokes
        k = cv2.waitKey(50) & 0xff
        if k == 27:  # 'esc' key has been pressed, exit program.
            break
        if k == 112:  # 'p' has been pressed. this will pause/resume the code.
            pause = not pause
            if pause is True:
                print("Code is paused. Press 'p' to resume..")
                while pause is True:
                    # stay in this loop until 'p' is pressed again
                    key = cv2.waitKey(30) & 0xff
                    if key == 112:
                        pause = False
                        print("Resume code..!!")
                        break
    # When everything done, release the capture
    print("this is final car count ")
    print(CarCount)
    cap.release()
    cv2.destroyAllWindows()
def main():
    """Track multiple objects in a video using Kalman filtering.

    Reads frames from a hard-coded video file, detects object centroids
    with ``Detectors``, feeds them to ``Tracker`` and shows the tracking
    and original frames side by side.

    Controls: 'q' quits, 'p' toggles pause/resume.

    Args:
        None
    Return:
        None
    """
    # Raw string literal: in a normal string '\U' starts an 8-digit
    # unicode escape, so the original non-raw path is a SyntaxError on
    # Python 3.
    cap = cv2.VideoCapture(
        r'C:\Users\user\Documents\Iot-Tracking\CV2\kalman_filter_multi_object_tracking-master\data\RAW_ Moment van mows down pedestrians in Barcelona caught on camera (DISTURBING FOOTAGE).mp4'
    )
    #cap = cv2.VideoCapture(0)

    # Create Object Detector
    detector = Detectors()

    # Create Object Tracker
    tracker = Tracker(160, 30, 5, 100)

    # Variables initialization
    skip_frame_count = 0
    track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                    (0, 255, 255), (255, 0, 255), (255, 127, 255),
                    (127, 0, 255), (127, 0, 127)]
    pause = False

    # Process video frames until the stream ends or 'q' is pressed.
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        # Stop cleanly at end of stream: without this check a None frame
        # would be passed on to copy/Detect and crash.
        if not ret:
            break

        # Make copy of original frame
        orig_frame = copy.copy(frame)

        # Skip initial frames that display logo
        if skip_frame_count < 15:
            skip_frame_count += 1
            continue

        # Detect and return centroids of the objects in the frame
        centers = detector.Detect(frame)

        # If centroids are detected then track them
        if len(centers) > 0:
            # Track object using Kalman Filter
            tracker.Update(centers)

            # For identified object tracks draw tracking line;
            # color is selected by track_id so each track is distinct.
            for i in range(len(tracker.tracks)):
                if len(tracker.tracks[i].trace) > 1:
                    for j in range(len(tracker.tracks[i].trace) - 1):
                        # Trace points are 2x1 column vectors: [x][0], [y][0].
                        x1 = tracker.tracks[i].trace[j][0][0]
                        y1 = tracker.tracks[i].trace[j][1][0]
                        x2 = tracker.tracks[i].trace[j + 1][0][0]
                        y2 = tracker.tracks[i].trace[j + 1][1][0]
                        clr = tracker.tracks[i].track_id % 9
                        #cv2.line(frame, (int(x1), int(y1)),
                        #         (int(x2), int(y2)), track_colors[clr], 2)

            # Display the resulting tracking frame
            cv2.imshow('Tracking', frame)

        # Display the original frame
        cv2.imshow('Original', orig_frame)

        # Single waitKey both slows the FPS and polls the keyboard
        # (the original waited twice back to back).
        k = cv2.waitKey(50) & 0xff
        if k == ord('q'):  # 'q' quits the program
            break
        if k == ord('p'):  # 'p' pauses/resumes the loop
            pause = not pause
            if pause:
                print("Code is paused. Press 'p' to resume..")
                while pause:
                    # Stay here until 'p' is pressed again.
                    key = cv2.waitKey(1) & 0xff
                    if key == ord('p'):
                        pause = False
                        print("Resume code..!!")
                        break

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
def main():
    """Track objects in a binary-mask video and draw their traces.

    Reads 'data/video_3_bin.mp4', forces each frame to a 0/255 mask,
    detects centroids with ``Detectors``, tracks them with a Kalman
    ``Tracker`` and draws each track's trace in a per-track color.

    Return:
        None
    """
    # Create opencv video capture object
    # cap = cv2.VideoCapture('data/TrackingBugs.mp4')
    cap = cv2.VideoCapture('data/video_3_bin.mp4')

    # Create Object Detector
    detector = Detectors()

    # Create Object Tracker
    tracker = Tracker(160, 30, 5, 100)

    # Variables initialization
    skip_frame_count = 0
    track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                    (0, 255, 255), (255, 0, 255), (255, 127, 255),
                    (127, 0, 255), (127, 0, 127)]
    frame_count = 1

    # Process video frames until the stream is exhausted.
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        # Check ret BEFORE touching the frame: at end of stream frame is
        # None and indexing it raised TypeError in the original, so the
        # "all processed" message was never reached.
        if not ret:
            print("All frames were processed")
            break

        # Make copy of original frame
        orig_frame = copy.copy(frame)

        # Convert binary image to full-intensity greyscale
        frame[frame != 0] = 255

        # Skip initial frames that display logo
        if skip_frame_count < 1:
            skip_frame_count += 1
            continue

        # print() calls for consistency with the other entry points in
        # this file (the original used Python 2 print statements).
        print("Processing frame " + format(frame_count))
        frame_count += 1

        # Detect and return centroids of the objects in the frame
        centers = detector.Detect(frame)

        # If centroids are detected then track them
        if len(centers) > 0:
            # Track object using Kalman Filter
            tracker.Update(centers)

            # For identified object tracks draw tracking line;
            # color is selected by track_id so each track is distinct.
            for i in range(len(tracker.tracks)):
                if len(tracker.tracks[i].trace) > 1:
                    for j in range(len(tracker.tracks[i].trace) - 1):
                        # Trace points are 2x1 column vectors: [x][0], [y][0].
                        x1 = tracker.tracks[i].trace[j][0][0]
                        y1 = tracker.tracks[i].trace[j][1][0]
                        x2 = tracker.tracks[i].trace[j + 1][0][0]
                        y2 = tracker.tracks[i].trace[j + 1][1][0]
                        clr = tracker.tracks[i].track_id % 9
                        cv2.line(frame, (int(x1), int(y1)),
                                 (int(x2), int(y2)), track_colors[clr], 2)

            # Display the resulting tracking frame
            cv2.imshow('Tracking', frame)

        # Display the original frame
        cv2.imshow('Original', orig_frame)

        # Slower the FPS
        cv2.waitKey(50)

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()