import collections
import time

import numpy as np

# `sf`, `pai` and `draw` are project-local helper modules (frame/statistics
# utilities, point-at-infinity utilities and drawing utilities) assumed to be
# imported elsewhere in these scripts.


def generate_straight_lines_array(frames, m, K, v_speedomether=None, v_limit=10,
                                  alpha=0.8, mask=None):
    print('\t generating straight lines array:')
    file = open('points_data.txt', 'w')
    r_sq = sf.get_mahalanobis_distance_sq_by_probability(alpha)
    r = np.sqrt(r_sq)
    K_inv = np.linalg.inv(K)
    frame_nums = len(frames)
    status = np.zeros(frame_nums, dtype=int)
    times = collections.deque(maxlen=frame_nums)
    for i in range(1, frame_nums):
        t0 = time.time()
        area1 = sf.get_box(frames[i - 1], mask)
        area2 = sf.get_box(frames[i], mask)
        if v_speedomether is not None:
            if v_speedomether[i] < v_limit:
                continue
        pt, st = pai.find_OF_crossing_pt(area1, area2, method='lk')
        if not st:
            status[i] = status[i - 1]
            continue
        pt += mask[0]
        r_point_sq = sf.get_mahalanobis_distance_sq(pt, m, K_inv)
        r_point = np.sqrt(r_point_sq)
        # print('{:.2f} vs {:.2f}'.format(r_point, 3 * r))
        if r_point < 3 * r:
            status[i] = 1
        file.write('{}|{:.02f} vs {:.02f} ==> {}\n'.format(pt, r_point, 3 * r, status[i]))
        tk = time.time()
        T = tk - t0
        times.append(T)
        T = (sum(times) / len(times)) * (frame_nums - i)
        t_min = T // 60
        t_sec = T % 60
        print('\tTime left: {}min {:.02f}sec'.format(int(t_min), t_sec))
    file.close()
    return status
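# A minimal usage sketch for generate_straight_lines_array (an illustration,
# not part of the original script): the frames are read up front with OpenCV,
# while the mean point `m` and covariance `K` of the point at infinity would
# normally come from an earlier calibration step; the values below are
# hypothetical placeholders.
import cv2
import numpy as np

frames = []
cam = cv2.VideoCapture('test1.mp4')
while True:
    ok, frame = cam.read()
    if not ok:
        break
    frames.append(frame)
cam.release()

mask = [np.array([200, 300]), np.array([900, 700])]      # hypothetical [top_left, bottom_right]
m = np.array([640.0, 360.0])                              # hypothetical mean infinity point
K = np.array([[400.0, 0.0], [0.0, 250.0]])                # hypothetical 2x2 covariance
status = generate_straight_lines_array(frames, m, K, v_limit=10, alpha=0.8, mask=mask)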
def get_grid(frame, mask, m, k):
    '''Build an (m * k, 1, 2) array of points laid out on a regular grid inside the mask.'''
    area1 = sf.get_box(frame, mask)
    y, x, z = area1.shape
    N = m * k
    # grid points
    points = np.zeros((N, 1, 2), dtype=np.float32)
    Ax = mask[0][0]
    Ay = mask[0][1]
    Bx = mask[1][0]
    By = mask[1][1]
    y, x, z = frame.shape
    # keep a 2 % margin on each side of the mask
    y0 = Ay + 0.02 * (By - Ay)
    x0 = Ax + 0.02 * (Bx - Ax)
    yk = By - 0.02 * (By - Ay)
    xk = Bx - 0.02 * (Bx - Ax)
    dy = (yk - y0) / m
    dx = (xk - x0) / k
    for i in range(N):
        points[i][0][0] = x0 + dx * (i % k)
        points[i][0][1] = y0 + dy * (i // k)
    return points
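# The loop above fills the grid row by row (x varies fastest). A vectorized
# equivalent can be written with np.meshgrid; this is a sketch only, assuming
# the same [top_left, bottom_right] mask convention, and get_grid_vectorized
# is a hypothetical helper that is not part of the original module.
import numpy as np

def get_grid_vectorized(mask, m, k, margin=0.02):
    (Ax, Ay), (Bx, By) = mask
    xs = np.linspace(Ax + margin * (Bx - Ax), Bx - margin * (Bx - Ax), k, endpoint=False)
    ys = np.linspace(Ay + margin * (By - Ay), By - margin * (By - Ay), m, endpoint=False)
    xx, yy = np.meshgrid(xs, ys)          # shape (m, k); x varies along columns
    pts = np.stack([xx.ravel(), yy.ravel()], axis=1)   # same ordering as the loop: i % k, i // k
    return pts.reshape(-1, 1, 2).astype(np.float32)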
def action(method):
    video = 'test1.mp4'
    cam = cv2.VideoCapture(video)
    lengh = int(cam.get(cv2.CAP_PROP_FRAME_COUNT))
    print(lengh)
    _, frame1 = cam.read()
    y, x, z = frame1.shape
    x1, y1, x2, y2 = x // 3, 3 * y // 5, 2 * x // 3, 0.94 * y // 1
    p1 = np.array([x1, y1])
    p2 = np.array([x2, y2])
    mask = [p1, p2]
    times = deque(maxlen=lengh)
    for itt in range(lengh):
        t0 = time.time()
        ok, frame2 = cam.read()
        if not ok:
            break
        frame2 = draw.draw_rectangle(frame2, mask, color=draw.cyan)
        area1 = sf.get_box(frame1, mask)
        area2 = sf.get_box(frame2, mask)
        y_, x_, z_ = area1.shape
        print(y_, x_)
        # 10 x 10 seed grid; note that points1 is immediately replaced by the
        # result of find_opt_flow below.
        points1 = np.zeros((100, 2), dtype=float)
        yy0 = 0.02 * y_ // 1
        dy = (y_ - 2 * yy0) // 10
        xx0 = 0.02 * x_ // 1
        dx = (x_ - 2 * xx0) // 10
        for i in range(10):
            for j in range(10):
                points1[i * 10 + j][0] = xx0 + i * dx
                points1[i * 10 + j][1] = yy0 + j * dy
        points1, points2 = pai.find_opt_flow(area1, area2, method=method)
        N = len(points1)
        N_ = len(points2)
        print(' N is {} and {}'.format(N, N_))
        out = frame2.copy()
        norms = deque(maxlen=N)
        for i in range(N):
            norms.append(np.linalg.norm(points1[i] - points2[i]))
        mid_norm = sum(norms) / N
        for i in range(N):
            p1 = points1[i] + mask[0]
            p2 = points2[i] + mask[0]
            if np.linalg.norm(p1 - p2) < mid_norm:
                out = draw.draw_point(out, p1, radius=3)
                out = draw.draw_point(out, p2, radius=3)
                out = draw.draw_arrow(out, p1, p2)
        out = draw.draw_text(img=out, pt=(3 * x // 4, 80), text='points: {}'.format(N),
                             color=draw.blue, font_scale=1, line_type=2)
        out = draw.draw_text(img=out, pt=(0, 80), text='{}'.format(itt),
                             color=draw.blue, font_scale=1, line_type=2)
        # out = draw.draw_text(out, (3*x//4, 80), text_properties)
        # small = cv2.resize(out, (0, 0), fx=0.7, fy=0.7)
        # cv2.imshow('frame', small)
        cv2.imwrite('out/{}.jpg'.format(itt), out)
        # cv2.imshow('area', area)
        # k = cv2.waitKey(20)
        frame1 = frame2
        tk = time.time()
        times.append(tk - t0)
        T = sum(times) / len(times)
        T = T * (lengh - itt)
        t_min = int(T // 60)
        t_sec = T % 60
        print('{} min {:.02f} sec'.format(t_min, t_sec))
    cv2.destroyAllWindows()
plt.show()
plt.plot(x[:1400], v_mtr[:1400],
         x[:1400], v_gps[:1400],
         x[:1400], y[:1400],
         x[:1400], v_new[:1400])
plt.show()
sys.exit()

video = cv2.VideoCapture(data)
times = collections.deque(maxlen=lengh)
for i in range(1, lengh):
    t0 = time.time()
    print(' saving: {} / {}'.format(i, lengh))
    out = frames[i].copy()
    out = draw.draw_point(out, m)
    out = draw.draw_mahalanobis_ellipse(out, r, m, K, color=draw.red)
    out = draw.draw_mahalanobis_ellipse(out, 3 * r, m, K, color=draw.blue)
    area1 = sf.get_box(frames[i - 1], mask)
    area2 = sf.get_box(frames[i], mask)
    pt, st = pai.find_OF_crossing_pt(area1, area2, method='lk')
    r_p = None
    if st:
        if mask is not None:
            pt += mask[0]
        if np.linalg.norm(pt) > np.linalg.norm((img_dim[1], img_dim[0])):
            pt = (max(img_dim) / 2) * pt / np.linalg.norm(pt)
        r_point_sq = sf.get_mahalanobis_distance_sq(pt, m, K_inv)
        r_p = np.sqrt(r_point_sq)
        out = cv2.line(out, (int(m[0]), int(m[1])), (int(pt[0]), int(pt[1])), color=draw.green)
        out = draw.draw_point(out, pt)
m = 100
k = 100
N = m * k
video = 'C://Users//moshe.f//Desktop//TEST//calibration//23.mp4'
cam = cv2.VideoCapture(video)
_, frame1 = cam.read()
y, x, z = frame1.shape
# working area (region of interest)
x1, y1, x2, y2 = x // 5, y // 5, 4 * x // 5, 0.92 * y // 1
p1 = np.array([x1, y1])
p2 = np.array([x2, y2])
mask = [p1, p2]
# ___________________
area1 = sf.get_box(frame1, mask)
y_, x_, z_ = area1.shape
# grid points
points1 = np.zeros((m * k, 1, 2), dtype=np.float32)
yy0 = 0.02 * y_ // 1
dy = (y_ - 2 * yy0) // m
xx0 = 0.02 * x_ // 1
dx = (x_ - 2 * xx0) // k
for i in range(m * k):
    points1[i][0][0] = xx0 + dx * (i % k)
    points1[i][0][1] = yy0 + dy * (i // k)
# ______________________
source_data = 'data.txt'
out_img = 'out.jpg'
template_x, template_y, template_eps = read(source_data)
def make(video='C://Users//moshe.f//Desktop//TEST//calibration//23.mp4', m=100, k=100,
         nf=50, min_number_of_points=5, out_data=None, out_pic=None):
    cam = cv2.VideoCapture(video)
    lengh = int(cam.get(cv2.CAP_PROP_FRAME_COUNT))
    print(lengh)
    lk_params = dict(winSize=(15, 15), maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.3))
    _, frame1 = cam.read()
    y, x, z = frame1.shape
    # working area (region of interest)
    x1, y1, x2, y2 = x // 5, y // 5, 4 * x // 5, 0.92 * y // 1
    p1 = np.array([x1, y1])
    p2 = np.array([x2, y2])
    mask = [p1, p2]
    # ___________________
    times = deque(maxlen=lengh)
    area1 = sf.get_box(frame1, mask)
    y_, x_, z_ = area1.shape
    # grid points
    points1 = np.zeros((m * k, 1, 2), dtype=np.float32)
    yy0 = 0.02 * y_ // 1
    dy = (y_ - 2 * yy0) // m
    xx0 = 0.02 * x_ // 1
    dx = (x_ - 2 * xx0) // k
    for i in range(m * k):
        points1[i][0][0] = xx0 + dx * (i % k)
        points1[i][0][1] = yy0 + dy * (i // k)
    # ______________________
    sumX = np.zeros(m * k, dtype=np.float32)
    sumY = np.zeros(m * k, dtype=np.float32)
    sumX2 = np.zeros(m * k, dtype=np.float32)
    sumY2 = np.zeros(m * k, dtype=np.float32)
    Num = np.zeros(m * k, dtype=int)
    avr_x = np.zeros(m * k, dtype=np.float32)
    avr_y = np.zeros(m * k, dtype=np.float32)
    std_x2 = np.zeros(m * k, dtype=np.float32)
    std_y2 = np.zeros(m * k, dtype=np.float32)
    eps = np.zeros(m * k, dtype=np.float32)
    avr_eps = 0
    counter = 0
    # data collection
    for itt in range(nf):
        t0 = time.time()
        _, frame2 = cam.read()
        area1 = sf.get_box(frame1, mask)
        area2 = sf.get_box(frame2, mask)
        points2, st, err = cv2.calcOpticalFlowPyrLK(cv2.cvtColor(area1, cv2.COLOR_BGR2GRAY),
                                                    cv2.cvtColor(area2, cv2.COLOR_BGR2GRAY),
                                                    points1, None, **lk_params)
        for i in range(m * k):
            if st[i] == 1:
                addX = points2[i][0][0] - points1[i][0][0]
                addY = points2[i][0][1] - points1[i][0][1]
                Num[i] += 1
                sumX[i] += addX
                sumY[i] += addY
                sumX2[i] += addX ** 2
                sumY2[i] += addY ** 2
        frame1 = frame2
        tk = time.time()
        times.append(tk - t0)
        if itt % (nf // 10) == 0:
            T = (sum(times) / len(times)) * (nf - itt)
            t_min = int(T // 60)
            t_sec = T % 60
            print('{} | {} min {:.02f} sec'.format(itt, t_min, t_sec))
            times.clear()
    # data analysis
    for i in range(m * k):
        t0 = time.time()
        if Num[i] < min_number_of_points:
            eps[i] = -1
        else:
            avr_x[i] = sumX[i] / Num[i]
            avr_y[i] = sumY[i] / Num[i]
            std_x2[i] = sumX2[i] / Num[i] - avr_x[i] ** 2
            std_y2[i] = sumY2[i] / Num[i] - avr_y[i] ** 2
            eps[i] = np.sqrt(std_x2[i] + std_y2[i])
            if np.isnan(eps[i]):
                sys.exit('Arg sqrt in eps is bad in step {}!!!\narg = {}'.format(i, std_x2[i] + std_y2[i]))
        tk = time.time()
        times.append(tk - t0)
        if i % 10 == 0:
            T = (sum(times) / len(times)) * (m * k - i)
            t_min = int(T // 60)
            t_sec = T % 60
            print('calculate {} | {} min {} sec'.format(i, t_min, t_sec))
            times.clear()
    with open('trace/eps.txt', 'w') as f:
        for i in range(m * k):
            f.write('{}\n'.format(eps[i]))
    # average eps
    avr_eps = avr(eps, -1)
    # print(' >>> {} <<< '.format(i))
    print('avr_eps = {}'.format(avr_eps))
    color = draw.black
    frame2 = draw.draw_rectangle(frame2, mask, color=draw.cyan)
    # frame2 = sf.get_box(frame2, mask)
    if out_pic is not None:
        for i in range(m * k):
            # print(i, m*k)
            t0 = time.time()
            th = 2
            if eps[i] > avr_eps:
                color = draw.red
            elif eps[i] == -1:
                color = draw.purple
            else:
                color = draw.green
            if np.isnan(eps[i]):
                th = 3
                color = draw.black
            pt1 = points1[i][0] + mask[0]
            # dpt = np.array([0, 0])
            # if not np.isnan(avr_x[i]) and not np.isnan(avr_y[i]):
            dpt = np.array([avr_x[i], avr_y[i]])
            pt2 = pt1 + dpt
            frame2 = draw.draw_point(frame2, pt1, radius=1, color=draw.blue)
            frame2 = draw.draw_point(frame2, pt2, radius=1, color=draw.blue)
            frame2 = draw.draw_arrow(frame2, pt2, pt1, color, thickness=th)
            # frame2 = draw.draw_text(frame2, pt1, text='({}|{})'.format(i // k, i % k),
            #                         font_scale=0.25, line_type=1)
            tk = time.time()
            times.append(tk - t0)
            if i % (m * k // 10) == 0:
                T = (sum(times) / len(times)) * (m * k - i)
                t_min = int(T // 60)
                t_sec = T % 60
                print(' drawing: {} | {} min {} sec'.format(i, t_min, t_sec))
        cv2.imwrite(out_pic, frame2)
    if out_data is not None:
        with open(out_data, 'w') as f:
            for i in range(m * k):
                line = '{}|{}|{}|{}|{}\n'.format(i // k, i % k, avr_x[i], avr_y[i], eps[i])
                f.write(line)
    # out = frame2.copy()
    # for i in range(N):
    #     pt1 = pts1[i] + mask[0]
    #     pt2 = pts2[i] + mask[0]
    #     out = draw.draw_point(out, pt1, radius=3)
    #     out = draw.draw_point(out, pt2, radius=3)
    #     out = draw.draw_arrow(out, pt1, pt2)
    # cv2.imwrite('out/{}.jpg'.format(itt), out)
    print('done')
    return avr_x, avr_y, eps
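# The eps value computed in make() is the spread of each grid point's optical-flow
# displacement, obtained from running sums via the identity Var(x) = E[x^2] - E[x]^2
# and eps = sqrt(Var(dx) + Var(dy)). A small self-contained check of that identity
# (illustration only, with made-up displacement values; not part of make()):
import numpy as np

dx = np.array([1.0, 1.5, 0.5, 1.0])   # hypothetical per-frame x displacements of one grid point
dy = np.array([0.2, 0.1, 0.3, 0.2])   # hypothetical per-frame y displacements
n = len(dx)
var_x = (dx ** 2).sum() / n - (dx.sum() / n) ** 2   # E[x^2] - E[x]^2
var_y = (dy ** 2).sum() / n - (dy.sum() / n) ** 2
eps = np.sqrt(var_x + var_y)
assert np.isclose(eps, np.sqrt(dx.var() + dy.var()))  # matches numpy's population variance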
def collect_OF_crossing_pts(
        cap,                               # capture object
        analysed_frame_nb,
        threshold_value_of_speed=20,
        mask=None,
        method='lk',
        trace=False,
        save_points=False,
        output='out/calibration/',
        verbose=True):
    """
    Collect the crossing points stemming from the optical flow over a number of
    frame pairs set by the argument "analysed_frame_nb".

    INPUT:
      * cap: an object with at least 3 properties cap.current_frame,
        cap.current_time and cap.current_speed, and a method cap.capture()
        that updates these properties at each call;
      * analysed_frame_nb: the desired number of frame pairs to be processed;
      * threshold_value_of_speed: the speed under which the optical flow will
        not be taken into account;
      * mask: a 2-list of the form [top_left, bottom_right], where top_left is
        the top-left vertex of the desired mask in the image and bottom_right
        is its bottom-right vertex. If mask is None, the whole image is taken
        into account;
      * method = 'lk': optical flow finding method;
      * trace = False: if True, save intermediate tracing data to files;
      * save_points = False: if True, save the optical-flow points to a file;
      * output = 'out/calibration/': folder in which to store the data when
        save_points is True;
      * verbose = True: be or don't be verbose.

    OUTPUT:
      * A list of points representing the computed point at infinity at each
        frame. The real point at infinity is then very close to the mean of
        these points.
    """
    itt = 0
    frame_itt = 0
    file = -1
    n_frames = analysed_frame_nb
    if trace and not os.path.exists('out'):
        os.mkdir('out')
    if trace and not os.path.exists('out/calibration/'):
        os.mkdir('out/calibration/')
    if save_points:
        out = os.path.join(output, 'points.txt')
        file = open(out, 'w')
        file.close()
    pts_out = np.zeros([n_frames, 2])
    cap.capture()  # capture current frame and related info
    frame1 = cap.current_frame
    speed1 = cap.current_speed
    if save_points:
        file = open(os.path.join(output, 'points.txt'), 'a')
    while frame_itt < n_frames:
        cap.capture()  # capture current frame and related info
        frame2 = cap.current_frame
        if frame2 is None:
            break
        speed2 = cap.current_speed
        if trace and not os.path.exists('out/calibration/' + str(itt) + '/'):
            os.mkdir('out/calibration/' + str(itt) + '/')
        if is_speed_ok(speed1, speed2, threshold_value_of_speed):
            area1 = sf.get_box(frame1, mask)
            area2 = sf.get_box(frame2, mask)
            pt, st = find_OF_crossing_pt(
                area1, area2, method=method, trace=trace,
                path='out/calibration/' + str(itt) + '/')
        else:
            pt, st = [np.nan, np.nan], False
            if verbose:
                print('bad speed', flush=True)
        if st:
            pt = pt + mask[0]
            print('CALIBRATION: {} %'.format(frame_itt / n_frames * 100))
            pts_out[frame_itt, :] = pt
            frame_itt += 1
            if save_points:
                file.write('{}|{}|{}\n'.format(itt, pt[0], pt[1]))
        frame1 = frame2
        speed1 = speed2
        itt += 1
    if save_points:
        file.close()
    return pts_out
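# A minimal sketch of a capture object satisfying the interface described in the
# docstring above (current_frame, current_time, current_speed, capture()). The
# class name, the fixed fps and the externally supplied per-frame speed list are
# assumptions made for illustration; the original capture object is not shown here.
import cv2


class VideoCaptureWrapper:
    def __init__(self, video_path, speeds=None, fps=25.0):
        self._cap = cv2.VideoCapture(video_path)
        self._speeds = speeds          # optional per-frame speed list
        self._fps = fps
        self._idx = -1
        self.current_frame = None
        self.current_time = None
        self.current_speed = None

    def capture(self):
        ok, frame = self._cap.read()
        self._idx += 1
        self.current_frame = frame if ok else None
        self.current_time = self._idx / self._fps
        if self._speeds is not None and self._idx < len(self._speeds):
            self.current_speed = self._speeds[self._idx]
        else:
            self.current_speed = None

# usage sketch:
# cap = VideoCaptureWrapper('test1.mp4', speeds=[30.0] * 1000)
# pts = collect_OF_crossing_pts(cap, analysed_frame_nb=200, mask=mask)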
r = np.sqrt(r_sq)
path = 'out/'
if not os.path.exists(path):
    os.mkdir(path)
inf_pt_list = deque(maxlen=5000)
i = 0
capture = cv2.VideoCapture(video)
is_captured1, f1 = capture.read()
is_captured2, f2 = capture.read()
is_captured = is_captured1 and is_captured2
while is_captured:
    area1 = sf.get_box(f1, mask)
    area2 = sf.get_box(f2, mask)
    pt, st = find_OF_crossing_pt(area1, area2, method='lk')
    print(i, flush=True)
    if st:
        pt = pt + mask[0]
        inf_pt_list.append(pt)
        out = f2.copy()
        r_i_sq = sf.get_mahalanobis_distance_sq(pt, m, K_inv)
        r_i = np.sqrt(r_i_sq)
        # drawing point, arrow and text
        text = dict(text='r = ' + str(r_i), font_scale=1, line_type=2, color=draw.blue)
        if r_i_sq > r_sq: