def __init__(self, outputDest, trackAlgorithm='mosse', drawKp=False, doClassify=False, doActRecog=True, outFps=30, retrain=False, matchEdges=False, filterVelocity=True):
    """Initialize tracking state, optional classifier / action recognizer, and the output file.

    Parameters mirror feature toggles: `doClassify` builds an ObjectClassifier,
    `doActRecog` builds an ActionRecognize over the 'actions/' directory.
    """
    # Track bookkeeping.
    self.tracks = []
    self.trackCount = 0
    # Feature flags and configuration.
    self.doClassify = doClassify
    self.doActRecog = doActRecog
    self.drawKeypoints = drawKp
    self.trackAlgorithm = trackAlgorithm
    # Dimensions of the video cube handed to action recognition — TODO confirm axis order.
    self.vCubeDimens = (75, 100, 45)
    self.outFps = outFps
    self.doTrackWrite = False
    # Interactive rectangle selection on the 'frame' window drives onrect().
    self.rect_sel = RectSelector('frame', self.onrect)
    self.lastFrame = None
    if self.doClassify:
        self.classifier = ObjectClassifier()
    if self.doActRecog:
        self.actionRecognizer = ActionRecognize(
            'actions/', self.vCubeDimens,
            matchEdges=matchEdges, retrain=retrain, filterVelocity=filterVelocity)
    # Kept open for the object's lifetime; presumably closed elsewhere — verify.
    self.outputFile = open(outputDest, 'w')
    self.frameCount = 0
def __init__(self, video_src, paused=False):
    """Open the video source, display the first frame, and prepare tracker state."""
    self.cap = video.create_capture(video_src)
    _, self.frame = self.cap.read()
    cv2.imshow('frame', self.frame)
    # Mouse-drawn rectangles on the 'frame' window create trackers via onrect().
    self.rect_sel = RectSelector('frame', self.onrect)
    self.trackers = []
    self.paused = paused
def __init__(self, video_src, paused=False):
    """Open the video source, display the first frame, and prepare tracker state."""
    self.cap = video.create_capture(video_src)
    _, self.frame = self.cap.read()
    cv2.imshow('frame', self.frame)
    self.rect_sel = RectSelector('frame', self.onrect)
    # Named `trackers` (plural) and kept as a list because the MOSSE sample
    # supports multi-target tracking — one tracker is appended per selection.
    self.trackers = []
    self.paused = paused
def __init__(self, cap, paused=False):
    # Store an already-constructed capture object and show the first frame.
    #
    # NOTE(review): `self.cap.read()` is assigned without unpacking, unlike the
    # sibling initializers that do `_, self.frame = self.cap.read()`.  If `cap`
    # were a cv2.VideoCapture this would store a (ret, frame) tuple and the
    # `self.frame.shape` call below would fail — presumably `cap` is a wrapper
    # whose read() returns just the frame; confirm against the caller.
    self.cap = cap
    self.frame = self.cap.read()
    print(self.frame.shape)
    cv2.imshow('frame', self.frame)
    # Mouse-drawn rectangles create trackers via onrect().
    self.rect_sel = RectSelector('frame', self.onrect)
    self.trackers = []
    self.paused = paused
def __init__(self, video_src, paused=False):
    """Connect to the fixed RTSP camera stream and prepare tracking state.

    Note: *video_src* is currently ignored in favor of a hard-coded stream URL.
    """
    # HACK: the generic video.create_capture(video_src) path was bypassed in
    # favor of a hard-coded camera address (credentials masked); consider
    # reading the URL from configuration instead.
    self.cap = VideoCapture("rtsp://*****:*****@192.168.1.64:554/Streaming/Channels/102")
    _, self.frame = self.cap.read()
    cv.imshow('frame', self.frame)
    self.rect_sel = RectSelector('frame', self.onrect)
    self.trackers = []
    self.paused = paused
def __init__(self, video_src, paused=False):
    """Open the source, create an XVID writer matching the frame size, show frame 0."""
    self.cap = video.create_capture(video_src)
    _, self.frame = self.cap.read()
    # Output writer: 30 fps; VideoWriter wants (width, height).
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    width = self.frame.shape[1]
    height = self.frame.shape[0]
    self.out = cv2.VideoWriter('output.avi', fourcc, 30, (width, height))
    cv2.imshow('frame', self.frame)
    # For manually selecting objects to track. Not used, but kept for parity
    # with the other front-ends.
    self.rect_sel = RectSelector('frame', self.onrect)
    self.trackers = []
    self.paused = paused
def __init__(self, video_src, paused=False):
    """Open the video source, report the frame shape, and prepare tracker state."""
    self.cap = video.create_capture(video_src)
    _, self.frame = self.cap.read()
    # Fix: the original used the Python-2-only statement form
    # `print 'FrameShape: ', self.frame.shape`, a SyntaxError under Python 3.
    print('FrameShape: ', self.frame.shape)
    cv2.imshow('Video', self.frame)
    # Mouse-drawn rectangles on the 'Video' window create trackers via onrect().
    self.rect_sel = RectSelector('Video', self.onrect)
    self.paused = paused
    self.trackers = []
def __init__(self, cap, paused=False):
    """Grab the first frame, downscale it by half for display, and prepare trackers."""
    self.cap = cap
    _, self.frame = self.cap.read()
    # Target crop size (full HD); presumably used elsewhere by the class — confirm.
    self.croph, self.cropw = 1080, 1920
    self.frameh, self.framew, _ = self.frame.shape
    self.original = self.frame
    # Fixes: `/` yields a float under Python 3 while imutils/cv2 need integer
    # dimensions, so use floor division.  The original also passed
    # height=framew/2 (almost certainly a typo for frameh); imutils.resize
    # ignores `height` whenever `width` is given, so behavior is unchanged.
    self.frame = imutils.resize(self.frame, width=self.framew // 2, height=self.frameh // 2)
    cv2.imshow('frame', self.frame)
    self.rect_sel = RectSelector('frame', self.onrect)
    self.trackers = []
    self.paused = paused
def __init__(self, video_src, robotq, appq, launchspeed, swatted):
    """Open the capture, build the control trackbars, and wire up shared state."""
    self.cap = video.create_capture(video_src)
    _, self.frame = self.cap.read()
    cv2.namedWindow('frame')
    self.row = 0
    self.bounceshot = 0
    # UI controls: target row (0-2), launch speed (0-512), bounce-shot toggle (0/1).
    cv2.createTrackbar('row', 'frame', 0, 2, self.onrow)
    cv2.createTrackbar('speed', 'frame', 0, 512, self.onspeed)
    cv2.createTrackbar('bounceshot', 'frame', 0, 1, self.onbounceshot)
    cv2.imshow('frame', self.frame)
    self.rect_sel = RectSelector('frame', self.onrect)
    self.trackers = []
    # Queues / flags shared with the robot and app threads — ownership assumed,
    # verify against the callers.
    self.robotq = robotq
    self.appq = appq
    self.launchspeed = launchspeed
    self.swatted = swatted
# Drone-tracking entry sequence: start the Pi telemetry thread, connect to the
# AR.Drone, wait for video, load camera calibration, then loop: undistort each
# frame and, once a rectangle has been selected, update the tracker.
piThread = PiThread(getPiData)
piThread.start()
print('connecting...')
d = ardrone.ARDrone()
print('waiting for video...')
# Busy-wait until the drone delivers its first image.
while d.image is None:
    pass
print('loading calibration info')
mtx, dist, newcameramtx = getCalibrationInfo()
out = None
# cv2.imshow('frame', np.array([FRAME_HEIGHT, FRAME_WIDTH, 3])) #######Changed
rs = RectSelector('frame', startTracking)
print('battery remaining:', d.navdata['demo']['battery'])
updateBattery(d.navdata['demo']['battery'])
# Clear a latched emergency state before flying.
if d.navdata['state']['emergency'] == 1:
    d.reset()
while True:
    # Drone image arrives as RGB; convert to OpenCV's BGR ordering.
    frame = cv2.cvtColor(np.array(d.image), cv2.COLOR_RGB2BGR)
    # frame = cv2.undistort(frame, mtx, dist)
    frame = cv2.undistort(frame, mtx, dist, None, newcameramtx)
    if rs.dragging:
        rs.draw(frame)
    if isTracking:
        tracker.update(frame)
        r = tracker.get_position()
        # NOTE(review): this excerpt appears truncated — the loop body
        # (use of `r`, display, key handling) presumably continues beyond
        # this point; nesting of the last statement is inferred.
def get_manul(sample_path, save_path):
    """Interactively label rectangles on every image in *sample_path*.

    Keys: draw boxes with the mouse; 's' saves YOLO-style normalized
    "class x_center y_center width height" lines to '<image>.txt' beside the
    image, 'n' advances to the next image, 'r' clears current selections,
    ESC quits entirely.

    Note: *save_path* is accepted for interface compatibility but unused —
    annotation files are written into *sample_path*.
    """
    sample_rects = []
    out_flag = False

    def Onrect(rect):
        # RectSelector callback: collect each drawn (x0, y0, x1, y1) box.
        sample_rects.append(rect)

    def filter_extr(file_name):
        return os.path.splitext(file_name)[-1] in ['.JPG', '.jpg', '.bmp']

    cv2.namedWindow('Frame', 1)
    rect_sel = RectSelector('Frame', Onrect)
    sample_files = [f for f in os.listdir(sample_path) if filter_extr(f)]
    for file in sample_files:
        sample_rects = []
        frame = cv2.imread(os.path.join(sample_path, file))
        frame = frame[:, :, ::-1]  # flip channel order (BGR <-> RGB)
        file_name = os.path.splitext(file)[0]
        while True:
            vis = frame.copy()
            rect_sel.draw(vis)
            draw_str(vis, (20, 20), file_name)
            for rect in sample_rects:
                x = rect[0]
                y = rect[1]
                w = rect[2] - rect[0]
                h = rect[3] - rect[1]
                draw_str(vis, (rect[0], rect[1] - 5), '(%d,%d,%d,%d)' % (x, y, w, h))
                draw_rect(vis, rect)
            cv2.imshow('Frame', vis)
            ch = cv2.waitKey(1)
            if ch == 27:  # ESC: stop labelling entirely
                cv2.destroyAllWindows()
                out_flag = True
                break
            if ch == ord('n'):  # next image, discarding selections
                sample_rects = []
                break
            if ch == ord('s'):  # save normalized center/size coordinates
                # Fix: `print num_rects` was Python-2-only syntax.
                print(len(sample_rects))
                coor_file_name = file_name + '.txt'
                coor_file_path = os.path.join(sample_path, coor_file_name)
                # Fixes: writing str to a 'wb' handle raises TypeError under
                # Python 3; use text mode and a context manager so the file is
                # always closed.
                with open(coor_file_path, 'w') as fp:
                    for rect in sample_rects:
                        x0, y0, x1, y1 = rect
                        x_c = (x0 + x1) * 1. / 2
                        y_c = (y0 + y1) * 1. / 2
                        w = (x1 - x0) * 1.
                        h = (y1 - y0) * 1.
                        coor_res = '%f %f %f %f' % (x_c / vis.shape[1], y_c / vis.shape[0], w / vis.shape[1], h / vis.shape[0])
                        fp.write("0" + " " + coor_res + "\n")
            if ch == ord('r'):  # reset selections for this image
                sample_rects = []
        if out_flag:
            break
def Open(self):
    """Unpause, create the display window, and start the OpenCV stream loop."""
    self.paused = False
    cv.namedWindow(titleWindow)
    # Rectangle selections feed onrect(); the tracker list starts empty.
    self.rect_sel = RectSelector(titleWindow, self.onrect)
    self.trackers = []
    PyStreamRun(self.OpenCVCode, titleWindow)