def select_roi(self, roi=None):
    """Set the eye region of interest and derive the (padded) saving ROI.

    Parameters
    ----------
    roi : dict or None
        ROI as {'x1', 'y1', 'x2', 'y2'} pixel bounds. When None, the user
        is asked to draw one interactively on the current frame.

    Side effects: sets self.roi, self.roisave, self.nxsave, self.nysave and
    self.roi_selected; when tracking is enabled, re-creates the tracker on
    the newly extracted eye image.
    """
    if roi is None:
        roi = tracking.select_roi(self.frame)
    self.roi = roi
    # ROI for saving must be at least 65x65 otherwise buffer might be too
    # small.  (Bug fix: the guard previously padded only up to 64, which
    # contradicted this requirement and the original script version.)
    roisave = self.roi.copy()
    nx = roisave['x2'] - roisave['x1']
    ny = roisave['y2'] - roisave['y1']
    if nx < 65:
        # grow symmetrically around the selected ROI; // keeps ints in py2/py3
        roisave['x1'] -= (65 - nx) // 2
        nx = 65
        roisave['x2'] = roisave['x1'] + nx
    if ny < 65:
        roisave['y1'] -= (65 - ny) // 2
        ny = 65
        roisave['y2'] = roisave['y1'] + ny
    self.roisave = roisave
    self.nxsave, self.nysave = nx, ny
    self.roi_selected = True
    # Re-init tracker if necessary
    if self.dotrack:
        self.eye = tracking.resize_roi(self.frame, self.roi)
        self.tracker = tracking.Tracker(self.eye)
def process_one_frame(self):
    """Grab one frame, advance the off/wait/on acquisition state machine,
    save and track the eye ROI while acquiring, and refresh the displays.

    Intended to be called repeatedly from the application's main loop;
    reads and updates timing counters, the movie writer, the audio trigger
    and the tracker held on the instance.
    """
    # grab frame (locked to clock if maxfreq is defined)
    t = clock()
    if self.maxfreq:
        # quantize time into ticks of length 1/maxfreq; if we are still in
        # the same tick as the previous frame, sleep until the next tick
        tick = t*self.maxfreq
        ticklast = floor(self.tlast*self.maxfreq+0.0001) # add a small quantity to avoid numerical error resulting in a value 1 less than it should
        if tick-ticklast<1:
            # we did not miss a tick, wait for next tick
            ticknext = ceil(tick)
            sleep((ticknext-tick)/self.maxfreq)
            t = ticknext/self.maxfreq
        # remember the largest inter-frame gap (reported in the status bar)
        self.maxgap = max(self.maxgap,t-self.tlast)
        self.tlast = t
    ret, self.frame = self.film.read()
    # no frame -> indicate that there is some "idle" time
    if not ret:
        if self.DOREC:
            # live recording: no new camera frame yet, wait a little and bail
            if not self.idle:
                self.idle = True
                print 'some idle time'
            sleep(.001)
            return
        else:
            # replay mode: restart the demo movie from the beginning
            self.film.release()
            self.film = cv2.VideoCapture("mouseeyetracking.avi")
            ret, self.frame = self.film.read()
    else:
        self.idle = False
    # follow speed of processing frames (fps estimated over ~1 s windows)
    t = clock()
    self.nprocessed += 1
    if t > self.t0 + 1:
        fps = self.nprocessed / (t - self.t0)
        self.grabdesc = " (%.1ffps, max %.0fms gap)" % (fps,self.maxgap*1000)
        self.t0 = t
        self.nprocessed = 0
        self.maxgap = 0
    # make frame single channel
    if self.frame.ndim == 3:
        self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
    # display frame
    cv2.imshow('movie', self.frame)
    # change between acquisition states
    # note that acquisition will never start as long as ROI and filename are not selected
    if self.running and self.acqstate == 'off':
        # start a new repetition: arm the audio trigger and open the output movie
        self.acqstate = 'wait'
        self.curacq += 1
        self.status = 'waiting for trigger ' + str(self.curacq) + '/' + str(self.numacq)
        # open audio stream to detect trigger
        self.audio.load()
        # open movie for writing
        fourcc = cv2.VideoWriter_fourcc(*'i420')
        self.curname = self.filename + '_%.3i' % self.curacq
        self.out = cv2.VideoWriter(self.curname + '.avi', fourcc, 60.0, (self.nxsave, self.nysave))
        # prepare for saving time vector and tracking results
        self.timevector = []
    if self.acqstate == 'wait':
        if self.audio.check(): # once trigger will be detected, audio stream will be automatically closed
            self.acqstate = 'on'
            self.acqstart = clock()
            self.status = 'ACQUISITION ' + str(self.curacq) + '/' + str(self.numacq)
            if self.dotrack:
                self.tracker.startsave()
    elif self.acqstate == 'on' and (clock() - self.acqstart) > self.acqlen:
        # this repetition lasted acqlen seconds: stop and save results
        self.acqstate = 'off'
        self.status = ''
        if self.dotrack:
            self.tracker.dosave = False
        # close output movie, save time vector and tracking results
        self.out.release()
        savedata = {'timevector': self.timevector}
        if self.dotrack:
            savedata['xshift'] = self.tracker.xshift
            savedata['yshift'] = self.tracker.yshift
            savedata['rshift'] = self.tracker.rshift
        savemat(self.curname + '.mat', savedata)
        if self.dotrack:
            pass # NOTE(review): leftover no-op branch, probably from removed code
        # finished repetition, or user interrupted them?
        if self.curacq == self.numacq or not self.running:
            self.buttons['startstop'].setChecked(False)
            self.curacq = 0
    # save
    if self.acqstate == 'on':
        # write the padded saving ROI as a 3-channel image and record the
        # frame time relative to acquisition start
        eyesave = tracking.resize_roi(self.frame, self.roisave)
        eyesave = eyesave.reshape((self.nysave, self.nxsave, 1)).repeat(3, axis=2)
        self.timevector.append(clock() - self.acqstart)
        #print 'write', eyesave.dtype, eyesave.shape
        self.out.write(eyesave)
    # track
    if self.dotrack:
        # NOTE(review): self.eye is only refreshed in the display section
        # below, so tracking here uses the ROI extracted from the *previous*
        # frame -- confirm this one-frame lag is intended
        self.tracker.track(self.eye)
    # display eye
    if self.roi_selected:
        self.eye = tracking.resize_roi(self.frame, self.roi)
        scale = 4 # magnification factor for the 'eye' display window
        img = np.repeat(np.repeat(self.eye, scale, axis=0), scale, axis=1)
        if self.dotrack:
            # overlay the fitted pupil circle; fit is used as (x, y, radius)
            circle = self.tracker.fit*scale
            cv2.circle(img, (int(circle[0]),int(circle[1])),int(circle[2]),255,1)
        cv2.imshow('eye', img)
    # update status
    self.statusbar.setText(self.status + self.grabdesc)
def process_one_frame(self):
    """Grab a frame, step the acquisition state machine (off/wait/on),
    save/track the eye ROI during acquisition, and update the GUI.

    NOTE(review): this is a reformatted, token-identical duplicate of an
    earlier process_one_frame definition in this file; whichever appears
    later shadows the other -- consider removing one copy.
    """
    # grab frame (locked to clock if maxfreq is defined)
    t = clock()
    if self.maxfreq:
        # lock the grab rate to ticks of 1/maxfreq seconds
        tick = t * self.maxfreq
        ticklast = floor(
            self.tlast * self.maxfreq + 0.0001
        )  # add a small quantity to avoid numerical error resulting in a value 1 less than it should
        if tick - ticklast < 1:
            # we did not miss a tick, wait for next tick
            ticknext = ceil(tick)
            sleep((ticknext - tick) / self.maxfreq)
            t = ticknext / self.maxfreq
        # track the worst inter-frame gap for the status display
        self.maxgap = max(self.maxgap, t - self.tlast)
        self.tlast = t
    ret, self.frame = self.film.read()
    # no frame -> indicate that there is some "idle" time
    if not ret:
        if self.DOREC:
            # recording live: camera produced nothing yet, wait briefly
            if not self.idle:
                self.idle = True
                print 'some idle time'
            sleep(.001)
            return
        else:
            # replaying: loop the demo movie
            self.film.release()
            self.film = cv2.VideoCapture("mouseeyetracking.avi")
            ret, self.frame = self.film.read()
    else:
        self.idle = False
    # follow speed of processing frames (fps over ~1 s windows)
    t = clock()
    self.nprocessed += 1
    if t > self.t0 + 1:
        fps = self.nprocessed / (t - self.t0)
        self.grabdesc = " (%.1ffps, max %.0fms gap)" % (fps, self.maxgap * 1000)
        self.t0 = t
        self.nprocessed = 0
        self.maxgap = 0
    # make frame single channel
    if self.frame.ndim == 3:
        self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
    # display frame
    cv2.imshow('movie', self.frame)
    # change between acquisition states
    # note that acquisition will never start as long as ROI and filename are not selected
    if self.running and self.acqstate == 'off':
        # begin a new repetition: arm the trigger, open the output movie
        self.acqstate = 'wait'
        self.curacq += 1
        self.status = 'waiting for trigger ' + str(
            self.curacq) + '/' + str(self.numacq)
        # open audio stream to detect trigger
        self.audio.load()
        # open movie for writing
        fourcc = cv2.VideoWriter_fourcc(*'i420')
        self.curname = self.filename + '_%.3i' % self.curacq
        self.out = cv2.VideoWriter(self.curname + '.avi', fourcc, 60.0,
                                   (self.nxsave, self.nysave))
        # prepare for saving time vector and tracking results
        self.timevector = []
    if self.acqstate == 'wait':
        if self.audio.check(
        ):  # once trigger will be detected, audio stream will be automatically closed
            self.acqstate = 'on'
            self.acqstart = clock()
            self.status = 'ACQUISITION ' + str(self.curacq) + '/' + str(
                self.numacq)
            if self.dotrack:
                self.tracker.startsave()
    elif self.acqstate == 'on' and (clock() - self.acqstart) > self.acqlen:
        # repetition lasted acqlen seconds: stop and persist results
        self.acqstate = 'off'
        self.status = ''
        if self.dotrack:
            self.tracker.dosave = False
        # close output movie, save time vector and tracking results
        self.out.release()
        savedata = {'timevector': self.timevector}
        if self.dotrack:
            savedata['xshift'] = self.tracker.xshift
            savedata['yshift'] = self.tracker.yshift
            savedata['rshift'] = self.tracker.rshift
        savemat(self.curname + '.mat', savedata)
        if self.dotrack:
            pass  # NOTE(review): no-op leftover branch
        # finished repetition, or user interrupted them?
        if self.curacq == self.numacq or not self.running:
            self.buttons['startstop'].setChecked(False)
            self.curacq = 0
    # save
    if self.acqstate == 'on':
        # write padded saving ROI as 3-channel frame; log relative timestamp
        eyesave = tracking.resize_roi(self.frame, self.roisave)
        eyesave = eyesave.reshape(
            (self.nysave, self.nxsave, 1)).repeat(3, axis=2)
        self.timevector.append(clock() - self.acqstart)
        #print 'write', eyesave.dtype, eyesave.shape
        self.out.write(eyesave)
    # track
    if self.dotrack:
        # NOTE(review): self.eye still holds the previous frame's ROI at
        # this point (refreshed below) -- confirm the lag is intended
        self.tracker.track(self.eye)
    # display eye
    if self.roi_selected:
        self.eye = tracking.resize_roi(self.frame, self.roi)
        scale = 4  # magnification of the 'eye' window
        img = np.repeat(np.repeat(self.eye, scale, axis=0), scale, axis=1)
        if self.dotrack:
            # draw the fitted pupil circle; fit used as (x, y, radius)
            circle = self.tracker.fit * scale
            cv2.circle(img, (int(circle[0]), int(circle[1])),
                       int(circle[2]), 255, 1)
        cv2.imshow('eye', img)
    # update status
    self.statusbar.setText(self.status + self.grabdesc)
nprocessed = 0 if frame.ndim == 3: frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) cv2.imshow('image2',frame) if cv2.waitKey(1) & 0xFF == ord('q'): break # user select ROI roi = tracking.select_roi(frame) else: # fixed ROI roi = {'x1': 222, 'y1': 163, 'x2': 268, 'y2': 210} eye = tracking.resize_roi(frame, roi) # OUTPUT MOVIE if dosave: # ROI for saving must be at least 65x65 otherwise buffer might be to small roisave = roi.copy() nxsave, nysave = roi['x2']-roi['x1'], roi['y2']-roi['y1'] if nxsave<65: roisave['x1'] = roi['x1'] - floor((65-nxsave)/2) nxsave = 65 roisave['x2'] = roisave['x1'] + nxsave if nysave<65: roisave['y1'] = roi['y1'] - floor((65-nysave)/2) nysave = 65 roisave['y2'] = roisave['y1'] + nysave