def setup(self, imgPath):
    imgName, ext = os.path.splitext(os.path.basename(imgPath))
    self.imgName = imgName
    self.env = Environment(self.settings)
    self.env.loadStaticStimulus(self.settings.batch + '/' + imgPath)
    self.eye = Eye(self.settings, self.env)
    self.periphMap = PeripheralAttentionalMap(self.env.height, self.env.width, self.settings)
    self.centralMap = CentralAttentionalMap(self.env.height, self.env.width, self.settings)
    self.conspMap = ConspicuityMap(self.env.height, self.env.width, self.settings)
    self.priorityMap = PriorityMap(self.env.height, self.env.width, self.settings)
    self.fixHistMap = FixationHistoryMap(self.env.height, self.env.width,
                                         self.env.hPadded, self.env.wPadded,
                                         self.settings)
    self.LongTermMemory = LTM(self.settings)
    self.visualTaskExecutive = vTE(self.settings)
    self.TaskRelevanceMap = TRM(self.env.height, self.env.width, self.settings)
    if self.settings.task_relevance == 1:
        #learn representations if not done previously
        self.LongTermMemory.learn()
        #get task relevance (initial)
        self.TaskRelevanceMap.setTRM(
            self.visualTaskExecutive.get_relevance(self.LongTermMemory, self.env.scene))
def eye_detection(self, p_r_eye, p_l_eye):
    # define the eyes rectangle (upper two thirds of the face region)
    x1 = self.x
    x2 = x1 + self.w
    y1 = self.y
    y2 = y1 + self.h * 2 // 3
    eye_rect = self.img[y1:y2, x1:x2]
    #eyes1 = d.get('haarcascade_mcs_eyepair_big').detectMultiScale(self.eye_rect, 1.2, 1)
    eyes_cascade = cv2.CascadeClassifier(
        r'C:\Users\Katia\Anaconda3\pkgs\opencv-3.1.0-np111py35_1\Library\etc\haarcascades\haarcascade_eye.xml'
    )
    eyes = eyes_cascade.detectMultiScale(eye_rect, 1.1, 1)
    # shift detections from eye_rect coordinates back to full-image coordinates
    for eye in eyes:
        eye[0] += x1
        eye[1] += y1
    r_eye = self.search_eye(eyes, p_r_eye, p_l_eye, "r")
    l_eye = self.search_eye(eyes, p_l_eye, p_r_eye, "l")
    # search for an eye pair if neither eye was matched individually
    if r_eye is None and l_eye is None:
        for i, eye in enumerate(eyes):
            x, y, w, h = eye.ravel()
            eye_center = (x + w / 2, y + h / 2)
            for j in range(i + 1, len(eyes)):
                x1, y1, w1, h1 = eyes[j].ravel()
                eye1_center = (x1 + w1 / 2, y1 + h1 / 2)
                dx = abs(x1 - x)
                dy = abs(y1 - y)
                if (w + w1) / 2 < dx < 2 * self.w / 3 and dy < (h + h1) / 2:
                    if x < x1:
                        r_eye = eye
                        l_eye = eyes[j]
                    else:
                        r_eye = eyes[j]
                        l_eye = eye
    if r_eye is not None:
        self.organs_dict["r_eye"] = Eye(self.img, r_eye, 'r')
        self.organs_counter += 1
    if l_eye is not None:
        self.organs_dict["l_eye"] = Eye(self.img, l_eye, 'l')
        self.organs_counter += 1
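# Usage sketch for the cascade call used in eye_detection above. Everything
# below ('face.jpg', the variable names) is hypothetical and not part of this
# module; the cascade is loaded from cv2.data.haarcascades (bundled with
# opencv-python) instead of the machine-specific path hard-coded above.
import cv2

face_img = cv2.imread('face.jpg')                      # hypothetical face crop
gray = cv2.cvtColor(face_img, cv2.COLOR_BGR2GRAY)
eyes_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_eye.xml')
eyes = eyes_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=3)
for (ex, ey, ew, eh) in eyes:
    cv2.rectangle(face_img, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)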
def setup(self, imgPath):
    imgName, ext = os.path.splitext(os.path.basename(imgPath))
    self.imgName = imgName
    self.env = Environment(self.settings)
    if self.settings.batch:
        self.env.loadStaticStimulus(self.settings.batch + '/' + imgPath)
    else:
        self.env.loadStaticStimulus(imgPath)
    self.eye = Eye(self.settings, self.env)
    self.periphMap = PeripheralAttentionalMap(self.env.height, self.env.width, self.settings)
    self.centralMap = CentralAttentionalMap(self.env.height, self.env.width, self.settings)
    self.conspMap = ConspicuityMap(self.env.height, self.env.width, self.settings)
    self.priorityMap = PriorityMap(self.env.height, self.env.width, self.settings)
    self.fixHistMap = FixationHistoryMap(self.env.height, self.env.width,
                                         self.env.hPadded, self.env.wPadded,
                                         self.settings)
class Controller:
    def __init__(self, settings):
        self.env = None
        self.eye = None
        self.settings = settings
        self.imageList = []
        #save results
        self.saveResults = False
        if self.settings.saveFix:
            if os.path.exists(self.settings.saveDir):
                if self.settings.overwrite:
                    self.saveResults = True
            else:
                os.makedirs(self.settings.saveDir)
                self.saveResults = True

    #get input images
    def getInputImages(self):
        if self.settings.input:
            self.imageList.append(self.settings.input)
        else:
            #list all images in the directory
            self.imageList = [
                f for f in listdir(self.settings.batch)
                if any(f.endswith(ext) for ext in ['jpg', 'bmp', 'png', 'gif'])
            ]

    def setup(self, imgPath):
        imgName, ext = os.path.splitext(os.path.basename(imgPath))
        self.imgName = imgName
        self.env = Environment(self.settings)
        if self.settings.batch:
            self.env.loadStaticStimulus(self.settings.batch + '/' + imgPath)
        else:
            self.env.loadStaticStimulus(imgPath)
        self.eye = Eye(self.settings, self.env)
        self.periphMap = PeripheralAttentionalMap(self.env.height, self.env.width, self.settings)
        self.centralMap = CentralAttentionalMap(self.env.height, self.env.width, self.settings)
        self.conspMap = ConspicuityMap(self.env.height, self.env.width, self.settings)
        self.priorityMap = PriorityMap(self.env.height, self.env.width, self.settings)
        self.fixHistMap = FixationHistoryMap(self.env.height, self.env.width,
                                             self.env.hPadded, self.env.wPadded,
                                             self.settings)

    #computes fixations for each image and each subject
    def run(self):
        self.getInputImages()
        for imgPath in self.imageList:
            for i in range(self.settings.numSubjects):
                self.setup(imgPath)
                self.computeFixations()
                if self.saveResults:
                    currentSaveDir = '{}/{}/'.format(self.settings.saveDir, self.imgName)
                    if not os.path.exists(currentSaveDir):
                        os.makedirs(currentSaveDir)
                    # include the subject index so repeated subjects do not overwrite each other
                    self.fixHistMap.dumpFixationsToMat(
                        '{}/fixations_{}_{}.mat'.format(currentSaveDir, self.imgName, i))
                    cv2.imwrite(
                        '{}/fixations_{}.png'.format(currentSaveDir, self.imgName),
                        self.env.sceneWithFixations.astype(np.uint8))

    def computeFixations(self):
        for i in range(self.settings.maxNumFixations):
            t0 = time.time()
            self.eye.viewScene()
            t_fov = time.time() - t0
            print('[FOVEATE] Time elapsed {:0.03f}'.format(t_fov))

            prevGazeCoords = self.eye.gazeCoords

            t0 = time.time()
            self.periphMap.computeBUSaliency(self.eye.viewFov)
            self.periphMap.computePeriphMap(self.settings.blendingStrategy == 1)
            t_periph = time.time() - t0
            print('[PeriphMap] Time elapsed {:0.03f}'.format(t_periph))

            t0 = time.time()
            self.centralMap.centralDetection(self.eye.viewFov)
            self.centralMap.maskCentralDetection()
            t_central = time.time() - t0
            print('[CentralMap] Time elapsed {:0.03f}'.format(t_central))

            #self.conspMap.computeConspicuityMap(self.periphMap.periphMap, self.centralMap.centralMap)
            #this is not used anywhere, for now commenting out

            t0 = time.time()
            self.priorityMap.computeNextFixationDirection(
                self.periphMap.periphMap, self.centralMap.centralMap,
                self.fixHistMap.getFixationHistoryMap())
            t_priority = time.time() - t0
            print('[PriorityMap] Time elapsed {:0.03f}'.format(t_priority))

            print('PrevGazeCoords=[{}, {}]'.format(prevGazeCoords[0], prevGazeCoords[1]))
            self.eye.setGazeCoords(self.priorityMap.nextFixationDirection)
            self.env.drawFixation(self.eye.gazeCoords.astype(np.int32))

            t0 = time.time()
            self.fixHistMap.decayFixations()
            t_ior = time.time() - t0
            print('[IOR] Time elapsed {:0.03f}'.format(t_ior))

            t0 = time.time()
            self.fixHistMap.saveFixationCoords(prevGazeCoords)
            t_save = time.time() - t0
            print('[SaveFix] Time elapsed {:0.03f}'.format(t_save))

            if self.settings.visualize:
                t0 = time.time()
                if i == 0:
                    plt.close('all')
                    fig = plt.figure(1, figsize=(13, 7), facecolor='white')
                    gs = gridspec.GridSpec(2, 3)
                    plt.show(block=False)
                    plt.ion()
                plt.clf()
                axes = []
                axes.append(self.add_subplot(
                    fig, cv2.cvtColor(self.eye.viewFov, cv2.COLOR_BGR2RGB),
                    'Foveated View', gs[0, 0]))
                axes.append(self.add_subplot(
                    fig, self.periphMap.periphMap,
                    'Peripheral Map: ' + self.settings.PeriphSalAlgorithm, gs[0, 1]))
                axes.append(self.add_subplot(
                    fig, self.centralMap.centralMap,
                    'Central Map: ' + self.settings.CentralSalAlgorithm, gs[1, 0]))
                axes.append(self.add_subplot(
                    fig, self.priorityMap.priorityMap, 'Priority Map', gs[1, 1]))
                axes.append(self.add_subplot(
                    fig,
                    cv2.cvtColor(self.env.sceneWithFixations.astype(np.uint8),
                                 cv2.COLOR_BGR2RGB),
                    'Image: {} \n Fixation #{}/{}'.format(
                        self.imgName, i + 1, self.settings.maxNumFixations),
                    gs[:, -1]))
                gs.tight_layout(fig)
                fig.canvas.draw()
                t_vis = time.time() - t0
                print('[vis] Time elapsed {:0.03f}'.format(t_vis))

    def add_subplot(self, fig, img, title, plot_idx):
        ax = fig.add_subplot(plot_idx)
        ax.set_title(title, fontsize=10)
        ax.set_xlabel('[{:10.3f}, {:10.3f}]'.format(np.min(img), np.max(img)))
        ax.get_xaxis().set_ticks([])
        ax.get_yaxis().set_ticks([])
        ax.imshow(img)
        return ax
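# Minimal usage sketch for the Controller above. The DemoSettings container and
# its field values are assumptions for illustration only; the real project
# presumably builds its settings object elsewhere, which is not shown here.
class DemoSettings:                      # hypothetical stand-in for the settings object
    input = 'stimuli/scene.png'          # single input image (no batch directory)
    batch = ''
    saveFix = False
    saveDir = 'results'
    overwrite = False
    numSubjects = 1
    maxNumFixations = 10
    blendingStrategy = 1
    visualize = False
    PeriphSalAlgorithm = 'peripheral-alg'   # only used in plot titles when visualize=True
    CentralSalAlgorithm = 'central-alg'

controller = Controller(DemoSettings())
controller.run()                         # runs setup() and computeFixations() per image/subject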
def test_generate_batches_of_manual_too_big(self):
    test_eye = Eye(test_image, test_image, test_image, 10, 10)
    self.assertEqual(1, len(test_eye.get_batches_of_manual()))
class Controller:
    def __init__(self, settings):
        self.env = None
        self.eye = None
        self.settings = settings
        self.imageList = []
        #save results
        self.saveResults = False
        if self.settings.saveFix:
            if os.path.exists(self.settings.saveDir):
                if self.settings.overwrite:
                    self.saveResults = True
            else:
                os.makedirs(self.settings.saveDir)
                self.saveResults = True
        print(self.saveResults)

    #get input images
    def getInputImages(self):
        if self.settings.input:
            self.imageList.append(self.settings.input)
        else:
            #list all images in the directory
            self.imageList = [
                f for f in listdir(self.settings.batch)
                if any(f.endswith(ext) for ext in ['jpg', 'bmp', 'png', 'gif'])
            ]

    def setup(self, imgPath):
        imgName, ext = os.path.splitext(os.path.basename(imgPath))
        self.imgName = imgName
        self.env = Environment(self.settings)
        self.env.loadStaticStimulus(self.settings.batch + '/' + imgPath)
        self.eye = Eye(self.settings, self.env)
        self.periphMap = PeripheralAttentionalMap(self.env.height, self.env.width, self.settings)
        self.centralMap = CentralAttentionalMap(self.env.height, self.env.width, self.settings)
        self.conspMap = ConspicuityMap(self.env.height, self.env.width, self.settings)
        self.priorityMap = PriorityMap(self.env.height, self.env.width, self.settings)
        self.fixHistMap = FixationHistoryMap(self.env.height, self.env.width,
                                             self.env.hPadded, self.env.wPadded,
                                             self.settings)
        self.LongTermMemory = LTM(self.settings)
        self.visualTaskExecutive = vTE(self.settings)
        self.TaskRelevanceMap = TRM(self.env.height, self.env.width, self.settings)
        if self.settings.task_relevance == 1:
            #learn representations if not done previously
            self.LongTermMemory.learn()
            #get task relevance (initial)
            self.TaskRelevanceMap.setTRM(
                self.visualTaskExecutive.get_relevance(self.LongTermMemory, self.env.scene))

    #computes fixations for each image and each subject
    def run(self):
        self.getInputImages()
        for imgPath in self.imageList:
            for i in range(self.settings.numSubjects):
                self.setup(imgPath)
                self.computeFixations()
                if self.saveResults:
                    currentSaveDir = self.settings.saveDir
                    if not os.path.exists(currentSaveDir):
                        os.makedirs(currentSaveDir)
                    #self.fixHistMap.dumpFixationsToMat('{}/fixations_{}.mat'.format(currentSaveDir, self.imgName, i))
                    cv2.imwrite(
                        '{}/fixations_{}.png'.format(currentSaveDir, self.imgName),
                        self.env.sceneWithFixations.astype(np.uint8))
                    #cv2.imwrite('{}/conspMap_{}.png'.format(currentSaveDir, self.imgName), self.conspMap.conspMap)
                    #cv2.imwrite('{}/priorityMap_{}.png'.format(currentSaveDir, self.imgName), self.priorityMap.priorityMap)
                    #cv2.imwrite('{}/fixHistMap_{}.png'.format(currentSaveDir, self.imgName), self.fixHistMap.fixHistMap)

    def computeFixations(self):
        if self.settings.visualize:
            fig = plt.figure(1, figsize=(13, 7), facecolor='white')
            gs = gridspec.GridSpec(2, 3)
            plt.show(block=False)
            plt.ion()
        for i in range(self.settings.maxNumFixations):
            print('fixation {}'.format(i))
            if self.saveResults:
                currentSaveDir = self.settings.saveDir
                if not os.path.exists(currentSaveDir):
                    os.makedirs(currentSaveDir)
            self.eye.viewScene()
            self.periphMap.computeBUSaliency(self.eye.viewFov)
            self.periphMap.computePeriphMap(self.settings.blendingStrategy == 1)
            self.centralMap.centralDetection(self.eye.viewFov)
            self.centralMap.maskCentralDetection()
            self.conspMap.computeConspicuityMap(self.periphMap.periphMap,
                                                self.centralMap.centralMap)
            self.priorityMap.computeNextFixationDirection(
                self.periphMap.periphMap, self.centralMap.centralMap,
                self.fixHistMap.getFixationHistoryMap())
            prevGazeCoords = self.eye.gazeCoords
            self.eye.setGazeCoords(self.priorityMap.nextFixationDirection)
            self.env.drawFixation(self.eye.gazeCoords.astype(np.int32))
            self.fixHistMap.decayFixations()
            self.fixHistMap.saveFixationCoords(prevGazeCoords)
            if self.settings.visualize:
                self.add_subplot(cv2.cvtColor(self.eye.viewFov, cv2.COLOR_BGR2RGB),
                                 'Foveated View', gs[0, 0])
                self.add_subplot(self.periphMap.periphMap, 'Peripheral Map', gs[0, 1])
                self.add_subplot(self.centralMap.centralMap, 'Central Map', gs[1, 0])
                self.add_subplot(self.priorityMap.priorityMap, 'Priority Map', gs[1, 1])
                self.add_subplot(
                    cv2.cvtColor(self.env.sceneWithFixations.astype(np.uint8),
                                 cv2.COLOR_BGR2RGB),
                    'Image: {} \n Fixation #{}/{}'.format(
                        self.imgName, i, self.settings.maxNumFixations),
                    gs[:, -1])
                if i == 0:
                    gs.tight_layout(fig)
                fig.canvas.draw()
            if self.saveResults:
                self.fixHistMap.dumpFixationsToMat(
                    '{}/{}.mat'.format(currentSaveDir, self.imgName))

    def add_subplot(self, img, title, plot_idx):
        ax = plt.subplot(plot_idx)
        ax.get_xaxis().set_ticks([])
        ax.get_yaxis().set_ticks([])
        ax.set_title(title, fontsize=10)
        ax.set_xlabel('[{:10.3f}, {:10.3f}]'.format(np.min(img), np.max(img)))
        plt.imshow(img)
def test_generate_batches_of_manual(self):
    test_eye = Eye(test_image, test_image, test_image, 2, 2)
    self.assertEqual(len(test_batches), len(test_eye.get_batches_of_manual()))
    self.assertEqual(len(test_batches), len(test_eye.get_batches_of_manual()))
def test_build_image_from_batches_too_big(self):
    test_eye = Eye(test_image, test_image, test_image, 10, 10)
    test_batches = test_eye.get_batches_of_raw()
    image = test_eye.build_image_from_batches(test_batches)
    self.assertTrue(array_equal(test_image, image))
def test_get_batches_too_big(self):
    test_eye = Eye(test_image, test_image, test_image, 10, 10)
    batches = test_eye.get_batches_of_raw()
    batch = batches[0].reshape(test_image.shape)
    self.assertEqual(1, len(batches))
    self.assertTrue(array_equal(batch, test_image))
def test_build_image_from_batches_odd(self):
    test_eye = Eye(test_image_odd, test_image_odd, test_image_odd, 2, 2)
    test_batches = test_eye.get_batches_of_raw()
    image = test_eye.build_image_from_batches(test_batches)
    self.assertTrue(array_equal(test_image_odd_output, image))
def test_get_batches_2(self):
    test_eye = Eye(test_image_odd, test_image_odd, test_image_odd, 2, 2)
    batches = test_eye.get_batches_of_raw()
    self.assertTrue(array_equal(batches, test_batches_odd))
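# Illustration only (not the Eye implementation): splitting an image into
# fixed-size patches, which is roughly what get_batches_of_raw in the tests
# above returns. Patch sizes larger than the image (the "_too_big" cases)
# collapse to a single patch; Eye's exact odd-size behaviour (padding vs.
# cropping) is not assumed here. All names below are hypothetical.
import numpy as np

def split_into_patches(img, ph, pw):
    # clamp the patch size to the image so oversized requests yield one patch
    ph, pw = min(ph, img.shape[0]), min(pw, img.shape[1])
    return [img[r:r + ph, c:c + pw]
            for r in range(0, img.shape[0], ph)
            for c in range(0, img.shape[1], pw)]

demo = np.arange(16).reshape(4, 4)            # hypothetical 4x4 test image
assert len(split_into_patches(demo, 2, 2)) == 4
assert len(split_into_patches(demo, 10, 10)) == 1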