def show_init_stimulus(self, ns, eyetracker, mouse, gaze_debug, debug_sq):
    """Show the peripheral stimuli over the movie's initial frame for init_stim_frames frames."""
    # Mark stimulus onset on the first flip
    self.win.callOnFlip(self.add_event, ns, eyetracker, 'ima1', 'stim start', self.code_table)
    frame_idx = 0
    while frame_idx < self.init_stim_frames:
        self.init_frame.draw()
        for stim_image in self.images.values():
            stim_image.draw()
        draw_eye_debug(gaze_debug, eyetracker, mouse)
        if debug_sq is not None:
            debug_sq.draw()
        self.win.flip()
        frame_idx += 1
    def run(self, ns, eyetracker, mouse, gaze_debug, debug_sq):
        """
        Run trial: loop the attention-getter video until a left/right mouse press,
        then show the two actor images for duration_frames frames.
        :param ns: netstation connection (or None)
        :param eyetracker: eyetracker (or None)
        :param mouse: mouse used for left (button 0) / right (button 2) responses
        :param gaze_debug: gaze-debug stimulus; when not None the ROI outlines are also drawn
        :param debug_sq: photodiode/debug square stimulus (or None)
        :return: None
        """

        # Show actors on random sides of screen
        if np.random.rand() < 0.5:
            self.actors[0].stim.pos = [-12, 0]
            self.actors[1].stim.pos = [12, 0]
            left_actor, right_actor = self.actors[0].actor, self.actors[1].actor
        else:
            self.actors[0].stim.pos = [12, 0]
            self.actors[1].stim.pos = [-12, 0]
            left_actor, right_actor = self.actors[1].actor, self.actors[0].actor

        event.clearEvents()
        mouse.clickReset()
        resp = None
        self.attn_video.reload(self.win)
        self.win.callOnFlip(send_event, ns, eyetracker, 'pgat', 'pg attn', {'left': left_actor, 'rght': right_actor})
        # Keep replaying the attention video until the participant responds
        while resp is None:
            while not self.attn_video.stim.status == visual.FINISHED and resp is None:
                self.attn_video.stim.draw()
                self.win.flip()
                buttons, times = mouse.getPressed(getTime=True)
                if buttons[0] and times[0] > 0:
                    resp = 'l'
                elif buttons[2] and times[2] > 0:
                    resp = 'r'
            # Video finished without a response - rewind and play again
            self.attn_video.reload(self.win)

        # Draw actor images for the trial duration
        self.win.callOnFlip(send_event, ns, eyetracker, 'pgst', 'pg start', {'left': left_actor, 'rght': right_actor})
        for i in range(self.duration_frames):
            for actor in self.actors:
                actor.stim.draw()
            draw_eye_debug(gaze_debug, eyetracker, mouse)
            if gaze_debug is not None:
                self.left_roi.draw()
                self.right_roi.draw()
            if debug_sq is not None:
                debug_sq.draw()
            self.win.flip()
        send_event(ns, eyetracker, 'pgen', "pg end", {'left': left_actor, 'rght': right_actor})
    def play_movie(self, ns, eyetracker, mouse, gaze_debug):
        """Play the central movie with peripheral images, counting frames spent fixating the face."""
        self.images['l'].pos = [-self.peripheral_offset, 0]
        self.images['r'].pos = [self.peripheral_offset, 0]

        # Mark movie onset on the first flip
        self.win.callOnFlip(self.add_event, ns, eyetracker, 'mov1', 'movie start', self.code_table)

        attending_frames = 0
        while self.video_stim.stim.status != visual.FINISHED:

            # Render the current video frame plus peripheral images
            self.video_stim.stim.draw()
            for peripheral in self.images.values():
                peripheral.draw()
            draw_eye_debug(gaze_debug, eyetracker, mouse)

            self.win.flip()

            # Gaze position: binocular average from eyetracker, else mouse, else origin
            gaze_position = (0, 0)
            if eyetracker is not None:
                raw = eyetracker.getCurrentGazePosition()
                gaze_position = (0.5 * (raw[0] + raw[2]),
                                 0.5 * (raw[1] + raw[3]))
            elif mouse is not None:
                gaze_position = mouse.getPos()

            # Count consecutive-onset fixation on the face region; log first attend
            if fixation_within_tolerance(gaze_position, self.init_frame.pos, 10, self.win):
                if gaze_debug is not None:
                    gaze_debug.fillColor = (-1, -1, 1)
                attending_frames += 1
                if attending_frames == 1:
                    self.win.callOnFlip(self.add_event, ns, eyetracker, 'att2', 'attn face', self.code_table)
            else:
                if gaze_debug is not None:
                    gaze_debug.fillColor = (1, -1, -1)
        if gaze_debug is not None:
            gaze_debug.fillColor = (1, -1, -1)
    def run(self, ns, eyetracker, mouse, gaze_debug, distractor_set, debug_sq):
        """
        Run the block
        :param ns: connection to netstation (or None)
        :param eyetracker: connection to eyetracker (or None)
        :param mouse: mouse (used only for debug gaze drawing)
        :param gaze_debug: gaze-debug stimulus (or None)
        :param distractor_set: distractor pictures/sounds/video shown on experimenter request
        :param debug_sq: photodiode/debug square stimulus (or None)
        :return: pressed command key ('Q', 'ESCAPE', or 'E') if the block was aborted,
            otherwise an empty list when all trials completed
        """

        # Compute trial order. NOTE: must be a list - np.random.shuffle cannot
        # shuffle an immutable range object (Python 3)
        n_movies = len(self.stimuli)
        vid_order = list(range(n_movies))
        if n_movies < self.trials:
            vid_order = []
            while len(vid_order) < self.trials:
                vid_order.extend(range(n_movies))
        np.random.shuffle(vid_order)

        # Start netstation recording
        send_event(ns, eyetracker, 'blk1', "block start", {'code': self.code})

        # Run trials
        for t in range(self.trials):

            # Synch with netstation in between trials
            if ns is not None:
                ns.sync()

            # Compute random delay period
            iti_frames = self.min_iti_frames+int(np.random.rand()*(self.max_iti_frames-self.min_iti_frames))

            # Reset movie to beginning
            video_idx = vid_order[t]
            self.stimuli[video_idx].reload(self.win)

            # clear any keystrokes before starting
            event.clearEvents()

            # Play movie
            self.win.callOnFlip(self.add_trial_event, ns, eyetracker, 'mov1', 'movie start',
                                {'code': self.code,
                                 'mvmt': self.stimuli[video_idx].movement,
                                 'actr': self.stimuli[video_idx].actor})
            while not self.stimuli[video_idx].stim.status == visual.FINISHED:
                self.stimuli[video_idx].stim.draw()
                draw_eye_debug(gaze_debug, eyetracker, mouse)
                if debug_sq is not None:
                    debug_sq.draw()
                self.win.flip()

            # Tell netstation the movie has stopped
            self.add_trial_event(ns, eyetracker, 'mov2', 'movie end', {})

            # Black screen for delay
            for i in range(iti_frames):
                self.win.flip()

            # Flush buffered trial events to netstation
            for trial_event in self.trial_events:
                if ns is not None:
                    ns.send_event(trial_event.code, label=trial_event.label, timestamp=trial_event.timestamp, table=trial_event.table)
            self.trial_events = []

            # Check user input
            all_keys = event.getKeys()
            if len(all_keys):
                # Quit experiment
                if all_keys[0].upper() in ['Q', 'ESCAPE']:
                    return all_keys[0].upper()
                # Pause block
                elif all_keys[0].upper() == 'P':
                    self.pause()
                # End block
                elif all_keys[0].upper() == 'E':
                    return all_keys[0].upper()
                # Show distractors
                elif all_keys[0].upper() == 'D':
                    distractor_set.show_pictures_and_sounds()
                # Show distractor video
                elif all_keys[0].upper() == 'V':
                    distractor_set.show_video()

                event.clearEvents()

        # Stop netstation recording
        send_event(ns, eyetracker, 'blk2', 'block end', {'code': self.code})
        return []
    def highlight_peripheral_stimulus_click(self, ns, eyetracker, mouse, gaze_debug):
        """
        Bounce and flash the attended peripheral stimulus until a mouse click
        or until max_attending_frames frames have elapsed.
        :param ns: netstation connection (or None)
        :param eyetracker: eyetracker (or None)
        :param mouse: mouse used for left (button 0) / right (button 2) responses
        :param gaze_debug: gaze-debug stimulus (or None)
        :return: pressed command key if Q/ESCAPE/P/E/G/D was pressed, '' on timeout
            without a mouse response, None if a mouse response was made
        """
        # Set which stimulus to highlight
        self.highlight.pos = self.images[self.attention].pos

        resp = None
        mouse.clickReset()

        highlight_on = False
        idx = 0
        attending_frames = 0
        self.win.callOnFlip(self.add_event, ns, eyetracker, 'ima2', 'attn start', self.code_table)
        while resp is None and idx < self.max_attending_frames:
            # Draw init frame of movie and two stimuli
            self.init_frame.draw()
            current_pos = self.images[self.attention].pos
            if idx % 5 == 0:
                # Bounce the attended stimulus between y=1 and y=-1 every 5 frames.
                # Copy before mutating - current_pos may alias the stimulus' own pos array
                new_y = 1 if (current_pos[1] == 0 or current_pos[1] == -1) else -1
                self.images[self.attention].setPos([current_pos[0], new_y])
                self.highlight.setPos(self.images[self.attention].pos)

            for image in self.images.values():
                image.draw()

            # Toggle the highlight every 5 frames; draw it only on "on" toggle frames
            if idx % 5 == 0:
                highlight_on = not highlight_on
                if highlight_on:
                    self.highlight.draw()

            draw_eye_debug(gaze_debug, eyetracker, mouse)

            self.win.flip()
            idx += 1

            # Get gaze position from eyetracker (binocular average) or mouse
            gaze_position = (0, 0)
            if eyetracker is not None:
                gaze_position = eyetracker.getCurrentGazePosition()
                gaze_position = (0.5 * (gaze_position[0] + gaze_position[2]),
                                 0.5 * (gaze_position[1] + gaze_position[3]))
            elif mouse is not None:
                gaze_position = mouse.getPos()

            # Check if looking at the attended stimulus; log the first attending frame
            if fixation_within_tolerance(gaze_position, self.images[self.attention].pos,
                                         self.images[self.attention].size[0] / 2.0+3, self.win):
                if gaze_debug is not None:
                    gaze_debug.fillColor = (-1, -1, 1)
                attending_frames += 1
                if attending_frames == 1:
                    self.win.callOnFlip(self.add_event, ns, eyetracker, 'att1', 'attn stim', self.code_table)
            else:
                if gaze_debug is not None:
                    gaze_debug.fillColor = (1, -1, -1)
                attending_frames = 0

            buttons, times = mouse.getPressed(getTime=True)
            if buttons[0]:
                resp = 'l'
            elif buttons[2]:
                resp = 'r'

            # Check user input for experimenter command keys
            all_keys = event.getKeys()
            if len(all_keys):
                if all_keys[0].upper() in ['Q', 'ESCAPE'] or all_keys[0].upper() == 'P' or all_keys[0].upper() == 'E' or\
                                all_keys[0].upper() == 'G' or all_keys[0].upper() == 'D':
                    return all_keys[0].upper()
                event.clearEvents()

        if gaze_debug is not None:
            gaze_debug.fillColor = (1, -1, -1)

        # Timed out without any mouse response
        if idx >= self.max_attending_frames and resp is None:
            return ''

        return None
    def highlight_peripheral_stimulus_gaze(self, ns, eyetracker, mouse, gaze_debug):
        """
        Bounce and flash the attended peripheral stimulus until it is fixated for
        min_attending_frames consecutive frames, or max_attending_frames elapse.
        :param ns: netstation connection (or None)
        :param eyetracker: eyetracker (or None)
        :param mouse: mouse (fallback gaze source when no eyetracker)
        :param gaze_debug: gaze-debug stimulus (or None)
        :return: number of consecutive attending frames at loop exit
        """
        # Set which stimulus to highlight
        self.highlight.pos = self.images[self.attention].pos
        # Show initial frame of video until highlighted stimulus is fixated on or abort
        attending_frames = 0
        highlight_on = False
        idx = 0
        self.win.callOnFlip(self.add_event, ns, eyetracker, 'ima2', 'attn start', self.code_table)
        while attending_frames < self.min_attending_frames and idx < self.max_attending_frames:
            # Draw init frame of movie and two stimuli
            self.init_frame.draw()
            current_pos = self.images[self.attention].pos
            if idx % 5 == 0:
                # Bounce the attended stimulus between y=1 and y=-1 every 5 frames.
                # Copy before mutating - current_pos may alias the stimulus' own pos array
                new_y = 1 if (current_pos[1] == 0 or current_pos[1] == -1) else -1
                self.images[self.attention].setPos([current_pos[0], new_y])
                self.highlight.setPos(self.images[self.attention].pos)

            for image in self.images.values():
                image.draw()

            # Toggle the highlight every 5 frames; draw it only on "on" toggle frames
            if idx % 5 == 0:
                highlight_on = not highlight_on
                if highlight_on:
                    self.highlight.draw()

            draw_eye_debug(gaze_debug, eyetracker, mouse)

            self.win.flip()
            idx += 1

            # Get gaze position from eyetracker (binocular average) or mouse
            gaze_position = (0, 0)
            if eyetracker is not None:
                gaze_position = eyetracker.getCurrentGazePosition()
                gaze_position = (0.5 * (gaze_position[0] + gaze_position[2]),
                                 0.5 * (gaze_position[1] + gaze_position[3]))
            elif mouse is not None:
                gaze_position = mouse.getPos()

            # Check if looking at the attended stimulus; log the first attending frame
            if fixation_within_tolerance(gaze_position, self.images[self.attention].pos,
                                         self.images[self.attention].size[0] / 2.0+3, self.win):
                if gaze_debug is not None:
                    gaze_debug.fillColor = (-1, -1, 1)
                attending_frames += 1
                if attending_frames == 1:
                    self.win.callOnFlip(self.add_event, ns, eyetracker, 'att1', 'attn stim', self.code_table)
            else:
                if gaze_debug is not None:
                    gaze_debug.fillColor = (1, -1, -1)
                attending_frames = 0
        if gaze_debug is not None:
            gaze_debug.fillColor = (1, -1, -1)

        return attending_frames
 def show_init_video(self, ns, eyetracker, mouse, gaze_debug):
     """Play the initial video through to completion, drawing gaze debug overlays each frame."""
     # Mark movie onset on the first flip
     self.win.callOnFlip(self.add_event, ns, eyetracker, 'imov', 'init movie', self.code_table)
     while self.init_video_stim.stim.status != visual.FINISHED:
         self.init_video_stim.stim.draw()
         draw_eye_debug(gaze_debug, eyetracker, mouse)
         self.win.flip()