# Example 1
    def __init__(self, mon, screenSize=None, skip_ringbuf=True,
                 port=5555, myIP=None, iViewPort=4444,
                 pc_setup='one_PC',
                 calib_skip=False, calib_instruction_text='', calib_bg_color=128, calib_fg_color=64,
                 win=None):
        """Connect to the SMI iView eye tracker and (optionally) calibrate it.

        Args:
            mon: PsychoPy Monitor; its width and eye distance are overwritten
                with hard-coded lab values below.
            screenSize: screen resolution in pixels; defaults to [1680, 1050].
                (Fix: was a mutable default argument, shared between calls.)
            skip_ringbuf: stored on the instance; not used in this method.
            port: UDP port to listen for iView data on (see TODO below); not
                used in this method.
            myIP: IP of this machine; defaults to the local host IP, now
                resolved at call time instead of once at import time.
                Currently unused here -- the instance resolves its own IP.
            iViewPort: iView server port; not used in this method.
            pc_setup: computer-setup string passed to the iView SDK.
            calib_skip: when True, skip calibration/validation entirely.
            calib_instruction_text: text shown during calibration.
            calib_bg_color: calibration background grey level.
            calib_fg_color: calibration target grey level.
            win: PsychoPy window used for the calibration stimuli.
                'None' inserted 20150422 by hg instead of:
                visual.Window(size=(800, 600), fullscr=False, allowGUI=False,
                              color=(0, 0, 0),
                              monitor=monitors.Monitor('default'),
                              units='deg', screen=0)
        """
        # Deferred import: only needed when an eye-tracker object is created.
        from iView import iview_SDK, iViewXAPI

        # Resolve defaults here rather than in the signature: a list default is
        # shared across all calls, and gethostbyname() in the signature would
        # run once at import time (and could raise before the class is usable).
        if screenSize is None:
            screenSize = [1680, 1050]
        if myIP is None:
            myIP = socket.gethostbyname(socket.gethostname())

        # TODO??: (Henrik)
        # UDP port to listen for iView data on.
        # Set iView software to duplicate stream to this port number so that we don't conflict with
        # the listening and sending on the main port number.

        self.mon = mon
        self.mon.setWidth(53.2)   # Width of screen (cm)
        self.mon.setDistance(65)  # Distance eye monitor (cm)

        self.screenSize = screenSize
        self.skip_ringbuf = skip_ringbuf
        # misc (Henrik): counters and latest gaze/state values
        self.my_ip = socket.gethostbyname(socket.gethostname())
        self.i = 0
        self.msg_i = 0
        self.x = 0
        self.y = 0
        self.state = 0
        self.res = 0

        # Create an instance of the eye tracker class (connects automatically)
        self.et = iview_SDK.mySDK_class(computer_setup=pc_setup)  # Also initializes the eye tracker

        # Calibrate and validate the eye tracker
        if not calib_skip:
            # Create PsychoPy text to show stimulus for calibration
            # TODO: Consider moving more text properties to class parameters
            self.calib_text = visual.TextStim(win, text=calib_instruction_text,
                                              wrapWidth=20, height=0.5)
            self.et.setup_calibration_parameters(bg_color=calib_bg_color,
                                                 fg_color=calib_fg_color)
            self.et.calibrate(win, self.calib_text)

        # Initialize the threading.Thread base class (this object is a thread)
        threading.Thread.__init__(self)

        # Stop flag (name-mangled to _<ClassName>__stop); presumably set
        # elsewhere to end the thread's run loop -- TODO confirm.
        self.__stop = False
# Example 2
def main(argv):
    """Run the 'Find Wally' multicast search client.

    Example call: client_demo.py 1 1

    Args:
        argv: command-line arguments (sys.argv[1:]).
            argv[0] == '1' enables eye tracking (otherwise gaze is simulated).
            argv[1] == '1' hands control to the server script (otherwise the
            participant starts the experiment with a keypress).
    """

    # Run relative to this script so the image files below are found.
    this_dir = os.path.abspath(os.path.dirname(__file__))  # os.getcwd()
    os.chdir(this_dir)

    RT = core.Clock()  # reaction-time clock, reset when the search image appears

    # Default parameters
    eye_tracking = False    # Use eye tracker or just simulate data
    server_control = False  # Controlled by server (or participant control)

    ## If any input parameters are given
    if len(argv) > 0:
        eye_tracking = int(argv[0]) == 1
    if len(argv) > 1:
        server_control = int(argv[1]) == 1

    # Set up monitor (geometry is used to convert gaze samples to degrees)
    mon = monitors.Monitor(constants_wally.PSYCHOPY_MONITOR_NAME)
    mon.setWidth(constants_wally.SCREEN_WIDTH)  # Width of screen (cm)
    mon.setDistance(
        constants_wally.SCREEN_EYE_DIST)  # Distance eye / monitor (cm)
    mon.setSizePix(constants_wally.SCREEN_RES)

    # Create a window to draw in
    scale_f_window = 1  # Make the PsychoPy window smaller than the screen?
    # NOTE(review): SCREEN_RES * scale_f_window only scales element-wise if
    # SCREEN_RES is a numpy array (a plain list would be repeated) -- confirm.
    win = visual.Window(constants_wally.SCREEN_RES * scale_f_window,
                        fullscr=False,
                        allowGUI=False,
                        color=(0, 0, 0),
                        monitor=mon,
                        units='deg',
                        screen=0)

    # An instruction text object and the gaze marker (red dot, black outline)
    instruction_text = visual.TextStim(win, text='', wrapWidth=20, height=0.5)
    dotStim = visual.GratingStim(win,
                                 color='red',
                                 tex=None,
                                 mask='circle',
                                 size=0.4)
    dotStim_c = visual.GratingStim(win,
                                   color='black',
                                   tex=None,
                                   mask='circle',
                                   size=0.55)

    # Add the pictures
    im_search = visual.ImageStim(win, image='wally_search.jpg')
    im_face = visual.ImageStim(win, image='wally_face.jpg')
    wally_pos = constants_wally.WALLY_POS

    # Multicast xy and Listen (UDP); receive our own packets so our own dot
    # is drawn through the same code path as everyone else's.
    my_ip = socket.gethostbyname(socket.gethostname())
    xyCasting = CastThread.MultiCast(my_ip)
    xyCasting.setReceiveOwn(receiveOwn=True)
    xyCasting.start()

    # Each participant's dot should have a different color
    # (idiom: comprehension instead of an append loop; range already yields ints)
    dot_color = [xyCasting.get26colors(i) for i in range(26)]

    # Wait for a multicast message 'calibrate'
    if server_control and eye_tracking:
        instruction_text.setText('Waiting for message to start calibration ')
        instruction_text.draw()
        win.flip()
        message_received = False
        while not message_received:  # busy-wait on the multicast stream
            allData = xyCasting.consumeAll()
            for data, addr, time_arrive in allData:
                if 'calibrate' in data:
                    message_received = True

    # Initializes the eye tracker class and calibrates
    if eye_tracking:
        et = iview_SDK.mySDK_class(computer_setup='one_PC')
        et.setup_calibration_parameters(bg_color=128, fg_color=64)

        # Calibrate and validate the eye tracker
        et.calibrate(win, instruction_text)

        # Send message that calibration is successfully performed
        if server_control:
            xyCasting.send(
                'done_calibrating')  # optionally send calibration accuracy

    # This is Wally (shown on the next flip together with the instructions)
    im_face.pos = (0, -5)
    im_face.draw()

    # Wait for the server script to start the experiment
    if server_control:

        instruction_text.setText(
            'Press the spacebar as soon as you have found Walley \n\n Please wait for the experiment to start. '
        )
        instruction_text.draw()
        win.flip()

        message_received = False
        while not message_received:
            allData = xyCasting.consumeAll()
            for data, addr, time_arrive in allData:
                if 'start' in data:
                    message_received = True

    else:
        instruction_text.setText(
            'Press the spacebar as soon as you have found Walley \n\n Press a key to start. '
        )
        instruction_text.draw()
        win.flip()
        event.waitKeys()

    event.clearEvents()

    # Show image and start the reaction-time clock
    im_search.draw()
    win.flip()

    RT.reset()

    if eye_tracking:
        et.start_recording()
        et.start()
    #core.wait(1)

    # Display a gaze contingent marker until a key is pressed (or the server
    # multicasts 'stop').
    key_pressed = False
    while not key_pressed:

        # Get samples (averaged over all new samples since the last frame)
        if eye_tracking:
            t, x, y = et.get_all_samples(mon=mon)
            t = np.mean(t)
            x = np.mean(x)
            y = np.mean(y)
        else:
            t = 0
            # NB: this all operates in degrees! Simulated gaze is offset by
            # the last byte of the IP so each client's dot lands elsewhere.
            x = np.random.rand(1)[0] + float(my_ip.split('.')[-1]) * .75
            y = np.random.rand(1)[0] + float(my_ip.split('.')[-1]) * .75

        # Draw image
        im_search.draw()

        # Multicast data to all clients in multicast group
        xyCasting.send(','.join([str(t), str(x), str(y)]))

        # Read multicast data and draw the newest position of every client
        allData = xyCasting.getNewest()
        for data, addr, time_arrive in allData:

            if 'stop' in data:
                # Server aborted the search; no reaction time was recorded.
                key_pressed = True
                rt = np.inf
                break

            if 'exp_done' in data:
                continue  # status message from a client that already finished

            # Check whether the data contains the right number of elements
            data_t = data.split(',')
            if len(data_t) == 3:
                # Get t,x,y and draw on screen
                ti, xi, yi = data_t
                xi = float(xi)
                yi = float(yi)
            else:
                continue

            # Get color for specific client (keyed by last byte of its IP)
            temp_ip = int(addr[0].split('.')[-1])
            if temp_ip > 25:
                ci = xyCasting.get26colors(temp_ip)
            else:
                ci = dot_color[temp_ip]

            # Draw position (outline first so the red dot sits on top)
            dotStim.color = ci
            dotStim.pos = (xi, yi)
            dotStim_c.pos = (xi, yi)
            dotStim_c.draw()
            dotStim.draw()

        # Show screen when all data have been received
        win.flip()

        # Check for keypress (timestamped against the reaction-time clock)
        k = event.getKeys(timeStamped=RT)
        if k:
            key_pressed = True
            rt = k[0][1]  # Store reaction time

    # Send message that experiment is done and the search time
    if server_control:
        xyCasting.send(' '.join(['exp_done', str(rt)]))

    xyCasting.stop()
    xyCasting.clean_up()

    # Stop eye tracking and disconnect
    if eye_tracking:
        et.stop()
        et.stop_recording()
        et.clear_buffer()
        et.disconnect()

    # Highlight the correct location of Wally
    im_search.draw()
    instruction_text.setColor('blue')
    instruction_text.setHeight(1.5)
    instruction_text.setPos((wally_pos[0], wally_pos[1] + 2))
    instruction_text.setText('Here is Wally')
    instruction_text.draw()
    instruction_text.setHeight(3)
    instruction_text.setPos(wally_pos)
    instruction_text.setText('o')
    instruction_text.draw()

    # Also draw the gaze location at the time of the decision (TODO)

    win.flip()
    core.wait(5)

    win.close()
    core.quit()
# Example 3
    def __init__(self,
                 mon,
                 screenSize=None,
                 skip_ringbuf=True,
                 port=5555,
                 myIP=None,
                 iViewPort=4444,
                 pc_setup='one_PC',
                 calib_skip=False,
                 calib_instruction_text='',
                 calib_bg_color=128,
                 calib_fg_color=64,
                 win=None):
        """Connect to the SMI iView eye tracker and (optionally) calibrate it.

        Args:
            mon: PsychoPy Monitor; its width and eye distance are overwritten
                with hard-coded lab values below.
            screenSize: screen resolution in pixels; defaults to [1680, 1050].
                (Fix: was a mutable default argument, shared between calls.)
            skip_ringbuf: stored on the instance; not used in this method.
            port: UDP port to listen for iView data on (see TODO below); not
                used in this method.
            myIP: IP of this machine; defaults to the local host IP, now
                resolved at call time instead of once at import time.
                Currently unused here -- the instance resolves its own IP.
            iViewPort: iView server port; not used in this method.
            pc_setup: computer-setup string passed to the iView SDK.
            calib_skip: when True, skip calibration/validation entirely.
            calib_instruction_text: text shown during calibration.
            calib_bg_color: calibration background grey level.
            calib_fg_color: calibration target grey level.
            win: PsychoPy window used for the calibration stimuli.
                'None' inserted 20150422 by hg instead of:
                visual.Window(size=(800, 600), fullscr=False, allowGUI=False,
                              color=(0, 0, 0),
                              monitor=monitors.Monitor('default'),
                              units='deg', screen=0)
        """
        # Deferred import: only needed when an eye-tracker object is created.
        from iView import iview_SDK, iViewXAPI

        # Resolve defaults here rather than in the signature: a list default is
        # shared across all calls, and gethostbyname() in the signature would
        # run once at import time (and could raise before the class is usable).
        if screenSize is None:
            screenSize = [1680, 1050]
        if myIP is None:
            myIP = socket.gethostbyname(socket.gethostname())

        # TODO??: (Henrik)
        # UDP port to listen for iView data on.
        # Set iView software to duplicate stream to this port number so that we don't conflict with
        # the listening and sending on the main port number.

        self.mon = mon
        self.mon.setWidth(53.2)  # Width of screen (cm)
        self.mon.setDistance(65)  # Distance eye monitor (cm)

        self.screenSize = screenSize
        self.skip_ringbuf = skip_ringbuf
        # misc (Henrik): counters and latest gaze/state values
        self.my_ip = socket.gethostbyname(socket.gethostname())
        self.i = 0
        self.msg_i = 0
        self.x = 0
        self.y = 0
        self.state = 0
        self.res = 0

        # Create an instance of the eye tracker class (connects automatically)
        self.et = iview_SDK.mySDK_class(
            computer_setup=pc_setup)  # Also initializes the eye tracker

        # Calibrate and validate the eye tracker
        if not calib_skip:
            # Create PsychoPy text to show stimulus for calibration
            # TODO: Consider moving more text properties to class parameters
            self.calib_text = visual.TextStim(win,
                                              text=calib_instruction_text,
                                              wrapWidth=20,
                                              height=0.5)
            self.et.setup_calibration_parameters(bg_color=calib_bg_color,
                                                 fg_color=calib_fg_color)
            self.et.calibrate(win, self.calib_text)

        # Initialize the threading.Thread base class (this object is a thread)
        threading.Thread.__init__(self)

        # Stop flag (name-mangled to _<ClassName>__stop); presumably set
        # elsewhere to end the thread's run loop -- TODO confirm.
        self.__stop = False
# Example 4
def main(argv):
    """Run the 'Find Wally' multicast search client.

    Example call: client_demo.py 1 1

    Args:
        argv: command-line arguments (sys.argv[1:]).
            argv[0] == '1' enables eye tracking (otherwise gaze is simulated).
            argv[1] == '1' hands control to the server script (otherwise the
            participant starts the experiment with a keypress).
    """

    # Run relative to this script so the image files below are found.
    this_dir = os.path.abspath(os.path.dirname(__file__))  # os.getcwd()
    os.chdir(this_dir)

    RT = core.Clock()  # reaction-time clock, reset when the search image appears

    # Default parameters
    eye_tracking = False    # Use eye tracker or just simulate data
    server_control = False  # Controlled by server (or participant control)

    ## If any input parameters are given
    if len(argv) > 0:
        eye_tracking = int(argv[0]) == 1
    if len(argv) > 1:
        server_control = int(argv[1]) == 1

    # Set up monitor (geometry is used to convert gaze samples to degrees)
    mon = monitors.Monitor(constants_wally.PSYCHOPY_MONITOR_NAME)
    mon.setWidth(constants_wally.SCREEN_WIDTH)  # Width of screen (cm)
    mon.setDistance(constants_wally.SCREEN_EYE_DIST)  # Distance eye / monitor (cm)
    mon.setSizePix(constants_wally.SCREEN_RES)

    # Create a window to draw in
    scale_f_window = 1  # Make the PsychoPy window smaller than the screen?
    # NOTE(review): SCREEN_RES * scale_f_window only scales element-wise if
    # SCREEN_RES is a numpy array (a plain list would be repeated) -- confirm.
    win = visual.Window(constants_wally.SCREEN_RES * scale_f_window,
                        fullscr=False,
                        allowGUI=False,
                        color=(0, 0, 0),
                        monitor=mon,
                        units='deg',
                        screen=0)

    # An instruction text object and the gaze marker (red dot, black outline)
    instruction_text = visual.TextStim(win, text='', wrapWidth=20, height=0.5)
    dotStim = visual.GratingStim(win, color='red', tex=None, mask='circle',
                                 size=0.4)
    dotStim_c = visual.GratingStim(win, color='black', tex=None, mask='circle',
                                   size=0.55)

    # Add the pictures
    im_search = visual.ImageStim(win, image='wally_search.jpg')
    im_face = visual.ImageStim(win, image='wally_face.jpg')
    wally_pos = constants_wally.WALLY_POS

    # Multicast xy and Listen (UDP); receive our own packets so our own dot
    # is drawn through the same code path as everyone else's.
    my_ip = socket.gethostbyname(socket.gethostname())
    xyCasting = CastThread.MultiCast(my_ip)
    xyCasting.setReceiveOwn(receiveOwn=True)
    xyCasting.start()

    # Each participant's dot should have a different color
    # (idiom: comprehension instead of an append loop; range already yields ints)
    dot_color = [xyCasting.get26colors(i) for i in range(26)]

    # Wait for a multicast message 'calibrate'
    if server_control and eye_tracking:
        instruction_text.setText('Waiting for message to start calibration ')
        instruction_text.draw()
        win.flip()
        message_received = False
        while not message_received:  # busy-wait on the multicast stream
            allData = xyCasting.consumeAll()
            for data, addr, time_arrive in allData:
                if 'calibrate' in data:
                    message_received = True

    # Initializes the eye tracker class and calibrates
    if eye_tracking:
        et = iview_SDK.mySDK_class(computer_setup='one_PC')
        et.setup_calibration_parameters(bg_color=128, fg_color=64)

        # Calibrate and validate the eye tracker
        et.calibrate(win, instruction_text)

        # Send message that calibration is successfully performed
        if server_control:
            xyCasting.send('done_calibrating')  # optionally send calibration accuracy

    # This is Wally (shown on the next flip together with the instructions)
    im_face.pos = (0, -5)
    im_face.draw()

    # Wait for the server script to start the experiment
    if server_control:

        instruction_text.setText(
            'Press the spacebar as soon as you have found Walley \n\n Please wait for the experiment to start. '
        )
        instruction_text.draw()
        win.flip()

        message_received = False
        while not message_received:
            allData = xyCasting.consumeAll()
            for data, addr, time_arrive in allData:
                if 'start' in data:
                    message_received = True

    else:
        instruction_text.setText(
            'Press the spacebar as soon as you have found Walley \n\n Press a key to start. '
        )
        instruction_text.draw()
        win.flip()
        event.waitKeys()

    event.clearEvents()

    # Show image and start the reaction-time clock
    im_search.draw()
    win.flip()

    RT.reset()

    if eye_tracking:
        et.start_recording()
        et.start()
    #core.wait(1)

    # Display a gaze contingent marker until a key is pressed (or the server
    # multicasts 'stop').
    key_pressed = False
    while not key_pressed:

        # Get samples (averaged over all new samples since the last frame)
        if eye_tracking:
            t, x, y = et.get_all_samples(mon=mon)
            t = np.mean(t)
            x = np.mean(x)
            y = np.mean(y)
        else:
            t = 0
            # NB: this all operates in degrees! Simulated gaze is offset by
            # the last byte of the IP so each client's dot lands elsewhere.
            x = np.random.rand(1)[0] + float(my_ip.split('.')[-1]) * .75
            y = np.random.rand(1)[0] + float(my_ip.split('.')[-1]) * .75

        # Draw image
        im_search.draw()

        # Multicast data to all clients in multicast group
        xyCasting.send(','.join([str(t), str(x), str(y)]))

        # Read multicast data and draw the newest position of every client
        allData = xyCasting.getNewest()
        for data, addr, time_arrive in allData:

            if 'stop' in data:
                # Server aborted the search; no reaction time was recorded.
                key_pressed = True
                rt = np.inf
                break

            if 'exp_done' in data:
                continue  # status message from a client that already finished

            # Check whether the data contains the right number of elements
            data_t = data.split(',')
            if len(data_t) == 3:
                # Get t,x,y and draw on screen
                ti, xi, yi = data_t
                xi = float(xi)
                yi = float(yi)
            else:
                continue

            # Get color for specific client (keyed by last byte of its IP)
            temp_ip = int(addr[0].split('.')[-1])
            if temp_ip > 25:
                ci = xyCasting.get26colors(temp_ip)
            else:
                ci = dot_color[temp_ip]

            # Draw position (outline first so the red dot sits on top)
            dotStim.color = ci
            dotStim.pos = (xi, yi)
            dotStim_c.pos = (xi, yi)
            dotStim_c.draw()
            dotStim.draw()

        # Show screen when all data have been received
        win.flip()

        # Check for keypress (timestamped against the reaction-time clock)
        k = event.getKeys(timeStamped=RT)
        if k:
            key_pressed = True
            rt = k[0][1]  # Store reaction time

    # Send message that experiment is done and the search time
    if server_control:
        xyCasting.send(' '.join(['exp_done', str(rt)]))

    xyCasting.stop()
    xyCasting.clean_up()

    # Stop eye tracking and disconnect
    if eye_tracking:
        et.stop()
        et.stop_recording()
        et.clear_buffer()
        et.disconnect()

    # Highlight the correct location of Wally
    im_search.draw()
    instruction_text.setColor('blue')
    instruction_text.setHeight(1.5)
    instruction_text.setPos((wally_pos[0], wally_pos[1] + 2))
    instruction_text.setText('Here is Wally')
    instruction_text.draw()
    instruction_text.setHeight(3)
    instruction_text.setPos(wally_pos)
    instruction_text.setText('o')
    instruction_text.draw()

    # Also draw the gaze location at the time of the decision (TODO)

    win.flip()
    core.wait(5)

    win.close()
    core.quit()