Example #1
0
def present_trial(imagepath):
	"""
	Run one demo trial: build a CommandableImageSprite offscreen from
	imagepath, animate it onto the screen, then hold it there until it is
	clicked or MAX_DISPLAY_TIME elapses.
	"""
	## The sprite starts off the left edge; the queue below moves it in.
	sprite = CommandableImageSprite( screen, OFF_LEFT, imagepath, scale=IMAGE_SCALE)

	# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
	# Animation setup.

	## Queue of animation operations: pause one second, then jump to the
	## center spot instantly (duration 0).
	Q = DisplayQueue()
	Q.append(obj=sprite, action='wait', duration=1.0)
	Q.append(obj=sprite, action='move', pos=spot.center, duration=0.0)

	## Draw/update order for the event loop.
	dos = OrderedUpdates(sprite)

	trial_began = time()

	## Standard kelpy event loop -- runs until we break out, either on
	## timeout or on any click.
	for event in kelpy_standard_event_loop(screen, Q, dos, throw_null_events=True):
		timed_out = (time() - trial_began) > MAX_DISPLAY_TIME
		if timed_out or is_click(event):
			break
Example #2
0
def show_demo(stimuli):
	"""
	Display stimuli for a few seconds without yes/no buttons, playing a
	feedback sound that hints whether the object satisfies the rule.
	"""
	shown_at = time()
	MAX_DISPLAY_TIME = 4.0

	## Build the sprite offscreen; the display queue slides it into view.
	sprite = CommandableImageSprite( screen, OFF_SCREEN, stimuli.image, scale=IMAGE_SCALE)

	## Animation queue: glide the image to its on-screen spot over 1.5s.
	## It then stays put until the trial ends.
	Q = DisplayQueue()
	Q.append(obj=sprite, action='move', pos=ON_SCREEN, duration=1.5)

	## Draw/update order for the event loop (only one thing to track here,
	## but the loop still needs the ordered list).
	dos = OrderedUpdates(sprite)

	## Positive or negative feedback sound, depending on the rule.
	if the_rule(stimuli):
		sound_file = 'sounds/Tada.wav'
	else:
		sound_file = 'sounds/Bad_Pick.wav'
	play_sound( kstimulus(sound_file) )

	## Hold the scene until the display time runs out.
	for event in kelpy_standard_event_loop(screen, Q, dos):
		if (time() - shown_at) >= MAX_DISPLAY_TIME:
			break
Example #3
0
def show_demo(stimuli):
    """
    Show one stimulus for a fixed interval (no yes/no buttons), playing a
    sound that hints whether the stimulus follows the rule.
    """
    start = time()
    MAX_DISPLAY_TIME = 4.0

    ## The sprite is created offscreen and animated into view by the queue.
    image = CommandableImageSprite(
        screen, OFF_SCREEN, stimuli.image, scale=IMAGE_SCALE)

    ## Slide the image to its on-screen position over 1.5 seconds; it then
    ## remains there until the trial ends.
    Q = DisplayQueue()
    Q.append(obj=image, action='move', pos=ON_SCREEN, duration=1.5)

    ## Draw/update order for the event loop.
    dos = OrderedUpdates(image)

    ## Feedback sound: 'Tada' when the rule holds, 'Bad_Pick' otherwise.
    if the_rule(stimuli):
        play_sound(kstimulus('sounds/Tada.wav'))
    else:
        play_sound(kstimulus('sounds/Bad_Pick.wav'))

    ## Run until the allotted display time has elapsed.
    for event in kelpy_standard_event_loop(screen, Q, dos):
        if time() - start >= MAX_DISPLAY_TIME:
            break
Example #4
0
def present_trial(imagepath):
	"""
	Run one drag-and-drop demo: create a draggable DragSprite from
	imagepath, snap it to the center of the screen, and let the user drag
	it around for as long as the event loop runs.
	"""
	## DragSprite is the draggable variant of CommandableImageSprite and
	## accepts similar parameters.
	draggable = DragSprite( screen, spot.west, imagepath, scale=IMAGE_SCALE)

	# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
	# Animation setup.

	## Queue of animation operations: brief pause, then an instant move to
	## the screen's center.
	Q = DisplayQueue()
	Q.append(obj=draggable, action='wait', duration=1.0)
	middle = (screen.get_width()/2, screen.get_height()/2)
	Q.append(obj=draggable, action='move', pos=middle, duration=0.0)

	## Draw/update order for the event loop.
	dos = OrderedUpdates(draggable)

	## Note the time (kept for parity with the other demos).
	start_time = time()

	## Standard kelpy event loop; process_dragndrop() is all that is needed
	## to make the sprite draggable.
	for event in kelpy_standard_event_loop(screen, Q, dos, throw_null_events=True):
		draggable.process_dragndrop(event)
Example #5
0
def present_trial(stimuli):
    """
		This is the main function we use to run the trial. It is run twice, once for the 'correct' option, once for the 'incorrect' option.
		It lacks the ability to get shuffled around, so while the many correct and incorrect options are shuffled, they only appear in the order of 'correct' then 'incorrect'.
		There are three sprites on screen, the image, the 'yes' button, and the 'no' button.
		
	"""
    start_time = time()

    Q = DisplayQueue()
    image = CommandableImageSprite(screen,
                                   OFF_SCREEN,
                                   stimuli.image,
                                   scale=IMAGE_SCALE)
    yes_button = CommandableImageSprite(screen,
                                        YES_BUTTON_SPOT,
                                        YES_BUTTON,
                                        scale=BUTTON_SCALE)
    no_button = CommandableImageSprite(screen,
                                       NO_BUTTON_SPOT,
                                       NO_BUTTON,
                                       scale=BUTTON_SCALE)

    things = (image, yes_button, no_button)
    dos = OrderedUpdates(*things)
    ## The image is moved on screen...
    Q.append(obj=image, action='move', pos=ON_SCREEN, duration=1.5)

    for event in kelpy_standard_event_loop(screen, Q, dos):
        ## If it is clicked, we determine whether it is a correct or incorrect image, and whether yes or no was clicked...
        if is_click(event):
            who = who_was_clicked(dos)
            ## note the time...
            trial_time = time() - start_time
            ## print a bunch of things that we want to track...
            print trial_time, filename(
                stimuli.image), the_rule(stimuli), stimuli.shape, stimuli.color
            if who is yes_button and the_rule(stimuli):
                ## and play an appropriate 'hooray!' or "nope! try again!!" noise.
                play_sound(kstimulus('sounds/Tada.wav'), wait=True)
                break

            elif who is no_button and not the_rule(stimuli):
                play_sound(kstimulus('sounds/TadaWah2.wav'), wait=True)
                break

            elif who is no_button and the_rule(stimuli):
                play_sound(kstimulus('sounds/Bad_Pick.wav'), wait=True)
                break

            elif who is yes_button and not the_rule(stimuli):
                play_sound(kstimulus('sounds/Bad_Pick.wav'), wait=True)
                break
def present_trial(images, targetidx):
	"""
	Four-image forced-choice trial: lay out the four images, animate the
	first one in from offscreen left, and wait for a click on the target
	(index targetidx).  Returns the response time on a correct click;
	wrong clicks play an error sound and the trial continues.
	"""
	assert len(images) == 4, "*** ERROR! DID NOT SUPPLY 4 IMAGES: " + str(images)

	## One commandable sprite per image, each at its starting location.
	locations = [OFF_LEFT, spot.b2, spot.a3, spot.b3]
	sprites = []
	for where, path in zip(locations, images):
		sprites.append(CommandableImageSprite( screen, where, path, scale=IMAGE_SCALE))
	target = sprites[targetidx]

	# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
	# Animation setup.

	## Queue of animation operations: drift the first image to its slot
	## over three seconds.
	Q = DisplayQueue()
	Q.append(obj=sprites[0], action='move', pos=spot.a2, duration=3.0)

	## Draw/update order for the event loop.
	dos = OrderedUpdates(*sprites)

	start_time = time()

	## Standard kelpy event loop: keep running until the target is clicked.
	for event in kelpy_standard_event_loop(screen, Q, dos):
		if not is_click(event):
			continue
		whom = who_was_clicked(dos)
		if whom is target:
			## Correct: beep, glide to the center, and return the RT.
			play_sound(kstimulus('sounds/Beep2.wav'))
			Q.append(obj=whom, action='move', pos=(screen.get_width()/2, screen.get_height()/2), duration=1.0)
			return (time()-start_time)
		else:
			## Anything else clicked (or nothing): error sound, keep going.
			play_sound(kstimulus('sounds/Error.wav'))
Example #7
0
def present_trial(stimuli):
	"""
		This is the main function we use to run the trial. It is run twice, once for the 'correct' option, once for the 'incorrect' option.
		It lacks the ability to get shuffled around, so while the many correct and incorrect options are shuffled, they only appear in the order of 'correct' then 'incorrect'.
		There are three sprites on screen, the image, the 'yes' button, and the 'no' button.
		
	"""
	start_time = time()

	Q = DisplayQueue()	
	image = CommandableImageSprite( screen, OFF_SCREEN, stimuli.image, scale=IMAGE_SCALE)
	yes_button = CommandableImageSprite( screen, YES_BUTTON_SPOT, YES_BUTTON, scale=BUTTON_SCALE )
	no_button = CommandableImageSprite( screen, NO_BUTTON_SPOT, NO_BUTTON, scale=BUTTON_SCALE )
	
	things = (image, yes_button, no_button)
	dos = OrderedUpdates(*things)
	## The image is moved on screen...
	Q.append(obj=image, action='move', pos= ON_SCREEN, duration=1.5)
	
	for event in kelpy_standard_event_loop(screen, Q, dos):
		## If it is clicked, we determine whether it is a correct or incorrect image, and whether yes or no was clicked...
		if is_click(event):
			who = who_was_clicked(dos)
			## note the time...
			trial_time = time() - start_time
			## print a bunch of things that we want to track...
			print trial_time, filename(stimuli.image), the_rule(stimuli), stimuli.shape, stimuli.color
			if who is yes_button and the_rule(stimuli):
				## and play an appropriate 'hooray!' or "nope! try again!!" noise.
				play_sound( kstimulus('sounds/Tada.wav'), wait=True)
				break
				
			elif who is no_button and not the_rule(stimuli):
				play_sound( kstimulus('sounds/TadaWah2.wav'), wait=True)
				break
				
			elif who is no_button and the_rule(stimuli):
				play_sound( kstimulus('sounds/Bad_Pick.wav'), wait=True)
				break
				
			elif who is yes_button and not the_rule(stimuli):
				play_sound( kstimulus('sounds/Bad_Pick.wav'), wait=True)
				break
Example #8
0
def present_trial(imagepath):
    """
    Drag-and-drop demo: a draggable sprite (built from imagepath) can be
    dropped onto the blicket detector, which plays a sound on each drop.
    Runs until the surrounding event loop is exited.
    """
    ## The two items: the draggable Thing and the blicket detector, a
    ## DropSprite that acts as a drop zone.
    thing = DragSprite(screen, spot.topq1, imagepath, scale=IMAGE_SCALE)
    blicket_detector = DropSprite(
        screen, BLICKET_DETECTOR_POSITION, blicketd_image_path, scale=.5)

    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # Set up the updates, etc.

    ## Queue of animation operations (nothing queued in this demo).
    Q = DisplayQueue()

    ## Registering the detector as a drop zone makes the DragSprite emit a
    ## ZONE_EVENT into the event loop whenever it is dropped onto it.
    thing.register_drop_zone(blicket_detector)

    ## Draw/update order for the event loop.
    dos = OrderedUpdates(thing, blicket_detector)

    ## Standard kelpy event loop -- processes interactions and throws
    ## events depending on what the user does.
    for event in kelpy_standard_event_loop(
            screen, Q, dos, throw_null_events=True):

        ## This single call is all that is needed for drag behavior.
        thing.process_dragndrop(event)

        ## When a drop lands in a registered zone, check who was dropped
        ## and reward the expected sprite with a sound.
        if was_dropped_into_zone(event):
            if who_was_dropped(event) is thing:
                play_sound(kstimulus('sounds/Bing.wav'))
Example #9
0
def display_wait_scene():
    """
    Show a full-screen transparent button and wait until it is clicked
    twice: the first click on the image arms the scene, the second click
    ends it.
    """
    ## BUG FIX: the original concatenated os.path.dirname(__file__) with
    ## "stimuli/transparent.png", which drops the path separator whenever
    ## __file__ has a directory component ("dir" + "stimuli/..." ->
    ## "dirstimuli/...").  os.path.join inserts the separator correctly
    ## and still yields "stimuli/transparent.png" when dirname is empty.
    transparent_button = os.path.join(
        os.path.dirname(__file__), "stimuli", "transparent.png")

    img = CommandableImageSprite(screen, (0, 0), transparent_button, scale=1.0)
    Q = DisplayQueue()
    dos = OrderedUpdates(img)
    finished = False

    for event in kelpy_standard_event_loop(screen, Q, dos):

        if is_click(event):
            if finished:
                break
            whom = who_was_clicked(dos)
            if whom is img:
                finished = True  #so we have to double click
Example #10
0
def display_wait_scene():
	"""
	Display a transparent full-screen button and block until it has been
	clicked twice (the first click arms the scene, the second dismisses it).
	"""
	## BUG FIX: os.path.join supplies the path separator that the original
	## string concatenation (dirname(__file__) + "stimuli/...") omitted
	## whenever __file__ has a directory component.
	transparent_button = os.path.join(os.path.dirname( __file__ ), "stimuli", "transparent.png")
	img = CommandableImageSprite( screen, (0,0), transparent_button, scale=1.0)

	# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
	# Set up the updates, etc.

	# A queue of animation operations (nothing queued in this scene)
	Q = DisplayQueue()

	# What order do we draw sprites and things in?
	dos = OrderedUpdates(img) # Draw and update in this order

	## The standard event loop in kelpy -- this loops infinitely to process
	## interactions and throws events depending on what the user does.
	finished = False

	for event in kelpy_standard_event_loop(screen, Q, dos):

		# If the event is a click:
		if is_click(event):
			if finished:
				break
			# First click on the image arms the scene; the next click ends it.
			whom = who_was_clicked(dos)

			if whom is img:  ## which isn't the button btw
				finished = True
Example #11
0
def present_trial(images, targetidx):
	"""
	Four-image forced-choice trial: place the four images, animate the
	first in from offscreen left, and wait for a click on the target
	(index targetidx).  Returns the response time when the target is
	clicked; other clicks play an error sound and the trial continues.
	"""
	assert len(images) == 4, "*** ERROR! DID NOT SUPPLY 4 IMAGES: " + str(images)

	## One commandable sprite per image, each at its starting location.
	locations = [OFF_LEFT, spot.middleq2, spot.topq3, spot.middleq3]
	sprites = []
	for loc, path in zip(locations, images):
		sprites.append(CommandableImageSprite( screen, loc, path, scale=IMAGE_SCALE))
	target = sprites[targetidx]

	# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
	# Animation setup.

	## Queue the entrance animation for the first image.
	Q = DisplayQueue()
	Q.append(obj=sprites[0], action='move', pos=spot.topq2, duration=3.0)

	## Draw/update order for the event loop.
	dos = OrderedUpdates(*sprites)

	start_time = time()

	## Standard kelpy event loop: keep running until the target is clicked.
	for event in kelpy_standard_event_loop(screen, Q, dos):
		if not is_click(event):
			continue
		whom = who_was_clicked(dos)
		if whom is target:
			## Correct: beep, glide to the center, and return the RT.
			play_sound(kstimulus('sounds/Beep2.wav'))
			Q.append(obj=whom, action='move', pos=(screen.get_width()/2, screen.get_height()/2), duration=1.0)
			return (time()-start_time)
		else:
			## Anything else clicked (or nothing): error sound, keep going.
			play_sound(kstimulus('sounds/Error.wav'))
Example #12
0
def present_trial(imagepath):
    """
    Simple display demo: a sprite built from imagepath appears in the
    middle of the screen after a short pause and remains until it is
    clicked or the maximum display time passes.
    """
    ## Created offscreen left; the queue below brings it in.
    sprite = CommandableImageSprite(
        screen, OFF_LEFT, imagepath, scale=IMAGE_SCALE)

    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # Animation setup.

    ## Queue of animation operations: one-second pause, then an instant
    ## jump to the middle spot.
    Q = DisplayQueue()
    Q.append(obj=sprite, action='wait', duration=1.0)
    Q.append(obj=sprite, action='move', pos=spot.middle, duration=0.0)

    ## Draw/update order for the event loop.
    dos = OrderedUpdates(sprite)

    began = time()

    ## Standard kelpy event loop; exit on timeout or on any click.
    for event in kelpy_standard_event_loop(
            screen, Q, dos, throw_null_events=True):
        if (time() - began > MAX_DISPLAY_TIME) or is_click(event):
            break
Example #13
0
def present_no_choice_double(images, rightid, wrongid, order):
	"""
	Present a two-image "no choice" trial driven by a center button.

	Each press of the center button advances through `order` (a sequence of
	stimulus ids), playing one of three audio cues for the corresponding
	image and pulsing it with a scale animation.  After an image has been
	clicked through CLICKED_TIMES rounds it is swapped to its gray version;
	once both images are done, the next button press ends the trial.

	Returns a semicolon/comma-delimited log string of the form
	"<rightid>;<wrongid>,{<hex ids in press order>},<positions>".

	Assumes module-level globals: button_image, double_displayat,
	QUAD_IMAGE_SCALE, CLICKED_TIMES, target_audio1/2/3,
	target_images_gray, screen.
	"""
	# guys maps the internal index (1=right, 2=wrong) back to the caller's
	# stimulus ids; slot 0 is unused (it corresponds to the button sprite).
	guys = [None ,rightid, wrongid]
	img = [None] * 3
	# totalclicks indexes into `order`; starts at -1 so the first button
	# press uses order[0].
	totalclicks = -1
	## set the image locations
	## Images here are commandable sprites, so we can tell them what to do using Q below
	img[0] = CommandableImageSprite( screen, spot.center, button_image, scale=.5)
	img[1] = CommandableImageSprite( screen, double_displayat[0], images[rightid], scale=QUAD_IMAGE_SCALE)
	img[2] = CommandableImageSprite( screen, double_displayat[1] , images[wrongid], scale=QUAD_IMAGE_SCALE)


	# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
	# Set up the updates, etc. 
	# Maps a caller-side stimulus id to its slot in img[]/clicked[].
	def guytonumber(person):  ##should probably be a switch
			#print person
			if person == rightid:
				return 1
			elif person == wrongid:
				return 2
			else:
				print "error, something went super wrong"

	# A queue of animation operations
	Q = DisplayQueue()
	
	# Draw a single animation in if you want!
	
	# Log string header: "<rightid>;<wrongid>,{" -- the '{' opens the list
	# of presses appended below.
	output_string = str( rightid ) + ";" + str(wrongid) + ',{' 
	# What order do we draw sprites and things in?
	dos = OrderedUpdates(*img) # Draw and update in this order
	
	start_time = time()
	#play_sound(kstimulus("sounds/good_job.wav"))  ## This should be changed to play the proper intro sound for the character. right now it just, quite annoyingly, says "Good job!"
	finished = False
	# Per-slot press counters (slot 0 unused, matching guys/img).
	clicked = [0] * 3
	## The standard event loop in kelpy -- this loops infinitely to process interactions
	## and throws events depending on what the user does
	for event in kelpy_standard_event_loop(screen, Q, dos):
		
		# if time()-start_time > MAX_DISPLAY_TIME:
		# 	pass # could make a limit if you wanted

		# If the event is a click:
		# Ignore clicks while animations are still queued (Q.commands).
		if is_click(event) and not Q.commands: 
			if finished: ## If this is the second click, move on to the next thing!
				return output_string
			# check if each of our images was clicked
			whom = who_was_clicked( dos )
			
			if whom is img[0]:  ## which is the button btw
				#print "BUTTON PRESS: " + str(time() - start_time) , 
				totalclicks = totalclicks+1
				# Once both images are fully clicked through, further button
				# presses do nothing until the finishing click.
				if clicked[1] >CLICKED_TIMES and clicked[2]>CLICKED_TIMES :
					pass
				else:
					
					index = guytonumber(order[totalclicks]) ##convert that index from the main list to the internal index.
					# Presses are logged as hex digits of the stimulus id.
					output_string += str( format( order[totalclicks], "x" ) ) 
					clicked[index] = clicked[index] + 1
					# Audio cycles 1,2,3,1,2,3 across the first six presses
					# of each image.
					if clicked[index] == 1:
						Q.append(obj='sound', file=(target_audio1[guys[index]]) )
					elif clicked[index] == 2:
						Q.append(obj='sound', file=(target_audio2[guys[index]]) )
					elif clicked[index] == 3:
						Q.append(obj='sound', file=(target_audio3[guys[index]]) )
					elif clicked[index] == 4:
						Q.append(obj='sound', file=(target_audio1[guys[index]]) )
					elif clicked[index] == 5:
						Q.append(obj='sound', file=(target_audio2[guys[index]]) )
					elif clicked[index] == 6:
						Q.append(obj='sound', file=(target_audio3[guys[index]]) )
					else:
						pass
					#Q.append(obj=img[1], action='swapblink', position=(1000,400), image=target_images[targetidx], period=.5, duration=0, rotation=0, scale=IMAGE_SCALE, brightness=1.0 )
					
					# Pulse: scale up then back down (1.5 then 1/1.5).
					Q.append(obj=img[index], action="scale", amount=1.5, duration=1.0)  ##append simultaneous doesn't work : (
					Q.append(obj=img[index], action="scale", amount=(1/1.5), duration=1.0)
					# Reaching CLICKED_TIMES retires this image: bump the
					# counter past the limit, swap in the gray version, and
					# play the retirement sound.
					if clicked[index] == CLICKED_TIMES:
						clicked[index] = clicked[index]+1
						Q.append(obj=img[index], action='swapblink', position=(1000,400), image=target_images_gray[guys[index]], period=.5, duration=0, rotation=0, scale=QUAD_IMAGE_SCALE, brightness=1.0 )
						Q.append(obj='sound', file=kstimulus('sounds/Cheek-Pop.wav'))
						# Both done: close the log's press list, append the
						# two display positions, and arm the finishing click.
						if clicked[1] >CLICKED_TIMES and clicked[2]>CLICKED_TIMES :
							finished = True
							output_string +=  "},"
							output_string += str ( double_displayat[0] ).replace(",",";") + str ( double_displayat[1] ).replace(",",";")
Example #14
0
def present_trial(objects, probabilities, trial,i, writer):
    """
    Run one Tobii eye-tracking trial with two lidded boxes.

    Both lids open after a short wait; each box's object pops up with its
    corresponding probability (probabilities[0] for the left box,
    probabilities[1] for the right), then everything closes again.  One
    CSV row of looking data is written on every event-loop tick until
    MAX_DISPLAY_TIME elapses.  Escape quits the program.

    Assumes module-level globals: screen, tobii_controller, BOX_SCALE,
    IMAGE_SCALE, LID_SPOT1/2, left_lid_MOVE, right_lid_MOVE, OFF_SOUTH,
    MAX_DISPLAY_TIME, subject, session, file_header, use_tobii_sim.
    """
    start_time = time()
    # Whether each object was actually revealed this trial (logged below).
    in_right = False
    in_left = False
    # Left box: back, front (drawn over the object), and lid sprites.
    left_box =  TobiiSprite( screen, spot.c1, kstimulus("misc/box.png"), tobii_controller, scale=BOX_SCALE)
    left_boxfront = TobiiSprite(screen, spot.c1, kstimulus("misc/boxfront.png"), tobii_controller,scale=BOX_SCALE)
    left_lid =  TobiiSprite( screen, LID_SPOT1, kstimulus("misc/lid.png"), tobii_controller,scale=BOX_SCALE)
    # Right box: same trio of sprites.
    right_box = TobiiSprite(screen, spot.c4, kstimulus("misc/box.png"),tobii_controller, scale=BOX_SCALE)
    right_boxfront = TobiiSprite(screen, spot.c4, kstimulus("misc/boxfront.png"), tobii_controller,scale=BOX_SCALE)
    right_lid = TobiiSprite(screen, LID_SPOT2, kstimulus("misc/lid.png"),tobii_controller, scale=BOX_SCALE)
    # The two objects start hidden at the box positions.
    left_object = TobiiSprite( screen, spot.c1, objects[0],tobii_controller, scale=IMAGE_SCALE)
    right_object = TobiiSprite(screen, spot.c4, objects[1],tobii_controller, scale=IMAGE_SCALE)


    #the boxes keep opening every when_open seconds, and object appearance is stochastic

    # A queue of animation operations
    Q = DisplayQueue()

    # Short pause, then both lids slide open simultaneously.
    Q.append(obj=left_lid, action='wait', duration=1)
    Q.append(obj=right_lid, action='wait', duration=1)
    Q.append_simultaneous(obj=left_lid, action = 'move', pos=left_lid_MOVE, duration=0.25)
    Q.append_simultaneous(obj=right_lid, action='move', pos=right_lid_MOVE, duration=0.25)

    #with certain probability, reveal object:
    flip1 = random.random()
    if  flip1 < probabilities[0]:
        Q.append_simultaneous(obj=left_object, action='move', pos=spot.b1, duration=.5)
        in_left = True

    #with other probability, reveal object
    flip2 = random.random()
    
    if flip2 < probabilities[1]:
        Q.append_simultaneous(obj=right_object, action='move', pos=spot.b4, duration=.5)
        in_right = True

    # Brief hold while revealed, then everything returns: objects back into
    # the boxes, lids closed.
    Q.append(obj=left_object, action='wait', duration=.25)
    Q.append(obj=right_object, action='wait', duration=.25)

    Q.append(obj=left_lid, action='wait', duration=.25)
    Q.append(obj=right_lid, action='wait', duration=.25)

    Q.append_simultaneous(obj=left_object, action='move', pos=spot.c1, duration=.5)
    Q.append_simultaneous(obj=right_object, action='move', pos=spot.c4, duration=.5)
    Q.append_simultaneous(obj=left_lid, action='move', pos=LID_SPOT1,duration=.5)
    Q.append_simultaneous(obj=right_lid, action='move', pos=LID_SPOT2, duration=.5)

    # Finally park the objects offscreen instantly.
    Q.append(obj=left_object, action='move', pos=OFF_SOUTH, duration=0.0)
    Q.append(obj=right_object, action='move', pos=OFF_SOUTH, duration=0.0)

    # What order do we draw sprites and things in?
    # NOTE(review): a single list is passed here, whereas the other trials
    # in this file unpack sprites with OrderedUpdates(*things) -- confirm
    # OrderedUpdates flattens list arguments.
    dos = OrderedUpdates([left_box,left_object,left_boxfront, right_box,right_object,right_boxfront,left_lid,right_lid])  # Draw and update in this order

    #main ticker loop
    for event in kelpy_standard_event_loop(screen, Q, dos, throw_null_events=True):
    # output trial info to csv

        # One row per tick: ids/timing plus is_looked_at() flags for every
        # sprite on both sides.
        writer.writerow([subject, session, trial, i, start_time, time(), objects[0], probabilities[0], in_left, left_box.is_looked_at(), left_object.is_looked_at(), left_lid.is_looked_at(), objects[1],probabilities[1], in_right, right_box.is_looked_at(), right_object.is_looked_at(), right_lid.is_looked_at()])
        print file_header
        #print subject, session, trial, i, start_time, time(), objects[0], probabilities[0], in_left, left_box.is_looked_at(), left_object.is_looked_at(), left_lid.is_looked_at(), objects[1],probabilities[1], in_right, right_box.is_looked_at(), right_object.is_looked_at(), right_lid.is_looked_at()
        if (time() - start_time > MAX_DISPLAY_TIME):
            break

        # If the event is a click:
        #if is_click(event):
         #   break
		
    # need to do a check for exiting here
        if event.type == KEYDOWN:
            if event.key == K_ESCAPE:
                print("escaping now")
                quit()
                # make sure to close the data file when exiting, otherwise it'll hang
                # NOTE(review): quit() raises SystemExit, so the tobii
                # cleanup below appears unreachable -- confirm intended order.
                if not use_tobii_sim:
                    tobii_controller.stop_tracking()
                    tobii_controller.close_data_file()
                    tobii_controller.destroy()
Example #15
0
def present_trial(chosen_for_sequence,window_positions,images,the_blob):
	"""
	Run one lookaway-controlled animation trial.

	Four "window" sprites are placed at window_positions; each entry of
	chosen_for_sequence (apparently (image, position, index) triples --
	confirm against caller) animates its image into a window, pulses it,
	and moves it back offscreen.  The trial ends when the subject looks
	away from the_blob for longer than the lookaway threshold, or when the
	item time runs out.  Spacebar pauses; while the global `paused` flag is
	set, cover_task() runs instead of a trial.

	Assumes module-level globals: screen, tobii_controller, use_tobii_sim,
	paused, IMAGE_SCALE, OFF_LEFT, matched, exp_start,
	chosen_images_and_quadrants, kstimulus, cover_task.
	"""
	#to calculate proportion of looks
	# Placeholders; reassigned to TobiiSprites below when the trial runs.
	w0,w1,w2,w3=0,0,0,0
	# NOTE(review): w0_counter is never used in this function.
	w0_counter = 0
	item_start=time()
	#in order to pause the experiment
	global paused
	if not paused:

		#haven't looked away yet, keep running
		started_lookaway = False

		#if they lookaway for _ seconds
		lookaway_for_time =2.0

		if not use_tobii_sim:
		# start recording the "eye gaze" data
			tobii_controller.start_tracking()

		# A queue of animation operations
		trial_start = time()
		#create the windows as TobiiSprites so they are eye-trackable

		w0 = TobiiSprite(screen, window_positions[0], kstimulus("laura_diss/window.jpg"), scale=.35,tobii_controller=tobii_controller)
		w1 = TobiiSprite(screen, window_positions[1], kstimulus("laura_diss/window.jpg"), scale=.35,tobii_controller=tobii_controller)
		w2 = TobiiSprite(screen, window_positions[2], kstimulus("laura_diss/window.jpg"), scale=.35,tobii_controller=tobii_controller)
		w3 = TobiiSprite(screen, window_positions[3], kstimulus("laura_diss/window.jpg"), scale=.35,tobii_controller=tobii_controller)
		# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #

		# Rebind the images parameter to sprites, all parked offscreen left;
		# the sequence loop below moves them by index (value[2]).
		images = [
		CommandableImageSprite(screen, OFF_LEFT, images[0], scale=IMAGE_SCALE, name="0"),
		CommandableImageSprite(screen, OFF_LEFT, images[1], scale=IMAGE_SCALE,name="1"),
		CommandableImageSprite(screen, OFF_LEFT, images[2], scale=IMAGE_SCALE,name="2"),
		CommandableImageSprite(screen, OFF_LEFT, images[3], scale=IMAGE_SCALE,name="3")]

		# NOTE(review): img is created but never used afterwards (the
		# commented-out line in the loop below suggests it is a leftover).
		img = CommandableImageSprite(screen, OFF_LEFT,chosen_for_sequence[0][0] , scale=IMAGE_SCALE)

		#create the display Queue and queue up all the images
		Q = DisplayQueue()

		# Queue the whole animation sequence up front: for each item, wait,
		# jump to its window position, pulse (circlescale), then park offscreen.
		for value in chosen_for_sequence:


			#img = CommandableImageSprite(screen, OFF_LEFT, value[0], scale=IMAGE_SCALE)
			position = value[1]
			#the animation
			Q.append(obj=images[value[2]], action='wait', duration=1.0)
			Q.append(obj=images[value[2]], action='move', pos=position, duration=0.0)
			Q.append(obj=images[value[2]],action='circlescale',duration=1, amount=.08, period=1)
			Q.append(obj=images[value[2]],action='move',pos=OFF_LEFT,duration=0.0)

		dos = OrderedUpdates(the_blob,w0,w1,w2,w3,images)
		# What order do we draw sprites and things in?
		# Draw and update in this order



		for event in kelpy_standard_event_loop(screen,Q, dos, throw_null_events=True):
			#update
			pygame.display.flip()
			#record the active box
			# Append (elapsed time, active object name) to the `matched` log file.
			print >> open(matched,'a'),[time()-exp_start,Q.get_obj().get_name()]

			#tobii_controller.record_event(Q.get_obj().get_name())


			#can stop the experiment on a click event
			if is_click(event):
				if not use_tobii_sim:
					tobii_controller.stop_tracking()
					tobii_controller.close_data_file()
					tobii_controller.destroy()
					quit()
				quit()

				# NOTE(review): unreachable -- quit() above raises SystemExit
				# before this check can run; confirm intended behavior.
				if event.type == pygame.QUIT:
					pygame.quit()
					return

			#item animation time has run out, move on
			# NOTE(review): uses the global chosen_images_and_quadrants, not
			# the chosen_for_sequence parameter -- confirm these match.
			if time()-item_start >len(chosen_images_and_quadrants)*2:
				break

			#the kind of complicated lookaway stuff
			if not the_blob.is_looked_at():
				if not started_lookaway:
					print "you started looking away"
					#start tracking how long they are looking away for
					started_lookaway_time = time()
					started_lookaway = True
					print time()-started_lookaway_time

				#if they have looked away for longer than lookaway_for_time, it "counts" as a lookaway
				if (time() - started_lookaway_time > lookaway_for_time):
					print "HEY YOURE NOT LOOKING"
					break
			#if they look back at the blob, restart the lookaway clock
			elif the_blob.is_looked_at():
				started_lookaway=False

			if is_space_pressed():
				paused=True




		#can unpause by pressing spacebar
		while paused:
			for event in pygame.event.get():
				if event.type==KEYUP:
					if event.key==K_SPACE:
						paused = False


	else:
		#COVERTASK! (will run when sequence == 5)
		cover_task()
Example #16
0
#import kelpy.ScreenVideoRecorder

##############################################
## Set up pygame

screen = initialize_kelpy(fullscreen=False)

WINDOW_WIDTH, WINDOW_HEIGHT = screen.get_size()
CENTER_STAGE = (WINDOW_WIDTH / 2, WINDOW_HEIGHT / 2)

## Two sprites stacked at center stage: the probe car and a fixation cross.
probe = CommandableImageSprite(screen,
                               CENTER_STAGE,
                               kstimulus("cars/beetle.png"),
                               scale=0.5)
fixation = CommandableImageSprite(screen,
                                  CENTER_STAGE,
                                  kstimulus("fixation/blue_cross.png"),
                                  scale=0.3)
ou = OrderedUpdates(probe, fixation)

## Script the animation: wait, gradually darken the probe, wait some more,
## then throw the exit event to end the standard loop below.
Q = DisplayQueue()
Q.append(obj=probe, action='wait', duration=2)
Q.append(obj=probe, action='darken', amount=0.05, duration=3.0)
Q.append(obj=probe, action='wait', duration=3)
Q.append(obj=probe, action='wait', duration=5)
Q.append(obj='throw_event',
         event=pygame.event.Event(EXIT_KELPY_STANDARD_EVENT_LOOP))

## BUGFIX: was "kely_standard_event_loop" (NameError at runtime); the loop
## runs until the EXIT_KELPY_STANDARD_EVENT_LOOP event queued above fires.
for event in kelpy_standard_event_loop(screen, Q, ou):
    pass
Example #17
0
def present_trial(car_paths, animal_paths):
    """
    Run a single two-alternative forced-choice trial.

    car_paths, animal_paths -- lists of image file paths. One image is
    drawn at random from each list; the animal is always the "correct"
    choice.

    Returns a tuple:
        (was_correct, car_file, car_position, animal_file,
         animal_position, trial_time_seconds)
    """
    ## Pick one random image index from each list.
    pick1 = randint(0, len(car_paths) - 1)
    pick2 = randint(0, len(animal_paths) - 1)

    ## Randomize which offscreen position each image starts from, so they
    ## appear to enter from random directions.
    shuffle(offscreen_positions)

    ## Create the two sprites: screen, start position, image path, scale.
    img = [None] * 2
    img[0] = CommandableImageSprite(screen,
                                    offscreen_positions[0],
                                    car_paths[pick1],
                                    scale=IMAGE_SCALE)
    img[1] = CommandableImageSprite(screen,
                                    offscreen_positions[1],
                                    animal_paths[pick2],
                                    scale=IMAGE_SCALE)

    ## img[1] is always drawn from the animal pool, so it is the target.
    correct = img[1]

    ## Queue of scripted animations.
    Q = DisplayQueue()

    ## Move both images onto shuffled onscreen positions.
    shuffle(onscreen_positions)
    start_time = time()
    Q.append(obj=img[0],
             action='move',
             pos=onscreen_positions[0],
             duration=1.5)
    Q.append(obj=img[1],
             action='move',
             pos=onscreen_positions[1],
             duration=1.5)

    # Draw/update order.
    dos = OrderedUpdates(*img)

    ## Strip the directory from the chosen file paths for logging.
    ## BUGFIX: index the function parameters rather than the module-level
    ## car_images/animal_images -- pick1/pick2 were drawn from the
    ## parameters' lengths, so indexing the globals could go out of range
    ## if the lists ever differ.
    car_used = filename(car_paths[pick1])
    animal_used = filename(animal_paths[pick2])

    ## Record which side the car ended up on; the animal is the opposite.
    if onscreen_positions[0] is ON_LEFT:
        car_position = 'LEFT'
        animal_position = 'RIGHT'
    else:
        car_position = 'RIGHT'
        animal_position = 'LEFT'

    ## Standard kelpy event loop: wait for a click and score it.
    for event in kelpy_standard_event_loop(screen, Q, dos):

        # If the event is a click:
        if is_click(event):

            ## Which sprite (if any) was clicked?
            who = who_was_clicked(dos)
            trial_time = time() - start_time
            if who is correct:
                ## Correct: play the success sound and report the trial.
                play_sound(sound_yup_path, wait=True, volume=7.0)
                return True, car_used, car_position, animal_used, animal_position, trial_time
            else:
                ## Wrong: play the failure sound and report the trial.
                ## (BUGFIX: removed unreachable "break" after the returns.)
                play_sound(sound_nope_path, wait=True, volume=7.0)
                return False, car_used, car_position, animal_used, animal_position, trial_time
Example #18
0
def present_trial(car_paths, animal_paths):
	"""
		This is our main method that we use to run this trial. It accepts two string arrays as it's parameters.
		car_paths 
		animal_paths 
		
		These arrays are shuffled and one image from each of them is randomly picked to be the stimuli for the trial.
		This function makes use of some kelpy classes to display the images and then handle user interaction.
		
	"""
	## First we set up the elements of the trial.
	## The images we will be using are set up like so...
	
	## We pick two random images from the list and assign them to the variables pick1 and pick2.

	pick1 = randint(0, len(car_paths)-1)
	pick2 = randint(0, len(animal_paths)-1)

	## Later this function will create two CommandableImageSprite objects with those two images.
	## We start by first making an empty array to hold the objects.
	img = [None] * 2

	## We shuffle out offscreen values and later assign one of them to each image.
	## This will make them appear to come from random directions when they come on screen.
	shuffle(offscreen_positions)

	## We then create our TobiiSprite objects.
	## These are initialized the same way as CommandableImageSprites, but must also have the TobiiController as an argument and has an optional argument of "is_following" (defaults to False). In this example, since we don't want the images to move with eye gaze, we will leave is_following at its default value
	img[0] = TobiiSprite( screen, offscreen_positions[0], car_paths[pick1], tobii_controller, scale=IMAGE_SCALE)
	img[1] = TobiiSprite( screen, offscreen_positions[1], animal_paths[pick2], tobii_controller, scale=IMAGE_SCALE)

	## We then designate which image is going to be the correct one, and store a reference to that object.
	## in this case the image assigned to img[1] is always drawn from our pool of animal images.
	correct = img[1]

	## This line sets up the display queue (from the kelpy class DisplayQueue(). Think of this as our list of things to happen.
	Q = DisplayQueue()

	#### These next lines are a script of what is to happen in the experiment.

	## We move the two objects in from their start positions offscreen.
	## They are moved to a shuffled position from the onscreen_positions array.
	## Note that we first shuffle the array to randomize the positions.
	shuffle(onscreen_positions)

	Q.append(obj=img[0], action='move', pos= onscreen_positions[0], duration=0.5)
	Q.append(obj=img[1], action='move', pos= onscreen_positions[1], duration=0.5)
	
	# We store the order that we will draw and update things in this variable 'dos'
	dos = OrderedUpdates(*img) 

	## And we take a note of the time that the trial starts with this line.
	## Calling the time() method from the python time library.
	start_time = time()
		
	#####################################################
	##These next few lines are used to print all of our info in a nice and orderly fashion.
	##the python str method rsplit is used to seperate the filename from the rest of the filepath.
	## we simply match everything 1 backslash from the end, it returns both items in a list.
	## we want the last item, so we ask for item 1 from the list. It's repeated for the car and animal images.
	car_used = filename(car_images[pick1])
	animal_used = filename(animal_images[pick2])
	
	###############
	## Then we determine whether the onscreen position for the car was left or right. We deduce that the animal was the opposite.
	## These variables are used later to print the on screen position.
	if  onscreen_positions[0] is spot.west:
		car_position = 'LEFT'
		animal_position = 'RIGHT'
	else:
		car_position = 'RIGHT'
		animal_position = 'LEFT'

	#start tracking
	 tobii_controller.start_tracking()	
	

	## The standard event loop in kelpy -- this loops infinitely to process interactions
	## and throws events depending on what the user does
	trial_time = 5.0
	for event in kelpy_standard_event_loop(screen, Q, dos, throw_null_events=True):
		
		if (time() - start_time > trial_time): 
			print "trial end"
			break

		#this is set specifically for the tobii controller; otherwise the program hangs
		#since the text file is not closed
		if event.type == QUIT:
			 tobii_controller.close_data_file()
			 tobii_controller.destroy()
			
	
	 tobii_controller.stop_tracking()
	
	chosen = None
	max_value = 0.0;
	proportions = looking_proportions(dos, trial_time) 
	print proportions
	
	for item, value in proportions.iteritems():
		if value > max_value:
			chosen = item
			max_value = value 

	if chosen is not None:
		print "chosen: ", filename(chosen.image_path), proportions[chosen]
	else:
		print "nothing chosen"
	
	if chosen is correct:
	## Print whether the correct item was clicked, which car was used, it's position, which animal was used and it's position, and how long the trial took in seconds.
		play_sound(sound_yup_path, wait=True, volume=5.0)
		return True, car_used, car_position, animal_used, animal_position, trial_time
	else:
	## otherwise print the fail sound and print false.
		play_sound(sound_nope_path, wait=True, volume=5.0)
		return False, car_used, car_position, animal_used, animal_position, trial_time
Example #19
0
def present_trial(car_paths, animal_paths):
	"""
	Run a single two-alternative forced-choice trial: show one random car
	and one random animal image and wait for a click. The animal is always
	the "correct" choice.

	car_paths, animal_paths -- lists of image file paths.

	Returns a tuple:
		(was_correct, car_file, car_position, animal_file,
		 animal_position, trial_time_seconds)
	"""
	## Pick one random image index from each list.
	pick1 = randint(0, len(car_paths)-1)
	pick2 = randint(0, len(animal_paths)-1)

	## Randomize start directions so the images enter from random sides.
	shuffle(offscreen_positions)

	## Create the sprites: screen, start position, image path, scale.
	img = [None] * 2
	img[0] = CommandableImageSprite( screen, offscreen_positions[0], car_paths[pick1], scale=IMAGE_SCALE)
	img[1] = CommandableImageSprite( screen, offscreen_positions[1], animal_paths[pick2], scale=IMAGE_SCALE)

	## img[1] always comes from the animal pool, so it is the target.
	correct = img[1]

	## Queue of scripted animations.
	Q = DisplayQueue()

	## Move both images onto shuffled onscreen positions.
	shuffle(onscreen_positions)
	start_time = time()
	Q.append(obj=img[0], action='move', pos= onscreen_positions[0], duration=1.5)
	Q.append(obj=img[1], action='move', pos= onscreen_positions[1], duration=1.5)

	# Draw/update order.
	dos = OrderedUpdates(*img)

	## Strip the directory from the chosen file paths for logging.
	## BUGFIX: index the function parameters rather than the module-level
	## car_images/animal_images -- pick1/pick2 were drawn from the
	## parameters' lengths, so indexing the globals could go out of range.
	car_used = filename(car_paths[pick1])
	animal_used = filename(animal_paths[pick2])

	## Record which side the car ended up on; the animal is the opposite.
	if  onscreen_positions[0] is ON_LEFT:
		car_position = 'LEFT'
		animal_position = 'RIGHT'
	else:
		car_position = 'RIGHT'
		animal_position = 'LEFT'

	## Standard kelpy event loop: wait for a click and score it.
	for event in kelpy_standard_event_loop(screen, Q, dos):

		# If the event is a click:
		if is_click(event):

			## Which sprite (if any) was clicked?
			who = who_was_clicked(dos)
			trial_time = time() - start_time
			if who is correct:
				## Correct: play the success sound and report the trial.
				play_sound(sound_yup_path, wait=True, volume=7.0)
				return True, car_used, car_position, animal_used, animal_position, trial_time
			else:
				## Wrong: play the failure sound and report the trial.
				## (BUGFIX: removed unreachable "break" after the returns.)
				play_sound(sound_nope_path, wait=True, volume=7.0)
				return False, car_used, car_position, animal_used, animal_position, trial_time
Example #20
0
def present_choice_single(images, targetidx):
	"""
	Present a single target image (plus a dim center button) and play the
	target's label recordings on successive clicks, up to CLICK_LIMIT.

	images -- list of image paths.
	targetidx -- sequence whose first element indexes the target's
	image/audio in the module-level stimulus lists.

	Returns a comma-separated log string once the target has been clicked
	past CLICK_LIMIT and then clicked once more.
	"""
	## img[0]: dim center button; img[1]: the clickable target image.
	img = [None] * 2
	img[0] = CommandableImageSprite( screen, spot.center, button_image, scale=.5, brightness=.5)
	img[1] = CommandableImageSprite( screen, double_displayat[1], images[targetidx[0]], scale=QUAD_IMAGE_SCALE)

	# A queue of animation operations
	Q = DisplayQueue()

	timesclicked = 0  ## how many times the target has been clicked

	# Draw/update order.
	dos = OrderedUpdates(*img)

	start_time = time()

	## The three label recordings, cycled over successive clicks
	## (replaces the original six-branch if/elif chain).
	audio_banks = [target_audio, target_audio2, target_audio3]

	## Standard kelpy event loop.
	for event in kelpy_standard_event_loop(screen, Q, dos):

		## Once the click limit is reached, grey-blink the target and pop.
		if timesclicked == CLICK_LIMIT:
			timesclicked += 1
			Q.append(obj=img[1], action='swapblink', position=(1000,400), image=target_images_gray[targetidx[0]], period=.5, duration=0, rotation=0, scale=QUAD_IMAGE_SCALE, brightness=1.0 )
			Q.append(obj='sound', file=kstimulus('sounds/Cheek-Pop.wav'))

		# Handle clicks only when the animation queue is idle.
		if is_click(event) and not Q.commands:

			# Which sprite was clicked?
			whom = who_was_clicked(dos)

			if whom is img[1]:  ## the target image
				if timesclicked > CLICK_LIMIT:
					return "single,"+ str(targetidx[0]) +',' + str(double_displayat[1]).replace(",",";")
				else:
					timesclicked += 1
					## Clicks 1-6 cycle audio1/audio2/audio3; later
					## clicks are silent.
					if 1 <= timesclicked <= 6:
						Q.append(obj='sound', file=audio_banks[(timesclicked - 1) % 3][targetidx[0]])

					## Pulse the image (simultaneous append isn't supported).
					Q.append(obj=img[1], action="scale", amount=1.5, duration=1.0)
					Q.append(obj=img[1], action="scale", amount=(1/1.5), duration=1.0)
Example #21
0
def present_choice_octuple(images, rightid, wrong1, wrong2, wrong3, wrong4, wrong5, wrong6, wrong7):
	"""
	Present eight choice images (one "right", seven "wrong") plus a dim
	center button. Each image plays its label recordings on successive
	clicks up to CLICK_LIMIT, then greys out with a pop sound.

	images -- list of image paths, indexed by the id arguments.
	rightid, wrong1..wrong7 -- stimulus indices.

	Returns a comma-separated log string once every image is clicked out.
	"""
	## Image ids in logging order; slot 0 is the center button.
	guys = [None, rightid, wrong1, wrong2, wrong3, wrong4, wrong5, wrong6, wrong7]

	## Sprites: the dim center button, then the eight choice images laid
	## out at octuple_displayat (replaces eight copy-pasted lines).
	img = [CommandableImageSprite( screen, spot.center, button_image, scale=.5, brightness=.5)]
	for i in range(8):
		img.append(CommandableImageSprite( screen, octuple_displayat[i], images[guys[i + 1]], scale=QUAD_IMAGE_SCALE ))

	outputString = "oct," + ";".join(str(g) for g in guys[1:]) + ",("

	# A queue of animation operations
	Q = DisplayQueue()

	# Draw/update order.
	dos = OrderedUpdates(*img)

	start_time = time()
	clicked = [0] * 9  ## per-image click counters (slot 0 unused)

	## The three label recordings, cycled over successive clicks
	## (replaces the original six-branch if/elif chain).
	audio_banks = [target_audio, target_audio2, target_audio3]

	## Standard kelpy event loop.
	for event in kelpy_standard_event_loop(screen, Q, dos):

		# Handle clicks only when the animation queue is idle.
		if is_click(event) and not Q.commands:

			## All eight images clicked out? Emit the final log string.
			if all(c > CLICK_LIMIT for c in clicked[1:]):
				## BUGFIX: octuple_displayat[6] previously missed the
				## ","->";" replacement (and inserted a stray ";"), unlike
				## the other seven positions.
				return outputString + ")," + "".join(str(p).replace(",", ";") for p in octuple_displayat[:8])

			# Which sprite was clicked?
			whom = who_was_clicked(dos)

			for i in range(1, 9):
				if whom is img[i]:
					if clicked[i] > CLICK_LIMIT:
						pass
					else:
						## Log the clicked image's id in hex.
						outputString += str(format(guys[i], 'x'))
						clicked[i] += 1
						## Clicks 1-6 cycle audio1/audio2/audio3; later
						## clicks are silent.
						if 1 <= clicked[i] <= 6:
							Q.append(obj='sound', file=audio_banks[(clicked[i] - 1) % 3][guys[i]])

						## Pulse the image (simultaneous append isn't supported).
						Q.append(obj=img[i], action="scale", amount=1.5, duration=1.0)
						Q.append(obj=img[i], action="scale", amount=(1/1.5), duration=1.0)
						if clicked[i] == CLICK_LIMIT:
							clicked[i] += 1
							Q.append(obj=img[i], action='swapblink', position=(1000,400), image=target_images_gray[guys[i]], period=.5, duration=0, rotation=0, scale=QUAD_IMAGE_SCALE, brightness=1.0 )
							Q.append(obj='sound', file=kstimulus('sounds/Cheek-Pop.wav'))
Example #22
0
def present_choice_double(images, rightid, wrongid):
	"""
	Present two choice images (one "right", one "wrong") plus a dim center
	button. Each image plays its label recordings on successive clicks up
	to CLICK_LIMIT, then greys out with a pop sound.

	images -- list of image paths, indexed by the id arguments.
	rightid, wrongid -- stimulus indices.

	Returns a comma-separated log string once both images are clicked out.
	"""
	## Image ids in logging order; slot 0 is the center button.
	guys = [None, rightid, wrongid]

	## Sprites: the dim center button, then the two choice images.
	img = [None] * 3
	img[0] = CommandableImageSprite( screen, spot.center, button_image, scale=.5, brightness=.5)
	img[1] = CommandableImageSprite( screen, double_displayat[0], images[rightid], scale=QUAD_IMAGE_SCALE)
	img[2] = CommandableImageSprite( screen, double_displayat[1] , images[wrongid], scale=QUAD_IMAGE_SCALE)

	# A queue of animation operations
	Q = DisplayQueue()

	# Draw/update order.
	dos = OrderedUpdates(*img)

	start_time = time()
	clicked = [0] * 3  ## per-image click counters (slot 0 unused)
	outputString = "double," + str(rightid) + ";" + str(wrongid) +  ",("

	## The three label recordings, cycled over successive clicks
	## (replaces the original six-branch if/elif chain).
	audio_banks = [target_audio, target_audio2, target_audio3]

	## Standard kelpy event loop.
	for event in kelpy_standard_event_loop(screen, Q, dos):

		# Handle clicks only when the animation queue is idle.
		if is_click(event) and not Q.commands:
			## Both images clicked out? Emit the final log string.
			if clicked[1] > CLICK_LIMIT and clicked[2] > CLICK_LIMIT:
				return outputString + "),"  + str( double_displayat[0] ).replace(",",";")+ str(double_displayat[1]).replace(",",";")

			# Which sprite was clicked?
			whom = who_was_clicked(dos)
			for i in range(1, 3):
				if whom is img[i]:
					if clicked[i] > CLICK_LIMIT:
						pass
					else:
						## Log the clicked image's id in hex.
						outputString += str(format(guys[i], 'x'))
						clicked[i] += 1
						## Clicks 1-6 cycle audio1/audio2/audio3; later
						## clicks are silent.
						if 1 <= clicked[i] <= 6:
							Q.append(obj='sound', file=audio_banks[(clicked[i] - 1) % 3][guys[i]])

						## Pulse the image (simultaneous append isn't supported).
						Q.append(obj=img[i], action="scale", amount=1.5, duration=1.0)
						Q.append(obj=img[i], action="scale", amount=(1/1.5), duration=1.0)
						if clicked[i] == CLICK_LIMIT:
							clicked[i] += 1
							Q.append(obj=img[i], action='swapblink', position=(1000,400), image=target_images_gray[guys[i]], period=.5, duration=0, rotation=0, scale=QUAD_IMAGE_SCALE, brightness=1.0 )
							Q.append(obj='sound', file=kstimulus('sounds/Cheek-Pop.wav'))
Example #23
0
def display_naming_scene( screen, images, seeds , sixteen_displayat, SCALE):
	"""
	Show fifteen creature images at shuffled positions, play a "find X"
	prompt for each entry of seeds in turn, and log which image the
	participant clicks and how long it took.

	screen -- the kelpy display surface.
	images -- list of 15 creature image paths.
	seeds -- ordered list of creature indices to prompt for.
	sixteen_displayat -- candidate display positions (shuffled in place).
	SCALE -- scale applied to every creature sprite.

	Returns the accumulated log string once all seeds are exhausted;
	double-clicking the transparent background ends the scene early.
	"""
	shuffle( sixteen_displayat )
	##Filepath relative to this file; the prompts live in the stimuli folder.
	faudio = os.path.dirname( __file__ )+"/stimuli/audio/find/"

	## "find_<name>.wav" prompt for each creature, in stimulus order
	## (replaces 15 copy-pasted path literals).
	creature_names = [
		"beppo", "deela", "finna", "guffi", "higoo",
		"kogay", "lahdo", "mobi", "nadoo", "pavy",
		"roozy", "soma", "tibble", "vaylo", "zefay",
	]
	find_audio = [faudio + "find_" + name + ".wav" for name in creature_names]

	transparent_button = os.path.dirname( __file__ )+"/stimuli/transparent.png"

	## img[0] is a full-screen transparent background button; img[1..15]
	## are the creatures (replaces 15 copy-pasted constructor lines).
	img = [CommandableImageSprite( screen, (0,0), transparent_button, scale=1.0)]
	for i in range(15):
		img.append(CommandableImageSprite( screen, sixteen_displayat[i], images[i], scale=SCALE ))

	Q = DisplayQueue()

	## NOTE(review): dos is built from the list itself here, unlike the
	## *img unpacking used elsewhere in this file -- preserved as-is.
	dos = OrderedUpdates(img)
	double_click = 0
	finished = False
	clicked = 0  ## index into seeds of the current prompt
	pickthisone = seeds[0]

	output_string = ""

	## Play the first prompt and start the reaction timer.
	Q.append(obj='sound', file= find_audio[pickthisone])
	timer = time()
	for event in kelpy_standard_event_loop(screen, Q, dos):

		# Handle clicks only when the animation queue is idle.
		if is_click(event) and not Q.commands:
			if finished:
				break
			whom = who_was_clicked(dos)

			if whom is img[0]:
				## Two clicks on the background end the scene early.
				double_click += 1
				if double_click > 1:
					finished = True
					break

			if whom is not img[0] and whom is not None:
				## Log: prompted seed, clicked creature index, reaction time.
				for i in range (0 , len(img)):
					if whom is img[i]:
						output_string += "{NS: " + str( seeds[clicked] ) + ',' + str( i-1 ) + "," + str(time() - timer ) + "} , "
				clicked += 1
				if  clicked > len( seeds )-1 :
					## All seeds done: return the accumulated log.
					finished = True
					return output_string
				else:
					## Prompt for the next seed and restart the timer.
					Q.append(obj='sound', file= find_audio[ seeds[clicked] ], wait=True )
					timer = time()

					pickthisone = clicked
Example #24
0
def present_no_choice_single(images, targetidx):
	"""
	Present a single target image plus a center button. Clicking plays the
	target's label recordings on successive clicks up to CLICKED_TIMES,
	then greys out the image; one further click returns the log string.

	images -- list of image paths.
	targetidx -- index of the target's image/audio in the module-level
	stimulus lists.

	Returns a comma-separated log string.
	"""
	## img[0]: center button; img[1]: the target image.
	img = [None] * 2
	img[0] = CommandableImageSprite( screen, spot.center, button_image, scale=.5)
	img[1] = CommandableImageSprite( screen, double_displayat[1], images[targetidx], scale=QUAD_IMAGE_SCALE)

	## Log string reported when the trial finishes.
	output_string = str( targetidx ) +',' + "{SINGLE}" +','+ str( spot.center ).replace(",", ";")

	# A queue of animation operations
	Q = DisplayQueue()

	# Draw/update order.
	dos = OrderedUpdates(*img)

	start_time = time()
	finished = False
	clicked = [0] * 2  ## click counter lives in clicked[1]

	## The three label recordings, cycled over successive clicks
	## (replaces the original six-branch if/elif chain).
	audio_banks = [target_audio1, target_audio2, target_audio3]

	## Standard kelpy event loop.
	for event in kelpy_standard_event_loop(screen, Q, dos):

		# Handle clicks only when the animation queue is idle.
		if is_click(event) and not Q.commands:
			if finished:
				return output_string

			# Which sprite was clicked?
			whom = who_was_clicked(dos)

			## NOTE(review): this tests img[0] (the button) while animating
			## img[1]; the similar present_choice_single tests the image
			## itself. Preserved as-is -- confirm the intent.
			if whom is img[0]:
				if clicked[1] > CLICKED_TIMES:
					pass
				else:
					clicked[1] += 1
					## Clicks 1-6 cycle audio1/audio2/audio3; later
					## clicks are silent.
					if 1 <= clicked[1] <= 6:
						Q.append(obj='sound', file=audio_banks[(clicked[1] - 1) % 3][targetidx])

					## Pulse the image (simultaneous append isn't supported).
					Q.append(obj=img[1], action="scale", amount=1.5, duration=1.0)
					Q.append(obj=img[1], action="scale", amount=(1/1.5), duration=1.0)
					if clicked[1] == CLICKED_TIMES:
						clicked[1] += 1
						Q.append(obj=img[1], action='swapblink', position=(1000,400), image=target_images_gray[targetidx], period=.5, duration=0, rotation=0, scale=QUAD_IMAGE_SCALE, brightness=1.0 )
						Q.append(obj='sound', file=kstimulus('sounds/Cheek-Pop.wav'))
						finished = True
Example #25
0
def present_trial(image1, image2, image3, theblicket):
    """
		This is the main function used to run this demo. It is fed an imagepath and uses this to create a CommandableImageSprite offscreen. This Sprite is later moved onto the screen, where it hangs out until it is clicked.

	"""
    #####
    ## First we create all of our objects, a bunch of DragSprites and a DropSprite.

    ## one of our sprites has been designated as the Blicket, it will always be the last filepath passed into the function.
    ## This should make it easier when I (or you) implement a csv reader to input the stimuli later.

    ## The positions are randomized before the trial starts, so they are not always in the same place.
    ## NOTE: Right now, this version does not have a demonstration of the blicket.
    thing1 = DragSprite(screen, display_spots[0], image1, scale=IMAGE_SCALE)
    thing2 = DragSprite(screen, display_spots[1], image2, scale=IMAGE_SCALE)
    thing3 = DragSprite(screen, display_spots[2], image3, scale=IMAGE_SCALE)
    blicket = DragSprite(screen,
                         display_spots[3],
                         theblicket,
                         scale=IMAGE_SCALE)
    blicket_detector = DropSprite(screen,
                                  BLICKET_DETECTOR_POSITION,
                                  blicketd_image_path,
                                  scale=.5)

    ## then we stick those in a list so we can add them to the ordered update list and have them appear on the screen.
    things = [thing1, thing2, thing3, blicket, blicket_detector]
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # Set up the updates, etc.
    # A queue of animation operations
    Q = DisplayQueue()

    ############################
    ## Register the drop zone in each sprite's list of Drop Zones.
    ###	If the sprite is dropped onto this zone, it will send out a ZONE_EVENT into the event loop. We can then pick this event up using some handy functions in the EventHandler class.
    for i in xrange(4):
        things[i].register_drop_zone(blicket_detector)

    # What order do we draw sprites and things in?
    dos = OrderedUpdates(*things)  # Draw and update in this order
    start_time = time()

    ##These functions are used to blink the blicket detector on or off, two different versions depending on whether the subject gets the right object or not.

    def blink_detector_off():
        Q.append(obj=blicket_detector,
                 action='swap',
                 image=kstimulus('feature_tvs/screen_inactive.png'),
                 start_time=0,
                 rotation=0,
                 scale=.5,
                 brightness=1.0)

    def blink_detector_right():
        Q.append(obj=blicket_detector,
                 action='swap',
                 image=kstimulus('feature_tvs/screen_active_star.png'),
                 rotation=0,
                 scale=.499999999,
                 brightness=1.0)
        Q.append(obj=blicket_detector, action='wait', duration=1.0)
        blink_detector_off()

    def blink_detector_wrong():
        Q.append(obj=blicket_detector,
                 action='swap',
                 image=kstimulus('feature_tvs/screen_active.png'),
                 rotation=0,
                 scale=.5,
                 brightness=1.0)
        Q.append(obj=blicket_detector, action='wait', duration=.4)
        blink_detector_off()

    ## The standard event loop in kelpy -- this loops infinitely to process interactions
    ## and throws events depending on what the user does
    for event in kelpy_standard_event_loop(screen,
                                           Q,
                                           dos,
                                           throw_null_events=True):

        ## To make the DragSprites dragable, we use a loop to check over all the dragable items in the things list.
        for i in xrange(4):
            ## all the is required to run the process_dragndrop function during each eventloop cycle.
            things[i].process_dragndrop(event)

        ######
        ## Then we use the next two functions to check if a Zone_Event signals that we have dropped onto a drop zone (so something would need to happen!)
        ##
        if was_dropped_into_zone(event):
            #########
            ## Check who was dropped, whether it was the thing we wanted (which it undoubtedly will be in this example...)
            who = who_was_dropped(event)
            if who is blicket:
                ## Then play a sound! Huzzah!
                play_sound(kstimulus('sounds/Bing.wav'), wait=False)
                blink_detector_right()
                print True, time() - start_time, filename(theblicket)
            else:
                ## You have failed to detect the blicket, therefore you get a red blinky light and a buzzer noise.
                blink_detector_wrong()
                play_sound(kstimulus('sounds/Bad_Pick.wav'), wait=False)
                print False, time() - start_time, filename(who.image_path)
Example #26
0
def present_no_choice_octuple(images, rightid, wrong1, wrong2, wrong3, wrong4, wrong5, wrong6, wrong7, order):
    """
    Present eight images plus a center button; each press of the button plays
    the next label sound (stimulus chosen by `order`) for the corresponding
    image.  After CLICKED_TIMES presses an image blinks gray and is retired.
    Once all eight are retired, the next click returns the trial log string.
    """
    ## guys[1..8] are the stimulus ids; index 0 is a placeholder so our
    ## internal indices line up with img[] and clicked[].
    guys = [None, rightid, wrong1, wrong2, wrong3, wrong4, wrong5, wrong6, wrong7]
    totalclicks = -1

    ## img[0] is the center button; img[k] (k=1..8) shows images[guys[k]]
    ## at the k-th octuple display position.
    img = [CommandableImageSprite( screen, spot.center, button_image, scale=.5)]
    for k in xrange(1, 9):
        img.append(CommandableImageSprite( screen, octuple_displayat[k - 1], images[guys[k]], scale=QUAD_IMAGE_SCALE))

    output_string = "octuple,"
    output_string += ";".join(str(g) for g in guys[1:]) + ",("

    def guytonumber(person):
        ## Map a stimulus id from the master order list onto our internal
        ## 1..8 index (checks guys[1]..guys[8] in order, like the original
        ## elif chain).
        for k in xrange(1, 9):
            if person == guys[k]:
                return k
        print "error, something went super wrong"

    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # Set up the updates, etc.

    # A queue of animation operations, and the draw/update order.
    Q = DisplayQueue()
    dos = OrderedUpdates(*img)

    start_time = time()
    finished = False
    clicked = [0] * 9

    # the three label recordings cycle as an image accumulates presses
    label_sounds = [target_audio1, target_audio2, target_audio3]

    ## The standard event loop in kelpy -- this loops infinitely to process
    ## interactions and throws events depending on what the user does.
    for event in kelpy_standard_event_loop(screen, Q, dos):

        # Only react to clicks once the animation queue has drained.
        if is_click(event) and not Q.commands:
            if finished:  ## second click after completion -> move on
                return output_string

            # check if each of our images was clicked
            whom = who_was_clicked( dos )

            if whom is img[0]:  ## which is the button btw
                if all(c > CLICKED_TIMES for c in clicked[1:]):
                    pass  # every image already retired; nothing left to do
                else:
                    totalclicks = totalclicks + 1
                    ## convert the id from the master order list to our
                    ## internal index
                    index = guytonumber(order[totalclicks])
                    clicked[index] = clicked[index] + 1
                    output_string += str( format( order[totalclicks], "x" ) )

                    # presses 1..6 cycle audio1, audio2, audio3, audio1, ...
                    if 1 <= clicked[index] <= 6:
                        Q.append(obj='sound', file=(label_sounds[(clicked[index] - 1) % 3][guys[index]]) )

                    Q.append(obj=img[index], action="scale", amount=1.5, duration=1.0)  ##append simultaneous doesn't work : (
                    Q.append(obj=img[index], action="scale", amount=(1/1.5), duration=1.0)

                    if clicked[index] == CLICKED_TIMES:
                        ## retire this image: gray swap-blink plus pop sound
                        clicked[index] = clicked[index] + 1
                        Q.append(obj=img[index], action='swapblink', position=(1000,400), image=target_images_gray[guys[index]], period=.5, duration=0, rotation=0, scale=QUAD_IMAGE_SCALE, brightness=1.0 )
                        Q.append(obj='sound', file=kstimulus('sounds/Cheek-Pop.wav'))
                        if all(c > CLICKED_TIMES for c in clicked[1:]):
                            ## everything retired: close out the log string
                            finished = True
                            output_string += "),"
                            for pos in octuple_displayat[:8]:
                                output_string += str(pos).replace(",", ";")
def present_trial(screen, object_type, object_color, object_pattern, functions, training=True, pre=""):
	
	
	Q = DisplayQueue();	

	in_object_path = kstimulus("feature_cars/"+object_type +"_"+ object_color +"_"+ object_pattern +".png")
	possible_answers = [kstimulus("feature_patches/blue_circles.png"), 
	                    kstimulus("feature_patches/blue_stars.png"),
	                    kstimulus("feature_patches/red_circles.png"), 
	                    kstimulus("feature_patches/red_stars.png")]
	
	out_object_color = object_color # These are altered below to create the new object
	out_object_pattern = object_pattern
	
	# now figure out what the right output should be!
	for f in functions:
		if f == "red": out_object_color = "red"
		elif f == "blue": out_object_color = "blue"
		elif f == "circles": out_object_pattern = "circles"
		elif f == "stars": out_object_pattern = "stars"
		else: print "*** Error bad object function"+functions+"\t"+f
	
	out_object_path = kstimulus("cars/"+object_type +"_"+ out_object_color +"_"+ out_object_pattern +".png")
	correct1_string = kstimulus("cars/"+object_color+"_"+object_pattern+".png") # the right answer
	correct2_string = kstimulus("cars/"+out_object_color+"_"+out_object_pattern+".png") # the right answer
	
	# now make a display screen for each function
	TIME_TO_GET_TO_RESPONSE1 = 1.5
	TIME_BETWEEN_SCREENS = 1.0 # this is how long it takes to move between screens (And to the first one after the first response)
	TIME_TO_APPLY_FUNCTION = 2.0 # each screen takes this long to apply its function
	REVEAL_TIME = 0.5
	
	displayscreens = [None]*len(functions)
	screen_active = [None]*len(functions)
	screen_inactive = [None]*len(functions)
	for fi in range(len(functions)):
		screen_inactive[fi] = kstimulus("feature_tvs/screen_inactive.png")# blink *off* when active. Bizarre, but this leaves it up
		screen_active[fi] = kstimulus("feature_tvs/screen_active_"+functions[fi]+".png") # store an array of images for when the screen activates
		displayscreens[fi] = CommandableImageSprite( screen, BLOCKER_POSITION, screen_inactive[fi], scale=SCREEN_SCALE)
	
	## arrange all the display screens horizontally
	displayscreens.reverse() # do this so that the order is correct -- left to right in function args is R-L on screen
	Arranger.ArrangeHorizontal(displayscreens, BLOCKER_X, BLOCKER_Y, pad=0)
	displayscreens.reverse()
	
	# Set up the sounds
	if training:
		correct_sound = random.choice(correct_sounds)
		incorrect_sound = random.choice(incorrect_sounds)
	else:
		correct_sound = random.choice(test_sounds)
		incorrect_sound = correct_sound
	
	
	# The image click responses
	if len(functions) == 1:
		pre = pre + functions[0] + "\tNA" + "\t" + q(in_object_path)
	else:
		pre = pre + functions[0] + "\t" + functions[1] + "\t" + q(in_object_path)
	
	response1 = ImageClickResponse( screen, RESPONSE1_POSITION, possible_answers, scale=0.50, correct=correct1_string, feedback=True, pre="R1\t"+pre, correct_sound=random.choice(correct_sounds), incorrect_sound=random.choice(incorrect_sounds)) ## Edited here to always play correct and incorrect regardless of training condition
	response1.start()
	response2 = ImageClickResponse( screen, RESPONSE2_POSITION, possible_answers, scale=0.50, correct=correct2_string, feedback=training, pre="R2\t"+pre, correct_sound=correct_sound, incorrect_sound=incorrect_sound)
	response2.start()
	
	# the object
	myobj = CommandableImageSprite(screen, OBJECT_START_POSITION, in_object_path, scale=TRUCK_SCALE)
	
	# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
	# Now set up all the motions
	
	# move to the blocker's height, below RESPONSE1
	Q.append(obj=myobj, action='wagglemove', duration=TIME_TO_GET_TO_RESPONSE1, period=1., amount=1.25, pos=[RESPONSE1_X, BLOCKER_Y+48]) 
	Q.append(obj=response1, action='respond', duration=Infinity)  # duration=Infinity in order to keep this waiting as long as necessary
	
	#Q.append(obj=response1, action='hide') # After ms_nov7, we do NOT hide in order to make things better
	
	for fi in range(len(functions)):
		Q.append(obj=myobj, action='wagglemove', duration=TIME_BETWEEN_SCREENS, period=1., amount=0.25, pos=[displayscreens[fi].get_x()-20, displayscreens[fi].get_y()+38])
		Q.append(obj=myobj, action='waggle', duration=TIME_TO_APPLY_FUNCTION, period=0.5, amount=1.2)
		
		Q.append_simultaneous(obj=displayscreens[fi], action='swapblink', image=screen_active[fi], scale=SCREEN_SCALE, rotation=0, duration=TIME_TO_APPLY_FUNCTION, period=0.5)
		
		Q.append(obj=myobj, action='restore')
		
		# and leave the screen on
		Q.append(obj=displayscreens[fi], action='swap', image=screen_active[fi], rotation=0, scale=SCREEN_SCALE)
		
	
	Q.append(obj=response2, action='respond', duration=Infinity)  # duration=Infinity in order to keep this waiting as long as necessary
	
	# in training, reveal the object
	if training:
		Q.append(obj=myobj, action='swap', image=out_object_path, rotation=0, scale=TRUCK_SCALE)
		
		for fi in range(len(functions)):
			Q.append(obj=displayscreens[fi], action='move', pos=[displayscreens[fi].get_x(), -500], duration=1.0)
		
		Q.append(obj=myobj, action='wait', duration=2.0)
		
	# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
	# Set up the updates, etc. 
	
	# NOTE: this controls the z-depth
	ou = OrderedUpdates(myobj, response1, response2, *displayscreens)

	for event in kelpy_standard_event_loop(screen, Q, ou, bottom_smile_updates): 
		if (  event.type == pygame.MOUSEBUTTONDOWN and pygame.mouse.get_pressed()[0]):
				for x in dos: x.clickme(pygame.mouse.get_pos()) # everyone gets to process this:
		if event.type == QUIT: quit()
		if event.type == KEYDOWN and event.key == K_ESCAPE: quit()
Example #28
0
def open_box(object, box, probability, trial, writer, BGCOLOR, lookaway_start_time,TRIAL_START):
    print TRIAL_START
    pygame.display.set_mode((1400,900),pygame.FULLSCREEN)
    play_sound(kstimulus('music/hothothot.wav'))
    start_time = time()
    lookaway_time = 2.0

    started_looking_away = False
    screen.fill(BGCOLOR)
    box =  TobiiSprite( screen, spot.center, box, tobii_controller, scale=BOX_SCALE)
    object = TobiiSprite( screen, spot.south, object,tobii_controller, scale=IMAGE_SCALE)
    blankbox = TobiiSprite( screen, spot.center, '../../kelpy/stimuli/misc/blankbox.png', tobii_controller, scale=BOX_SCALE)

    Q = DisplayQueue()

    Q.append(obj=box, action='wait', duration=1)


    for i in range(1000):
        #with certain probability, reveal object:
        flip = random.random()
        #print flip
        if  flip < probability:
            Q.append_simultaneous(obj=object, action='move', pos=spot.center, duration=0)
            present = True
        Q.append(obj=box, action='move', pos=spot.north,duration=1)
        Q.append(obj=box, action='wait',duration=.25)
        Q.append(obj=box, action='move', pos=spot.center,duration=1)
        Q.append(obj=box, action='wait',duration=.25)
        Q.append_simultaneous(obj=object, action='move', pos=spot.north, duration=0)




    dos = OrderedUpdates([blankbox, object, box ])






    #main ticker loop
    for event in kelpy_standard_event_loop(screen, Q, dos, throw_null_events=True):
    # output trial info to csv
        pygame.draw.rect(screen,(0,0,0),((screen.get_width() /2 )-200, (screen.get_height() /2 )-200,400,400),4)
        pygame.draw.rect(screen,BGCOLOR,((screen.get_width() /2 )-200, (screen.get_height() /2 )-601,400,400),0)
        pygame.draw.rect(screen,BGCOLOR,((screen.get_width() /2 )-200, (screen.get_height() /2 )+201,400,400),0)
        pygame.display.update()
        #writer.writerow([subject, session, trial, i, start_time, time(), objects[0], probabilities[0], in_left, left_box.is_looked_at(), left_object.is_looked_at(), left_lid.is_looked_at(), objects[1],probabilities[1], in_right, right_box.is_looked_at(), right_object.is_looked_at(), right_lid.is_looked_at()])
        #print file_header
        #print subject, session, trial, i, start_time, time(), objects[0], probabilities[0], in_left, left_box.is_looked_at(), left_object.is_looked_at(), left_lid.is_looked_at(), objects[1],probabilities[1], in_right, right_box.is_looked_at(), right_object.is_looked_at(), right_lid.is_looked_at()
        if (time() - start_time > MAX_DISPLAY_TIME):
            break


        if not blankbox.is_looked_at():
            if not started_looking_away:
                lookaway_start_time = time()
                started_looking_away = True
                #check if they've looked away for long enough
            if (time() - lookaway_start_time > lookaway_time):
                final_time = time() - TRIAL_START
                print str(final_time) + "is final time!!!!!!!!!!!"
                break #or whatever else you need to do at the end of the trial

        elif blankbox.is_looked_at():
		    started_looking_away = False #resets timer
        # If the event is a click:
        #if is_click(event):
         #   break

    # need to do a check for exiting here
        if event.type == KEYDOWN:
            if event.key == K_ESCAPE:
                print("escaping now")
                quit()
                # make sure to close the data file when exiting, otherwise it'll hang
                if not use_tobii_sim:
                    tobii_controller.stop_tracking()
                    tobii_controller.close_data_file()
                    tobii_controller.destroy()
            if event.key == K_p:

                pygame.mixer.music.pause()
                sleep(3)
                pygame.mixer.music.unpause()
Example #29
0
from kelpy.CommandableImageSprite import *
from kelpy.DisplayQueue import *
from kelpy.Miscellaneous import *
from kelpy.OrderedUpdates import *

## To record video of a display, just import this:
#import kelpy.ScreenVideoRecorder

##############################################
## Set up pygame

screen = initialize_kelpy(fullscreen=False) 

WINDOW_WIDTH, WINDOW_HEIGHT = screen.get_size()
CENTER_STAGE  = ( WINDOW_WIDTH/2, WINDOW_HEIGHT/2)

# A car probe with a fixation cross drawn on top of it, both centered.
probe  = CommandableImageSprite( screen, CENTER_STAGE, kstimulus("cars/beetle.png"), scale=0.5)
fixation  = CommandableImageSprite( screen, CENTER_STAGE, kstimulus("fixation/blue_cross.png"), scale=0.3)
ou = OrderedUpdates(probe, fixation)

# Queue: wait 2s, darken the probe over 3s, hold for 3s + 5s, then throw the
# event that exits the standard event loop.
Q = DisplayQueue()
Q.append(obj=probe, action='wait',   duration=2)
Q.append(obj=probe, action='darken', amount=0.05, duration=3.0)
Q.append(obj=probe, action='wait',   duration=3)
Q.append(obj=probe, action='wait',   duration=5)
Q.append(obj='throw_event', event=pygame.event.Event(EXIT_KELPY_STANDARD_EVENT_LOOP))

# BUGFIX: was `kely_standard_event_loop` (typo), a NameError at runtime;
# the loop used throughout kelpy is `kelpy_standard_event_loop`.
for event in kelpy_standard_event_loop(screen, Q, ou):
    pass
def present_trial(screen, object_type, object_color, object_pattern, functions, training=True, pre=""):

    Q = DisplayQueue()

    in_object_path = kstimulus("feature_cars/" + object_type + "_" + object_color + "_" + object_pattern + ".png")
    possible_answers = [
        kstimulus("feature_patches/blue_circles.png"),
        kstimulus("feature_patches/blue_stars.png"),
        kstimulus("feature_patches/red_circles.png"),
        kstimulus("feature_patches/red_stars.png"),
    ]

    out_object_color = object_color  # These are altered below to create the new object
    out_object_pattern = object_pattern

    # now figure out what the right output should be!
    for f in functions:
        if f == "red":
            out_object_color = "red"
        elif f == "blue":
            out_object_color = "blue"
        elif f == "circles":
            out_object_pattern = "circles"
        elif f == "stars":
            out_object_pattern = "stars"
        else:
            print "*** Error bad object function" + functions + "\t" + f

    out_object_path = kstimulus("cars/" + object_type + "_" + out_object_color + "_" + out_object_pattern + ".png")
    correct1_string = kstimulus("cars/" + object_color + "_" + object_pattern + ".png")  # the right answer
    correct2_string = kstimulus("cars/" + out_object_color + "_" + out_object_pattern + ".png")  # the right answer

    # now make a display screen for each function
    TIME_TO_GET_TO_RESPONSE1 = 1.5
    TIME_BETWEEN_SCREENS = (
        1.0
    )  # this is how long it takes to move between screens (And to the first one after the first response)
    TIME_TO_APPLY_FUNCTION = 2.0  # each screen takes this long to apply its function
    REVEAL_TIME = 0.5

    displayscreens = [None] * len(functions)
    screen_active = [None] * len(functions)
    screen_inactive = [None] * len(functions)
    for fi in range(len(functions)):
        screen_inactive[fi] = kstimulus(
            "feature_tvs/screen_inactive.png"
        )  # blink *off* when active. Bizarre, but this leaves it up
        screen_active[fi] = kstimulus(
            "feature_tvs/screen_active_" + functions[fi] + ".png"
        )  # store an array of images for when the screen activates
        displayscreens[fi] = CommandableImageSprite(screen, BLOCKER_POSITION, screen_inactive[fi], scale=SCREEN_SCALE)

        ## arrange all the display screens horizontally
    displayscreens.reverse()  # do this so that the order is correct -- left to right in function args is R-L on screen
    Arranger.ArrangeHorizontal(displayscreens, BLOCKER_X, BLOCKER_Y, pad=0)
    displayscreens.reverse()

    # Set up the sounds
    if training:
        correct_sound = random.choice(correct_sounds)
        incorrect_sound = random.choice(incorrect_sounds)
    else:
        correct_sound = random.choice(test_sounds)
        incorrect_sound = correct_sound

        # The image click responses
    if len(functions) == 1:
        pre = pre + functions[0] + "\tNA" + "\t" + q(in_object_path)
    else:
        pre = pre + functions[0] + "\t" + functions[1] + "\t" + q(in_object_path)

    response1 = ImageClickResponse(
        screen,
        RESPONSE1_POSITION,
        possible_answers,
        scale=0.50,
        correct=correct1_string,
        feedback=True,
        pre="R1\t" + pre,
        correct_sound=random.choice(correct_sounds),
        incorrect_sound=random.choice(incorrect_sounds),
    )  ## Edited here to always play correct and incorrect regardless of training condition
    response1.start()
    response2 = ImageClickResponse(
        screen,
        RESPONSE2_POSITION,
        possible_answers,
        scale=0.50,
        correct=correct2_string,
        feedback=training,
        pre="R2\t" + pre,
        correct_sound=correct_sound,
        incorrect_sound=incorrect_sound,
    )
    response2.start()

    # the object
    myobj = CommandableImageSprite(screen, OBJECT_START_POSITION, in_object_path, scale=TRUCK_SCALE)

    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # Now set up all the motions

    # move to the blocker's height, below RESPONSE1
    Q.append(
        obj=myobj,
        action="wagglemove",
        duration=TIME_TO_GET_TO_RESPONSE1,
        period=1.0,
        amount=1.25,
        pos=[RESPONSE1_X, BLOCKER_Y + 48],
    )
    Q.append(
        obj=response1, action="respond", duration=Infinity
    )  # duration=Infinity in order to keep this waiting as long as necessary

    # Q.append(obj=response1, action='hide') # After ms_nov7, we do NOT hide in order to make things better

    for fi in range(len(functions)):
        Q.append(
            obj=myobj,
            action="wagglemove",
            duration=TIME_BETWEEN_SCREENS,
            period=1.0,
            amount=0.25,
            pos=[displayscreens[fi].get_x() - 20, displayscreens[fi].get_y() + 38],
        )
        Q.append(obj=myobj, action="waggle", duration=TIME_TO_APPLY_FUNCTION, period=0.5, amount=1.2)

        Q.append_simultaneous(
            obj=displayscreens[fi],
            action="swapblink",
            image=screen_active[fi],
            scale=SCREEN_SCALE,
            rotation=0,
            duration=TIME_TO_APPLY_FUNCTION,
            period=0.5,
        )

        Q.append(obj=myobj, action="restore")

        # and leave the screen on
        Q.append(obj=displayscreens[fi], action="swap", image=screen_active[fi], rotation=0, scale=SCREEN_SCALE)

    Q.append(
        obj=response2, action="respond", duration=Infinity
    )  # duration=Infinity in order to keep this waiting as long as necessary

    # in training, reveal the object
    if training:
        Q.append(obj=myobj, action="swap", image=out_object_path, rotation=0, scale=TRUCK_SCALE)

        for fi in range(len(functions)):
            Q.append(obj=displayscreens[fi], action="move", pos=[displayscreens[fi].get_x(), -500], duration=1.0)

        Q.append(obj=myobj, action="wait", duration=2.0)

        # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
        # Set up the updates, etc.

        # NOTE: this controls the z-depth
    ou = OrderedUpdates(myobj, response1, response2, *displayscreens)

    for event in kelpy_standard_event_loop(screen, Q, ou, bottom_smile_updates):
        if event.type == pygame.MOUSEBUTTONDOWN and pygame.mouse.get_pressed()[0]:
            for x in dos:
                x.clickme(pygame.mouse.get_pos())  # everyone gets to process this:
        if event.type == QUIT:
            quit()
        if event.type == KEYDOWN and event.key == K_ESCAPE:
            quit()