def __init__(self):
    '''Constructor'''
    SogalForm.__init__(self, fading=True, fading_duration=0.3,
                       enableMask=True, backgroundColor=(0, 0, 0, 0.6))
    self.reparentTo(aspect2d, sort=101)
    self.frame = DirectScrolledFrame(parent=self,
                                     canvasSize=CANVASSIZE,
                                     frameSize=FRAMESIZE,
                                     autoHideScrollBars=AUTO_HIDE_SCROLLBARS)
    self.reloadTheme()
    self.height = TOP
    self.shiftedHeight = 0
    self.shifter = NodePath('text_history_shifter')
    self.shifter.reparentTo(self.frame.getCanvas())
    if not prop_set_up:
        # Register a smaller text scale for speaker names in the history view
        nameprops = TextProperties()  # @UndefinedVariable
        nameprops.setTextScale(0.75)
        TextPropertiesManager.getGlobalPtr().setProperties("th_name", nameprops)  # @UndefinedVariable
    self.labels = []
def __init__(self, xml):
    sys.stdout = PseudoFile(self.writeOut)
    sys.stderr = PseudoFile(self.writeErr)
    tpErr = TextProperties()
    tpErr.setTextColor(1, 0.5, 0.5, 1)
    TextPropertiesManager.getGlobalPtr().setProperties("err", tpErr)
    font = loader.loadFont("cmss12")
    self.frame = DirectFrame(parent=base.a2dTopCenter,
                             text_align=TextNode.ALeft,
                             text_pos=(-base.getAspectRatio() + TEXT_MARGIN[0], TEXT_MARGIN[1]),
                             text_scale=0.05, text_fg=(1, 1, 1, 1),
                             frameSize=(-2.0, 2.0, -0.5, 0.0),
                             frameColor=(0, 0, 0, 0.5),
                             text='', text_font=font)
    self.entry = DirectEntry(parent=base.a2dTopLeft, command=self.command,
                             scale=0.05, width=1000.0, pos=(-0.02, 0, -0.48),
                             relief=None, text_pos=(1.5, 0, 0),
                             text_fg=(1, 1, 0.5, 1), rolloverSound=None,
                             clickSound=None, text_font=font)
    self.otext = OnscreenText(parent=self.entry, scale=1, align=TextNode.ALeft,
                              pos=(1, 0, 0), fg=(1, 1, 0.5, 1), text=':', font=font)
    self.lines = [''] * 9
    self.commands = []    # All previously sent commands
    self.cscroll = None   # Index of currently navigated command, None if current
    self.command = ''     # Currently entered command
    self.block = ''       # Temporarily stores a block of commands
    self.hide()
    self.initialized = False
    self.toggleKeys = manager.controls.registerKeyAll("Toggle Console", "`", self.toggle, self)
def __init__(self):
    sys.stdout = PseudoFile(self.write_out)
    sys.stderr = PseudoFile(self.write_err)
    tp_err = TextProperties()
    tp_err.setTextColor(1, 0.5, 0.5, 1)
    TextPropertiesManager.getGlobalPtr().setProperties('err', tp_err)
    font = loader.loadFont('cmss12')
    self.frame = DirectFrame(parent=base.a2dTopCenter,
                             text_align=TextNode.ALeft,
                             text_pos=(base.a2dLeft + TEXT_MARGIN[0], TEXT_MARGIN[1]),
                             text_scale=0.05, text_fg=(1, 1, 1, 1),
                             frameSize=(-2.0, 2.0, -1, 0.0),
                             frameColor=(0, 0, 0, 0.5),
                             text='', text_font=font, sortOrder=4)
    self.entry = DirectEntry(parent=base.a2dTopLeft, command=self.command,
                             scale=0.05, width=1000.0, pos=(-0.02, 0, -0.98),
                             relief=None, text_pos=(1.5, 0, 0),
                             text_fg=(1, 1, 0.5, 1), rolloverSound=None,
                             clickSound=None, text_font=font)
    self.otext = OnscreenText(parent=self.entry, scale=1, align=TextNode.ALeft,
                              pos=(1, 0, 0), fg=(1, 1, 0.5, 1), text=':', font=font)
    self.lines = [''] * 19
    self.commands = []    # All previously sent commands
    self.cscroll = None   # Index of currently navigated command, None if current
    self.command = ''     # Currently entered command
    self.block = ''       # Temporarily stores a block of commands
    self.hide()
    self.initialized = False
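# A minimal sketch (not part of the snippet above) of how the registered 'err'
# property is typically consumed: lines routed through write_err are wrapped in
# \1err\1 ... \2 so only the error text picks up the reddish color. The
# frame['text'] update and the 19-line buffer are assumptions based on the
# constructor above; the real console may format its buffer differently.
def write_err(self, line):
    # Tag the line with the 'err' TextProperties registered in __init__
    self.lines.append('\1err\1%s\2' % line.rstrip())
    self.lines = self.lines[-19:]               # keep only the visible buffer
    self.frame['text'] = '\n'.join(self.lines)  # redraw the console text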
def run(self):
    self.implicit_markers = False
    base.win.setClearColor((0, 0, 0, 1))
    self.marker(0)  # Send one event to trigger the whole event sending process

    # Define text properties
    tp_gray = TextProperties()
    tp_gray.setTextColor(0.5, 0.5, 0.5, 1)
    tpMgr = TextPropertiesManager.getGlobalPtr()
    tpMgr.setProperties("gray", tp_gray)

    self.write('When you are ready,\npress the space bar to begin.' +
               '\n\n\1gray\1[Press Space to continue]\2',
               duration='space', align='left', pos=(-0.5, 0),
               scale=0.05, fg=(1, 1, 1, 1))
    self.sleep(4)
    self.watchfor('space')
def __init__(self, cr):
    base.messenger.send("registerLoadEvent", ["loadChatDone"])
    Chat.__init__(self, base.a2dTopRight)
    self.cr = cr
    self.frmChat.setPos(-self.frmChat["frameSize"][1],
                        self.frmChat.getY(), self.frmChat.getZ())
    self.btnToggleChat.hide()
    self.btnToggleChat.setPos(-self.btnToggleChat["frameSize"][1],
                              self.btnToggleChat.getY(), self.btnToggleChat.getZ())
    self.btnToggleChatOrigTextFG = self.btnToggleChat.component("text1").fg
    self.btnToggleChat["sortOrder"] = 990
    self.frmChat["sortOrder"] = 990
    self.txtMessage["focusInCommand"] = self.focusInCommandFunc
    self.txtMessage["focusOutCommand"] = self.focusOutCommandFunc
    self.txtMessage["command"] = self.sendMessage

    tpMgr = TextPropertiesManager.getGlobalPtr()
    tpBold = TextProperties()
    font = loader.loadFont("assets/fonts/OldaniaADFStd-Bold.otf")
    tpBold.setFont(font)
    tpMgr.setProperties("bold", tpBold)

    self.lblMessages = OnscreenText(
        text="\1bold\1Messages:\2",
        scale=0.05,
        pos=(self.frmMessages["canvasSize"][0], -0.05),
        align=TextNode.ALeft,
        wordwrap=14,
        parent=self.frmMessages.getCanvas())

    self.accept("sendMessage", self.sendMessage)
    self.accept("setText", self.addMessage)
    self.accept("toggleChat", self.toggleChat)
    self.hide()
def __init__(self, **options):
    Formatter.__init__(self, **options)
    self.styles = {}
    manager = TextPropertiesManager.getGlobalPtr()
    n = 1.0 / 255
    for token, style in self.style:
        start = end = ''
        if style['color']:
            # hex color (#FF0000) to RGB 255 (255,0,0)
            color = tuple(int(style['color'][i:i + 2], 16) for i in (0, 2, 4))
            # RGB 255 to vec4 (1,0,0,1), obsolete step?
            color = (n * color[0], n * color[1], n * color[2], 1)
            tp = TextProperties()
            tp.setTextColor(color)
            manager.setProperties(str(color), tp)
            # \1tag\1 starts a TextProperties
            start += '\1%s\1' % str(color)
            # \2 resets TextProperties
            end = str('\2' + end)
        self.styles[token] = (start, end)
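# Hedged usage sketch for the formatter above: the (start, end) pairs it builds
# are plain Panda3D text-property escapes (\1name\1 ... \2), so the highlighted
# string can be fed straight into an OnscreenText. The highlight_to_onscreen
# helper and the use of pygments.lex here are illustrative assumptions, not the
# project's own API; it also assumes a running ShowBase.
from panda3d.core import TextNode
from direct.gui.OnscreenText import OnscreenText
from pygments import lex
from pygments.lexers import PythonLexer

def highlight_to_onscreen(source, formatter):
    # Wrap every token's text in the escape pair registered for its style
    chunks = []
    for token, value in lex(source, PythonLexer()):
        start, end = formatter.styles.get(token, ('', ''))
        chunks.append(start + value + end)
    return OnscreenText(text=''.join(chunks), scale=0.05, align=TextNode.ALeft)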
def __init__(self, font_family):
    mistune.Renderer.__init__(self)
    self.font_normal = fontsManager.get_font(font_family, Font.STYLE_NORMAL)
    if self.font_normal is not None:
        self.font_normal = self.font_normal.load()
    self.font_bold = fontsManager.get_font(font_family, Font.STYLE_BOLD)
    if self.font_bold is not None:
        self.font_bold = self.font_bold.load()
    if self.font_bold is None:
        self.font_bold = self.font_normal
    self.font_italic = fontsManager.get_font(font_family, Font.STYLE_ITALIC)
    if self.font_italic is not None:
        self.font_italic = self.font_italic.load()
    if self.font_italic is None:
        self.font_italic = self.font_normal
    if not MarkdownRenderer.init:
        # TODO: names should be linked to instance and deleted when not needed
        tpMgr = TextPropertiesManager.getGlobalPtr()
        tp_normal = TextProperties()
        tp_normal.set_font(self.font_normal)
        tpMgr.setProperties("md_normal", tp_normal)
        tp_underscore = TextProperties()
        tp_underscore.set_underscore(True)
        tpMgr.setProperties("md_under", tp_underscore)
        tp_bold = TextProperties()
        tp_bold.set_font(self.font_bold)
        tpMgr.setProperties("md_bold", tp_bold)
        tp_italic = TextProperties()
        tp_italic.set_font(self.font_italic)
        tpMgr.setProperties("md_italic", tp_italic)
        for i in range(1, 7):
            header = TextProperties()
            header.set_text_scale(1.0 + (7 - i) / 10.0)
            tpMgr.setProperties("md_header%i" % i, header)
        MarkdownRenderer.init = True
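# Hedged example of the "md_*" properties registered above: once they are in
# the global TextPropertiesManager, any OnscreenText or TextNode can mix them
# inline. The renderer/mistune wiring is omitted; the markup string below is
# hand-written only to show the escape syntax, and a running ShowBase is
# assumed so that the properties have already been registered.
from direct.gui.OnscreenText import OnscreenText

sample = ('\1md_header1\1Title\2\n'
          '\1md_normal\1Body text with \1md_bold\1bold\2, '
          '\1md_italic\1italic\2 and \1md_under\1underlined\2 words.\2')
label = OnscreenText(text=sample, scale=0.06, fg=(1, 1, 1, 1), pos=(-0.9, 0.8))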
def run(self):
    self.implicit_markers = False
    base.win.setClearColor((0, 0, 0, 1))
    self.marker(0)  # Send one event to trigger the whole event sending process

    if self.training:
        self.stimuli_order = self.stimuli_order[:16]
        self.n_blocks = 1
        self.n_runs = 1

    if self.av_type == 'auditory':
        self.file_list = ['studies/obj_animals/stimuli/' + k.strip() + '_f.wav'
                          for k in self.stimuli]
        for f in self.file_list:
            self.precache_sound(f)
    self.precache_sound('buzz.wav')
    self.precache_sound('beep.wav')

    # Define text properties
    tp_gray = TextProperties()
    tp_gray.setTextColor(0.5, 0.5, 0.5, 1)
    tpMgr = TextPropertiesManager.getGlobalPtr()
    tpMgr.setProperties("gray", tp_gray)

    # Show instructions
    if self.training:
        if self.av_type == 'visual':
            verb = 'see'
        else:
            verb = 'hear'
        self.write('This is a word association experiment.\nYou will complete several trials in this block.\n\n' +
                   '\1gray\1[Press Space to continue]\2',
                   fg=(1, 1, 1, 1), duration='space', align='left', pos=(-0.5, 0), scale=0.05)
        self.write('The experiment consists of two conditions.\nIn each trial, ' +
                   'you will be prompted to perform\none of these two tasks:\n' +
                   '(1) touch a button on the screen,\n' +
                   '(2) press the space bar.\n\n\1gray\1[Press Space to continue]\2',
                   fg=(1, 1, 1, 1), duration='space', align='left', pos=(-0.5, 0), scale=0.05)
        self.write('In each trial, you will ' + verb + ' a word.\nWhen the word is an animal,\n' +
                   'touch the button on the screen.\n\n\1gray\1[Press Space to continue]\2',
                   fg=(1, 1, 1, 1), duration='space', align='left', pos=(-0.5, 0), scale=0.05)
        self.write('When the word is an object,\npress the space bar.\n\n\1gray\1[Press Space to continue]\2',
                   fg=(1, 1, 1, 1), duration='space', align='left', pos=(-0.5, 0), scale=0.05)
        self.write('You will hear a beep for correct answers.\nYou will hear a buzz for incorrect answers.\n\n\1gray\1[Press Space to continue]\2',
                   fg=(1, 1, 1, 1), duration='space', align='left', pos=(-0.5, 0), scale=0.05)

    self.write('When you are ready,\npress the space bar to begin.' +
               '\n\n\1gray\1[Press Space to continue]\2',
               fg=(1, 1, 1, 1), duration='space', align='left', pos=(-0.5, 0), scale=0.05)

    t = time.localtime()
    t_str = '-'.join([str(k) for k in [t.tm_year, t.tm_mon, t.tm_mday,
                                       t.tm_hour, t.tm_min, t.tm_sec]])
    f = open('studies/obj_animals/log/' + t_str + '.txt', 'w')
    f.write('Stimulus No.\tStimulus\tCategory\tButton position\tScore\n')

    self.sleep(5)

    counter = 0  # Needed for breaks between blocks
    for k in self.stimuli_order:
        # Short break
        if not self.training and counter in xrange(len(self.stimuli_order) / self.n_blocks / 2,
                                                   len(self.stimuli_order),
                                                   len(self.stimuli_order) / self.n_blocks / 2):
            self.write('Time for a short break.\n\n' + '\1gray\1[Press Space to continue]\2',
                       fg=(1, 1, 1, 1), duration='space', align='left', pos=(-0.5, 0), scale=0.05)
            self.sleep(2)
        counter += 1

        # Recalculate button positions in case the window size changed
        ar = base.getAspectRatio()
        button_frame = (-ar / 9, ar / 9, -1.0 / 4, 1.0 / 4)
        buttons = []
        for k1 in xrange(2, 7):
            for k2 in xrange(4):
                buttons.append((-ar + ar / 9 + k1 * ar / 4.5, 0, 1 - 1.0 / 4 - k2 / 2.0))
        # Delete middle buttons
        del buttons[5:7]
        del buttons[7:9]
        del buttons[9:11]
        choice = random.randint(0, len(buttons) - 1)
        button = buttons[choice]

        f.write(str(k) + '\t' + self.stimuli[k].strip() + '\t' +
                self.conditions[self.target[k]] + '\t' + str(choice) + '\t')

        # Visual or auditory presentation
        if self.av_type == 'auditory':
            self.sound(self.file_list[k], volume=0.5)
            self.sleep(0.2)
            self.write('+', duration=self.isi - self.pause, block=False,
                       scale=0.15, fg=(1, 1, 1, 1))
        elif self.av_type == 'visual':
            self.sleep(0.2)
            self.write(self.stimuli[k], duration=self.isi - self.pause, block=False,
                       scale=0.15, fg=(1, 1, 1, 1))
        self.marker(k + 10000)

        btn = DirectButton(frameSize=button_frame, pos=button,
                           frameColor=(0.75, 0, 0, 1), borderWidth=(0.01, 0.01),
                           rolloverSound=None, clickSound=None,
                           command=messenger.send, extraArgs=('button_pressed',))

        latencies = self.waitfor_multiple(['button_pressed', 'space'], self.isi)
        if not latencies:
            response = 'none'
            wait_time = self.pause
            self.sound('buzz.wav', volume=0.5)
        else:
            response = latencies[0]
            wait_time = self.pause + self.isi - latencies[1]

        if self.target[k] == 1 and response == 'button_pressed':
            # Correct: animal word answered with a button press
            self.score += int(100 * (self.isi - latencies[1]) / self.isi)
            self.sound('beep.wav', volume=0.5)
        elif self.target[k] == 0 and response == 'space':
            # Correct: object word answered with the space bar
            self.score += int(10 * (self.isi - latencies[1]) / self.isi)
            self.sound('beep.wav', volume=0.5)
        elif ((self.target[k] == 1 and response == 'space') or
              (self.target[k] == 0 and response == 'button_pressed')):
            self.score -= 5
            if self.score < 0:
                self.score = 0
            self.sound('buzz.wav', volume=0.5)
        f.write(str(self.score) + '\n')

        try:
            btn.destroy()
        except:
            pass
        self.sleep(wait_time - 0.2)

    f.close()

    if not self.training:
        self.write('You successfully completed\none run of the experiment.\n\nThank you!',
                   duration=5, align='left', pos=(-0.5, 0), scale=0.05, fg=(1, 1, 1, 1))
def __init__(self):
    self.frameMain = DirectFrame(
        frameSize=(base.a2dLeft, base.a2dRight, base.a2dBottom, base.a2dTop),
        frameColor=(0.05, 0.05, 0.05, 1))
    self.frameMain.setTransparency(1)

    tpBig = TextProperties()
    tpBig.setTextScale(1.5)
    tpSmall = TextProperties()
    tpSmall.setTextScale(0.75)
    tpUs = TextProperties()
    tpUs.setUnderscore(True)
    tpMgr = TextPropertiesManager.getGlobalPtr()
    tpMgr.setProperties("big", tpBig)
    tpMgr.setProperties("small", tpSmall)
    tpMgr.setProperties("us", tpUs)

    creditsText = ""
    with open("credits.txt") as f:
        creditsText = f.read()

    self.lblCredits = DirectLabel(
        text=creditsText,
        text_fg=(1, 1, 1, 1),
        text_bg=(0, 0, 0, 0),
        frameColor=(0, 0, 0, 0),
        text_align=TextNode.ACenter,
        scale=0.1,
        pos=(0, 0, base.a2dTop - 0.2))
    self.lblCredits.setTransparency(1)
    self.lblCredits.reparentTo(self.frameMain)

    self.creditsScroll = LerpPosInterval(
        self.lblCredits,
        12.0,
        (0, 0, base.a2dTop + 3.5),
        startPos=(0, 0, base.a2dBottom),
        name="CreditsScroll")

    self.btnBack = DirectButton(
        text="BACK",
        text_fg=(1, 1, 1, 1),
        text_align=TextNode.ALeft,
        scale=0.1,
        pad=(0.15, 0.15),
        pos=(base.a2dLeft + 0.08, 0, base.a2dBottom + 0.05),
        frameColor=(
            (0.2, 0.2, 0.2, 0.8),
            (0.4, 0.4, 0.4, 0.8),
            (0.4, 0.4, 0.4, 0.8),
            (0.1, 0.1, 0.1, 0.8)),
        relief=1,
        command=base.messenger.send,
        extraArgs=["Credits-Back"],
        pressEffect=False,
        rolloverSound=None,
        clickSound=None)
    self.btnBack.setTransparency(1)
    self.btnBack.reparentTo(self.frameMain)

    self.hide()
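# The "big", "small" and "us" properties registered above are meant to be used
# inside credits.txt itself. A hypothetical credits.txt could therefore look
# like this (\1name\1 pushes a property, \2 pops it again):
#
#   \1big\1My Game\2
#   \1us\1Programming\2
#   Jane Doe
#   \1small\1made with Panda3D\2
#
# Note that since the file is read verbatim with f.read(), the escapes must be
# the actual control characters 0x01/0x02 in the file, not the backslash
# sequences shown in Python source.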
def run(self):
    self.implicit_markers = False
    base.win.setClearColor((0, 0, 0, 1))

    # Precache sounds
    self.file_list_f = ['studies/speech/stimuli/' + k.strip() + '_f.wav' for k in self.stimuli]
    for f in self.file_list_f:
        self.precache_sound(f)
    self.file_list_m = ['studies/speech/stimuli/' + k.strip() + '_m.wav' for k in self.stimuli]
    for f in self.file_list_m:
        self.precache_sound(f)

    # Define text properties
    tp_gray = TextProperties()
    tp_gray.setTextColor(0.5, 0.5, 0.5, 1)
    tpMgr = TextPropertiesManager.getGlobalPtr()
    tpMgr.setProperties("gray", tp_gray)

    # Show instructions
    if self.training:
        self.n_runs = 1
        self.write('This is a speech perception/production\n' +
                   'experiment. You will complete several trials\n' +
                   'in each block.\n\n' +
                   '\1gray\1[Press Space to continue]\2',
                   duration='space', align='left', pos=(-0.5, 0), scale=0.05, fg=(1, 1, 1, 1))
        self.write('The experiment consists of three conditions.\n' +
                   'You will perform one of these tasks:\n\n' +
                   '(1) Speak a word\n' +
                   '(2) Imagine speaking a word\n' +
                   '(3) Press the Space bar' +
                   '\n\n\1gray\1[Press Space to continue]\2',
                   duration='space', align='left', pos=(-0.5, 0), scale=0.05, fg=(1, 1, 1, 1))
        self.write('You will be instructed which task to perform\n' +
                   'at the beginning of each block.\n\n' +
                   'Specifically, the tasks are as follows:\n\n' +
                   '\1gray\1[Press Space to continue]\2',
                   duration='space', align='left', pos=(-0.5, 0), scale=0.05, fg=(1, 1, 1, 1))
        txt = self.write('(1) Speak the word aloud.\n\n' +
                         'You will either see or hear the word.\n\n' +
                         '\1gray\1[Press Space to continue]\2',
                         duration=10000, block=False, align='left', pos=(-0.5, 0),
                         scale=0.05, fg=(1, 1, 1, 1))
        pic = self.picture('studies/speech/overt.png', duration=10000, block=False,
                           scale=0.25, pos=(0, 0.5))
        self.waitfor('space')
        txt.destroy()
        pic.destroy()
        txt = self.write('(2) Imagine speaking the word.\n\n' +
                         'You will either see or hear the word.\n\n' +
                         '\1gray\1[Press Space to continue]\2',
                         duration=10000, block=False, align='left', pos=(-0.5, 0),
                         scale=0.05, fg=(1, 1, 1, 1))
        pic = self.picture('studies/speech/covert.png', duration=10000, block=False,
                           scale=0.25, pos=(0, 0.5))
        self.waitfor('space')
        txt.destroy()
        pic.destroy()
        txt = self.write('(3) Press the Space bar when the word\n' +
                         'was green or when the word was spoken\n' +
                         'by a female voice.\n\n' +
                         '\1gray\1[Press Space to continue]\2',
                         duration=10000, block=False, align='left', pos=(-0.5, 0),
                         scale=0.05, fg=(1, 1, 1, 1))
        pic = self.picture('studies/speech/control.png', duration=10000, block=False,
                           scale=0.25, pos=(0, 0.5))
        self.waitfor('space')
        txt.destroy()
        pic.destroy()

    self.write('When you are ready,\npress the Space bar to begin.' +
               '\n\n\1gray\1[Press Space to continue]\2',
               duration='space', align='left', pos=(-0.5, 0), scale=0.05, fg=(1, 1, 1, 1))

    # Create log file
    t = time.localtime()
    t_str = '-'.join([str(k) for k in [t.tm_year, t.tm_mon, t.tm_mday,
                                       t.tm_hour, t.tm_min, t.tm_sec]])
    f = open('studies/speech/log/' + t_str + '.txt', 'w')

    for run in range(self.n_runs):
        # Create randomized block sequence
        blocks = []
        for i1 in range(len(self.tasks)):           # Task (overt, covert, none)
            for i2 in range(len(self.conditions)):  # Condition (visual, auditory)
                blocks.append([i1, i2])
        random.shuffle(blocks)

        for block in blocks:
            # Create randomized stimulus presentation sequence
            trials = range(len(self.stimuli))
            random.shuffle(trials)

            # Show instructions
            if block[0] == 0:  # Overt
                if block[1] == 0:  # Visual
                    txt = self.write('You will see words on the screen.\n\n' +
                                     'Speak the word aloud.\n\n' +
                                     '\1gray\1[Press Space to continue]\2',
                                     duration=10000, block=False, align='left',
                                     pos=(-0.5, 0), scale=0.05, fg=(1, 1, 1, 1))
                elif block[1] == 1:  # Auditory
                    txt = self.write('You will hear words.\n\n' +
                                     'Speak the word aloud.\n\n' +
                                     '\1gray\1[Press Space to continue]\2',
                                     duration=10000, block=False, align='left',
                                     pos=(-0.5, 0), scale=0.05, fg=(1, 1, 1, 1))
                pic = self.picture('studies/speech/overt.png', duration=10000,
                                   block=False, scale=0.25, pos=(0, 0.5))
            elif block[0] == 1:  # Covert
                if block[1] == 0:  # Visual
                    txt = self.write('You will see words on the screen.\n\n' +
                                     'Imagine speaking the word.\n\n' +
                                     '\1gray\1[Press Space to continue]\2',
                                     duration=10000, block=False, align='left',
                                     pos=(-0.5, 0), scale=0.05, fg=(1, 1, 1, 1))
                elif block[1] == 1:  # Auditory
                    txt = self.write('You will hear words.\n\n' +
                                     'Imagine speaking the word.\n\n' +
                                     '\1gray\1[Press Space to continue]\2',
                                     duration=10000, block=False, align='left',
                                     pos=(-0.5, 0), scale=0.05, fg=(1, 1, 1, 1))
                pic = self.picture('studies/speech/covert.png', duration=10000,
                                   block=False, scale=0.25, pos=(0, 0.5))
            elif block[0] == 2:  # Control
                if block[1] == 0:  # Visual
                    txt = self.write('You will see words on the screen.\n\n' +
                                     'Press the Space bar if the word was green.\n\n' +
                                     '\1gray\1[Press Space to continue]\2',
                                     duration=10000, block=False, align='left',
                                     pos=(-0.5, 0), scale=0.05, fg=(1, 1, 1, 1))
                elif block[1] == 1:  # Auditory
                    txt = self.write('You will hear words.\n\n' +
                                     'Press the Space bar if the voice was female.\n\n' +
                                     '\1gray\1[Press Space to continue]\2',
                                     duration=10000, block=False, align='left',
                                     pos=(-0.5, 0), scale=0.05, fg=(1, 1, 1, 1))
                pic = self.picture('studies/speech/control.png', duration=10000,
                                   block=False, scale=0.25, pos=(0, 0.5))
            self.waitfor('space')
            txt.destroy()
            pic.destroy()
            self.sleep(4)

            if self.training:
                trials = trials[:8]  # Use only the first 8 items for the training block

            for trial in trials:
                rand_color_voice = random.randint(0, 1)  # 0 ... blue word, female voice; 1 ... green word, male voice
                f.write(self.stimuli[trial].strip() + '\t' + self.tasks[block[0]] + '\t' +
                        self.conditions[block[1]] + '\t' + str(rand_color_voice) + '\t')
                self.marker(trial)  # Send the stimulus index as an event marker
                if rand_color_voice == 0:  # Blue text, female voice
                    if self.conditions[block[1]] == 'visual':
                        self.write(self.stimuli[trial], duration=self.isi - self.pause,
                                   block=False, scale=0.15, fg=(0, 0.666667, 1, 1))
                    else:
                        self.sound(self.file_list_f[trial], volume=0.5)
                        self.write('+', duration=self.isi - self.pause, block=False,
                                   scale=0.15, fg=(1, 1, 1, 1))
                else:  # Green text, male voice
                    if self.conditions[block[1]] == 'visual':
                        self.write(self.stimuli[trial], duration=self.isi - self.pause,
                                   block=False, scale=0.15, fg=(0, 1, 0, 1))
                    else:
                        self.sound(self.file_list_m[trial], volume=0.5)
                        self.write('+', duration=self.isi - self.pause, block=False,
                                   scale=0.15, fg=(1, 1, 1, 1))
                #self.sleep(self.isi)
                latencies = self.watchfor('space', self.isi)
                if latencies:
                    f.write('yes\n')  # Space bar was pressed
                else:
                    f.write('no\n')   # Space bar was not pressed

        self.write('You completed one run of the experiment.\n\n' +
                   '\1gray\1[Press Space to continue]\2',
                   duration='space', align='left', pos=(-0.5, 0), scale=0.05, fg=(1, 1, 1, 1))

    f.close()
def __init__(self):
    self.said_this_frame = []
    self.string_id = 0
    self.output = render2d.attach_new_node(TextNode("output text"))
    self.output.node().text = ""
    self.output.node().align = 2
    self.output.node().font = base.font
    self.output.set_scale(0.025, 0.025, 0.045)
    self.output.set_z(-0.75)
    self.to_output = []
    for i in range(5):
        self.say("")
    self.inventory = Inventory()
    self.money = self.inventory.add(Money(0, True))
    self.inventory.hide()
    self.say("press escape for menu")
    self.room, self.checkpoint, self.test = world()
    self.room.node.reparent_to(render)
    base.play_music(self.room.song)
    self.current = self.room
    self.location = render2d.attach_new_node(TextNode("location text"))
    self.location.node().text = self.room.node.name
    self.location.node().align = 2
    self.location.node().font = base.font
    self.location.set_scale(0.025, 0.025, 0.045)
    self.location.set_z(0.1)
    self.hp = 10
    self.max_hp = 10
    self.stats = {
        "offence": 0,
        "defence": 0,
        "endurance": 0,
    }
    self.equipment = {}
    self.creature_codex = []
    self.character = Inventory()
    self.character.hide()

    def get_health():
        base.interface.say("You have {}/{} hp.".format(
            self.hp, self.max_hp + self.stats["endurance"]))

    def get_stats():
        base.interface.say("Offence: {}.".format(self.stats["offence"]))
        base.interface.say("Defence: {}.".format(self.stats["defence"]))
        base.interface.say("Endurance: {}.".format(self.stats["endurance"]))

    manager = TextPropertiesManager.getGlobalPtr()
    tp = TextProperties()
    tp.setTextColor((0.2, 0.2, 0.2, 1))
    manager.setProperties("grey", tp)
    tp = TextProperties()
    tp.setTextColor((1, 1, 1, 1))
    manager.setProperties("white", tp)

    self.character.add(Option("inventory")).function = self.open_inventory
    self.character.add(Option("health")).function = get_health
    self.character.add(Option("stats")).function = get_stats
    quit = self.character.add(Menu("quit game"))
    quit.empty()
    quit.add(Return("yes, quit!", "")).function = sys.exit
    quit.add(Return("no, keep playing!", ""))
    self.dead = Rolodex("dead")
    self.dead.add(Option("You died."))
def run(self):
    self.implicit_markers = False
    base.win.setClearColor((0, 0, 0, 1))
    self.marker(0)  # Send one event to trigger the whole event sending process

    # Precache sounds
    self.file_list = ['studies/speech/stimuli/' + k.strip() + '_f.wav' for k in self.stimuli]
    for f in self.file_list:
        self.precache_sound(f)

    # Define text properties
    tp_gray = TextProperties()
    tp_gray.setTextColor(0.5, 0.5, 0.5, 1)
    tpMgr = TextPropertiesManager.getGlobalPtr()
    tpMgr.setProperties("gray", tp_gray)

    # Show instructions (only in training run)
    if self.training:
        self.n_blocks = 1
        self.write('This is a speech perception/production\nexperiment. You will complete\nseveral trials in each block.\n\n' +
                   '\1gray\1[Press Space to continue]\2',
                   duration='space', align='left', pos=(-0.5, 0), scale=0.05, fg=(1, 1, 1, 1))
        self.write('The experiment consists of three conditions.\nIn each trial, ' +
                   'you will be prompted to perform\none of these three tasks:\n' +
                   '(1) speak a word,\n' +
                   '(2) imagine speaking a word,\n' +
                   '(3) press the space bar.\n\n\1gray\1[Press Space to continue]\2',
                   duration='space', align='left', pos=(-0.5, 0), scale=0.05, fg=(1, 1, 1, 1))
        self.write('In each trial, you will see or hear a word.\n\nYou will also see ' +
                   'a visual cue\nto indicate which task to perform:\n\n\1gray\1[Press Space to continue]\2',
                   duration='space', align='left', pos=(-0.5, 0), scale=0.05, fg=(1, 1, 1, 1))
        txt = self.write('(1) Speak a word\nwhen you see a speech bubble.\n\n\1gray\1[Press Space to continue]\2',
                         duration=10000, block=False, align='left', pos=(-0.5, 0),
                         scale=0.05, fg=(1, 1, 1, 1))
        pic = self.picture('studies/speech/overt.png', duration=10000, block=False,
                           scale=0.25, pos=(0, 0.5))
        self.waitfor('space')
        txt.destroy()
        pic.destroy()
        txt = self.write('(2) Imagine speaking a word\nwhen you see a thought bubble.\n\n\1gray\1[Press Space to continue]\2',
                         duration=10000, block=False, align='left', pos=(-0.5, 0),
                         scale=0.05, fg=(1, 1, 1, 1))
        pic = self.picture('studies/speech/covert.png', duration=10000, block=False,
                           scale=0.25, pos=(0, 0.5))
        self.waitfor('space')
        txt.destroy()
        pic.destroy()
        txt = self.write('(3) Press the space bar\nwhen you see a rectangle.\n\n\1gray\1[Press Space to continue]\2',
                         duration=10000, block=False, align='left', pos=(-0.5, 0),
                         scale=0.05, fg=(1, 1, 1, 1))
        pic = self.picture('studies/speech/control.png', duration=10000, block=False,
                           scale=(0.25, 1, 0.1), pos=(0, 0.5))
        self.waitfor('space')
        txt.destroy()
        pic.destroy()

    self.write('When you are ready,\npress the space bar to begin.' +
               '\n\n\1gray\1[Press Space to continue]\2',
               duration='space', align='left', pos=(-0.5, 0), scale=0.05, fg=(1, 1, 1, 1))

    # Create log file
    t = time.localtime()
    t_str = '-'.join([str(k) for k in [t.tm_year, t.tm_mon, t.tm_mday,
                                       t.tm_hour, t.tm_min, t.tm_sec]])
    f = open('studies/speech/log/' + t_str + '.txt', 'w')

    self.sleep(4)

    for block in range(self.n_blocks):
        # Create randomized stimulus presentation sequence
        items = []
        for i1 in range(len(self.stimuli)):             # Stimulus
            for i2 in range(len(self.tasks)):           # Task (overt, covert, control)
                for i3 in range(len(self.conditions)):  # Condition (visual, auditory)
                    items.append([i1, i2, i3])
        random.shuffle(items)

        if self.training:
            items = items[:32]  # Use only the first 32 items for the training block

        counter = 0
        for trial in items:
            if not self.training and counter == len(items) / 2:  # Break in the middle of each run
                self.write('Time for a short break.\n\n' + '\1gray\1[Press Space to continue]\2',
                           fg=(1, 1, 1, 1), duration='space', align='left', pos=(-0.5, 0), scale=0.05)
                self.sleep(4)
            counter += 1

            f.write(self.stimuli[trial[0]].strip() + '\t' + self.tasks[trial[1]] + '\t' +
                    self.conditions[trial[2]] + '\t')

            if self.tasks[trial[1]] == 'overt':
                self.picture('studies/speech/overt.png', duration=self.isi - self.pause,
                             block=False, scale=0.5, pos=(0, -0.05))
            elif self.tasks[trial[1]] == 'covert':
                self.picture('studies/speech/covert.png', duration=self.isi - self.pause,
                             block=False, scale=0.5, pos=(0, -0.05))
            else:
                self.picture('studies/speech/control.png', duration=self.isi - self.pause,
                             block=False, scale=(0.5, 1, 0.2))

            # Format: 1zyxx, xx: stimulus (0-35), y: task (0, 1, 2), z: condition (0, 1)
            self.marker(trial[0] + trial[1] * 100 + trial[2] * 1000 + 10000)

            if self.conditions[trial[2]] == 'visual':
                self.write(self.stimuli[trial[0]], duration=self.isi - self.pause,
                           block=False, scale=0.15, fg=(1, 1, 1, 1))
            else:
                self.sound(self.file_list[trial[0]], volume=0.5)
                self.write('+', duration=self.isi - self.pause, block=False,
                           scale=0.15, fg=(1, 1, 1, 1))

            if self.watchfor('space', self.isi):
                f.write('space\n')  # Space bar was pressed
            else:
                f.write('-\n')      # Space bar was not pressed

        if block < self.n_blocks - 1:  # If it's not the last block
            self.write('Time for a short break.\n\n' + '\1gray\1[Press Space to continue]\2',
                       fg=(1, 1, 1, 1), duration='space', align='left', pos=(-0.5, 0), scale=0.05)
            self.sleep(4)

    f.close()

    if not self.training:
        self.write('You successfully completed\none run of the experiment.\n\nThank you!',
                   duration=5, align='left', pos=(-0.5, 0), scale=0.05, fg=(1, 1, 1, 1))
def __init__(self):
    ShowBase.__init__(self)

    # Print all events sent through the messenger
    #self.messenger.toggleVerbose()

    # Load the graphics for the gamepad buttons and register them, so that
    # we can embed them in our information text.
    graphics = loader.loadModel("models/xbone-icons.egg")
    mgr = TextPropertiesManager.getGlobalPtr()
    for name in ["face_a", "face_b", "face_x", "face_y", "ltrigger",
                 "rtrigger", "lstick", "rstick"]:
        graphic = graphics.find("**/" + name)
        graphic.setScale(1.5)
        mgr.setGraphic(name, graphic)
        graphic.setZ(-0.5)

    # Show the informational text in the corner.
    self.lblInfo = OnscreenText(parent=self.a2dBottomLeft, pos=(0.1, 0.3),
                                fg=(1, 1, 1, 1), bg=(0.2, 0.2, 0.2, 0.9),
                                align=TextNode.A_left, text=INFO_TEXT)
    self.lblInfo.textNode.setCardAsMargin(0.5, 0.5, 0.5, 0.2)

    self.lblWarning = OnscreenText(text="No devices found", fg=(1, 0, 0, 1), scale=.25)

    self.lblAction = OnscreenText(text="Action", fg=(1, 1, 1, 1), scale=.15)
    self.lblAction.hide()

    # Is there a gamepad connected?
    self.gamepad = None
    devices = self.devices.getDevices(InputDevice.DeviceClass.gamepad)
    if devices:
        self.connect(devices[0])

    # Accept device dis-/connection events
    self.accept("connect-device", self.connect)
    self.accept("disconnect-device", self.disconnect)

    self.accept("escape", exit)

    # Accept button events of the first connected gamepad
    self.accept("gamepad-back", exit)
    self.accept("gamepad-start", exit)
    self.accept("gamepad-face_x", self.reset)
    self.accept("gamepad-face_a", self.action, extraArgs=["face_a"])
    self.accept("gamepad-face_a-up", self.actionUp)
    self.accept("gamepad-face_b", self.action, extraArgs=["face_b"])
    self.accept("gamepad-face_b-up", self.actionUp)
    self.accept("gamepad-face_y", self.action, extraArgs=["face_y"])
    self.accept("gamepad-face_y-up", self.actionUp)

    self.environment = loader.loadModel("environment")
    self.environment.reparentTo(render)

    # Disable the default mouse-camera controls since we need to handle
    # our own camera controls.
    self.disableMouse()

    self.reset()

    self.taskMgr.add(self.moveTask, "movement update task")
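# For reference, a hedged sketch of how INFO_TEXT can embed the gamepad icons
# registered above with mgr.setGraphic(): Panda3D text uses \5name\5 escapes to
# place a registered graphic inline, alongside the usual \1...\2 property
# escapes. The wording below is an assumption; in a real module this constant
# would be defined before the class that reads it.
INFO_TEXT = ("Move with the left stick \5lstick\5 and look around with \5rstick\5.\n"
             "Press \5face_a\5 to perform an action, \5face_x\5 to reset.")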
from panda3d.core import TextPropertiesManager
from panda3d.core import TextProperties
from panda3d.core import TextNode

from .common import COLORS

# Make each color a TextProperties
manager = TextPropertiesManager.getGlobalPtr()
for color in COLORS:
    tp = TextProperties()
    tp.setTextColor(COLORS[color])
    manager.setProperties(color, tp)


class DynamicTextOutput():
    def __init__(self):
        self.text = TextNode("output")
        self.text.set_font(base.font)
        self.text.set_shadow(0.08)
        self.text.set_shadow_color((0, 0, 0, 1))
        self.root = render2d.attach_new_node(self.text)
        self.root.set_scale(0.06)
        self.root.set_pos((-0.95, 0, 0.9))
        self.lines = []
        self.max_lines = 100

    def clear(self):
        self.lines = []
        self.text.text = ""

    def print(self, text, color="white"):
        lines = text.split("\n")
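# The print() method above is cut off in this excerpt. A plausible continuation,
# given the per-color TextProperties registered at module import, is sketched
# below as a standalone helper (hypothetical, not the project's actual code):
# each line is wrapped in \1color\1 ... \2 so the TextNode renders it in the
# requested color when the buffer is re-joined.
def colorize(lines, color="white"):
    # Wrap each line in the push/pop escapes for a registered color property
    return ['\1%s\1%s\2' % (color, line) for line in lines]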
def loadGUI(self):
    from panda3d.core import TextNode, TextProperties, TextPropertiesManager
    DGG.setDefaultFont(loader.loadFont('phase_3/models/fonts/ImpressBT.ttf'))
    DGG.setDefaultRolloverSound(loader.loadSfx('phase_3/audio/sfx/GUI_rollover.ogg'))
    DGG.setDefaultClickSound(loader.loadSfx('phase_3/audio/sfx/GUI_create_toon_fwd.ogg'))
    DGG.setDefaultDialogGeom(loader.loadModel('phase_3/models/gui/dialog_box_gui'))

    red = TextProperties()
    red.setTextColor(1, 0, 0, 1)
    TextPropertiesManager.getGlobalPtr().setProperties('red', red)
    green = TextProperties()
    green.setTextColor(0, 1, 0, 1)
    TextPropertiesManager.getGlobalPtr().setProperties('green', green)
    yellow = TextProperties()
    yellow.setTextColor(1, 1, 0, 1)

    frame = DirectFrame(parent=base.a2dTopRight, relief=DGG.SUNKEN,
                        borderWidth=(0.01, 0.01), frameSize=(-0.3, 0.3, -0.95, 0.3),
                        pos=(-0.3, 0, -0.3))
    nextHead = DirectButton(parent=frame, relief=2, text='Next Head', text_scale=0.04,
                            borderWidth=(0.01, 0.01), frameSize=(-0.1, 0.1, -0.05, 0.05),
                            pos=(0.15, 0, 0.2), command=self.changeHead, extraArgs=[1])
    previousHead = DirectButton(parent=frame, relief=2, text='Prev Head', text_scale=0.04,
                                borderWidth=(0.01, 0.01), frameSize=(-0.1, 0.1, -0.05, 0.05),
                                pos=(-0.15, 0, 0.2), command=self.changeHead, extraArgs=[-1])
    nextTorso = DirectButton(parent=frame, relief=2, text='Next Torso', text_scale=0.04,
                             borderWidth=(0.01, 0.01), frameSize=(-0.1, 0.1, -0.05, 0.05),
                             pos=(0.15, 0, 0.05), command=self.changeTorso, extraArgs=[1])
    previousTorso = DirectButton(parent=frame, relief=2, text='Prev Torso', text_scale=0.04,
                                 borderWidth=(0.01, 0.01), frameSize=(-0.1, 0.1, -0.05, 0.05),
                                 pos=(-0.15, 0, 0.05), command=self.changeTorso, extraArgs=[-1])
    nextBackpack = DirectButton(parent=frame, relief=2, text='Next Backpack', text_scale=0.030,
                                borderWidth=(0.01, 0.01), frameSize=(-0.1, 0.1, -0.05, 0.05),
                                pos=(0.15, 0, -0.10), command=self.changeBackpack, extraArgs=[1])
    previousBackpack = DirectButton(parent=frame, relief=2, text='Prev Backpack', text_scale=0.030,
                                    borderWidth=(0.01, 0.01), frameSize=(-0.1, 0.1, -0.05, 0.05),
                                    pos=(-0.15, 0, -0.10), command=self.changeBackpack, extraArgs=[-1])
    nextHat = DirectButton(parent=frame, relief=2, text='Next Hat', text_scale=0.04,
                           borderWidth=(0.01, 0.01), frameSize=(-0.1, 0.1, -0.05, 0.05),
                           pos=(0.15, 0, -0.25), command=self.changeHat, extraArgs=[1])
    previousHat = DirectButton(parent=frame, relief=2, text='Prev Hat', text_scale=0.04,
                               borderWidth=(0.01, 0.01), frameSize=(-0.1, 0.1, -0.05, 0.05),
                               pos=(-0.15, 0, -0.25), command=self.changeHat, extraArgs=[-1])
    nextGlasses = DirectButton(parent=frame, relief=2, text='Next Glasses', text_scale=0.035,
                               borderWidth=(0.01, 0.01), frameSize=(-0.1, 0.1, -0.05, 0.05),
                               pos=(0.15, 0, -0.40), command=self.changeGlasses, extraArgs=[1])  # delta +- 25
    previousGlasses = DirectButton(parent=frame, relief=2, text='Prev Glasses', text_scale=0.035,
                                   borderWidth=(0.01, 0.01), frameSize=(-0.1, 0.1, -0.05, 0.05),
                                   pos=(-0.15, 0, -0.40), command=self.changeGlasses, extraArgs=[-1])
    clearBackpack = DirectButton(parent=frame, relief=2, text='Clear Backpack', text_scale=0.030,
                                 borderWidth=(0.01, 0.01), frameSize=(-0.1, 0.1, -0.05, 0.05),
                                 pos=(-0.15, 0, -0.55), command=self.clearBackpack)
    clearHat = DirectButton(parent=frame, relief=2, text='Clear Hat', text_scale=0.035,
                            borderWidth=(0.01, 0.01), frameSize=(-0.1, 0.1, -0.05, 0.05),
                            pos=(0.15, 0, -0.55), command=self.clearHat)
    clearGlasses = DirectButton(parent=frame, relief=2, text='Clear Glasses', text_scale=0.035,
                                borderWidth=(0.01, 0.01), frameSize=(-0.1, 0.1, -0.05, 0.05),
                                pos=(-0.15, 0, -0.70), command=self.clearGlasses)
    saveButton = DirectButton(parent=frame, relief=2, text='Save', text_scale=0.035,
                              borderWidth=(0.01, 0.01), frameSize=(-0.1, 0.1, -0.05, 0.05),
                              pos=(-0.15, 0, -0.85), command=self.save)
    self.autosaveButton = DirectButton(parent=frame, relief=2,
                                       text='Autosave:\n\x01red\x01off\x02', text_scale=0.035,
                                       borderWidth=(0.01, 0.01), frameSize=(-0.1, 0.1, -0.05, 0.05),
                                       text_pos=(0, 0.01), pos=(0.15, 0, -0.85),
                                       command=self.autosave)
    self.backpackLabel = DirectLabel(parent=base.a2dBottomCenter, relief=None,
                                     text='Backpack:', text_scale=0.05,
                                     pos=(0, 0, 0.1), text_align=TextNode.ACenter)
    self.hatLabel = DirectLabel(parent=base.a2dBottomCenter, relief=None,
                                text='Hat:', text_scale=0.05,
                                pos=(0, 0, 0.2), text_align=TextNode.ACenter)
    self.glassesLabel = DirectLabel(parent=base.a2dBottomCenter, relief=None,
                                    text='Glasses:', text_scale=0.05,
                                    pos=(0, 0, 0.3), text_align=TextNode.ACenter)
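# Hedged sketch of how the 'red'/'green' properties registered in loadGUI are
# typically swapped at runtime, e.g. from the autosave() toggle wired to the
# button above. The autosaveEnabled flag is an assumed attribute used only for
# illustration; the project's real autosave() may work differently.
def autosave(self):
    self.autosaveEnabled = not getattr(self, 'autosaveEnabled', False)
    if self.autosaveEnabled:
        # Embed the 'green' TextProperties around the status word
        self.autosaveButton['text'] = 'Autosave:\n\x01green\x01on\x02'
    else:
        # Fall back to the 'red' TextProperties registered earlier
        self.autosaveButton['text'] = 'Autosave:\n\x01red\x01off\x02'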