def switchOn(sampleRate=44100):
    """Must explicitly switch on the microphone before use, can take several seconds.
    """
    # imports from pyo, creates globals including pyoServer and pyoSamplingRate
    global haveMic
    haveMic = False
    t0 = time.time()
    try:
        global Server, Record, Input, Clean_objects, SfPlayer, serverCreated, serverBooted
        from pyo import Server, Record, Input, Clean_objects, SfPlayer, serverCreated, serverBooted
        global getVersion, pa_get_input_devices, pa_get_output_devices
        from pyo import getVersion, pa_get_input_devices, pa_get_output_devices
        haveMic = True
    except ImportError:
        msg = 'Microphone class not available, needs pyo; see http://code.google.com/p/pyo/'
        logging.error(msg)
        raise ImportError(msg)
    global pyoSamplingRate
    pyoSamplingRate = sampleRate
    global pyoServer
    if serverCreated():
        pyoServer.setSamplingRate(sampleRate)
        pyoServer.boot()
    else:
        pyoServer = Server(sr=sampleRate, nchnls=2, duplex=1).boot()
    pyoServer.start()
    # bug fix: str.strip('.py') removes any of the characters '.', 'p', 'y'
    # from both ends of the name (corrupting it); slice off the suffix instead
    label = __file__[:-3] if __file__.endswith('.py') else __file__
    logging.exp('%s: switch on (%dhz) took %.3fs' %
                (label, sampleRate, time.time() - t0))
def _checkout(requestedVersion):
    """Look for a Maj.min.patch requested version, download (fetch) if needed.
    """
    # Already on the requested tag? Nothing to do.
    if currentTag() == requestedVersion:
        return requestedVersion

    # Tag unknown locally: fetch from github and re-check.
    if requestedVersion not in _localVersions(forceCheck=True):
        msg = _translate("Couldn't find version {} locally. Trying github...")
        logging.info(msg.format(requestedVersion))
        subprocess.check_output('git fetch github'.split())
        # forceCheck refreshes the cached tag list after the fetch
        if requestedVersion not in _localVersions(forceCheck=True):
            msg = _translate("{} is not currently available.")
            logging.error(msg.format(requestedVersion))
            return ''

    # Check out the requested tag inside the versions repository.
    cmd = ['git', 'checkout', requestedVersion]
    out = subprocess.check_output(cmd, stderr=subprocess.STDOUT,
                                  cwd=VERSIONSDIR)
    logging.debug(out)
    logging.exp('Success: ' + ' '.join(cmd))
    return requestedVersion
def record(self, sec, file='', block=True):
    """Capture sound input for duration <sec>, save to a file.

    Return the path/name to the new file. Uses onset time (epoch) as
    a meaningful identifier for filename and log.
    """
    while self.recorder.running:
        pass  # wait for any previous capture to finish
    self.duration = float(sec)
    self.onset = core.getTime()  # note: report onset time in log, and use in filename
    logging.data('%s: Record: onset %.3f, capture %.3fs' %
                 (self.loggingId, self.onset, self.duration))
    if not file:
        onsettime = '-%.3f' % self.onset
        self.savedFile = onsettime.join(os.path.splitext(self.wavOutFilename))
    else:
        # bug fix: strip('.wav') removed any of '.', 'w', 'a', 'v' from both
        # ends of the path; just ensure a single .wav suffix instead
        savePath = os.path.abspath(file)
        if not savePath.endswith('.wav'):
            savePath += '.wav'
        self.savedFile = savePath
    t0 = core.getTime()
    self.recorder.run(self.savedFile, self.duration, self.sampletype)
    self.rate = sound.pyoSndServer.getSamplingRate()
    if block:
        core.wait(self.duration - .0008)  # .0008 fudge factor for better reporting
        # actual timing is done by Clean_objects
        logging.exp('%s: Record: stop. %.3f, capture %.3fs (est)' %
                    (self.loggingId, core.getTime(), core.getTime() - t0))
    else:
        logging.exp('%s: Record: return immediately, no blocking' %
                    (self.loggingId))
    return self.savedFile
def record(self, sec, block=True):
    """Capture sound input for duration <sec>, save to a file.

    Return the path/name to the new file. Uses onset time (epoch) as
    a meaningful identifier for filename and log.
    """
    duration = float(sec)  # renamed from ALL-CAPS (it is a local, not a constant)
    self.onset = time.time()  # note: report onset time in log, and use in filename
    logging.data('%s: Record: onset %.3f, capture %.3fs' %
                 (self.loggingId, self.onset, duration))
    self.savedFile = self.wavOutFilename.replace(ONSET_TIME_HERE,
                                                 '-%.3f' % self.onset)
    inputter = Input(chnl=0, mul=1)  # chnl=[0,1] for stereo input
    t0 = time.time()
    # launch the recording, saving to file:
    recorder = Record(inputter,
                      self.savedFile,
                      chnls=2,
                      fileformat=0,  # .wav format
                      sampletype=0,
                      buffering=4)  # 4 is default
    # launch recording, block as needed, and clean up:
    clean = Clean_objects(duration, recorder)  # set up to stop recording
    clean.start()  # the timer starts now, ends automatically whether block or not
    if block:
        # bug fix: guard against a negative sleep for very short recordings;
        # Clean_objects() set-up takes ~0.0008s
        time.sleep(max(0.0, duration - 0.0008))
        logging.exp('%s: Record: stop. %.3f, capture %.3fs (est)' %
                    (self.loggingId, time.time(), time.time() - t0))
    else:
        logging.exp('%s: Record: return immediately, no blocking' %
                    (self.loggingId))
    self.duration = duration  # used in playback()
    return self.savedFile  # filename, or None
def playback(self, block=True, loops=0, stop=False, log=True):
    """Plays the saved .wav file, as just recorded or resampled.

    Execution blocks by default, but can return immediately with
    `block=False`.

    `loops` : number of extra repetitions; 0 = play once

    `stop` : True = immediately stop ongoing playback (if there is one),
    and return
    """
    # nothing to play unless a recording exists on disk
    if not self.savedFile or not os.path.isfile(self.savedFile):
        msg = "%s: Playback requested but no saved file" % self.loggingId
        logging.error(msg)
        raise ValueError(msg)

    if stop:
        # halt an in-progress playback rather than starting a new one
        playing = (hasattr(self, "current_recording") and
                   self.current_recording.status == PLAYING)
        if playing:
            self.current_recording.stop()
        return

    # load the saved file as a Sound and start it
    label = self.name + ".current_recording"
    self.current_recording = sound.Sound(self.savedFile, name=label,
                                         loops=loops)
    self.current_recording.play()
    if block:
        core.wait(self.duration * (loops + 1))  # duration set during record()

    if log and self.autoLog:
        if loops:
            logging.exp("%s: Playback: play %.3fs x %d (est) %s" %
                        (self.loggingId, self.duration, loops + 1,
                         self.savedFile))
        else:
            logging.exp("%s: Playback: play %.3fs (est) %s" %
                        (self.loggingId, self.duration, self.savedFile))
def setMarker(self, tone=19000, secs=0.015, volume=0.03, log=True):
    """Sets the onset marker, where `tone` is either in hz or a custom sound.

    The default tone (19000 Hz) is recommended for auto-detection, as being
    easier to isolate from speech sounds (and so reliable to detect). The
    default duration and volume are appropriate for a quiet setting such as
    a lab testing room. A louder volume, longer duration, or both may give
    better results when recording loud sounds or in noisy environments, and
    will be auto-detected just fine (even more easily). If the hardware
    microphone in use is not physically near the speaker hardware, a louder
    volume is likely to be required.

    Custom sounds cannot be auto-detected, but are supported anyway for
    presentation purposes. E.g., a recording of someone saying "go" or
    "stop" could be passed as the onset marker.
    """
    # a playable object was given: use it as-is, no auto-detection possible
    if hasattr(tone, "play"):
        self.marker_hz = 0
        self.marker = tone
        if log and self.autoLog:
            logging.exp("custom sound set as marker; getMarkerOnset() will not be able to auto-detect onset")
        return

    # numeric tone: build a pure-tone marker, warning about Nyquist limits
    self.marker_hz = float(tone)
    sampleRate = sound.pyoSndServer.getSamplingRate()
    if sampleRate < 2 * self.marker_hz:
        # NyquistError
        logging.warning("Recording rate (%i Hz) too slow for %i Hz-based marker detection." % (int(sampleRate), self.marker_hz))
    if log and self.autoLog:
        logging.exp("frequency of recording onset marker: %.1f" % self.marker_hz)
    self.marker = sound.Sound(self.marker_hz, secs, volume=volume,
                              name=self.name + ".marker_tone")
def __init__(self, win, size, pos=(0,0), ori=0, nVert=120, shape='circle',
             units=None, name='', autoLog=True):
    """Aperture built with a GLU quadric; named shapes override `nVert`."""
    # what local vars are defined (these are the init params) for use by __repr__
    self._initParams = dir()
    self._initParams.remove('self')
    # set self params
    self.autoLog = False  # set this False first and change after attribs are set
    self.win = win
    self.name = name
    # unit conversions: fall back to the window's units if none given
    # (idiom fix: `is not None` rather than `!= None`)
    if units is not None and len(units):
        self.units = units
    else:
        self.units = win.units
    # named shapes force a vertex count ('square' also needs a 45 deg turn)
    if shape.lower() == 'square':
        ori += 45
        nVert = 4
    elif shape.lower() == 'triangle':
        nVert = 3
    self.ori = ori
    # a non-integer nVert is ignored and the default of 120 vertices is kept
    # (idiom fix: isinstance() rather than type() equality)
    self.nVert = nVert if isinstance(nVert, int) else 120
    self.quad = GL.gluNewQuadric()  # needed for gluDisk
    self.setSize(size, needReset=False)
    self.setPos(pos, needReset=False)
    self._reset()  # implicitly runs a self.enable()
    self.autoLog = autoLog
    if autoLog:
        logging.exp("Created %s = %s" % (self.name, str(self)))
def setVolume(self, newVol, log=True):
    """Sets the current volume of the sound (0.0 to 1.0, inclusive)"""
    # clamp the requested volume into the valid [0.0, 1.0] range
    clamped = newVol
    if clamped < 0.0:
        clamped = 0.0
    elif clamped > 1.0:
        clamped = 1.0
    self.volume = clamped
    self.needsUpdate = True
    if log and self.autoLog:
        logging.exp("Sound %s set volume %.3f" % (self.name, self.volume),
                    obj=self)
    return self.getVolume()
def setSound(self, value, secs=0.5, octave=4, hamming=True, log=True):
    """Set the sound to be played.

    Often this is not needed by the user - it is called implicitly during
    initialisation.

    :parameters:

        value: can be a number, string or an array:
            * If it's a number between 37 and 32767 then a tone will be
              generated at that frequency in Hz.
            * It could be a string for a note ('A','Bfl','B','C','Csh'...).
              Then you may want to specify which octave as well
            * Or a string could represent a filename in the current location,
              or mediaLocation, or a full path combo
            * Or by giving an Nx2 numpy array of floats (-1:1) you can
              specify the sound yourself as a waveform

        secs: duration (only relevant if the value is a note name or a
            frequency value)

        octave: is only relevant if the value is a note name.
            Middle octave of a piano is 4. Most computers won't
            output sounds in the bottom octave (1) and the top
            octave (8) is generally painful
    """
    self._snd = None  # Re-init sound to ensure bad values will raise RuntimeError during setting

    try:  # could be '440' meaning 440
        value = float(value)  # we've been asked for a particular Hz
    except (ValueError, TypeError):
        pass  # this is a string that can't be a number
    else:
        self._fromFreq(value, secs, hamming=hamming)

    if isinstance(value, basestring):
        if capitalize(value) in knownNoteNames:
            if not self._fromNoteName(capitalize(value), secs, octave,
                                      hamming=hamming):
                self._snd = None
        else:
            # try finding the file
            self.fileName = None
            for filePath in ['', mediaLocation]:
                p = path.join(filePath, value)
                if path.isfile(p):
                    self.fileName = p
                elif path.isfile(p + '.wav'):
                    self.fileName = p + '.wav'
            if self.fileName is None:
                # call-form raise works on both python 2 and 3
                # (old `raise E, msg` comma syntax was python-2 only)
                raise IOError("setSound: could not find a sound file named " +
                              value)
            elif not self._fromFile(value):
                self._snd = None
    elif type(value) in [list, numpy.ndarray]:
        # create a sound from the input array/list
        if not self._fromArray(value):
            self._snd = None

    # did we succeed?
    if self._snd is None:
        # bug fix: %-formatting so a numeric `value` cannot TypeError
        # the old "..." + value + "..." concatenation
        raise RuntimeError("Could not make a %s sound" % (value,))
    else:
        if log and self.autoLog:
            logging.exp("Set %s sound=%s" % (self.name, value), obj=self)
        self.status = NOT_STARTED
def play(self, loops=None, autoStop=True, log=True):
    """Starts playing the sound on an available channel.

    loops : int
        (same as above)

    For playing a sound file, you cannot specify the start and stop times
    when playing the sound, only when creating the sound initially.

    Playing a sound runs in a separate thread i.e. your code won't wait for
    the sound to finish before continuing. To pause while playing, you need
    to use a `psychopy.core.wait(mySound.getDuration())`. If you call
    `play()` while something is already playing the sounds will be played
    over each other.
    """
    if loops is not None and self.loops != loops:
        self.setLoops(loops)
    if self.needsUpdate:
        self._updateSnd()  # ~0.00015s, regardless of the size of self._sndTable
    self._snd.out()
    self.status = STARTED
    if autoStop or self.loops != 0:
        # pyo looping is boolean: loop forever or not at all,
        # so track requested loops using time; limitations: not sample-accurate
        extent = (FOREVER if self.loops < 0
                  else self.getDuration() * (self.loops + 1))
        self.terminator = threading.Timer(extent, self._onEOS)
        self.terminator.start()
    if log and self.autoLog:
        logging.exp("Sound %s started" % self.name, obj=self)
    return self
def reset(self, log=True):
    """Restores to fresh state, ready to record again
    """
    if log and self.autoLog:
        logging.exp('%s: resetting at %.3f' % (self.loggingId,
                                               core.getTime()))
    # re-running __init__ discards all captured state
    self.__init__(name=self.name, saveDir=self.saveDir)
def stop(self, log=True):
    """Stops the sound immediately
    """
    self._snd.stop()
    self.status = STOPPED
    if log and self.autoLog:
        msg = "Sound %s stopped"
        logging.exp(msg % self.name, obj=self)
def play(self, fromStart=True, log=True, loops=0):
    """Starts playing the sound on an available channel.

    Parameters
    ----------
    fromStart : bool
        Not yet implemented.
    log : bool
        Whether or not to log the playback event.
    loops : int
        How many times to repeat the sound after it plays once. If
        `loops` == -1, the sound will repeat indefinitely until stopped.

    Notes
    -----
    If no sound channels are available, it will not play and return None.
    This runs off a separate thread i.e. your code won't wait for the
    sound to finish before continuing. You need to use a
    psychopy.core.wait() command if you want things to pause.
    If you call play() whiles something is already playing the sounds will
    be played over each other.
    """
    self._snd.play(loops=loops)
    self.status = STARTED
    if log and self.autoLog:
        msg = "Sound %s started"
        logging.exp(msg % self.name, obj=self)
    return self
def __init__(self):
    """Class to detect and report `ioLab button box <http://www.iolab.co.uk>`_.

    The ioLabs library needs to be installed. It is included in the *Standalone*
    distributions of PsychoPy as of version 1.62.01. Otherwise try "pip install ioLabs"

    Usage::

        from psychopy.hardware import iolab
        bbox = iolab.ButtonBox()

    For examples see the demos menu of the PsychoPy Coder or go to the URL above.

    All times are reported in units of seconds.
    """
    # initialise the underlying USB device first; the callbacks registered
    # below depend on self.commands existing (set up by the base class)
    ioLabs.USBBox.__init__(self)
    logging.debug('init iolabs bbox')
    self.events = []
    self.status = None  # helps Builder
    self._lastReset = 0.0  # time on baseclock at which the bbox clock was reset
    self._baseclock = core.Clock()  # for basetime, not RT time
    self.resetClock(log=True)  # internal clock on the bbox
    logging.exp('button box resetClock(log=True) took %.4fs' % self._baseclock.getTime())
    # route key-down / key-up / clock reports from the hardware to handlers
    self.commands.add_callback(REPORT.KEYDN, self._onKeyDown)
    self.commands.add_callback(REPORT.KEYUP, self._onKeyUp)
    self.commands.add_callback(REPORT.RTCREP, self._onRtcRep)
def __init__(self, win, size, pos=(0,0), ori=0, nVert=120, shape='circle',
             units=None, name='', autoLog=True):
    """Aperture built from a named shape, a vertex list, or a regular polygon."""
    # what local vars are defined (these are the init params) for use by __repr__
    self._initParams = dir()
    self._initParams.remove('self')
    # set self params
    self.autoLog = False  # set this False first and change after attribs are set
    self.win = win
    self.name = name
    self.ori = ori
    # unit conversions: fall back to the window's units if none given
    # (idiom fix: `is not None` rather than `!= None`)
    if units is not None and len(units):
        self.units = units
    else:
        self.units = win.units
    # ugly hack for setting vertices using combination of shape and nVerts
    # (idiom fix: isinstance() rather than type() equality / membership)
    if isinstance(nVert, int):
        self.nVert = nVert
    regularPolygon = True
    self.shape = shape
    unrecognized = False
    if shape is None:
        pass  # just use the nVert we were given
    elif isinstance(shape, (str, unicode)):
        if shape.lower() == 'circle':
            pass  # just use the nVert we were given
        elif shape.lower() == 'square':
            regularPolygon = False
            # if we use polygon then we have to hack the orientation
            vertices = [[0.5,-0.5],[-0.5,-0.5],[-0.5,0.5],[0.5,0.5]]
        elif shape.lower() == 'triangle':
            regularPolygon = False
            # if we use polygon then we have to hack the orientation
            vertices = [[0.5,-0.5],[0,0.5],[-0.5,-0.5]]
        else:
            unrecognized = True
    elif isinstance(shape, (tuple, list, numpy.ndarray)):
        regularPolygon = False
        vertices = shape
    else:
        unrecognized = True
    if unrecognized:
        logging.warn("Unrecognized shape for aperture. Expected 'circle', 'square', 'triangle', vertices or None but got %s" %(repr(shape)))
    # regular polygons are drawn via Polygon; everything else via ShapeStim
    if regularPolygon:
        self._shape = polygon.Polygon(win=self.win, edges=self.nVert,
                                      fillColor=1, lineColor=None,
                                      interpolate=False, pos=pos, size=size)
    else:
        self._shape = ShapeStim(win=self.win, vertices=vertices,
                                fillColor=1, lineColor=None,
                                interpolate=False, pos=pos, size=size)
    self._needVertexUpdate = True
    self._reset()  # implicitly runs a self.enable()
    self.autoLog = autoLog
    if autoLog:
        logging.exp("Created %s = %s" % (self.name, str(self)))
def setLoop(self, newLoop, log=True):
    """Sets the current loop (True or False)"""
    # `== True` (not bool()) is kept deliberately: it preserves the exact
    # comparison semantics of the original for non-bool inputs
    self.loop = (newLoop == True)
    self.needsUpdate = True
    if log and self.autoLog:
        msg = "Sound %s set loop %s"
        logging.exp(msg % (self.name, self.loop), obj=self)
    return self.getLoop()
def setLoops(self, newLoops, log=True):
    """Sets the current requested extra loops (int)"""
    self.loops = int(newLoops)
    self.needsUpdate = True
    if log and self.autoLog:
        msg = "Sound %s set loops %s"
        logging.exp(msg % (self.name, self.loops), obj=self)
def _record(self, sec, filename="", block=True, log=True):
    """Capture sound input for duration <sec>; return path to the new file.

    If `filename` is empty, a name is derived from self.wavOutFilename
    plus the onset timestamp; otherwise `filename` is used (with a .wav
    suffix ensured).
    """
    while self.recorder.running:
        pass  # wait for any previous capture to finish
    self.duration = float(sec)
    self.onset = core.getTime()  # for duration estimation, high precision
    self.fileOnset = core.getAbsTime()  # for log and filename, 1 sec precision
    ms = "%.3f" % (core.getTime() - int(core.getTime()))
    if log and self.autoLog:
        logging.data("%s: Record: onset %d, capture %.3fs" %
                     (self.loggingId, self.fileOnset, self.duration))
    if not filename:
        onsettime = "-%d" % self.fileOnset + ms[1:]
        self.savedFile = onsettime.join(os.path.splitext(self.wavOutFilename))
    else:
        # bug fix: strip(".wav") removed any of '.', 'w', 'a', 'v' from both
        # ends of the path; just ensure a single .wav suffix instead
        savePath = os.path.abspath(filename)
        if not savePath.endswith(".wav"):
            savePath += ".wav"
        self.savedFile = savePath
    t0 = core.getTime()
    self.recorder.run(self.savedFile, self.duration, **self.options)
    self.rate = sound.pyoSndServer.getSamplingRate()
    if block:
        core.wait(self.duration, 0)
        if log and self.autoLog:
            logging.exp("%s: Record: stop. %.3f, capture %.3fs (est)" %
                        (self.loggingId, core.getTime(),
                         core.getTime() - t0))
        while self.recorder.running:
            core.wait(0.001, 0)
    else:
        if log and self.autoLog:
            logging.exp("%s: Record: return immediately, no blocking" %
                        (self.loggingId))
    return self.savedFile
def _record(self, sec, filename='', block=True):
    """Capture sound input for duration <sec>; return path to the new file."""
    while self.recorder.running:
        pass  # wait for any previous capture to finish
    self.duration = float(sec)
    self.onset = core.getTime()  # for duration estimation, high precision
    self.fileOnset = core.getAbsTime()  # for log and filename, 1 sec precision
    logging.data('%s: Record: onset %d, capture %.3fs' %
                 (self.loggingId, self.fileOnset, self.duration))
    # bug fix: was `if not file:`, testing the (always-truthy) builtin `file`
    # instead of the `filename` argument, so the auto-name branch never ran
    if not filename:
        onsettime = '-%d' % self.fileOnset
        self.savedFile = onsettime.join(os.path.splitext(self.wavOutFilename))
    else:
        # bug fix: strip('.wav') removed any of '.', 'w', 'a', 'v' from both
        # ends of the path; just ensure a single .wav suffix instead
        savePath = os.path.abspath(filename)
        if not savePath.endswith('.wav'):
            savePath += '.wav'
        self.savedFile = savePath
    t0 = core.getTime()
    self.recorder.run(self.savedFile, self.duration, **self.options)
    self.rate = sound.pyoSndServer.getSamplingRate()
    if block:
        core.wait(self.duration, 0)
        logging.exp('%s: Record: stop. %.3f, capture %.3fs (est)' %
                    (self.loggingId, core.getTime(), core.getTime() - t0))
        while self.recorder.running:
            core.wait(.001, 0)
    else:
        logging.exp('%s: Record: return immediately, no blocking' %
                    (self.loggingId))
    return self.savedFile
def _switchToVersion(requestedVersion):
    """Checkout (or clone then checkout) the requested version, set sys.path
    so that the new version will be found when import is called. Upon exit,
    the checked out version remains checked out, but the sys.path reverts.

    NB When installed with pip/easy_install PsychoPy will live in
    a site-packages directory, which should *not* be removed as it may
    contain other relevant and needed packages.
    """
    if not os.path.exists(prefs.paths['userPrefsDir']):
        os.mkdir(prefs.paths['userPrefsDir'])
    try:
        if os.path.exists(VERSIONSDIR):
            _checkout(requestedVersion)
        else:
            _clone(requestedVersion)
    except CalledProcessError as e:
        if 'did not match any file(s) known to git' in e.output:
            msg = _translate("'{}' is not a valid PsychoPy version.")
            logging.error(msg.format(requestedVersion))
            # bug fix: format the version into the message before raising;
            # previously the raw '{}' template string was raised
            raise RuntimeError(msg.format(requestedVersion))
        else:
            raise

    # make sure the checked-out version comes first on the python path:
    sys.path = [VERSIONSDIR] + sys.path
    logging.exp('Prepended `{}` to sys.path'.format(VERSIONSDIR))
def __init__(self, win, units='', lineWidth=1.5, lineColor='white',
             lineColorSpace='rgb', fillColor=None, fillColorSpace='rgb',
             vertices=((-0.5, 0), (0, +0.5), (+0.5, 0)),
             windingRule=None,  # default GL.GLU_TESS_WINDING_ODD
             closeShape=True,  # False for a line
             pos=(0, 0), size=1, ori=0.0, opacity=1.0, contrast=1.0,
             depth=0, interpolate=True, name=None, autoLog=None,
             autoDraw=False):
    """Tesselated shape stimulus; see class-level docs for parameter details."""
    # what local vars are defined (init params, for use by __repr__)
    self._initParamsOrig = dir()
    self._initParamsOrig.remove('self')

    # bug fix: assign these BEFORE the base-class __init__, which previously
    # read self.closeShape before it had ever been set on the instance
    self.closeShape = closeShape
    self.windingRule = windingRule

    super(ShapeStim, self).__init__(win,
                                    units=units,
                                    lineWidth=lineWidth,
                                    lineColor=lineColor,
                                    lineColorSpace=lineColorSpace,
                                    fillColor=fillColor,
                                    fillColorSpace=fillColorSpace,
                                    vertices=(),  # dummy verts
                                    closeShape=self.closeShape,
                                    pos=pos,
                                    size=size,
                                    ori=ori,
                                    opacity=opacity,
                                    contrast=contrast,
                                    depth=depth,
                                    interpolate=interpolate,
                                    name=name,
                                    autoLog=False,
                                    autoDraw=autoDraw)
    self._initVertices(vertices)

    # remove deprecated params (from ShapeStim.__init__):
    self._initParams = self._initParamsOrig

    # set autoLog now that params have been initialised
    wantLog = autoLog or autoLog is None and self.win.autoLog
    self.__dict__['autoLog'] = wantLog
    if self.autoLog:
        logging.exp("Created %s = %s" % (self.name, str(self)))
def resetClock(self, log=True):
    """Reset the clock on the bbox internal clock, e.g., at the start of a trial.
    """
    # resrtc() is better / faster than self.reset_clock() (no wait for report)
    self.commands.resrtc()
    self._lastReset = self._baseclock.getTime()
    if log:
        msg = 'reset bbox internal clock at basetime = %.3f'
        logging.exp(msg % self._lastReset)
def setVolume(self, newVol, log=True):
    """Sets the current volume of the sound (0.0:1.0) """
    # delegate clamping/validation to the underlying sound object
    self._snd.set_volume(newVol)
    if log and self.autoLog:
        logging.exp("Set Sound %s volume=%.3f" % (self.name, newVol),
                    obj=self)
    return self.getVolume()
def add(self, key, func, func_args=(), func_kwargs=None,
        modifiers=(), name=None):
    """
    Add a global event key.

    :Parameters:

    key : string
        The key to add.

    func : function
        The function to invoke once the specified keys were pressed.

    func_args : iterable
        Positional arguments to be passed to the specified function.

    func_kwargs : dict
        Keyword arguments to be passed to the specified function.

    modifiers : collection of strings
        Modifier keys. Valid keys are:
        'shift', 'ctrl', 'alt' (not on macOS), 'capslock', 'scrolllock',
        'command' (macOS only), 'option' (macOS only)

        Num Lock is not supported.

    name : string
        The name of the event. Will be used for logging. If None,
        will use the name of the specified function.

    :Raises:

    ValueError
        If the specified key or modifiers are invalid, or if the
        key / modifier combination has already been assigned to a global
        event.
    """
    # validate inputs before touching any registry state
    if key not in self._valid_keys:
        raise ValueError('Unknown key specified: %s' % key)
    if not set(modifiers).issubset(self._valid_modifiers):
        raise ValueError('Unknown modifier key specified.')

    index_key = self._gen_index_key((key, modifiers))
    if index_key in self._events:
        raise ValueError('The specified key is already assigned to a '
                         'global event. Use `.remove()` to remove it '
                         'first.')

    # fill in defaults, then register and log the new event
    if func_kwargs is None:
        func_kwargs = {}
    if name is None:
        name = func.__name__
    self._events[index_key] = self._GlobalEvent(func, func_args,
                                                func_kwargs, name)
    logging.exp('Added new global key event: %s' % name)
def __init__(self, win, image="", units="", pos=(0.0, 0.0), flipHoriz=False, flipVert=False, name=None, autoLog=None): """ """ # all doc is in the attributeSetter # what local vars are defined (these are the init params) for use by # __repr__ self._initParams = dir() self._initParams.remove('self') self.autoLog = False self.__dict__['win'] = win super(SimpleImageStim, self).__init__(name=name) self.units = units # call attributeSetter # call attributeSetter. Use shaders if available by default, this is a # good thing self.useShaders = win._haveShaders self.pos = pos # call attributeSetter self.image = image # call attributeSetter # check image size against window size if (self.size[0] > self.win.size[0] or self.size[1] > self.win.size[1]): msg = ("Image size (%s, %s) was larger than window size " "(%s, %s). Will draw black screen.") logging.warning(msg % (self.size[0], self.size[1], self.win.size[0], self.win.size[1])) # check position with size, warn if stimuli not fully drawn if ((self.pos[0] + self.size[0]/2) > self.win.size[0]/2 or (self.pos[0] - self.size[0]/2) < -self.win.size[0]/2): logging.warning("The image does not completely fit inside " "the window in the X direction.") if ((self.pos[1] + self.size[1]/2) > self.win.size[1]/2 or (self.pos[1] - self.size[1]/2) < -self.win.size[1]/2): logging.warning("The image does not completely fit inside " "the window in the Y direction.") # flip if necessary # initially it is false, then so the flip according to arg above self.__dict__['flipHoriz'] = False self.flipHoriz = flipHoriz # call attributeSetter # initially it is false, then so the flip according to arg above self.__dict__['flipVert'] = False self.flipVert = flipVert # call attributeSetter self._calcPosRendered() # set autoLog (now that params have been initialised) wantLog = autoLog is None and self.win.autoLog self.__dict__['autoLog'] = autoLog or wantLog if self.autoLog: logging.exp("Created {} = {}".format(self.name, self))
def __delitem__(self, key):
    """Remove a registered global event key; raises KeyError if unknown."""
    index_key = self._gen_index_key(key)
    # pop with a default so a missing key can be reported explicitly
    event = self._events.pop(index_key, None)
    if event is None:
        raise KeyError('Requested to remove unregistered global event key.')
    logging.exp("Removed global key event: '%s'." % event.name)
def resample(self, newRate=16000, keep=True, log=True):
    """Re-sample the saved file to a new rate, return the full path.

    Can take several visual frames to resample a 2s recording.

    The default values for resample() are for google-speech, keeping the
    original (presumably recorded at 48kHz) to archive. A warning is
    generated if the new rate not an integer factor / multiple of the old
    rate.

    To control anti-aliasing, use pyo.downsamp() or upsamp() directly.
    """
    if not self.savedFile or not os.path.isfile(self.savedFile):
        msg = "%s: Re-sample requested but no saved file" % self.loggingId
        logging.error(msg)
        raise ValueError(msg)
    # idiom fix: isinstance() rather than type() equality; also checked
    # first so non-numeric rates raise ValueError, not TypeError
    if not isinstance(newRate, int) or newRate <= 0:
        msg = "%s: Re-sample bad new rate = %s" % (self.loggingId,
                                                   repr(newRate))
        logging.error(msg)
        raise ValueError(msg)

    # set-up: pick down- vs up-sampling and a suffix recording the ratio
    if self.rate >= newRate:
        ratio = float(self.rate) / newRate
        info = "-ds%i" % ratio
    else:
        ratio = float(newRate) / self.rate
        info = "-us%i" % ratio
    if ratio != int(ratio):
        logging.warn("%s: old rate is not an integer factor of new rate" %
                     self.loggingId)
    ratio = int(ratio)
    newFile = info.join(os.path.splitext(self.savedFile))

    # use pyo's downsamp or upsamp based on relative rates:
    if not ratio:
        logging.warn("%s: Re-sample by %sx is undefined, skipping" %
                     (self.loggingId, str(ratio)))
    elif self.rate >= newRate:
        t0 = core.getTime()
        downsamp(self.savedFile, newFile, ratio)  # default 128-sample anti-aliasing
        if log and self.autoLog:
            logging.exp("%s: Down-sampled %sx in %.3fs to %s" %
                        (self.loggingId, str(ratio),
                         core.getTime() - t0, newFile))
    else:
        t0 = core.getTime()
        upsamp(self.savedFile, newFile, ratio)  # default 128-sample anti-aliasing
        if log and self.autoLog:
            logging.exp("%s: Up-sampled %sx in %.3fs to %s" %
                        (self.loggingId, str(ratio),
                         core.getTime() - t0, newFile))

    # clean-up:
    if not keep:
        os.unlink(self.savedFile)
        self.savedFile = newFile
        self.rate = newRate

    return os.path.abspath(newFile)
def stop(self, log=True):
    """Stops the sound immediately"""
    self._snd.stop()
    try:
        self.terminator.cancel()
    except AttributeError:  # pragma: no cover
        # narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit); AttributeError covers the real
        # case — no terminator Timer was started because play() never ran
        pass
    self.status = STOPPED
    if log and self.autoLog:
        logging.exp("Sound %s stopped" % (self.name), obj=self)
def switchOff():
    """Must explicitly switch off the microphone when done.
    """
    t0 = time.time()
    global pyoServer, pyoSamplingRate
    if serverBooted():
        pyoServer.stop()
        time.sleep(.25)  # give it a chance to stop before shutdown()
    if serverCreated():
        pyoServer.shutdown()
    try:
        del pyoSamplingRate
    except NameError:
        # robustness: switchOn() was never called, so there is nothing
        # to clean up
        pass
    # bug fix: str.strip('.py') removes any of the characters '.', 'p', 'y'
    # from both ends of the name (corrupting it); slice off the suffix instead
    label = __file__[:-3] if __file__.endswith('.py') else __file__
    logging.exp('%s: switch off took %.3fs' % (label, time.time() - t0))
def _test_upload(): def _upload(stuff): """assumes that SELECTOR_FOR_TEST_UPLOAD is a configured http server """ selector = SELECTOR_FOR_TEST_UPLOAD basicAuth = BASIC_AUTH_CREDENTIALS # make a tmp dir just for testing: tmp = mkdtemp() filename = "test.txt" tmp_filename = os.path.join(tmp, filename) f = open(tmp_filename, "w+") f.write(stuff) f.close() # get local sha256 before cleanup: digest = hashlib.sha256() digest.update(open(tmp_filename).read()) dgst = digest.hexdigest() # upload: status = upload(selector, tmp_filename, basicAuth) shutil.rmtree(tmp) # cleanup; do before asserts # test good_upload = True disgest_match = False if not status.startswith("success"): good_upload = False elif status.find(dgst) > -1: logging.exp("digests match") digest_match = True else: logging.error("digest mismatch") logging.flush() assert good_upload # remote server FAILED to report success assert digest_match # sha256 mismatch local vs remote file return int(status.split()[3]) # bytes # test upload: normal text, binary: msg = PSYCHOPY_USERAGENT # can be anything print "text: " bytes = _upload(msg) # normal text assert bytes == len(msg) # FAILED to report len() bytes print "binary: " digest = hashlib.sha256() # to get binary, 256 bits digest.update(msg) bytes = _upload(digest.digest()) assert bytes == 32 # FAILED to report 32 bytes for a 256-bit binary file (= odd if digests match) logging.exp("binary-file byte-counts match")
def prepare_trial(self, trial):
    """Fill in the derived fields of `trial` (distractor, sentences, index),
    drop the raw source fields, and return the modified trial dict."""
    logging.debug(
        u'Preparing trial for sentence {}, critical target {}'.format(
            trial['sentence_number'], trial['critical_target']))

    # select the distractor that matches this trial's condition
    condition_idx = self.conditions[trial['condition']]
    trial['critical_distractor'] = trial['distractors'][condition_idx]
    logging.exp(u'Set critical distractor to: {}'.format(
        trial['critical_distractor']))

    # build target/alternative sentences around the chosen distractor
    (trial['target_sentence'],
     trial['alt_sentence'],
     trial['critical_index']) = self.process_sentence(
        trial['sentence'], trial['critical_distractor'])

    # discard raw inputs that are no longer needed downstream
    del trial['sentence']
    del trial['distractors']
    if 'original_distractors' in trial:
        del trial['original_distractors']

    if self.autorun:
        trial['AUTORUN_DATA'] = u'EXPERIMENT IN AUTORUN MODE DO NOT USE DATA'
    return trial
def __init__(self, win,
             lineWidth=1.5,
             lineColor=(1.0, 1.0, 1.0),
             lineColorSpace='rgb',
             opacity=1.0,
             closeShape=False,
             buttonRequired=True,
             name=None,
             depth=0,
             autoLog=True,
             autoDraw=False):
    """Brush initialisation: store appearance settings and start with an
    empty stroke history."""
    super(Brush, self).__init__(name=name, autoLog=False)

    # window and appearance
    self.win = win
    self.name = name
    self.depth = depth
    self.lineColor = lineColor
    self.lineColorSpace = lineColorSpace
    self.lineWidth = lineWidth
    self.opacity = opacity
    self.closeShape = closeShape

    # interaction state
    self.buttonRequired = buttonRequired
    self.pointer = event.Mouse(win=self.win)
    self.shapes = []
    self.brushPos = []
    self.strokeIndex = -1
    self.atStartPoint = False

    # enable logging/drawing only after everything else is configured
    self.autoLog = autoLog
    self.autoDraw = autoDraw
    if self.autoLog:
        logging.exp("Created {name} = {obj}".format(name=self.name,
                                                    obj=str(self)))
def run(self, exp_win, ctl_win): print('Next task: %s' % str(self)) # show instruction if hasattr(self, 'instructions'): for _ in self.instructions(exp_win, ctl_win): yield True # wait for TTL fmri.get_ttl() # flush any remaining TTL keys if self.use_fmri: ttl_index = 0 logging.exp(msg="waiting for fMRI TTL") while True: if fmri.get_ttl(): #TODO: log real timing of TTL? logging.exp(msg="fMRI TTL %d" % ttl_index) ttl_index += 1 break yield False # no need to draw logging.info('GO') # send start trigger/marker to MEG + Biopac (or anything else on parallel port) if self.use_meg: meg.send_signal(meg.MEG_settings['TASK_START_CODE']) self.task_timer = core.Clock() # initialize a progress bar if we know the duration of the task progress_bar = False if hasattr(self, 'duration'): progress_bar = tqdm.tqdm(total=self.duration) frame_idx = 0 for _ in self._run(exp_win, ctl_win): if self.use_fmri: if fmri.get_ttl(): logging.exp(msg="fMRI TTL %d" % ttl_index) ttl_index += 1 # increment the progress bar every second if progress_bar: frame_idx += 1 if not frame_idx % config.FRAME_RATE: progress_bar.update(1) yield True # send stop trigger/marker to MEG + Biopac (or anything else on parallel port) if self.use_meg: meg.send_signal(meg.MEG_settings['TASK_STOP_CODE']) if progress_bar: progress_bar.clear() progress_bar.close()
def get_response(self, target_pos):
    """Collect a two-alternative response; return (acc, rt, response_pos).

    acc is 1 when the response position matches `target_pos`, else 0.
    Responses come from the SR box (buttons 1/5), the keyboard ('c'/'m'),
    or are simulated when autorun is active.
    """
    logging.exp(u'Waiting for response...')
    if self.autorun:
        logging.exp(u'Autorun active. Sending automatic response...')
        auto_response_time = 0.5 + random()
        # simulate a correct response ~98% of the time
        if randint(0, 100) < 98:
            return 1, auto_response_time, target_pos
        else:
            return 0, auto_response_time, int(not target_pos)
    if self.use_srbox:
        response, response_time = self.srbox.waitKeys(
            keyList=[1, 5], timeStamped=self.pair_clock)
        response = response[0]
    else:
        response, response_time = event.waitKeys(
            keyList=['c', 'm'], timeStamped=self.pair_clock)[0]
    logging.exp(u'Key presses received')
    # response_time = self.pair_clock.getTime()
    # map raw key ('c' / button 1 = left, 'm' / button 5 = right) to position
    if response in ('c', 1):
        response = 0
    elif response in ('m', 5):
        response = 1
    logging.exp(u'Keypress position: {}'.format(response))
    if response == target_pos:
        logging.info(u'Keypress does match target position.')
        logging.data(u'Response: {}'.format(response))
        logging.data(u'Acc: {}'.format(1))
        logging.data(u'Response time: {}'.format(response_time))
        return 1, response_time, response
    else:
        # bug fix: this branch previously logged 'does match' on a mismatch
        logging.info(u'Keypress does not match target position.')
        logging.data(u'Response: {}'.format(response))
        logging.data(u'Acc: {}'.format(0))
        logging.data(u'Response time: {}'.format(response_time))
        return 0, response_time, response
def update(self, uKey: KeyTracker, iKey: KeyTracker, spaceKey: KeyTracker,
           sm: StateMachine, thisExp: ExperimentHandler):
    """Advance the trial by one frame.

    Drives the welcome -> episode -> end-of-episode cycle: the state
    machine ``sm`` is locked and hidden until the participant confirms
    with the space key; per-episode scores and times are accumulated;
    once all episodes are done the results are written to ``thisExp``.

    NOTE(review): ``uKey`` and ``iKey`` are accepted but never read in
    this method — presumably consumed elsewhere; confirm before removing.
    """
    if self.firstFrame:
        logging.exp(f"Trial first frame")
        # hide and freeze the state machine until the welcome message
        # is confirmed
        sm.lock()
        sm.dontDrawSM()
        self.firstFrame = False
    if not self.welcomedMsg and spaceKey.getKeyUp():
        logging.exp(
            f"Trial user confirmed welcome, begin episode: {self.episodeNum}"
        )
        self.welcomedMsg = True
        self.resetSM(sm)
        sm.unlock()
        sm.doDrawSM()
        self.episodeTimer = core.Clock()
    if self.welcomedMsg and not self.endEpisode:
        if sm.movesLeft == 0:
            # episode over: record score/time and freeze the SM again
            logging.exp(f"Trial user finished episode")
            self.TrialScores.append(sm.totalScore)
            self.TrialTimes.append(self.episodeTimer.getTime())
            self.endEpisode = True
            sm.dontDrawSM()
            sm.lock()
    if self.endEpisode and spaceKey.getKeyUp():
        # participant acknowledged the episode end; start the next one
        self.endEpisode = False
        self.episodeNum += 1
        logging.exp(
            f"Trial user confirmed end episode, begin episode: {self.episodeNum}"
        )
        self.episodeTimer = core.Clock()
        self.resetSM(sm)
        sm.unlock()
        sm.doDrawSM()
    if not self.complete and self.episodes <= self.episodeNum:
        # all episodes done: flush accumulated data to the experiment
        # handler and reset the accumulators
        self.complete = True
        thisExp.addData('Trial Scores', self.TrialScores)
        thisExp.addData('Trial Times', self.TrialTimes)
        self.TrialTimes = []
        self.TrialScores = []
def _setResponse(self, item, question):
    """Makes calls to methods which make Slider or TextBox response
    objects for Form

    Parameters
    ----------
    item : dict
        The dict entry for a single item
    question : TextStim
        The question text object

    Returns
    -------
    psychopy.visual.slider.Slider
        The Slider object for response
    psychopy.visual.TextBox
        The TextBox object for response
    respHeight
        The height of the response object as type float
    """
    if self.autoLog:
        logging.exp("Response type: {}".format(item['type']))
        logging.exp("Response layout: {}".format(item['layout']))
        logging.exp(u"Response options: {}".format(item['options']))
    # Create position of response object
    pos = self._getResponsePos(item, question)
    # Dispatch on the item type: free text -> TextBox; headings and
    # descriptions have no control; rating-like types -> Slider.
    # NOTE(review): an unrecognized item['type'] leaves respCtrl and
    # respHeight unbound and raises NameError below — confirm whether
    # types are validated upstream.
    if item['type'].lower() == 'free text':
        respCtrl, respHeight = self._makeTextBox(item, pos)
    elif item['type'].lower() in ['heading', 'description']:
        respCtrl, respHeight = None, self.textHeight
    elif item['type'].lower() in ['rating', 'slider', 'choice', 'radio']:
        respCtrl, respHeight = self._makeSlider(item, pos)
    item['responseCtrl'] = respCtrl
    return respCtrl, float(respHeight)
def run(self, exp_win, ctl_win):
    """Generator running the task: shows instructions, waits for the
    scanner TTL pulse (when in fMRI mode), then delegates to
    ``self._run()``.

    Yields True when the windows need redrawing, False otherwise.
    """
    print('Next task: %s' % str(self))
    # optional instruction phase
    if hasattr(self, 'instructions'):
        for _ in self.instructions(exp_win, ctl_win):
            yield True
    fmri.get_ttl()  # flush any remaining TTL keys
    if self.use_fmri:
        ttl_index = 0
        logging.exp(msg="waiting for fMRI TTL")
        # idle until the first TTL pulse arrives
        while not fmri.get_ttl():
            yield False  # no need to draw
        #TODO: log real timing of TTL?
        logging.exp(msg="fMRI TTL %d" % ttl_index)
        ttl_index += 1
    logging.info('GO')
    self.task_timer = core.Clock()
    for _ in self._run(exp_win, ctl_win):
        # count any further TTL pulses seen during the task
        if self.use_fmri and fmri.get_ttl():
            logging.exp(msg="fMRI TTL %d" % ttl_index)
            ttl_index += 1
        yield True
def setSound(self, value, secs=0.5, octave=4, hamming=True, log=True): """Set the sound to be played. Often this is not needed by the user - it is called implicitly during initialisation. Parameters ---------- value : ArrayLike, int or str If it's a number between 37 and 32767 then a tone will be generated at that frequency in Hz. It could be a string for a note ('A', 'Bfl', 'B', 'C', 'Csh'. ...). Then you may want to specify which octave.O r a string could represent a filename in the current location, or media location, or a full path combo. Or by giving an Nx2 numpy array of floats (-1:1). secs : float Duration of a tone if a note name is to `value`. octave : int Is only relevant if the value is a note name. Middle octave of a piano is 4. Most computers won't output sounds in the bottom octave (1) and the top octave (8) is generally painful. hamming : bool To indicate if the sound should be apodized (i.e., the onset and offset smoothly ramped up from down to zero). The function apodize uses a Hanning window, but arguments named 'hamming' are preserved so that existing code is not broken by the change from Hamming to Hanning internally. Not applied to sounds from files. 
""" # Re-init sound to ensure bad values will raise error during setting: self._snd = None # Coerces pathlib obj to string, else returns inputted value value = pathToString(value) try: # could be '440' meaning 440 value = float(value) except (ValueError, TypeError): # this is a string that can't be a number, eg, a file or note pass else: # we've been asked for a particular Hz if value < 37 or value > 20000: msg = 'Sound: bad requested frequency %.0f' raise ValueError(msg % value) self._setSndFromFreq(value, secs, hamming=hamming) if isinstance(value, basestring): if value.capitalize() in knownNoteNames: self._setSndFromNote(value.capitalize(), secs, octave, hamming=hamming) else: # try finding a file self.fileName = None for filePath in ['', mediaLocation]: p = path.join(filePath, value) if path.isfile(p): self.fileName = p break elif path.isfile(p + '.wav'): self.fileName = p = p + '.wav' break if self.fileName is None: msg = "setSound: could not find a sound file named " raise ValueError(msg + value) else: self._setSndFromFile(p) elif type(value) in [list, numpy.ndarray]: # create a sound from the input array/list self._setSndFromArray(numpy.array(value)) # did we succeed? if self._snd is None: pass # raise ValueError, "Could not make a "+value+" sound" else: if log and self.autoLog: logging.exp("Set %s sound=%s" % (self.name, value), obj=self) self.status = NOT_STARTED
def setSound(self, value, secs=0.5, octave=4, hamming=True, log=True):
    """Set the sound to be played.

    Often this is not needed by the user - it is called implicitly during
    initialisation.

    :parameters:

        value: can be a number, string or an array:
            * If it's a number between 37 and 32767 then a tone will
              be generated at that frequency in Hz.
            * It could be a string for a note ('A', 'Bfl', 'B', 'C',
              'Csh'. ...). Then you may want to specify which octave.
            * Or a string could represent a filename in the current
              location, or mediaLocation, or a full path combo
            * Or by giving an Nx2 numpy array of floats (-1:1) you
              can specify the sound yourself as a waveform

        secs: duration (only relevant if the value is a note name or
            a frequency value)

        octave: is only relevant if the value is a note name.
            Middle octave of a piano is 4. Most computers won't
            output sounds in the bottom octave (1) and the top
            octave (8) is generally painful
    """
    # Re-init sound to ensure bad values will raise error during setting:
    self._snd = None
    try:
        # could be '440' meaning 440
        value = float(value)
    except (ValueError, TypeError):
        # this is a string that can't be a number, eg, a file or note
        pass
    else:
        # we've been asked for a particular Hz
        if value < 37 or value > 20000:
            msg = 'Sound: bad requested frequency %.0f'
            raise ValueError(msg % value)
        self._setSndFromFreq(value, secs, hamming=hamming)
    if isinstance(value, basestring):
        if value.capitalize() in knownNoteNames:
            self._setSndFromNote(value.capitalize(), secs, octave,
                                 hamming=hamming)
        else:
            # try finding a file; also accept `value` with an implicit
            # '.wav' extension
            self.fileName = None
            for filePath in ['', mediaLocation]:
                p = path.join(filePath, value)
                if path.isfile(p):
                    self.fileName = p
                    break
                elif path.isfile(p + '.wav'):
                    self.fileName = p + '.wav'
                    break
            if self.fileName is None:
                msg = "setSound: could not find a sound file named "
                # BUG FIX: modern raise syntax; the old
                # `raise ValueError, msg + value` form is Python-2-only.
                raise ValueError(msg + value)
            else:
                # BUG FIX: load the file that was actually found —
                # self.fileName may have had '.wav' appended, whereas
                # the old code passed the raw `value`, which fails when
                # only `value + '.wav'` exists on disk.
                self._setSndFromFile(self.fileName)
    elif type(value) in [list, numpy.ndarray]:
        # create a sound from the input array/list
        self._setSndFromArray(numpy.array(value))
    # did we succeed?
    if self._snd is None:
        pass  # could not make a sound; subclasses may choose to raise
    else:
        if log and self.autoLog:
            logging.exp("Set %s sound=%s" % (self.name, value), obj=self)
        self.status = NOT_STARTED
def add(self, key, func, func_args=(), func_kwargs=None,
        modifiers=(), name=None):
    """Register a new global event key.

    :Parameters:

    key : string
        The key to add.

    func : function
        The function to invoke once the specified keys were pressed.

    func_args : iterable
        Positional arguments to be passed to the specified function.

    func_kwargs : dict
        Keyword arguments to be passed to the specified function.

    modifiers : collection of strings
        Modifier keys. Valid keys are:
        'shift', 'ctrl', 'alt' (not on macOS), 'capslock',
        'numlock', 'scrolllock', 'command' (macOS only),
        'option' (macOS only)

    name : string
        The name of the event. Will be used for logging. If None,
        will use the name of the specified function.

    :Raises:

    ValueError
        If the specified key or modifiers are invalid, or if the
        key / modifier combination has already been assigned to a
        global event.

    """
    # validate key and modifiers before touching any state
    if key not in self._valid_keys:
        raise ValueError('Unknown key specified: %s' % key)
    if not set(modifiers).issubset(self._valid_modifiers):
        raise ValueError('Unknown modifier key specified.')

    combo = self._gen_index_key((key, modifiers))
    if combo in self._events:
        msg = ('The specified key is already assigned to a global event. '
               'Use `.remove()` to remove it first.')
        raise ValueError(msg)

    # fill in the defaults that can't be expressed in the signature
    kwargs = {} if func_kwargs is None else func_kwargs
    event_name = func.__name__ if name is None else name

    self._events[combo] = self._GlobalEvent(func, func_args, kwargs,
                                            event_name)
    logging.exp('Added new global key event: %s' % event_name)
def __init__(self, win, tex="sin", mask="none", units="",
             pos=(0.0, 0.0), size=None, sf=None, ori=0.0,
             phase=(0.0, 0.0), texRes=128, rgb=None, dkl=None, lms=None,
             color=(1.0, 1.0, 1.0), colorSpace='rgb', contrast=1.0,
             opacity=1.0, depth=0, rgbPedestal=(0.0, 0.0, 0.0),
             interpolate=False, blendmode='avg', name=None, autoLog=None,
             autoDraw=False, maskParams=None):
    """ """  # Empty docstring. All doc is in attributes
    # what local vars are defined (these are the init params) for use by
    # __repr__
    self._initParams = dir()
    for unecess in ['self', 'rgb', 'dkl', 'lms']:
        self._initParams.remove(unecess)

    # initialise parent class
    super(GratingStim, self).__init__(win, units=units, name=name,
                                      autoLog=False)
    # use shaders if available by default, this is a good thing
    self.__dict__['useShaders'] = win._haveShaders

    # UGLY HACK: Some parameters depend on each other for processing.
    # They are set "superficially" here.
    # TO DO: postpone calls to _createTexture, setColor and
    # _calcCyclesPerStim whin initiating stimulus
    self.__dict__['contrast'] = 1
    self.__dict__['size'] = 1
    self.__dict__['sf'] = 1
    self.__dict__['tex'] = tex
    self.__dict__['maskParams'] = maskParams

    # initialise textures and masks for stimulus
    self._texID = GL.GLuint()
    GL.glGenTextures(1, ctypes.byref(self._texID))
    self._maskID = GL.GLuint()
    GL.glGenTextures(1, ctypes.byref(self._maskID))
    self.__dict__['texRes'] = texRes  # must be power of 2
    self.interpolate = interpolate

    # NB Pedestal isn't currently being used during rendering - this is a
    # place-holder
    self.rgbPedestal = val2array(rgbPedestal, False, length=3)
    # No need to invoke decorator for color updating. It is done just
    # below.
    self.__dict__['colorSpace'] = colorSpace
    # legacy rgb/dkl/lms color arguments take precedence over `color`
    if rgb != None:
        logging.warning("Use of rgb arguments to stimuli are deprecated."
                        " Please use color and colorSpace args instead")
        self.setColor(rgb, colorSpace='rgb', log=False)
    elif dkl != None:
        logging.warning("Use of dkl arguments to stimuli are deprecated."
                        " Please use color and colorSpace args instead")
        self.setColor(dkl, colorSpace='dkl', log=False)
    elif lms != None:
        logging.warning("Use of lms arguments to stimuli are deprecated."
                        " Please use color and colorSpace args instead")
        self.setColor(lms, colorSpace='lms', log=False)
    else:
        self.setColor(color, colorSpace=colorSpace, log=False)

    # set other parameters
    self.ori = float(ori)
    self.phase = val2array(phase, False)
    self._origSize = None  # updated if an image texture is loaded
    self._requestedSize = size
    self.size = val2array(size)
    self.sf = val2array(sf)
    self.pos = val2array(pos, False, False)
    self.depth = depth

    self.tex = tex
    self.mask = mask
    self.contrast = float(contrast)
    self.opacity = float(opacity)
    self.autoLog = autoLog
    self.autoDraw = autoDraw
    self.blendmode = blendmode

    # fix scaling to window coords
    self._calcCyclesPerStim()

    # generate a displaylist ID
    self._listID = GL.glGenLists(1)

    # JRG: doing self._updateList() here means MRO issues for RadialStim,
    # which inherits from GratingStim but has its own _updateList code.
    # So don't want to do the update here (= ALSO the init of RadialStim).
    # Could potentially define a BaseGrating class without
    # updateListShaders code, and have GratingStim and RadialStim
    # inherit from it and add their own _updateList stuff.
    # Seems unnecessary. Instead, simply defer the update to the
    # first .draw(), should be fast:
    # self._updateList()  # ie refresh display list
    self._needUpdate = True

    # set autoLog now that params have been initialised
    wantLog = autoLog is None and self.win.autoLog
    self.__dict__['autoLog'] = autoLog or wantLog
    if self.autoLog:
        logging.exp("Created {} = {}".format(self.name, self))
def useVersion(requestedVersion):
    """Manage paths and checkout psychopy libraries for requested
    versions of PsychoPy.

    requestedVersion :
        A string with the requested version of PsychoPy to be used.

        Can be major.minor.patch, e.g., '1.83.01', or a partial version,
        such as '1.81', or even '1'; uses the most recent version within
        that series.

        'latest' means the most recent release having a tag on github.

    returns:
        Returns the current (new) version if it was successfully loaded.
        Raises a RuntimeError if git is needed and not present, or if
        other PsychoPy modules have already been loaded. Raises a
        subprocess CalledProcessError if an invalid git tag/version was
        checked out.

    Usage (at the top of an experiment script):

        from psychopy import useVersion
        useVersion('1.80')
        from psychopy import visual, event, ...

    See also:
        ensureMinimal()
    """
    # Sanity Checks
    imported = _psychopyComponentsImported()
    if imported:
        msg = _translate("Please request a version before importing any "
                         "PsychoPy modules. (Found: {})")
        raise RuntimeError(msg.format(imported))

    # Get a proper full-version tag from a partial tag:
    reqdMajorMinorPatch = fullVersion(requestedVersion)
    logging.exp('Requested: useVersion({}) = {}'.format(
        requestedVersion, reqdMajorMinorPatch))
    if not reqdMajorMinorPatch:
        msg = _translate('Unknown version `{}`')
        raise ValueError(msg.format(requestedVersion))

    # first use: clone the versions repository
    if not os.path.isdir(VERSIONSDIR):
        _clone(requestedVersion)  # Allow the versions subdirectory to be built

    if constants.PY3:
        # refuse versions that cannot run under Python 3
        py3Compatible = _versionFilter(versionOptions(local=False), None)
        py3Compatible += _versionFilter(availableVersions(local=False), None)
        py3Compatible.sort(reverse=True)

        if reqdMajorMinorPatch not in py3Compatible:
            msg = _translate("Please request a version of PsychoPy that is "
                             "compatible with Python 3. You can choose from "
                             "the following versions: {}. "
                             "Alternatively, run a Python 2 installation of "
                             "PsychoPy < v1.9.0.\n")
            logging.error(msg.format(py3Compatible))
            return

    if psychopy.__version__ != reqdMajorMinorPatch:
        # Switching required, so make sure `git` is available.
        if not _gitPresent():
            msg = _translate("Please install git; needed by useVersion()")
            raise RuntimeError(msg)

        # Setup Requested Version
        _switchToVersion(reqdMajorMinorPatch)

        # Reload!
        reload(psychopy)
        reload(logging)
        reload(web)
        if _versionTuple(reqdMajorMinorPatch) >= (1, 80):
            reload(tools)  # because this file is within tools

        # TODO check for other submodules that have already been imported

    logging.exp('Version now set to: {}'.format(psychopy.__version__))
    return psychopy.__version__
def __init__(self, win, units=None, fieldPos=(0.0, 0.0),
             fieldSize=(1.0, 1.0), fieldShape='circle', nElements=100,
             sizes=2.0, xys=None, rgbs=None, colors=(1.0, 1.0, 1.0),
             colorSpace='rgb', opacities=1.0, depths=0, fieldDepth=0,
             oris=0, sfs=1.0, contrs=1, phases=0, elementTex='sin',
             elementMask='gauss', texRes=48, interpolate=True,
             name=None, autoLog=None, maskParams=None):
    """
    :Parameters:

        win :
            a :class:`~psychopy.visual.Window` object (required)

        units : **None**, 'height', 'norm', 'cm', 'deg' or 'pix'
            If None then the current units of the
            :class:`~psychopy.visual.Window` will be used.
            See :ref:`units` for explanation of other options.

        nElements :
            number of elements in the array.
    """
    # what local vars are defined (these are the init params) for use by
    # __repr__
    self._initParams = dir()
    self._initParams.remove('self')

    super(ElementArrayStim, self).__init__(name=name, autoLog=False)
    self.autoLog = False  # until all params are set
    self.win = win

    # Not pretty (redefined later) but it works!
    self.__dict__['texRes'] = texRes
    self.__dict__['maskParams'] = maskParams

    # unit conversions
    if units != None and len(units):
        self.units = units
    else:
        self.units = win.units

    self.__dict__['fieldShape'] = fieldShape
    self.nElements = nElements
    # info for each element
    self.__dict__['sizes'] = sizes
    self.verticesBase = xys
    self._needVertexUpdate = True
    self._needColorUpdate = True
    self.useShaders = True
    self.interpolate = interpolate
    self.__dict__['fieldDepth'] = fieldDepth
    self.__dict__['depths'] = depths
    # shader support is a hard requirement for this stimulus
    if self.win.winType != 'pyglet':
        raise TypeError('ElementArrayStim requires a pyglet context')
    if not self.win._haveShaders:
        raise Exception("ElementArrayStim requires shaders support"
                        " and floating point textures")

    self.colorSpace = colorSpace
    # legacy rgbs argument takes precedence over `colors`
    if rgbs != None:
        msg = ("Use of the rgb argument to ElementArrayStim is deprecated"
               ". Please use colors and colorSpace args instead")
        logging.warning(msg)
        self.setColors(rgbs, colorSpace='rgb', log=False)
    else:
        self.setColors(colors, colorSpace=colorSpace, log=False)

    # Deal with input for fieldpos and fieldsize
    self.__dict__['fieldPos'] = val2array(fieldPos, False, False)
    self.__dict__['fieldSize'] = val2array(fieldSize, False)

    # create textures
    self._texID = GL.GLuint()
    GL.glGenTextures(1, ctypes.byref(self._texID))
    self._maskID = GL.GLuint()
    GL.glGenTextures(1, ctypes.byref(self._maskID))
    self.setMask(elementMask, log=False)
    self.texRes = texRes
    self.setTex(elementTex, log=False)

    self.setContrs(contrs, log=False)
    # opacities is used by setRgbs, so this needs to be early
    self.setOpacities(opacities, log=False)
    self.setXYs(xys, log=False)
    self.setOris(oris, log=False)
    # set sizes before sfs (sfs may need it formatted)
    self.setSizes(sizes, log=False)
    self.setSfs(sfs, log=False)
    self.setPhases(phases, log=False)
    self._updateVertices()

    # set autoLog now that params have been initialised
    wantLog = autoLog is None and self.win.autoLog
    self.__dict__['autoLog'] = autoLog or wantLog
    if self.autoLog:
        logging.exp("Created %s = %s" % (self.name, str(self)))
def __init__(self, win, size=1, pos=(0, 0), ori=0, nVert=120,
             shape='circle', inverted=False, units=None, name=None,
             autoLog=None):
    """Restrict drawing to a stencil region defined by `shape`.

    `shape` may be 'circle' (default), 'square', 'triangle', a list of
    vertices, or a filename for an image mask.
    """
    # what local vars are defined (these are the init params) for use by
    # __repr__
    self._initParams = dir()
    self._initParams.remove('self')
    super(Aperture, self).__init__(name=name, autoLog=False)

    # set self params
    self.autoLog = False  # change after attribs are set
    self.win = win
    # a stencil buffer is required for apertures to have any effect
    if not win.allowStencil:
        logging.error('Aperture has no effect in a window created '
                      'without allowStencil=True')
        core.quit()
    self.__dict__['size'] = size
    self.__dict__['pos'] = pos
    self.__dict__['ori'] = ori
    self.__dict__['inverted'] = inverted
    self.__dict__['filename'] = False

    # unit conversions
    if units != None and len(units):
        self.units = units
    else:
        self.units = win.units

    # set vertices using shape, or default to a circle with nVerts edges
    if hasattr(shape, 'lower') and not os.path.isfile(shape):
        shape = shape.lower()
    if shape is None or shape == 'circle':
        # NB: pentagon etc point upwards by setting x,y to be y,x
        # (sin,cos):
        vertices = [(0.5 * sin(radians(theta)), 0.5 * cos(radians(theta)))
                    for theta in numpy.linspace(0, 360, nVert, False)]
    elif shape == 'square':
        vertices = [[0.5, -0.5], [-0.5, -0.5], [-0.5, 0.5], [0.5, 0.5]]
    elif shape == 'triangle':
        vertices = [[0.5, -0.5], [0, 0.5], [-0.5, -0.5]]
    elif type(shape) in [tuple, list, numpy.ndarray] and len(shape) > 2:
        vertices = shape
    elif isinstance(shape, basestring):
        # is a string - see if it points to a file
        if os.path.isfile(shape):
            self.__dict__['filename'] = shape
        else:
            msg = ("Unrecognized shape for aperture. Expected 'circle',"
                   " 'square', 'triangle', vertices, filename, or None;"
                   " got %s")
            logging.error(msg % repr(shape))
            # NOTE(review): on this error path `vertices` is never
            # assigned, so the BaseShapeStim call below raises
            # NameError — confirm whether shapes are validated upstream.

    if self.__dict__['filename']:
        self._shape = ImageStim(win=self.win,
                                image=self.__dict__['filename'],
                                pos=pos, size=size, autoLog=False,
                                units=self.units)
    else:
        self._shape = BaseShapeStim(win=self.win, vertices=vertices,
                                    fillColor=1, lineColor=None,
                                    colorSpace='rgb', interpolate=False,
                                    pos=pos, size=size, autoLog=False,
                                    units=self.units)
        self.vertices = self._shape.vertices
        self._needVertexUpdate = True

    self._needReset = True  # Default when setting attributes
    # implicitly runs a self.enabled = True. Also sets
    # self._needReset = True on every call
    self._reset()

    # set autoLog now that params have been initialised
    wantLog = autoLog is None and self.win.autoLog
    self.__dict__['autoLog'] = autoLog or wantLog
    if self.autoLog:
        logging.exp("Created {} = {}".format(self.name, self))
def __init__(self, win, ticks=(1, 2, 3, 4, 5), labels=None, pos=None, size=None, units=None, flip=False, style='rating', granularity=0, readOnly=False, color='LightGray', font='Helvetica Bold', depth=0, name=None, autoDraw=False, autoLog=True): """ Parameters ---------- win : psychopy.visual.Window Into which the scale will be rendered ticks : list or tuple A set of values for tick locations. If given a list of numbers then these determine the locations of the ticks (the first and last determine the endpoints and the rest are spaced according to their values between these endpoints. labels : a list or tuple The text to go with each tick (or spaced evenly across the ticks). If you give 3 labels but 5 tick locations then the end and middle ticks will be given labels. If the labels can't be distributed across the ticks then an error will be raised. If you want an uneven distribution you should include a list matching the length of ticks but with some values set to None pos : XY pair (tuple, array or list) size : w,h pair (tuple, array or list) The size for the scale defines the area taken up by the line and the ticks. This also controls whether the scale is horizontal or vertical. units : the units to interpret the pos and size flip : bool By default the labels will be below or left of the line. This puts them above (or right) granularity : int or float The smallest valid increments for the scale. 0 gives a continuous (e.g. "VAS") scale. 1 gives a traditional likert scale. Something like 0.1 gives a limited fine-grained scale. 
color : Color of the line/ticks/labels according to the color space font : font name autodraw : depth : name : autoLog : """ # what local vars are defined (these are the init params) for use by # __repr__ self._initParams = dir() super(Slider, self).__init__(name=name, autoLog=False) self.win = win self.ticks = np.asarray(ticks) self.labels = labels if pos is None: self.pos = (0, 0) else: self.pos = pos if units is None: self.units = win.units else: self.units = units if size is None: self._size = defaultSizes[self.units] else: self._size = size self.flip = flip self.granularity = granularity self._color = color self.font = font self.autoDraw = autoDraw self.depth = depth self.name = name self.autoLog = autoLog self.readOnly = readOnly self.categorical = False # will become True if no ticks set only labels self.rating = None # current value (from a response) self.markerPos = None # current value (maybe not from a response) self.rt = None self.history = [] self.marker = None self.line = None self.tickLines = [] self.tickLocs = None self.labelLocs = None self._lineAspectRatio = 0.01 self._updateMarkerPos = True self._dragging = False self.mouse = event.Mouse() self._mouseStateClick = None # so we can rule out long click probs self._mouseStateXY = None # so we can rule out long click probs self.validArea = None self._createElements() # some things must wait until elements created self.contrast = 1.0 # set autoLog (now that params have been initialised) self.autoLog = autoLog if autoLog: logging.exp("Created %s = %s" % (self.name, repr(self))) self.status = NOT_STARTED self.responseClock = core.Clock() #set the style when everything else is set self.style = style
def __init__(self, win, buffer='back', rect=(-1, 1, 1, -1),
             sqPower2=False, stim=(), interpolate=True, flipHoriz=False,
             flipVert=False, mask='None', pos=(0, 0), name=None,
             autoLog=None):
    """
    :Parameters:

        buffer :
            the screen buffer to capture from, default is 'back'
            (hidden). 'front' is the buffer in view after win.flip()

        rect :
            a list of edges [left, top, right, bottom] defining a
            screen rectangle which is the area to capture from the
            screen, given in norm units. default is fullscreen:
            [-1, 1, 1, -1]

        stim :
            a list of item(s) to be drawn to the back buffer (in
            order). The back buffer is first cleared (without the win
            being flip()ed), then stim items are drawn, and finally
            the buffer (or part of it) is captured. Each item needs
            to have its own .draw() method, and have the same window
            as win.

        interpolate :
            whether to use interpolation (default = True, generally
            good, especially if you change the orientation)

        sqPower2 :
            - False (default) = use rect for size if OpenGL = 2.1+
            - True = use square, power-of-two image sizes

        flipHoriz :
            horizontally flip (mirror) the captured image, default =
            False

        flipVert :
            vertically flip (mirror) the captured image; default =
            False
    """
    # depends on: window._getRegionOfFrame

    # what local vars are defined (these are the init params) for use by
    # __repr__
    self._initParams = dir()
    self._initParams.remove('self')

    self.autoLog = False  # set this False first and change after attribs are set
    _clock = core.Clock()  # times how long initialization takes (logged below)
    if stim:  # draw all stim to the back buffer
        win.clearBuffer()
        buffer = 'back'
        if hasattr(stim, '__iter__'):
            for stimulus in stim:
                try:
                    # only draw stimuli that belong to this window
                    if stimulus.win == win:
                        stimulus.draw()
                    else:
                        logging.warning('BufferImageStim.__init__: user requested "%s" drawn in another window' % repr(stimulus))
                except AttributeError:
                    logging.warning('BufferImageStim.__init__: "%s" failed to draw' % repr(stimulus))
        else:
            raise (ValueError('Stim is not iterable in BufferImageStim. It should be a list of stimuli.'))

    # take a screenshot of the buffer using win._getRegionOfFrame():
    glversion = pyglet.gl.gl_info.get_version()
    if glversion >= '2.1' and not sqPower2:
        region = win._getRegionOfFrame(buffer=buffer, rect=rect)
    else:
        if not sqPower2:
            logging.debug('BufferImageStim.__init__: defaulting to square power-of-2 sized image (%s)' % glversion)
        region = win._getRegionOfFrame(buffer=buffer, rect=rect,
                                       squarePower2=True)
    if stim:
        win.clearBuffer()

    # turn the RGBA region into an ImageStim() object:
    if win.units in ['norm']:
        # NOTE(review): `pos *= ...` on the tuple default relies on
        # numpy broadcasting to produce an array — confirm intended
        pos *= win.size / 2.
    size = region.size / win.size / 2.
    super(BufferImageStim, self).__init__(win, image=region, units='pix',
                                          mask=mask, pos=pos, size=size,
                                          interpolate=interpolate,
                                          name=name, autoLog=False)
    self.size = region.size

    # to improve drawing speed, move these out of draw:
    self.desiredRGB = self._getDesiredRGB(self.rgb, self.colorSpace,
                                          self.contrast)
    self.thisScale = numpy.array([4, 4])
    self.flipHoriz = flipHoriz
    self.flipVert = flipVert

    # set autoLog now that params have been initialised
    self.__dict__['autoLog'] = autoLog or autoLog is None and self.win.autoLog
    if self.autoLog:
        logging.exp("Created %s = %s" % (self.name, str(self)))
    logging.exp('BufferImageStim %s: took %.1fms to initialize' %
                (name, 1000 * _clock.getTime()))
def _onEOS(self, log=True):
    """End-of-stream callback: mark the sound as finished, optionally
    logging the event (subject to `self.autoLog`)."""
    self.status = FINISHED
    if self.autoLog and log:
        logging.exp("Sound %s finished" % (self.name), obj=self)
def stop(self, log=True):
    """Stops the sound immediately"""
    # abort() halts playback at once, whereas _stream.stop() would let
    # the current buffer finish playing first
    self._stream.abort()
    self.status = STOPPED
    if self.autoLog and log:
        logging.exp("Sound %s stopped" % (self.name), obj=self)
def __init__(self, win, units='', nDots=1, coherence=0.5, fieldPos=(0.0, 0.0), fieldSize=(1.0, 1.0), fieldShape='sqr', dotSize=2.0, dotLife=3, dir=0.0, speed=0.5, rgb=None, color=(1.0, 1.0, 1.0), colorSpace='rgb', opacity=None, contrast=1.0, depth=0, element=None, signalDots='same', noiseDots='direction', name=None, autoLog=None): """ Parameters ---------- win : window.Window Window this stimulus is associated with. units : str Units to use. nDots : int Number of dots to present in the field. coherence : float Proportion of dots which are coherent. This value can be set using the `coherence` property after initialization. fieldPos : array_like (x,y) or [x,y] position of the field. This value can be set using the `fieldPos` property after initialization. fieldSize : array_like, int or float (x,y) or [x,y] or single value (applied to both dimensions). Sizes can be negative and can extend beyond the window. This value can be set using the `fieldSize` property after initialization. fieldShape : str Defines the envelope used to present the dots. If changed while drawing by setting the `fieldShape` property, dots outside new envelope will be respawned., valid values are 'square', 'sqr' or 'circle'. dotSize : array_like or float Size of the dots. If given an array, the sizes of individual dots will be set. The array must have length `nDots`. If a single value is given, all dots will be set to the same size. dotLife : int Lifetime of a dot in frames. Dot lives are initiated randomly from a uniform distribution from 0 to dotLife. If changed while drawing, the lives of all dots will be randomly initiated again. A value of -1 results in dots having an infinite lifetime. This value can be set using the `dotLife` property after initialization. dir : float Direction of the coherent dots in degrees. At 0 degrees, coherent dots will move from left to right. Increasing the angle will rotate the direction counter-clockwise. 
This value can be set using the `dir` property after initialization. speed : float Speed of the dots (in *units* per frame). This value can be set using the `speed` property after initialization. rgb : array_like, optional Color of the dots in form (r, g, b) or [r, g, b]. **Deprecated**, use `color` instead. color : array_like or str Color of the dots in form (r, g, b) or [r, g, b]. colorSpace : str Colorspace to use. opacity : float Opacity of the dots from 0.0 to 1.0. contrast : float Contrast of the dots 0.0 to 1.0. This value is simply multiplied by the `color` value. depth : float **Deprecated**, depth is now controlled simply by drawing order. element : object This can be any object that has a ``.draw()`` method and a ``.setPos([x,y])`` method (e.g. a GratingStim, TextStim...)!! DotStim assumes that the element uses pixels as units. ``None`` defaults to dots. signalDots : str If 'same' then the signal and noise dots are constant. If different then the choice of which is signal and which is noise gets randomised on each frame. This corresponds to Scase et al's (1996) categories of RDK. This value can be set using the `signalDots` property after initialization. noiseDots : str Determines the behaviour of the noise dots, taken directly from Scase et al's (1996) categories. For 'position', noise dots take a random position every frame. For 'direction' noise dots follow a random, but constant direction. For 'walk' noise dots vary their direction every frame, but keep a constant speed. This value can be set using the `noiseDots` property after initialization. name : str, optional Optional name to use for logging. autoLog : bool Enable automatic logging. 
""" # what local vars are defined (these are the init params) for use by # __repr__ self._initParams = __builtins__['dir']() self._initParams.remove('self') super(DotStim, self).__init__(win, units=units, name=name, autoLog=False) # set at end of init self.nDots = nDots # pos and size are ambiguous for dots so DotStim explicitly has # fieldPos = pos, fieldSize=size and then dotSize as additional param self.fieldPos = fieldPos # self.pos is also set here self.fieldSize = val2array(fieldSize, False) # self.size is also set if type(dotSize) in (tuple, list): self.dotSize = np.array(dotSize) else: self.dotSize = dotSize if self.win.useRetina: self.dotSize *= 2 # double dot size to make up for 1/2-size pixels self.fieldShape = fieldShape self.__dict__['dir'] = dir self.speed = speed self.element = element self.dotLife = dotLife self.signalDots = signalDots self.useShaders = False # not needed for dots? if rgb != None: logging.warning("Use of rgb arguments to stimuli are deprecated." " Please use color and colorSpace args instead") self.colorSpace = 'rgba' self.color = rgb else: self.colorSpace = colorSpace self.color = color self.opacity = opacity self.contrast = float(contrast) self.depth = depth # initialise the dots themselves - give them all random dir and then # fix the first n in the array to have the direction specified self.coherence = coherence # using the attributeSetter self.noiseDots = noiseDots # initialise a random array of X,Y self._verticesBase = self._dotsXY = self._newDotsXY(self.nDots) # all dots have the same speed self._dotsSpeed = np.ones(self.nDots, dtype=float) * self.speed # abs() means we can ignore the -1 case (no life) self._dotsLife = np.abs(dotLife) * np.random.rand(self.nDots) # pre-allocate array for flagging dead dots self._deadDots = np.zeros(self.nDots, dtype=bool) # set directions (only used when self.noiseDots='direction') self._dotsDir = np.random.rand(self.nDots) * _2pi self._dotsDir[self._signalDots] = self.dir * _piOver180 
self._update_dotsXY() # set autoLog now that params have been initialised wantLog = autoLog is None and self.win.autoLog self.__dict__['autoLog'] = autoLog or wantLog if self.autoLog: logging.exp("Created %s = %s" % (self.name, str(self)))
def __init__(self, win, units ='', lineWidth=1.0, lineColor=(1.0,1.0,1.0), lineColorSpace='rgb',
             fillColor=None, fillColorSpace='rgb',
             vertices=((-0.5,0),(0,+0.5),(+0.5,0)), closeShape=True,
             pos= (0,0), size=1, ori=0.0, opacity=1.0, contrast=1.0,
             depth =0, interpolate=True,
             lineRGB=None, fillRGB=None,
             name='', autoLog=True):
    """Create a ShapeStim.

    :Parameters:

        lineWidth : int (or float?)
            specifying the line width in **pixels**

        vertices : a list of lists or a numpy array (Nx2)
            specifying xy positions of each vertex

        closeShape : True or False
            Do you want the last vertex to be automatically connected to the first?

        interpolate : True or False
            If True the edge of the line will be antialiased.
    """
    #what local vars are defined (these are the init params) for use by __repr__
    self._initParams = dir()
    self._initParams.remove('self')

    # Initialize inheritance and remove unwanted methods
    super(ShapeStim, self).__init__(win, units=units, name=name, autoLog=False) #autoLog is set later
    # ShapeStim has separate line/fill colors, so neutralise the single-color
    # machinery inherited from the base stimulus class
    self.__dict__['setColor'] = None
    self.__dict__['color'] = None
    self.__dict__['colorSpace'] = None

    self.contrast = float(contrast)
    self.opacity = float(opacity)
    self.pos = numpy.array(pos, float)
    self.closeShape=closeShape
    self.lineWidth=lineWidth
    self.interpolate=interpolate

    # Color stuff
    self.useShaders=False#since we don't ned to combine textures with colors
    # written via __dict__ to bypass the attributeSetter (values not final yet)
    self.__dict__['lineColorSpace'] = lineColorSpace
    self.__dict__['fillColorSpace'] = fillColorSpace

    # lineRGB/fillRGB are the deprecated spelling; they win over lineColor/fillColor
    if lineRGB!=None:
        logging.warning("Use of rgb arguments to stimuli are deprecated. Please use color and colorSpace args instead")
        self.setLineColor(lineRGB, colorSpace='rgb')
    else:
        self.setLineColor(lineColor, colorSpace=lineColorSpace)

    if fillRGB!=None:
        logging.warning("Use of rgb arguments to stimuli are deprecated. Please use color and colorSpace args instead")
        self.setFillColor(fillRGB, colorSpace='rgb')
    else:
        self.setFillColor(fillColor, colorSpace=fillColorSpace)

    # Other stuff
    self.depth=depth
    self.ori = numpy.array(ori,float)
    # size must exist before setSize() can scale it
    self.size = numpy.array([0.0,0.0])
    self.setSize(size, log=False)
    self.setVertices(vertices, log=False)

    #set autoLog (now that params have been initialised)
    self.autoLog= autoLog
    if autoLog:
        logging.exp("Created %s = %s" %(self.name, str(self)))
def launchScan(win, settings, globalClock=None, simResponses=None, mode=None,
               esc_key='escape', instr='select Scan or Test, press enter',
               wait_msg="waiting for scanner...", wait_timeout=300, log=True):
    """Accepts up to four fMRI scan parameters (TR, volumes, sync-key, skip),
    and launches an experiment in one of two modes: Scan, or Test.

    :Usage:
        See Coder Demo -> experiment control -> fMRI_launchScan.py.

        In brief: 1) from psychopy.hardware.emulator import launchScan;
        2) Define your args; and 3) add 'vol = launchScan(args)' at the top of
        your experiment script.

        launchScan() waits for the first sync pulse and then returns, allowing
        your experiment script to proceed. The key feature is that, in test
        mode, it first starts an autonomous thread that emulates sync pulses
        (i.e., emulated by your CPU rather than generated by an MRI machine).
        The thread places a character in the key buffer, exactly like a
        keyboard event does. launchScan will wait for the first such sync
        pulse (i.e., character in the key buffer). launchScan returns the
        number of sync pulses detected so far (i.e., 1), so that a script can
        account for them explicitly.

        If a globalClock is given (highly recommended), it is reset to 0.0
        when the first sync pulse is detected. If a mode was not specified
        when calling launchScan, the operator is prompted to select Scan or
        Test.

        If **scan mode** is selected, the script will wait until the first
        scan pulse is detected. Typically this would be coming from the
        scanner, but note that it could also be a person manually pressing
        that key.

        If **test mode** is selected, launchScan() starts a separate thread to
        emit sync pulses / key presses. Note that this thread is effectively
        nothing more than a key-pressing metronome, emitting a key at the
        start of every TR, doing so with high temporal precision.

        If your MR hardware interface does not deliver a key character as a
        sync flag, you can still use launchScan() to test script timing. You
        have to code your experiment to trigger on either a sync character (to
        test timing) or your usual sync flag (for actual scanning).

    :Parameters:
        win: a :class:`~psychopy.visual.Window` object (required)

        settings : a dict containing up to 5 parameters
            (2 required: TR, volumes)

            TR :
                seconds per whole-brain volume (minimum value = 0.1s)
            volumes :
                number of whole-brain (3D) volumes to obtain in a given
                scanning run.
            sync :
                (optional) key for sync timing, default = '5'.
            skip :
                (optional) how many volumes to silently omit initially (during
                T1 stabilization, no sync pulse). default = 0.
            sound :
                (optional) whether to play a sound when simulating scanner
                sync pulses

        globalClock :
            optional but highly recommended :class:`~psychopy.core.Clock` to
            be used during the scan; if one is given, it is reset to 0.000
            when the first sync pulse is received.

        simResponses :
            optional list of tuples [(time, key), (time, key), ...]. time
            values are seconds after the first scan pulse is received.

        esc_key :
            key to be used for user-interrupt during launch. default = 'escape'

        mode :
            if mode is 'Test' or 'Scan', launchScan() will start in that mode.

        instr :
            instructions to be displayed to the scan operator during mode
            selection.

        wait_msg :
            message to be displayed to the subject while waiting for the scan
            to start (i.e., after operator indicates start but before the
            first scan pulse is received).

        wait_timeout :
            time in seconds that launchScan will wait before assuming
            something went wrong and exiting. Defaults to 300sec (5 minutes).
            Raises a TimeoutError if no sync pulse is received in the
            allowable time.
    """
    # fill in optional settings with their defaults
    if 'sync' not in settings:
        settings.update({'sync': '5'})
    if 'skip' not in settings:
        settings.update({'skip': 0})
    try:
        wait_timeout = max(0.01, float(wait_timeout))
    except ValueError:
        raise ValueError("wait_timeout must be number-like, but instead it was %s."
                         % str(wait_timeout))
    # str() not unicode(): the `unicode` builtin does not exist on Python 3
    # and would raise NameError here
    settings['sync'] = str(settings['sync'])
    settings['TR'] = float(settings['TR'])
    settings['volumes'] = int(settings['volumes'])
    settings['skip'] = int(settings['skip'])
    runInfo = "vol: %(volumes)d TR: %(TR).3fs skip: %(skip)d sync: '%(sync)s'" % settings
    if log:  # pragma: no cover
        logging.exp('launchScan: ' + runInfo)
    instructions = visual.TextStim(win, text=instr, height=.05, pos=(0, 0),
                                   color=.4, autoLog=False)
    parameters = visual.TextStim(win, text=runInfo, height=.05, pos=(0, -0.5),
                                 color=.4, autoLog=False)

    # if a valid mode was specified, use it; otherwise query via RatingScale:
    mode = str(mode).capitalize()
    if mode not in ['Scan', 'Test']:
        run_type = visual.RatingScale(win, choices=['Scan', 'Test'],
                                      marker='circle', markerColor='DarkBlue',
                                      size=.8, stretch=.3, pos=(0.8, -0.9),
                                      markerStart='Test', lineColor='DarkGray',
                                      autoLog=False)
        while run_type.noResponse:
            instructions.draw()
            parameters.draw()
            run_type.draw()
            win.flip()
            if event.getKeys([esc_key]):
                break
        mode = run_type.getRating()
    doSimulation = bool(mode == 'Test')

    win.setMouseVisible(False)
    if doSimulation:
        wait_msg += ' (simulation)'
    msg = visual.TextStim(win, color='DarkGray', text=wait_msg, autoLog=False)
    msg.draw()
    win.flip()

    event.clearEvents()  # do before starting the threads
    if doSimulation:
        syncPulse = SyncGenerator(**settings)
        syncPulse.start()  # start emitting sync pulses
        core.runningThreads.append(syncPulse)
        if simResponses:
            roboResponses = ResponseEmulator(simResponses)
            roboResponses.start()  # start emitting simulated user responses
            core.runningThreads.append(roboResponses)

    # wait for first sync pulse:
    timeoutClock = core.Clock()  # zeroed now
    allKeys = []
    while not settings['sync'] in allKeys:
        allKeys = event.getKeys()
        if esc_key and esc_key in allKeys:  # pragma: no cover
            core.quit()
        if timeoutClock.getTime() > wait_timeout:
            raise TimeoutError('Waiting for scanner has timed out in %.3f seconds.'
                               % wait_timeout)
    if globalClock:
        globalClock.reset()
    if log:  # pragma: no cover
        logging.exp('launchScan: start of scan')
    win.flip()  # blank the screen on first sync pulse received
    elapsed = 1  # one sync pulse has been caught so far
    return elapsed
def __init__(self, win, image=None, mask=None, units="", pos=(0.0, 0.0), size=None, ori=0.0, color=(1.0, 1.0, 1.0), colorSpace='rgb', contrast=1.0, opacity=1.0, depth=0, interpolate=False, flipHoriz=False, flipVert=False, texRes=128, name=None, autoLog=None, maskParams=None): """ """ # Empty docstring. All doc is in attributes # what local vars are defined (these are the init params) for use by # __repr__ self._initParams = dir() self._initParams.remove('self') super(ImageStim, self).__init__(win, units=units, name=name, autoLog=False) # set at end of init # use shaders if available by default, this is a good thing self.__dict__['useShaders'] = win._haveShaders # initialise textures for stimulus self._texID = GL.GLuint() GL.glGenTextures(1, ctypes.byref(self._texID)) self._maskID = GL.GLuint() GL.glGenTextures(1, ctypes.byref(self._maskID)) self.__dict__['maskParams'] = maskParams self.__dict__['mask'] = mask # Not pretty (redefined later) but it works! self.__dict__['texRes'] = texRes # Other stuff self._imName = image self.isLumImage = None self.interpolate = interpolate self.flipHoriz = flipHoriz self.flipVert = flipVert self._requestedSize = size self._origSize = None # updated if an image texture gets loaded self.size = val2array(size) self.pos = numpy.array(pos, float) self.ori = float(ori) self.depth = depth # color and contrast etc self.contrast = float(contrast) self.opacity = float(opacity) self.__dict__['colorSpace'] = colorSpace # omit decorator self.setColor(color, colorSpace=colorSpace, log=False) # does an rgb pedestal make sense for an image? 
self.rgbPedestal = [0, 0, 0] # Set the image and mask- self.setImage(image, log=False) self.texRes = texRes # rebuilds the mask # generate a displaylist ID self._listID = GL.glGenLists(1) self._updateList() # ie refresh display list # set autoLog now that params have been initialised wantLog = autoLog is None and self.win.autoLog self.__dict__['autoLog'] = autoLog or wantLog if self.autoLog: logging.exp("Created %s = %s" % (self.name, str(self)))
def __init__(self, win,
             filename="",
             units='pix',
             size=None,
             pos=(0.0, 0.0),
             ori=0.0,
             flipVert=False,
             flipHoriz=False,
             color=(1.0, 1.0, 1.0),
             colorSpace='rgb',
             opacity=1.0,
             volume=1.0,
             name=None,
             loop=False,
             autoLog=None,
             depth=0.0,):
    """
    :Parameters:

        filename :
            a string giving the relative or absolute path to the movie.
            Can be any movie that AVbin can read (e.g. mpeg, DivX)
        flipVert : True or *False*
            If True then the movie will be top-bottom flipped
        flipHoriz : True or *False*
            If True then the movie will be right-left flipped
        volume :
            The nominal level is 1.0, and 0.0 is silence,
            see pyglet.media.Player
        loop : bool, optional
            Whether to start the movie over from the beginning if draw is
            called and the movie is done.
    """
    # what local vars are defined (these are the init params) for use by
    # __repr__
    self._initParams = dir()
    self._initParams.remove('self')

    super(MovieStim, self).__init__(win, units=units, name=name,
                                    autoLog=False)
    self._verticesBase *= numpy.array([[-1, 1]])  # unflip

    if not havePygletMedia:
        msg = ("pyglet.media is needed for MovieStim and could not be"
               " imported.\nThis can occur for various reasons;"
               "    - psychopy.visual was imported too late (after a lib"
               " that uses scipy)"
               "    - no audio output is enabled (no audio card or no "
               "speakers attached)"
               "    - avbin is not installed")
        raise ImportError(msg)
    self._movie = None  # the actual pyglet media object
    self._player = pyglet.media.ManagedSoundPlayer()
    self._player.volume = volume
    # pyglet API changed across versions; fall back to the private attribute
    try:
        self._player_default_on_eos = self._player.on_eos
    except Exception:
        # pyglet 1.1.4?
        self._player_default_on_eos = self._player._on_eos

    self.filename = filename
    self.duration = None
    self.loop = loop
    if loop and pyglet.version >= '1.2':
        logging.error("looping of movies is not currently supported "
                      "for pyglet >= 1.2 (only for version 1.1.4)")
    self.loadMovie(self.filename)
    self.format = self._movie.video_format
    self.pos = numpy.asarray(pos, float)
    self.depth = depth
    self.flipVert = flipVert
    self.flipHoriz = flipHoriz
    self.opacity = float(opacity)
    self.status = NOT_STARTED

    # size: default to the native movie resolution if none was requested
    if size is None:
        self.size = numpy.array([self.format.width, self.format.height],
                                float)
    else:
        self.size = val2array(size)

    self.ori = ori
    self._updateVertices()

    if win.winType != 'pyglet':
        logging.error('Movie stimuli can only be used with a pyglet window')
        core.quit()

    # set autoLog now that params have been initialised
    wantLog = autoLog is None and self.win.autoLog
    self.__dict__['autoLog'] = autoLog or wantLog
    if self.autoLog:
        logging.exp("Created %s = %s" % (self.name, str(self)))
def __init__(self, win, text="Hello World", font="",
             pos=(0.0, 0.0), depth=0, rgb=None, color=(1.0, 1.0, 1.0),
             colorSpace='rgb', opacity=1.0, contrast=1.0, units="",
             ori=0.0, height=None, antialias=True, bold=False,
             italic=False, alignHoriz=None, alignVert=None,
             alignText='center', anchorHoriz='center',
             anchorVert='center', fontFiles=(), wrapWidth=None,
             flipHoriz=False, flipVert=False, languageStyle='LTR',
             name=None, autoLog=None, autoDraw=False):
    """
    **Performance OBS:** in general, TextStim is slower than many other
    visual stimuli, i.e. it takes longer to change some attributes.
    In general, it's the attributes that affect the shapes of the letters:
    ``text``, ``height``, ``font``, ``bold`` etc. These make the next
    .draw() slower because that sets the text again. You can make the
    draw() quick by calling re-setting the text
    (``myTextStim.text = myTextStim.text``) when you've changed the
    parameters.

    In general, other attributes which merely affect the presentation of
    unchanged shapes are as fast as usual. This includes ``pos``,
    ``opacity`` etc.

    The following attribute can only be set at initialization (see
    further down for a list of attributes which can be changed after
    initialization):

    **languageStyle**
        Apply settings to correctly display content from some languages
        that are written right-to-left. Currently there are three (case-
        insensitive) values for this parameter:

        - ``'LTR'`` is the default, for typical left-to-right, Latin-style
          languages.
        - ``'RTL'`` will correctly display text in right-to-left languages
          such as Hebrew. By applying the bidirectional algorithm, it
          allows mixing portions of left-to-right content (such as numbers
          or Latin script) within the string.
        - ``'Arabic'`` applies the bidirectional algorithm but additionally
          will _reshape_ Arabic characters so they appear in the cursive,
          linked form that depends on neighbouring characters, rather than
          in their isolated form. May also be applied in other scripts,
          such as Farsi or Urdu, that use Arabic-style alphabets.

    :Parameters:

    """
    # what local vars are defined (these are the init params) for use by
    # __repr__
    self._initParams = dir()
    self._initParams.remove('self')

    """
    October 2018:
        In place to remove the deprecation warning for pyglet.font.Text.
        Temporary fix until pyglet.text.Label use is identical to
        pyglet.font.Text.
    """
    warnings.filterwarnings(message='.*text.Label*', action='ignore')

    super(TextStim, self).__init__(win, units=units, name=name,
                                   autoLog=False)
    if win.blendMode == 'add':
        logging.warning("Pyglet text does not honor the Window setting "
                        "`blendMode='add'` so 'avg' will be used for the "
                        "text (but objects drawn after can be added)")
    self._needUpdate = True
    self._needVertexUpdate = True
    # use shaders if available by default, this is a good thing
    self.__dict__['antialias'] = antialias
    self.__dict__['font'] = font
    self.__dict__['bold'] = bold
    self.__dict__['italic'] = italic
    # NB just a placeholder - real value set below
    self.__dict__['text'] = ''
    self.__dict__['depth'] = depth
    self.__dict__['ori'] = ori
    self.__dict__['flipHoriz'] = flipHoriz
    self.__dict__['flipVert'] = flipVert
    self.__dict__['languageStyle'] = languageStyle
    self._pygletTextObj = None
    self.pos = pos
    # deprecated attributes
    if alignVert:
        self.__dict__['alignVert'] = alignVert
        logging.warning("TextStim.alignVert is deprecated. Use the "
                        "anchorVert attribute instead")
        # for compatibility, alignText was historically 'left'
        # BUGFIX: was `anchorVert = alignHoriz`, which mapped the
        # deprecated *vertical* alignment onto the *horizontal* value
        anchorVert = alignVert
    if alignHoriz:
        self.__dict__['alignHoriz'] = alignHoriz
        logging.warning("TextStim.alignHoriz is deprecated. Use alignText "
                        "and anchorHoriz attributes instead")
        # for compatibility, alignText was historically 'left'
        alignText, anchorHoriz = alignHoriz, alignHoriz

    # alignment and anchors
    self.alignText = alignText
    self.anchorHoriz = anchorHoriz
    self.anchorVert = anchorVert

    # generate the texture and list holders
    self._listID = GL.glGenLists(1)
    # pygame text needs a surface to render to:
    if not self.win.winType in ["pyglet", "glfw"]:
        self._texID = GL.GLuint()
        GL.glGenTextures(1, ctypes.byref(self._texID))

    # Color stuff
    self.colorSpace = colorSpace
    self.color = color
    # `is not None` rather than `!= None`: rgb may be array-like, for which
    # `!=` is elementwise and its truth value ambiguous
    if rgb is not None:
        logging.warning(
            "Use of rgb arguments to stimuli are deprecated. Please "
            "use color and colorSpace args instead")
        self.color = Color(rgb, 'rgb')

    self.__dict__['fontFiles'] = []
    self.fontFiles = list(fontFiles)  # calls attributeSetter
    self.setHeight(height, log=False)  # calls setFont() at some point
    # calls attributeSetter without log
    setAttribute(self, 'wrapWidth', wrapWidth, log=False)
    self.opacity = opacity
    self.contrast = contrast
    # self.width and self._fontHeightPix get set with text and
    # calcSizeRendered is called
    self.setText(text, log=False)
    self._needUpdate = True
    self.autoDraw = autoDraw

    # set autoLog now that params have been initialised
    wantLog = autoLog is None and self.win.autoLog
    self.__dict__['autoLog'] = autoLog or wantLog
    if self.autoLog:
        logging.exp("Created %s = %s" % (self.name, str(self)))
def __init__(self, win, text="Hello World", font="", pos=(0.0, 0.0),
             depth=0, rgb=None, color=(1.0, 1.0, 1.0), colorSpace='rgb',
             opacity=1.0, contrast=1.0, units="", ori=0.0, height=None,
             antialias=True, bold=False, italic=False,
             alignHoriz='center', alignVert='center',
             fontFiles=[], wrapWidth=None,
             flipHoriz=False, flipVert=False,
             name=None, autoLog=None):
    """
    **Performance OBS:** in general, TextStim is slower than many other
    visual stimuli, i.e. it takes longer to change some attributes. In
    general, it's the attributes that affect the shapes of the letters:
    ``text``, ``height``, ``font``, ``bold`` etc. These make the next
    .draw() slower because that sets the text again. You can make the
    draw() quick by calling re-setting the text
    (```myTextStim.text = myTextStim.text) when you've changed the
    parameters.

    In general, other attributes which merely affect the presentation of
    unchanged shapes are as fast as usual. This includes ``pos``,
    ``opacity`` etc.
    """
    # NOTE(review): `fontFiles=[]` is a mutable default argument; the same
    # list object is shared across all calls that omit it — confirm no
    # caller mutates self.fontFiles in place before changing.
    #what local vars are defined (these are the init params) for use by __repr__
    self._initParams = dir()
    self._initParams.remove('self')
    super(TextStim, self).__init__(win, units=units, name=name,
                                   autoLog=False)

    self._needUpdate = True
    self._needVertexUpdate = True
    self.__dict__['useShaders'] = win._haveShaders  #use shaders if available by default, this is a good thing
    # written via __dict__ to bypass attributeSetters during construction
    self.__dict__['alignHoriz'] = alignHoriz
    self.__dict__['alignVert'] = alignVert
    self.__dict__['antialias'] = antialias
    self.__dict__['font'] = font
    self.__dict__['bold'] = bold
    self.__dict__['italic'] = italic
    self.__dict__['text'] = ''  #NB just a placeholder - real value set below
    self.__dict__['depth'] = depth
    self.__dict__['ori'] = ori
    self.__dict__['flipHoriz'] = flipHoriz
    self.__dict__['flipVert'] = flipVert
    self._pygletTextObj = None
    self.__dict__['pos'] = numpy.array(pos, float)

    #generate the texture and list holders
    self._listID = GL.glGenLists(1)
    if not self.win.winType == "pyglet":  #pygame text needs a surface to render to
        self._texID = GL.GLuint()
        GL.glGenTextures(1, ctypes.byref(self._texID))

    # Color stuff
    self.colorSpace = colorSpace
    # rgb is the deprecated spelling; it wins over color when given
    if rgb != None:
        logging.warning(
            "Use of rgb arguments to stimuli are deprecated. Please use color and colorSpace args instead"
        )
        self.setColor(rgb, colorSpace='rgb', log=False)
    else:
        self.setColor(color, log=False)

    self.__dict__['fontFiles'] = []
    self.fontFiles = fontFiles  # calls attributeSetter
    self.setHeight(height, log=False)  # calls setFont() at some point
    setAttribute(self, 'wrapWidth', wrapWidth, log=False)  # calls attributeSetter without log
    self.__dict__['opacity'] = float(opacity)
    self.__dict__['contrast'] = float(contrast)
    self.setText(text, log=False)  #self.width and self._fontHeightPix get set with text and calcSizeRendered is called
    self._needUpdate = True

    # set autoLog now that params have been initialised
    self.__dict__['autoLog'] = autoLog or autoLog is None and self.win.autoLog
    if self.autoLog:
        logging.exp("Created %s = %s" % (self.name, str(self)))
def __init__(self, win, tex="sqrXsqr", mask="none", units="",
             pos=(0.0, 0.0), size=(1.0, 1.0), radialCycles=3,
             angularCycles=4, radialPhase=0, angularPhase=0, ori=0.0,
             texRes=64, angularRes=100, visibleWedge=(0, 360), rgb=None,
             color=(1.0, 1.0, 1.0), colorSpace='rgb', dkl=None, lms=None,
             contrast=1.0, opacity=1.0, depth=0,
             rgbPedestal=(0.0, 0.0, 0.0), interpolate=False, name=None,
             autoLog=None, maskParams=None):
    """Create a RadialStim; see the class attributes for parameter details."""
    # what local vars are defined (these are the init params) for use by
    # __repr__
    self._initParams = dir()
    self._initParams.remove('self')
    super(RadialStim, self).__init__(win, units=units, name=name,
                                     autoLog=False)  # start off false

    # use shaders if available by default, this is a good thing
    self.useShaders = win._haveShaders

    # UGLY HACK again. (See same section in GratingStim for ideas)
    self.__dict__['contrast'] = 1
    self.__dict__['size'] = 1
    self.__dict__['sf'] = 1
    self.__dict__['tex'] = tex

    # initialise textures for stimulus
    self._texID = GL.GLuint()
    GL.glGenTextures(1, ctypes.byref(self._texID))
    self._maskID = GL.GLuint()
    GL.glGenTextures(1, ctypes.byref(self._maskID))
    self.__dict__['maskParams'] = maskParams
    self.maskRadialPhase = 0
    self.texRes = texRes  # must be power of 2
    self.interpolate = interpolate
    self.rgbPedestal = val2array(rgbPedestal, False, length=3)

    # these are defined for GratingStim but can only cause confusion here
    self.setSF = None
    self.setPhase = None

    self.colorSpace = colorSpace
    # rgb/dkl/lms are deprecated spellings; first one given wins over color
    if rgb != None:
        logging.warning("Use of rgb arguments to stimuli are deprecated."
                        " Please use color and colorSpace args instead")
        self.setColor(rgb, colorSpace='rgb', log=False)
    elif dkl != None:
        logging.warning("Use of dkl arguments to stimuli are deprecated. "
                        "Please use color and colorSpace args instead")
        self.setColor(dkl, colorSpace='dkl', log=False)
    elif lms != None:
        logging.warning("Use of lms arguments to stimuli are deprecated."
                        " Please use color and colorSpace args instead")
        self.setColor(lms, colorSpace='lms', log=False)
    else:
        self.setColor(color, log=False)

    self.ori = float(ori)
    self.__dict__['angularRes'] = angularRes
    self.__dict__['radialPhase'] = radialPhase
    self.__dict__['radialCycles'] = radialCycles
    self.__dict__['visibleWedge'] = numpy.array(visibleWedge)
    self.__dict__['angularCycles'] = angularCycles
    self.__dict__['angularPhase'] = angularPhase
    self.pos = numpy.array(pos, float)
    self.depth = depth
    self.__dict__['sf'] = 1
    self.size = val2array(size, False)
    self.tex = tex
    self.mask = mask
    self.contrast = float(contrast)
    self.opacity = float(opacity)

    # self._updateEverything()

    # set autoLog now that params have been initialised
    wantLog = autoLog is None and self.win.autoLog
    self.__dict__['autoLog'] = autoLog or wantLog
    if self.autoLog:
        logging.exp("Created %s = %s" % (self.name, str(self)))
def upload(selector, filename, basicAuth=None, host=None, https=False):
    """Upload a local file over the internet to a configured http server.

    This method handshakes with a php script on a remote server to transfer a
    local file to another machine via http (using POST).

    Returns "success" plus a sha256 digest of the file on the server and a
    byte count. If the upload was not successful, an error code is returned
    (eg, "too_large" if the file size exceeds the limit specified server-side
    in up.php, or "no_file" if there was no POST attachment).

    .. note::
        The server that receives the files needs to be configured before
        uploading will work. php files and notes for a sys-admin are included
        in `psychopy/contrib/http/`. In particular, the php script `up.php`
        needs to be copied to the server's web-space, with appropriate
        permissions and directories, including apache basic auth and https
        (if desired). The maximum size for an upload can be configured within
        up.php

        A configured test-server is available; see the Coder demo for details
        (upload size is limited to ~1500 characters for the demo).

    **Parameters:**

        `selector` : (required, string)
            a standard URL of the form `http://host/path/to/up.php`, e.g.,
            `http://upload.psychopy.org/test/up.php`

            .. note::
                Limited https support is provided (see below).

        `filename` : (required, string)
            the path to the local file to be transferred. The file can be any
            format: text, utf-8, binary. All files are hex encoded while in
            transit (increasing the effective file size).

            .. note::
                Encryption (*beta*) is available as a separate step. That is,
                first :mod:`~psychopy.contrib.opensslwrap.encrypt()` the file,
                then :mod:`~psychopy.web.upload()` the encrypted file in the
                same way that you would any other file.

        `basicAuth` : (optional)
            apache 'user:password' string for basic authentication. If a
            `basicAuth` value is supplied, it will be sent as the auth
            credentials (in cleartext); using https will encrypt the
            credentials.

        `host` : (optional)
            The default process is to extract host information from the
            `selector`. The `host` option allows you to specify a host
            explicitly (i.e., if it differs from the `selector`).

        `https` : (optional)
            If the remote server is configured to use https, passing the
            parameter `https=True` will encrypt the transmission including
            all data and `basicAuth` credentials. It is approximately as
            secure as using a self-signed X.509 certificate.

            An important caveat is that the authenticity of the certificate
            returned from the server is not checked, and so the certificate
            could potentially be spoofed (see the warning under
            HTTPSConnection http://docs.python.org/library/httplib.html).
            Overall, using https can still be much more secure than not using
            it. The encryption is good, but that of itself does not eliminate
            all risk. Importantly, it is not as secure as one might expect,
            given that all major web browsers do check certificate
            authenticity. The idea behind this parameter is to require people
            to explicitly indicate that they want to proceed anyway, in
            effect saying "I know what I am doing and accept the risks (of
            using un-verified certificates)".

    **Example:**

        See Coder demo / misc / http_upload.py

    Author: Jeremy R. Gray, 2012
    """
    fields = [('name', 'PsychoPy_upload'), ('type', 'file')]
    if not selector:
        logging.error('upload: need a selector, http://<host>/path/to/up.php')
        raise ValueError(
            'upload: need a selector, http://<host>/path/to/up.php')
    if not host:
        host = selector.split('/')[2]
        logging.info('upload: host extracted from selector = %s' % host)
    if selector.startswith('https'):
        if https is not True:
            logging.error(
                'upload: https not explicitly requested. use https=True to proceed anyway (see API for security caveats).'
            )
            raise ValueError(
                'upload: https not fully supported (see API for caveats and usage), exiting.'
            )
        else:
            logging.exp(
                'upload: https requested; note that security is not fully assured (see API)'
            )
    elif https:
        msg = 'upload: to use https, the selector URL must start with "https"'
        logging.error(msg)
        raise ValueError(msg)
    if not os.path.isfile(filename):
        logging.error('upload: file not found (%s)' % filename)
        raise ValueError('upload: file not found (%s)' % filename)
    # read via a context manager so the handle is closed even on error
    # (was `open(filename).read()`, which leaked the file object);
    # contents are base64 encoded in _encode_multipart_formdata()
    with open(filename) as f:
        contents = f.read()
    # `files`, not `file`, to avoid shadowing the builtin
    files = [('file_1', filename, contents)]

    # initiate the POST:
    logging.exp('upload: uploading file %s to %s' %
                (os.path.abspath(filename), selector))
    try:
        status, reason, result = _post_multipart(
            host, selector, fields, files,
            basicAuth=basicAuth, https=https)
    except TypeError:
        status = 'no return value from _post_multipart(). '
        reason = 'config error?'
        result = status + reason
    except urllib2.URLError as ex:
        logging.error('upload: URL Error. (no internet connection?)')
        raise ex

    # process the result:
    if status == 200:
        result_fields = result.split()
        # result = 'status_msg digest' # if using up.php
        if result_fields[0] == 'good_upload':
            outcome = 'success' + ' ' + result
        else:
            outcome = result  # failure code
    elif status == 404:
        outcome = '404 Not_Found: server config error'
    elif status == 403:
        outcome = '403 Forbidden: server config error'
    elif status == 401:
        outcome = '401 Denied: failed apache Basic authorization, or config error'
    elif status == 400:
        outcome = '400 Bad request: failed, possible config error'
    else:
        outcome = str(status) + ' ' + reason

    # `status` is a str only on the _post_multipart TypeError path above
    if status == -1 or status > 299 or isinstance(status, str):
        logging.error('upload: ' + outcome[:102])
    else:
        if outcome.startswith('success'):
            logging.info('upload: ' + outcome[:102])
        else:
            logging.error('upload: ' + outcome[:102])
    return outcome
def main_loop(
    all_tasks,
    subject,
    session,
    output_ds,
    enable_eyetracker=False,
    use_fmri=False,
    use_meg=False,
    show_ctl_win=False,
    allow_run_on_battery=False,
    enable_ptt=False,
    record_movie=False,
):
    """Run a full acquisition session: set up windows/logging/eyetracking,
    then execute each task in `all_tasks` in order, honoring the run_task
    shortcut keys ('n' = restart task, 'q' = quit session, other = skip).

    Parameters
    ----------
    all_tasks : iterable of task objects (setup/restart/unload interface)
    subject, session : str identifiers used for BIDS-style paths/filenames
    output_ds : str, root of the output dataset (sourcedata written under it)
    enable_eyetracker, use_fmri, use_meg, show_ctl_win, allow_run_on_battery,
    enable_ptt, record_movie : bool feature switches
    """
    # force screen resolution to solve issues with video splitter at scanner
    # (disabled code kept as a string literal on purpose)
    """xrandr = Popen([
        'xrandr',
        '--output', 'eDP-1',
        '--mode', '%dx%d'%config.EXP_WINDOW['size'],
        '--rate', str(config.FRAME_RATE)])
    time.sleep(5)"""

    # refuse to run on battery unless explicitly allowed (timing stability)
    if not utils.check_power_plugged():
        print("*" * 25 + "WARNING: the power cord is not connected" + "*" * 25)
        if not allow_run_on_battery:
            return

    # BIDS-style log directory: <output_ds>/sourcedata/sub-X/ses-Y
    bids_sub_ses = ("sub-%s" % subject, "ses-%s" % session)
    log_path = os.path.abspath(
        os.path.join(output_ds, "sourcedata", *bids_sub_ses))
    if not os.path.exists(log_path):
        os.makedirs(log_path, exist_ok=True)
    # timestamped prefix shared by the session log and eyetracking outputs
    log_name_prefix = "sub-%s_ses-%s_%s" % (
        subject,
        session,
        datetime.datetime.now().strftime("%Y%m%d-%H%M%S"),
    )
    logfile_path = os.path.join(log_path, log_name_prefix + ".log")
    # side effect: installs a psychopy log target for the whole session
    log_file = logging.LogFile(logfile_path, level=logging.INFO, filemode="w")

    exp_win = visual.Window(**config.EXP_WINDOW, monitor=config.EXP_MONITOR)
    exp_win.mouseVisible = False

    # optional second (control/operator) window
    if show_ctl_win:
        ctl_win = visual.Window(**config.CTL_WINDOW)
        ctl_win.name = "Stimuli"
    else:
        ctl_win = None

    ptt = None
    if enable_ptt:
        from .ptt import PushToTalk

        ptt = PushToTalk()

    eyetracker_client = None
    gaze_drawer = None
    if enable_eyetracker:
        print("creating et client")
        eyetracker_client = eyetracking.EyeTrackerClient(
            output_path=log_path,
            output_fname_base=log_name_prefix,
            profile=False,
            debug=False,
        )
        print("starting et client")
        eyetracker_client.start()
        print("done")
        # interleave a calibration task before every task in the list
        all_tasks = sum(
            ([
                eyetracking.EyetrackerCalibration(eyetracker_client,
                                                  name="EyeTracker-Calibration"),
                t
            ] for t in all_tasks),
            [])
        if show_ctl_win:
            gaze_drawer = eyetracking.GazeDrawer(ctl_win)
    if use_fmri:
        # subject-specific setup video if present, generic one otherwise
        setup_video_path = glob.glob(
            os.path.join("data", "videos", "subject_setup_videos",
                         "sub-%s_*" % subject))
        if not len(setup_video_path):
            setup_video_path = [
                os.path.join(
                    "data",
                    "videos",
                    "subject_setup_videos",
                    "sub-default_setup_video.mp4",
                )
            ]

        all_tasks.insert(
            0,
            video.VideoAudioCheckLoop(setup_video_path[0],
                                      name="setup_soundcheck_video"),
        )
        all_tasks.insert(
            1,
            task_base.Pause(
                """We are completing the setup and initializing the scanner. We will start the tasks in a few minutes. Please remain still."""),
        )
        all_tasks.append(
            task_base.Pause(
                """We are done for today. The scanner might run for a few seconds to acquire reference images. Please remain still. We are coming to get you out of the scanner shortly."""))
    else:
        all_tasks.append(
            task_base.Pause(
                """We are done with the tasks for today. Thanks for your participation!"""))

    # list of tasks to be ran in a session
    print("Here are the stimuli planned for today\n" + "_" * 50)
    for task in all_tasks:
        print("- " + task.name)
    print("_" * 50)

    try:
        for task in all_tasks:

            # clear events buffer in case the user pressed a lot of buttoons
            event.clearEvents()

            use_eyetracking = False
            if enable_eyetracker and task.use_eyetracking:
                use_eyetracking = True

            # setup task files (eg. video)
            task.setup(
                exp_win,
                log_path,
                log_name_prefix,
                use_fmri=use_fmri,
                use_eyetracking=use_eyetracking,
                use_meg=use_meg,
            )
            print("READY")

            # inner loop: re-run the same task while the operator hits 'n'
            while True:
                # force focus on the task window to ensure getting keys, TTL, ...
                exp_win.winHandle.activate()
                # record frame intervals for debug
                shortcut_evt = run_task(
                    task,
                    exp_win,
                    ctl_win,
                    eyetracker_client,
                    gaze_drawer,
                    record_movie=record_movie,
                )
                if shortcut_evt == "n":
                    # restart the task
                    logging.exp(msg="task - %s: restart" % str(task))
                    task.restart()
                    continue
                elif shortcut_evt:
                    # abort/skip or quit
                    logging.exp(msg="task - %s: abort" % str(task))
                    break
                else:  # task completed
                    logging.exp(msg="task - %s: complete" % str(task))
                    # send stop trigger/marker to MEG + Biopac (or anything
                    # else on parallel port)
                    break

            logging.flush()

            if record_movie:
                out_fname = os.path.join(
                    task.output_path,
                    "%s_%s.mp4" % (task.output_fname_base, task.name))
                print(f"saving movie as {out_fname}")
                exp_win.saveMovieFrames(out_fname, fps=10)

            task.unload()

            if shortcut_evt == "q":
                print("quit")
                break
            elif shortcut_evt is None:
                # add a delay between tasks to avoid remaining TTL to start next task
                # do that only if the task was not aborted to save time
                # there is anyway the duration of the instruction before
                # listening to TTL
                for i in range(DELAY_BETWEEN_TASK * config.FRAME_RATE):
                    exp_win.flip(clearBuffer=False)

        exp_win.saveFrameIntervals("exp_win_frame_intervals.txt")
        if ctl_win:
            ctl_win.saveFrameIntervals("ctl_win_frame_intervals.txt")
    except KeyboardInterrupt as ki:
        print(traceback.format_exc())
        logging.exp(msg="user killing the program")
        print("you killing me!")
    finally:
        # always shut the eyetracker thread down, even on error/interrupt
        if enable_eyetracker:
            eyetracker_client.join(TIMEOUT)
def importConditions(fileName, returnFieldNames=False, selection=""):
    """Imports a list of conditions from an .xlsx, .csv, or .pkl file

    The output is suitable as an input to :class:`TrialHandler`
    `trialList` or to :class:`MultiStairHandler` as a `conditions` list.

    If `fileName` ends with:

        - .csv: import as a comma-separated-value file
          (header + row x col)
        - .xlsx: import as Excel 2007 (xlsx) files.
          No support for older (.xls) is planned.
        - .pkl: import from a pickle file as list of lists
          (header + row x col)

    The file should contain one row per type of trial needed and one column
    for each parameter that defines the trial type. The first row should give
    parameter names, which should:

        - be unique
        - begin with a letter (upper or lower case)
        - contain no spaces or other punctuation (underscores are permitted)

    `selection` is used to select a subset of condition indices to be used
    It can be a list/array of indices, a python `slice` object or a string
    to be parsed as either option. e.g.:

        - "1,2,4" or [1,2,4] or (1,2,4) are the same
        - "2:5"       # 2, 3, 4 (doesn't include last whole value)
        - "-10:2:"    # tenth from last to the last in steps of 2
        - slice(-10, 2, None)  # the same as above
        - random(5) * 8  # five random vals 0-8

    Returns the trialList (a list of dicts, one per trial); if
    `returnFieldNames` is True, returns (trialList, fieldNames) instead.
    """
    def _attemptImport(fileName, sep=',', dec='.'):
        """Attempts to import file with specified settings and raises
        ConditionsImportError if fails due to invalid format

        :param fileName: str
        :param sep: str indicating the separator for cells (',', ';' etc)
        :param dec: str indicating the decimal point ('.', ',')
        :return: trialList, fieldNames
        """
        if fileName.endswith(('.csv', '.tsv')):
            # utf-8-sig transparently strips a BOM if one is present
            trialsArr = pd.read_csv(fileName, encoding='utf-8-sig',
                                    sep=sep, decimal=dec)
            logging.debug(u"Read csv file with pandas: {}".format(fileName))
        elif fileName.endswith(('.xlsx', '.xls', '.xlsm')):
            trialsArr = pd.read_excel(fileName)
            logging.debug(u"Read Excel file with pandas: {}".format(fileName))
        # then try to convert array to trialList and fieldnames
        # drop pandas' auto-named columns (caused by trailing delimiters)
        unnamed = trialsArr.columns.to_series().str.contains('^Unnamed: ')
        trialsArr = trialsArr.loc[:, ~unnamed]  # clear unnamed cols
        logging.debug(u"Clearing unnamed columns from {}".format(fileName))
        trialList, fieldNames = pandasToDictList(trialsArr)

        return trialList, fieldNames

    def _assertValidVarNames(fieldNames, fileName):
        """screens a list of names as candidate variable names. if all
        names are OK, return silently; else raise with msg
        """
        fileName = pathToString(fileName)
        if not all(fieldNames):
            msg = ('Conditions file %s: Missing parameter name(s); '
                   'empty cell(s) in the first row?')
            raise exceptions.ConditionsImportError(msg % fileName)
        for name in fieldNames:
            OK, msg = isValidVariableName(name)
            if not OK:
                # tailor message to importConditions
                msg = msg.replace('Variables', 'Parameters (column headers)')
                raise exceptions.ConditionsImportError(
                    'Conditions file %s: %s%s"%s"' %
                    (fileName, msg, os.linesep * 2, name))

    # "None"/none/None all mean "no conditions file": return empty
    if fileName in ['None', 'none', None]:
        if returnFieldNames:
            return [], []
        return []
    if not os.path.isfile(fileName):
        msg = 'Conditions file not found: %s'
        raise ValueError(msg % os.path.abspath(fileName))

    def pandasToDictList(dataframe):
        """Convert a pandas dataframe to a list of dicts.
        This helper function is used by csv or excel imports via pandas
        """
        # convert the resulting dataframe to a numpy recarray
        trialsArr = dataframe.to_records(index=False)
        # Check for new line characters in strings, and replace escaped
        # characters
        for record in trialsArr:
            for idx, element in enumerate(record):
                if isinstance(element, str):
                    record[idx] = element.replace('\\n', '\n')
        if trialsArr.shape == ():
            # convert 0-D to 1-D with one element:
            trialsArr = trialsArr[np.newaxis]
        fieldNames = list(trialsArr.dtype.names)
        _assertValidVarNames(fieldNames, fileName)

        # convert the record array into a list of dicts
        trialList = []
        for trialN, trialType in enumerate(trialsArr):
            thisTrial = OrderedDict()
            for fieldN, fieldName in enumerate(fieldNames):
                val = trialsArr[trialN][fieldN]
                if isinstance(val, basestring):
                    if val.startswith('[') and val.endswith(']'):
                        # val = eval('%s' %unicode(val.decode('utf8')))
                        val = eval(val)
                elif type(val) == np.string_:
                    val = str(val.decode('utf-8-sig'))
                    # if it looks like a list, convert it:
                    if val.startswith('[') and val.endswith(']'):
                        # val = eval('%s' %unicode(val.decode('utf8')))
                        val = eval(val)
                elif np.isnan(val):
                    # empty cells arrive as NaN; store as None instead
                    val = None
                thisTrial[fieldName] = val
            trialList.append(thisTrial)
        return trialList, fieldNames

    if (fileName.endswith(('.csv', '.tsv'))
            or (fileName.endswith(('.xlsx', '.xls', '.xlsm')) and haveXlrd)):
        if fileName.endswith(('.csv', '.tsv', '.dlm')):  # delimited text file
            # try common separator/decimal combinations until one parses
            for sep, dec in [(',', '.'), (';', ','),  # most common in US, EU
                             ('\t', '.'), ('\t', ','), (';', '.')]:
                try:
                    trialList, fieldNames = _attemptImport(
                        fileName=fileName, sep=sep, dec=dec)
                    break  # seems to have worked
                except exceptions.ConditionsImportError:
                    continue  # try a different format
        else:
            trialList, fieldNames = _attemptImport(fileName=fileName)

    elif fileName.endswith(('.xlsx', '.xlsm')):
        # no xlsread so use openpyxl
        if not haveOpenpyxl:
            raise ImportError('openpyxl or xlrd is required for loading excel '
                              'files, but neither was found.')

        # data_only was added in 1.8
        if parse_version(openpyxl.__version__) < parse_version('1.8'):
            wb = load_workbook(filename=fileName)
        else:
            wb = load_workbook(filename=fileName, data_only=True)
        ws = wb.worksheets[0]
        logging.debug(u"Read excel file with openpyxl: {}".format(fileName))
        try:
            # in new openpyxl (2.3.4+) get_highest_xx is deprecated
            nCols = ws.max_column
            nRows = ws.max_row
        except Exception:
            # version openpyxl 1.5.8 (in Standalone 1.80) needs this
            nCols = ws.get_highest_column()
            nRows = ws.get_highest_row()

        # get parameter names from the first row header
        fieldNames = []
        for colN in range(nCols):
            if parse_version(openpyxl.__version__) < parse_version('2.0'):
                fieldName = ws.cell(_getExcelCellName(col=colN, row=0)).value
            else:
                # From 2.0, cells are referenced with 1-indexing:
                # A1 == cell(row=1, column=1)
                fieldName = ws.cell(row=1, column=colN + 1).value
            fieldNames.append(fieldName)
        _assertValidVarNames(fieldNames, fileName)

        # loop trialTypes
        trialList = []
        for rowN in range(1, nRows):  # skip header first row
            thisTrial = {}
            for colN in range(nCols):
                if parse_version(openpyxl.__version__) < parse_version('2.0'):
                    # BUG FIX: previously used row=0 here, which re-read the
                    # header row for every trial; use the current data row,
                    # matching the 1-indexed row=rowN + 1 in the branch below
                    val = ws.cell(_getExcelCellName(col=colN, row=rowN)).value
                else:
                    # From 2.0, cells are referenced with 1-indexing:
                    # A1 == cell(row=1, column=1)
                    val = ws.cell(row=rowN + 1, column=colN + 1).value
                # if it looks like a list or tuple, convert it
                if (isinstance(val, basestring) and
                        (val.startswith('[') and val.endswith(']') or
                         val.startswith('(') and val.endswith(')'))):
                    val = eval(val)
                fieldName = fieldNames[colN]
                thisTrial[fieldName] = val
            trialList.append(thisTrial)

    elif fileName.endswith('.pkl'):
        f = open(fileName, 'rb')
        # Converting newline characters.
        if PY3:
            # 'b' is necessary in Python3 because byte object is
            # returned when file is opened in binary mode.
            buffer = f.read().replace(b'\r\n', b'\n').replace(b'\r', b'\n')
        else:
            buffer = f.read().replace('\r\n', '\n').replace('\r', '\n')
        try:
            trialsArr = pickle.loads(buffer)
        except Exception:
            raise IOError('Could not open %s as conditions' % fileName)
        f.close()
        trialList = []
        if PY3:
            # In Python3, strings returned by pickle() is unhashable.
            # So, we have to convert them to str.
            trialsArr = [[str(item) if isinstance(item, str) else item
                          for item in row] for row in trialsArr]
        fieldNames = trialsArr[0]  # header line first
        _assertValidVarNames(fieldNames, fileName)
        for row in trialsArr[1:]:
            thisTrial = {}
            for fieldN, fieldName in enumerate(fieldNames):
                # type is correct, being .pkl
                thisTrial[fieldName] = row[fieldN]
            trialList.append(thisTrial)
    else:
        raise IOError('Your conditions file should be an '
                      'xlsx, csv, dlm, tsv or pkl file')

    # if we have a selection then try to parse it
    if isinstance(selection, basestring) and len(selection) > 0:
        selection = indicesFromString(selection)
        if not isinstance(selection, slice):
            for n in selection:
                try:
                    assert n == int(n)
                except AssertionError:
                    raise TypeError("importConditions() was given some "
                                    "`indices` but could not parse them")

    # the selection might now be a slice or a series of indices
    if isinstance(selection, slice):
        trialList = trialList[selection]
    elif len(selection) > 0:
        allConds = trialList
        trialList = []
        for ii in selection:
            # round() tolerates float indices e.g. from numpy selections
            trialList.append(allConds[int(round(ii))])

    logging.exp('Imported %s as conditions, %d conditions, %d params' %
                (fileName, len(trialList), len(fieldNames)))
    if returnFieldNames:
        return (trialList, fieldNames)
    else:
        return trialList
def __init__(self, win, newPos=None, visible=True,
             leftLimit=None, topLimit=None,
             rightLimit=None, bottomLimit=None,
             showLimitBox=False, clickOnUp=False,
             pointer=None, name=None, autoLog=None):
    """Class for customizing the appearance and behavior of the mouse.

    Use a custom mouse for extra control over the pointer appearance
    and function. It's probably slower to render than the regular
    system mouse. Create your `visual.Window` before creating a
    CustomMouse.

    :Parameters:
        win : required, `visual.Window`
            the window to which this mouse is attached
        visible : **True** or False
            makes the mouse invisible if necessary
        newPos : **None** or [x,y]
            gives the mouse a particular starting position
            (pygame or pyglet)
        leftLimit :
            left edge of a virtual box within which the mouse can move
        topLimit :
            top edge of virtual box
        rightLimit :
            right edge of virtual box
        bottomLimit :
            lower edge of virtual box
        showLimitBox : default is False
            display the boundary of the area within which
            the mouse can move.
        pointer :
            The visual display item to use as the pointer;
            must have .draw() and setPos() methods. If your item has
            .setOpacity(), you can alter the mouse's opacity.
        clickOnUp : when to count a mouse click as having occurred
            default is False, record a click when the mouse is first
            pressed down. True means record a click when the mouse
            button is released.

    :Note:
        CustomMouse is a new feature, and subject to change.
        `setPos()` does not work yet. `getRel()` returns `[0,0]`
        and `mouseMoved()` always returns `False`.
        `clickReset()` may not be working.
    """
    # what local vars are defined (these are the init params) for use by
    # __repr__ -- dir() must be called before any other locals are created,
    # so that it captures exactly the parameter names
    self._initParams = dir()
    self._initParams.remove('self')
    super(CustomMouse, self).__init__(name=name, autoLog=False)
    self.autoLog = False  # set properly at end of init
    self.win = win
    # wrap a real event.Mouse and delegate most queries to it
    self.mouse = event.Mouse(win=self.win)

    # maybe inheriting from Mouse would be easier? its not that simple
    self.getRel = self.mouse.getRel
    self.getWheelRel = self.mouse.getWheelRel
    self.mouseMoved = self.mouse.mouseMoved  # FAILS
    self.mouseMoveTime = self.mouse.mouseMoveTime
    self.getPressed = self.mouse.getPressed
    self.clickReset = self.mouse.clickReset  # ???
    self._pix2windowUnits = self.mouse._pix2windowUnits
    self._windowUnits2pix = self.mouse._windowUnits2pix

    # the graphic to use as the 'mouse' icon (pointer); falls back to the
    # bundled pointer.png image next to this module
    if pointer:
        self.setPointer(pointer)
    else:
        # self.pointer = TextStim(win, text='+')
        self.pointer = ImageStim(win,
                                 image=os.path.join(
                                     os.path.split(__file__)[0],
                                     'pointer.png'),
                                 autoLog=False)
    self.mouse.setVisible(False)  # hide the actual (system) mouse
    self.visible = visible  # the custom (virtual) mouse

    # virtual box limits; setLimit() resolves the None defaults
    self.leftLimit = self.rightLimit = None
    self.topLimit = self.bottomLimit = None
    self.setLimit(leftLimit=leftLimit, topLimit=topLimit,
                  rightLimit=rightLimit, bottomLimit=bottomLimit)
    self.showLimitBox = showLimitBox

    # starting position: explicit newPos wins, else current system position
    self.lastPos = None
    self.prevPos = None
    if newPos is not None:
        self.lastPos = newPos
    else:
        self.lastPos = self.mouse.getPos()

    # for counting clicks:
    self.clickOnUp = clickOnUp
    self.wasDown = False  # state of mouse 1 frame prior to current frame,
    # look for changes
    self.clicks = 0  # how many mouse clicks since last reset
    self.clickButton = 0  # which button to count clicks for; 0 = left

    # set autoLog now that params have been initialised; parses as
    # autoLog or (autoLog is None and self.win.autoLog), i.e. inherit the
    # window's setting when the caller passed None
    self.__dict__['autoLog'] = autoLog or autoLog is None and self.win.autoLog
    if self.autoLog:
        logging.exp("Created %s = %s" % (self.name, str(self)))