Code Example #1
def play_sound(value):
    if USE_SOUND:
        sound.init(rate=44100, stereo=True, buffer=128)
        sync_beep = sound.Sound(value=value, secs=0.2, octave=4, loops=0)
        sync_beep.play()
        return sync_beep
    else:
        sys.stdout.write('\a')
        sys.stdout.flush()
        return None
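
A hypothetical call site for play_sound() above, to show how the fallback behaves. USE_SOUND, sound and sys are assumed to be module-level names in the original file, so they are recreated here:

import sys
from psychopy import sound, core

USE_SOUND = True  # assumed module-level flag from the original file

beep = play_sound('A')   # a sound.Sound object when USE_SOUND is True, else None
if beep is not None:
    core.wait(0.2)       # let the 0.2 s beep finish before moving on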
Code Example #2
    def __init__(self, config):
        self.config = config

        sound.init()

        self.setupMonitor()
        self.setupHUD()
        self.setupDataFile()

        self.setupBlocks()
Code Example #3
 def _hwsetup(self):
     """Set up hardware like displays, sounds, etc"""
     # See documentation for visual.Window parameters
     self.win = visual.Window()
     # store frame rate of monitor if we can measure it successfully
     self.frameRate = self.win.getActualFrameRate()
     if self.frameRate is not None:
         self.frameDur = 1.0/round(self.frameRate)
     else:
         self.frameDur = 1.0/60.0  # couldn't get a reliable measure/guess
     # Set up the sound card
     sound.init(rate=48000, stereo=True, buffer=256)
     # Create some handy timers
     self.clock = core.Clock()  # to track the time since experiment started
     # Create a parallel port handler
     self.port = parallel.ParallelPort(address=0x0378)
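
A rough sketch, not taken from the original project, of how the parallel port created above might be used to send a TTL trigger; the function name and the 2 ms pulse width are illustrative assumptions:

from psychopy import core

def send_trigger(port, code):
    # `port` is the parallel.ParallelPort instance built in _hwsetup() above.
    port.setData(code)   # raise the data lines to `code`
    core.wait(0.002)     # hold for ~2 ms (assumed pulse width)
    port.setData(0)      # clear the lines again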
Code Example #4
File: microphone.py  Project: likanzhan/psychopy
def switchOn(sampleRate=48000, outputDevice=None, bufferSize=None):
    """You need to switch on the microphone before use, which can take
    several seconds. The only time you can specify the sample rate (in Hz)
    is during switchOn().

    Considerations on the default sample rate 48kHz::

        DVD or video = 48,000
        CD-quality   = 44,100 / 24 bit
        human hearing: ~15,000 (adult); children & young adult higher
        human speech: 100-8,000 (useful for telephone: 100-3,300)
        Google speech API: 16,000 or 8,000 only
        Nyquist frequency: twice the highest rate, good to oversample a bit

    pyo's downsamp() function can reduce 48,000 to 16,000 in about 0.02s
    (uses integer step sizes). So recording at 48kHz will generate
    high-quality archival data, and permit easy downsampling.

    outputDevice, bufferSize: set these parameters on the pyoSndServer
        before booting; None means use pyo's default values
    """
    # imports pyo, creates sound.pyoSndServer using sound.initPyo() if not yet
    # created
    t0 = core.getTime()
    try:
        global pyo
        import pyo
        global haveMic
        haveMic = True
    except ImportError:  # pragma: no cover
        msg = ('Microphone class not available, needs pyo; '
               'see http://code.google.com/p/pyo/')
        logging.error(msg)
        raise ImportError(msg)
    if pyo.serverCreated():
        sound.backend.pyoSndServer.setSamplingRate(sampleRate)
    else:
        # sound.init() will create pyoSndServer. We want there only
        # ever to be one server
        # will automatically use duplex=1 and stereo if poss
        sound.init(rate=sampleRate)
    if outputDevice:
        sound.backend.pyoSndServer.setOutputDevice(outputDevice)
    if bufferSize:
        sound.backend.pyoSndServer.setBufferSize(bufferSize)
    logging.exp('%s: switch on (%dhz) took %.3fs' %
                (__file__.strip('.py'), sampleRate, core.getTime() - t0))
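
A minimal sketch of the call order implied by the docstring above; the two-second duration is arbitrary, and the same AdvAudioCapture workflow appears in the latencyFromTone demos later on this page:

from psychopy import microphone

microphone.switchOn(sampleRate=48000)   # boot the pyo sound server once, up front
mic = microphone.AdvAudioCapture()
mic.record(sec=2, block=True)           # record ~2 s to a temporary .wav file
print(mic.savedFile)                    # path of the recording just written
microphone.switchOff()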
Code Example #6
 def _hwsetup(self):
     """Set up hardware like displays, sounds, etc"""
     self.win = visual.Window(size=(1280, 1024),
                              fullscr=True,
                              allowGUI=False,
                              useFBO=True,
                              monitor='testMonitor',
                              units='norm')
     # store frame rate of monitor if we can measure it successfully
     self.frameRate = self.win.getActualFrameRate()
     if self.frameRate is not None:
         self.frameDur = 1.0 / round(self.frameRate)
     else:
         self.frameDur = 1.0 / 60.0  # couldn't get a reliable measure/guess
     # Set up the sound card
     sound.init(rate=48000, stereo=True, buffer=256)
     self.volume = 0.4
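
A sketch, not part of the original class, of how the stored volume might later be applied to a tone; the note and duration are arbitrary choices:

from psychopy import sound, core

def play_cue(volume=0.4):
    # `volume` stands in for self.volume as set in _hwsetup() above.
    beep = sound.Sound(value='A', secs=0.2)
    beep.setVolume(volume)
    beep.play()
    core.wait(0.2)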
Code Example #7
 def _hwsetup(self):
     """Set up hardware like displays, sounds, etc"""
     self.win = visual.Window(size=(1280, 1024),
                              fullscr=True,
                              allowGUI=False,
                              useFBO=True,
                              monitor='testMonitor',
                              units='norm')
     # store frame rate of monitor if we can measure it successfully
     self.frameRate = self.win.getActualFrameRate()
     # Create a parallel port handler
     self.port = parallel.ParallelPort(address=0x0378)
     self.clock = core.Clock()  # to track the time since experiment started
     if self.frameRate is not None:
         self.frameDur = 1.0 / round(self.frameRate)
     else:
         self.frameDur = 1.0 / 60.0  # couldn't get a reliable measure/guess
     # Set up the sound card
     sound.init(rate=48000, stereo=True, buffer=256)
Code Example #8
def makeStimuli(cfg):

    radius = 0.5 * cfg['pixpercm']  #0.0375*2/3*cfg['winSize'][1]
    lineWidth = 4
    cfg['radius'] = radius

    # add home position (grey, darker than background (which is roughly 128,128,128))
    cfg['home'] = visual.Circle(win=cfg['win'],
                                pos=[0, 0],
                                radius=radius,
                                lineWidth=lineWidth)
    #cfg['home'].setFillColor(color=(64,64,64), colorSpace = 'rgb255')
    cfg['home'].setLineColor(color=(64, 64, 64), colorSpace='rgb255')

    # add home fail feedback (red)
    cfg['homefail'] = visual.Circle(win=cfg['win'],
                                    pos=[0, 0],
                                    radius=radius,
                                    lineWidth=lineWidth)
    #cfg['homefail'].setFillColor(color=(64,64,64), colorSpace = 'rgb255')
    cfg['homefail'].setLineColor(color=(255, 0, 0), colorSpace='rgb255')

    # add home success feedback (blue)
    cfg['homesuccess'] = visual.Circle(win=cfg['win'],
                                       pos=[0, 0],
                                       radius=radius,
                                       lineWidth=lineWidth)
    #cfg['homesuccess'].setFillColor(color=(64,64,64), colorSpace = 'rgb255')
    cfg['homesuccess'].setLineColor(color=(0, 0, 255), colorSpace='rgb255')

    # add target stay (grey, darker than bg)
    cfg['targetstay'] = visual.Circle(win=cfg['win'],
                                      pos=[0, -0.25 * cfg['winSize'][1]],
                                      radius=radius,
                                      lineWidth=lineWidth)
    #cfg['targetstay'].setFillColor(color=(64,64,64), colorSpace = 'rgb255')
    cfg['targetstay'].setLineColor(color=(64, 64, 64), colorSpace='rgb255')

    # add target go (dark blue)
    cfg['targetgo'] = visual.Circle(win=cfg['win'],
                                    pos=[0, -0.25 * cfg['winSize'][1]],
                                    radius=radius,
                                    lineWidth=lineWidth)
    #cfg['targetgo'].setFillColor(color=(64,64,64), colorSpace = 'rgb255')
    cfg['targetgo'].setLineColor(color=(0, 0, 255), colorSpace='rgb255')

    #would want the targetdistance to be set, maybe 40% of distance from home to edge of window
    if (cfg['winSize'][1] == 1050):
        cfg['targetdistance'] = cfg['pixpercm'] * 9  #12 because stencil is 10
    else:
        cfg['targetdistance'] = cfg['pixpercm'] * 5  #5

    # add cursor (white)
    cfg['cursor'] = visual.Circle(win=cfg['win'],
                                  pos=[0, -0.25 * cfg['winSize'][1]],
                                  radius=radius,
                                  lineWidth=lineWidth)
    #cfg['cursor'].setFillColor(color=(255,255,255), colorSpace = 'rgb255')
    cfg['cursor'].setLineColor(color=(255, 255, 255), colorSpace='rgb255')

    #most likely will not need sound
    #add Beep sound
    sound.init(rate=44100, stereo=True, buffer=128)
    cfg['sound'] = sound.Sound('ding.wav', secs=1)

    return (cfg)
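
The cfg dictionary returned above is presumably drawn frame by frame elsewhere in the project; here is a hedged sketch of such a loop (show_home() is an illustrative name, not from the original code):

from psychopy import core

def show_home(cfg, duration=1.0):
    cfg['sound'].play()              # the 'ding.wav' created in makeStimuli()
    timer = core.Clock()
    while timer.getTime() < duration:
        cfg['home'].draw()
        cfg['win'].flip()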
Code Example #9
For control of the sample rate and buffer size you can call psychopy.sound.init
before creating your first Sound object::
    from psychopy import sound
    sound.init(rate=44100, stereo=True, buffer=128)
    s1 = sound.Sound('ding.wav')

"""
from numpy import log10
from psychopy import prefs  # must be imported first!

prefs.general["audioLib"] = ["pyo"]

from psychopy import sound, core, visual, parallel, event  # noqa

sound.init(rate=44100, stereo=True, buffer=128)
print "Using %s(with %s) for sounds" % (sound.audioLib, sound.audioDriver)

testSound = sound.Sound(
    value=100,
    secs=0.1,
    stereo=True,
    volume=0.5,
    loops=0,
    sampleRate=44100,
    bits=16,
    hamming=False,
    start=0,
    stop=-1,
    name="",
    autoLog=False,
)
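
Because the snippet above is cut off, here is a self-contained restatement of the pattern it describes (call init before creating the first Sound); the tone frequency and duration are arbitrary:

from psychopy import prefs
prefs.general['audioLib'] = ['pyo']        # choose the backend before importing sound
from psychopy import sound, core

sound.init(rate=44100, stereo=True, buffer=128)
beep = sound.Sound(value=440, secs=0.2)    # 440 Hz for 0.2 s (arbitrary choices)
beep.play()
core.wait(0.3)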
Code Example #10
(The PsychoPy team would be interested to hear how your measurements go)
"""

from __future__ import absolute_import, division, print_function

import psychopy
from psychopy import visual, core, event, sound
from labjack import u3
import numpy, sys, platform

# setup window (can use for visual pulses)
win = visual.Window([800, 800], monitor='testMonitor')
win.recordFrameIntervals = False
stim = visual.GratingStim(win, color=-1, sf=0)

sound.init(rate=48000, buffer=48)
print('Using %s(with %s) for sounds' %(sound.audioLib, sound.audioDriver))
timeWithLabjack = True
maxReps = 100

# setup labjack U3
ports = u3.U3()
ports.__del__ = ports.close  # try to autoclose the ports if script crashes

# get zero value of FIO6
startVal = ports.getFIOState(6)  # is FIO6 high or low?
print('FIO6 is at', startVal, end='')
print('AIN0 is at', ports.getAIN(0))
if timeWithLabjack:
    print('OS\tOSver\taudioAPI\tPsychoPy\trate\tbuffer\tmean\tsd\tmin\tmax')
Code Example #11
from psychopy import prefs
#pyo.pa_get_input_devices()
prefs.general['audioLib'] = ['pygame']
prefs.general['audioDriver'] = ['SPDIF (RME HDSP 9652)']

from psychopy import visual, core, event, sound, logging
logging.console.setLevel(logging.DEBUG)  # get messages about the sound lib as it loads

win = visual.Window([1000, 1000])
sound.init(48000, buffer=500)

globalClock = core.Clock()

#mov._audioStream = testSound
for trl in range(0, 4):
    mov = visual.MovieStim3(
        win,
        r'C:\TCDTIMIT\volunteersSmall\s60T\straightcam\TestVideo.mp4',
        flipVert=False,
        flipHoriz=False,
        loop=False,
        noAudio=True)
    testSound = sound.Sound(
        r'C:\TCDTIMIT\volunteersSmall\s60T\straightcam\TestVideo.wav',
        sampleRate=48000)
    #core.wait(.2)
    print('orig movie size=%s' % (mov.size))
    print('duration=%.2fs' % (mov.duration))
    movStart = 1
    while mov.status != visual.FINISHED:
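
The frame loop above is cut off by the excerpt; the following is a hedged sketch of how such a loop is commonly finished, drawing the muted movie each frame and starting the separate .wav once (tying the start to movStart is a guess at the author's intent):

audio_started = False
while mov.status != visual.FINISHED:
    if not audio_started and globalClock.getTime() >= movStart:
        testSound.play()
        audio_started = True
    mov.draw()
    win.flip()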
Code Example #12
#!/usr/bin/env python2
"""demo to illustrate and test microphone.AdvAudioCapture, and also permit
external latency testing (e.g., BlackBox Toolkit) by providing visual-tone synchrony
key lines: 29, 50, 61: mic = microphone.AdvAudioCapture(), mic.record(), mic.getOnset()
"""

from __future__ import division
from psychopy import microphone, sound, core, visual, event
from matplotlib import pyplot
import numpy as np
import os

buffer = 128  # smaller = short play latency, but higher chance of choppy sound playback
rate = 48000  # needs to be 40000 or higher
sound.init(buffer=buffer, rate=rate)

def plotYX(yaxis, xaxis, description=''):
    pyplot.plot(xaxis, yaxis)
    pyplot.grid(True)
    pyplot.title(description)
    pyplot.ylabel('[std %.1f]' % np.std(yaxis))
    pyplot.draw()
    pyplot.show()

# initial set up:
win = visual.Window(fullscr=False, units='height')
circle = visual.Circle(win, 0.25, fillColor=1, edges=64)
microphone.switchOn()
mic = microphone.AdvAudioCapture()

# identify the hardware microphone in use:
Code Example #13
external latency testing (e.g., BlackBox Toolkit) by providing visual-tone synchrony

key lines: 29, 50, 61: mic = microphone.AdvAudioCapture(), mic.record(), mic.getOnset()
"""

from __future__ import absolute_import, division, print_function

from builtins import range
from psychopy import microphone, sound, core, visual, event
from matplotlib import pyplot
import numpy as np
import os

buffer_size = 128  # smaller = short play latency, but higher chance of choppy sound playback
rate = 48000  # needs to be 40000 or higher
sound.init(buffer=buffer_size, rate=rate)

def plotYX(yaxis, xaxis, description=''):
    pyplot.plot(xaxis, yaxis)
    pyplot.grid(True)
    pyplot.title(description)
    pyplot.ylabel('[std %.1f]' % np.std(yaxis))
    pyplot.draw()
    pyplot.show()

# initial set up:
win = visual.Window(fullscr=False, units='height')
circle = visual.Circle(win, 0.25, fillColor=1, edges=64)
microphone.switchOn()
mic = microphone.AdvAudioCapture()
Code Example #14
File: latencyFromTone.py  Project: natsn/psychopy
#!/usr/bin/env python2
"""demo to illustrate and test microphone.AdvAudioCapture, and also permit
external latency testing (e.g., BlackBox Toolkit) by providing visual-tone synchrony

key lines: 29, 50, 61: mic = microphone.AdvAudioCapture(), mic.record(), mic.getOnset()
"""

from __future__ import division
from psychopy import microphone, sound, core, visual, event
from matplotlib import pyplot
import numpy as np
import os

buffer = 128  # smaller = short play latency, but higher chance of choppy sound playback
rate = 48000  # needs to be 40000 or higher
sound.init(buffer=buffer, rate=rate)


def plotYX(yaxis, xaxis, description=""):
    pyplot.plot(xaxis, yaxis)
    pyplot.grid(True)
    pyplot.title(description)
    pyplot.ylabel("[std %.1f]" % np.std(yaxis))
    pyplot.draw()
    pyplot.show()


# initial set up:
win = visual.Window(fullscr=False, units="height")
circle = visual.Circle(win, 0.25, fillColor=1, edges=64)
microphone.switchOn()
Code Example #15
	import winsound
except ImportError:
	print "Warning: winsound not found; will try using pyo/pyaudio"
try:
	import pyo
	print "Attempting to use pyo for sounds"
	prefs.general['audioLib'] = ['pyo']
	prefs.general['audioDriver'] = ['ASIO']
except:
	print 'could not load pyo'
from psychopy import sound,core, visual


if prefs.general['audioLib'][0] == 'pyo':
	print 'initializing pyo to 44100'
	sound.init(44100,buffer=128)
	print 'Using %s(with %s) for sounds' %(sound.audioLib, sound.audioDriver)

from psychopy import core,logging,event,visual,data,gui,misc
import glob,os,random,sys,gc,time,hashlib,subprocess
from math import *

#try:
#	import pygame
#	from pygame.locals import *
#except ImportError:
#	print "Warning: pygame not found; will be using pyglet for stim presentation"
#pygame.mixer.pre_init(44100,-16,1, 4096) # pre-initialize to reduce the delay
try:
	from scipy import ndimage
except ImportError:
Code Example #16
# must do this *before* importing psychopy.sound
from psychopy import prefs

try:
    import pyo
except ImportError:
    print "could not load pyo!"
else:
    prefs.general["audioLib"] = ["pyo"]

from psychopy import sound

if prefs.general["audioLib"][0] == "pyo":
    print "initializing pyo to 48000"
    sound.init(48000, buffer=128)
    print "Using %s(with %s) for sounds" % (sound.audioLib, sound.audioDriver)

from psychopy import visual, core, event

from resources.psychopy_helper import *
from resources.dynamic_mask import DynamicMask
from modality_trials import write_trials


class Experiment(object):
    def __init__(self, exp_dir, version_file):
        """
        Start the experiment by saving the experiment directory and loading the
        version parameters stored in .yaml format.
        """
Code Example #17
key lines: 29, 50, 61: mic = microphone.AdvAudioCapture(), mic.record(), mic.getOnset()
"""

from __future__ import division
from __future__ import print_function

from builtins import range
from psychopy import microphone, sound, core, visual, event
from matplotlib import pyplot
import numpy as np
import os

buffer_size = 128  # smaller = short play latency, but higher chance of choppy sound playback
rate = 48000  # needs to be 40000 or higher
sound.init(buffer=buffer_size, rate=rate)


def plotYX(yaxis, xaxis, description=''):
    pyplot.plot(xaxis, yaxis)
    pyplot.grid(True)
    pyplot.title(description)
    pyplot.ylabel('[std %.1f]' % np.std(yaxis))
    pyplot.draw()
    pyplot.show()


# initial set up:
win = visual.Window(fullscr=False, units='height')
circle = visual.Circle(win, 0.25, fillColor=1, edges=64)
microphone.switchOn()
Code Example #18
                        opacity=0.5)
IB_title = visual.TextStim(win,
                           text="How much time between click and sound?",
                           color='white',
                           height=20)
IB_input = visual.TextStim(win,
                           text='',
                           color='black',
                           pos=(0, -100),
                           height=30)

#===============================
# Creation of sound tones
#===============================

sound.init(rate=44100, stereo=True, buffer=128)
tone = sound.Sound(value='G', secs=0.2, octave=4, sampleRate=44100, bits=16)
tone_intervals = (0.3, 0.5, 0.7)

#======================================================
#Create conditions list and trialhandler to run trials
#======================================================

#create a list of trials per condition, adding factor info to trial
# for dev in range(sequence_of_dev):  # for each deviation in the sequence
stimList = []
for dev in [sequence_of_dev]:  # for each deviation in the sequence
    stimList.append({'dev': dev})  # this is a python 'dictionary'

# method can be ‘random’, ‘sequential’, or ‘fullRandom’
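
A sketch of the TrialHandler construction that the comment above refers to; nReps=10 is an arbitrary illustrative value:

from psychopy import data

trials = data.TrialHandler(stimList, nReps=10, method='random')
for trial in trials:
    print(trial['dev'])   # each trial is one of the {'dev': ...} dicts from stimList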
Code Example #19
File: soundInit.py  Project: yvs/psychopy
from psychopy import visual, sound
#~ visual.Window([100,100])
#~ import pygame

#~ pygame.mixer.init(44100, -16, 2, 1024*3)

#~ print pygame.mixer.get_init()
sound.init(rate=22050, stereo=2)
Code Example #20
    def __init__(self, **kwargs):
        self.DATA_DIR = kwargs.get('data_dir', 'digitspan_data')
        self.MONITOR = kwargs.get('monitor', 'testMonitor')
        self.MONITOR_RESOLUTION = kwargs.get('monitor_resolution', (1024, 768))
        self.SOUND_GENDER = kwargs.get('sound_gender', 'female')
        self.SOUND_PATH = kwargs.get('sound_path', os.path.join(os.path.dirname(os.path.realpath(__file__)), 'sounds'))
        self.SOUND_INIT_SAMPLES = kwargs.get('sound_init_samples', 48000)
        self.N_PRACTICE_TRIALS = kwargs.get('practice_trials', 2)
        self.LEN_PRACTICE_TRIAL = kwargs.get('practice_trial_len', 3)
        self.DIGIT_DISPLAY_TIME = kwargs.get('digit_display_time', 0.500)
        # renamed from "IN_BETWEEN_DIGITS_TIME"
        self.DIGIT_DISPLAY_GAP = kwargs.get('digit_display_gap', 0.300)
        self.NUM_TRIAL_BLOCKS = kwargs.get('trial_blocks', 1)
        self.INTER_TRIAL_DELAY = kwargs.get('inter_trial_delay', 0.500)
        self.sequence_range = {
            'forward': {
                'min': kwargs.get('forward_min', 3),
                'max': kwargs.get('forward_max', 15)
            },
            'reverse': {
                'min': kwargs.get('reverse_min', 2),
                'max': kwargs.get('reverse_max', 15)
            }
        }
        self.MAX_TRIALS_WRONG = kwargs.get('max_wrong_trials', 2)
        self.FULLSCREEN = kwargs.get('fullscreen', True)

        if not os.path.isdir(self.DATA_DIR):
            try:
                os.mkdir(self.DATA_DIR)
            except Exception as e:
                print(e)
                print("Error: cannot create data directory: " + self.DATA_DIR)
                sys.exit(1)

        while True:
            # tuple of form: (subject_id, test_number)
            subject_info = self.get_subject_info(sys.argv[1:])
            self.log_file = os.path.join(
                self.DATA_DIR, '_'.join(subject_info) + '.csv')

            if os.path.isfile(self.log_file):
                rename_dialog = gui.Dlg(title='Error: Log File Exists')
                rename_dialog.addText("A log file with the subject ID " + subject_info[0] +
                                      " and test number " + subject_info[1] + " already exists. Overwrite?")
                rename_dialog.show()

                if rename_dialog.OK:
                    self.log_file = open(self.log_file, "w")
                    break
                else:
                    # not exactly necessary but w/e
                    continue
            else:
                self.log_file = open(self.log_file, "w")
                break

        # now log_file is a proper file
        self.data = []

        # this should load Pyo. However, it may require manually symlinking in
        # the newest liblo.
        sound.init(self.SOUND_INIT_SAMPLES, buffer=128)

        self.sound_correct = sound.Sound(value=440, secs=0.4)
        self.sound_incorrect = sound.Sound(value=330, secs=0.4)
        self.sound_files = [sound.Sound(value=os.path.join(self.SOUND_PATH, fn)) for fn in os.listdir(self.SOUND_PATH)
                            if fn.startswith(self.SOUND_GENDER) and fn.endswith('.wav')]

        # this is a bad way of doing this. Should load from a file.
        self.sequences = {
            'forward':  [(9, 7),
                         (6, 3),
                         (5, 8, 2),
                         (6, 9, 4),
                         (7, 2, 8, 6),
                         (6, 4, 3, 9),
                         (4, 2, 7, 3, 1),
                         (7, 5, 8, 3, 6),
                         (3, 9, 2, 4, 8, 7),
                         (6, 1, 9, 4, 7, 3),
                         (4, 1, 7, 9, 3, 8, 6),
                         (6, 9, 1, 7, 4, 2, 8),
                         (3, 8, 2, 9, 6, 1, 7, 4),
                         (5, 8, 1, 3, 2, 6, 4, 7),
                         (2, 7, 5, 8, 6, 3, 1, 9, 4),
                         (7, 1, 3, 9, 4, 2, 5, 6, 8)],
            'reverse':  [(3, 1),
                         (2, 4),
                         (4, 6),
                         (5, 7),
                         (6, 2, 9),
                         (4, 7, 5),
                         (8, 2, 7, 9),
                         (4, 9, 6, 8),
                         (6, 5, 8, 4, 3),
                         (1, 5, 4, 8, 6),
                         (5, 3, 7, 4, 1, 8),
                         (7, 2, 4, 8, 5, 6),
                         (8, 1, 4, 9, 3, 6, 2),
                         (4, 7, 3, 9, 6, 2, 8),
                         (9, 4, 3, 7, 6, 2, 1, 8),
                         (7, 2, 8, 1, 5, 6, 4, 3)]
        }

        # after this line executes, the window is showing.
        self.window = visual.Window(
            self.MONITOR_RESOLUTION, monitor=self.MONITOR, units='deg', fullscr=self.FULLSCREEN)
        self.mouse = event.Mouse(win=self.window)
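
A hedged sketch, not from the original project, of how the recordings and timing constants loaded above might drive one presentation pass; the function signature is hypothetical:

from psychopy import core

def present_sounds(sounds, display_time=0.5, gap=0.3):
    # `sounds` would be something like self.sound_files from the class above;
    # the defaults mirror DIGIT_DISPLAY_TIME and DIGIT_DISPLAY_GAP.
    for snd in sounds:
        snd.play()
        core.wait(display_time + gap)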
Code Example #21
try:
    import winsound
except ImportError:
    print "Warning: winsound not found; will try using pyo/pyaudio"
try:
    import pyo
    print "Attempting to use pyo for sounds"
    prefs.general['audioLib'] = ['pyo']
except:
    print 'could not load pyo'
from psychopy import sound, core, visual

if prefs.general['audioLib'][0] == 'pyo':
    print 'initializing pyo to 48000'
    sound.init(48000, buffer=128)
    print 'Using %s(with %s) for sounds' % (sound.audioLib, sound.audioDriver)

from psychopy import core, logging, event, visual, data, gui, misc
import glob, os, random, sys, gc, time, hashlib, subprocess
from math import *

try:
    import pygame
    from pygame.locals import *
except ImportError:
    print "Warning: pygame not found; will be using pyglet for stim presentation"
#pygame.mixer.pre_init(44100,-16,1, 4096) # pre-initialize to reduce the delay
try:
    from scipy import ndimage
except ImportError:
Code Example #22
from psychopy import prefs
import pyo
pyo.pa_get_input_devices()
prefs.general['audioLib'] = ['pyo']
prefs.general['audioDriver'] = ['MOTU Audio ASIO']

from psychopy import visual, sound, core, event, microphone, gui
from psychopy import logging, prefs

from psychopy import parallel
import time
port = parallel.ParallelPort(address=0xEFF8)

#Initiate the PsychPy window
win = visual.Window([1920, 1080])
sound.init(48000, buffer=512)

#logging.console.setLevel(logging.DEBUG)#get messages about the sound lib as it loads

import time, os
import sys

import numpy as np
import scipy.io.wavfile
from numpy import mean, sqrt, square
import math
import pandas as pd
import fnmatch
from guinam import Dlg

Code Example #23
File: run_quilt_fmri.py  Project: thompsonj/quilts
def initialize_run():
    """Initalize settings and log file for this run of the experiment.

    Returns
    -------
    settings : dict
        Contains various experimental settings such as MR imaging parameters

        subject : Subject code use for loading/saving data, e.g. 's1'
        run : integer from 1-20
        debug : If true, don't display in full-screen mode
        TR : Time between acquisitions
        volumes : Number of whole-brain 3D volumes to collect this run
        sync : Character to use as the sync timing event; assumed to come at
               start of a volume
        resp : binary array indicating which blocks should be followed by a
               response probe
        skip : Number of volumes lacking a sync pulse at start of scan (for T1
               stabilization)
        scan_sound : In test mode only, play a tone as a reminder of scanner
                     noise
    """
    logging.console.setLevel(logging.DEBUG)
    if prefs.general['audioLib'][0] == 'pyo':
        # if pyo is the first lib in the list of preferred libs then we could
        # use small buffer
        # pygame sound is very bad with a small buffer though
        sound.init(16384, buffer=128)
    print('Using %s(with %s) for sounds' % (sound.audioLib, sound.audioDriver))

    # settings for launchScan:
    try:
        settings = fromFile('settings.pickle')
    except:
        settings = {
            'subject': 's0',  # Subject code use for loading/saving data
            'run': 1,  # int from 1-20
            'debug': True,  # If true, print extra info
            'TR': 1.7,  # Time between acquisitions
            'volumes': 371,  # Number of whole-brain 3D volumes / frames
            # this will be updated when known
            'sync': '5',  # Character to use as the sync timing event;
            # assumed to come at start of a volume
            'resp': ['0', '0', '0'],  # Blocks after which response is made
            'skip': 0,  # Number of volumes lacking a sync pulse at
            # start of scan (for T1 stabilization)
            'sound': True  # In test mode only, play a tone as a
            # reminder of scanner noise
        }
    # First, confirm subject number and run number
    subandrun = {'sub': settings['subject'], 'run': settings['run']}
    info_dlg = gui.DlgFromDict(subandrun)
    sub = subandrun['sub']
    run = subandrun['run']
    # Load order info from file
    run_info = np.load(path.join(sub, sub + '_run' + str(run) +
                                 'order.npy')).item()
    settings['subject'] = sub
    settings['run'] = run
    settings['volumes'] = int(run_info['vols'])
    settings['resp'] = run_info['resp']
    # Confirm all settings
    info_dlg = gui.DlgFromDict(settings,
                               title='settings',
                               order=['subject', 'run', 'volumes', 'debug'])
    # Save settings for next run
    if info_dlg.OK:
        next_settings = settings.copy()
        if settings['run'] == 20:
            next_settings['run'] = 1  # Reset run when experiment is over
        else:
            next_settings['run'] += 1  # Increment for the next run
        toFile('settings.pickle', next_settings)
    else:
        core.quit()
    sub = settings['subject']
    run = settings['run']
    # Load order info again incase sub/run was altered in previous dialog box
    run_info = np.load(path.join(sub, sub + '_run' + str(run) +
                                 'order.npy')).item()
    settings['stimuli'] = run_info['stimuli']
    settings['lang'] = run_info['lang']

    # Create dated log file
    date_str = time.strftime("%b_%d_%H%M", time.localtime())
    logfname = path.join(
        'logs', "%s_run%s_log_%s.log" %
        (settings['subject'], settings['run'], date_str))
    log = logging.LogFile(logfname, level=logging.INFO, filemode='a')

    return settings
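
A hypothetical follow-on, not shown in the excerpt: the returned settings dictionary carries the TR/volumes/sync/skip keys that psychopy.hardware.emulator.launchScan expects, so it could be handed on like this (the window and mode choices are assumptions):

from psychopy import visual, core
from psychopy.hardware.emulator import launchScan

settings = initialize_run()
win = visual.Window(fullscr=not settings['debug'])
vol = launchScan(win, settings, globalClock=core.Clock(), mode='Test')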
Code Example #24
File: soundStimuli.py  Project: jgors/psychopy
"""
Sound stimuli are currently an area of development in PsychoPy

Previously we used pygame. Now the pyo library is also supported.
On OSX this is an improvement (using coreaudio rather than SDL). 
On Windows this should help on systems with good sound cards,
but this is yet to be confirmed. 
See the demo hardware>testSoundLatency too

"""
import sys
from psychopy import logging
logging.console.setLevel(logging.DEBUG)#get messages about the sound lib as it loads

from psychopy import sound,core, visual
sound.init(44100,buffer=128)
print('Using %s(with %s) for sounds' %(sound.audioLib, sound.audioDriver))

highA = sound.Sound('A',octave=3, sampleRate=44100, secs=0.8, bits=8)
highA.setVolume(0.8)
tick = sound.Sound(800,secs=0.01,sampleRate=44100, bits=8)#sample rate ignored because already set
tock = sound.Sound('600',secs=0.01, sampleRate=44100)

highA.play()
core.wait(0.8)
tick.play()
core.wait(0.4)
tock.play()
core.wait(0.6)

if sys.platform=='win32':
Code Example #25
    def __init__(self, **kwargs):
        self.DIGIT_DISPLAY_TIME = kwargs.get('digit_display_time', 0.250)
        self.DIGIT_RANGE = kwargs.get('digit_range', (0, 9))
        self.DIGIT_SIZES = kwargs.get('digit_sizes', [1.8, 2.7, 3.5, 3.8, 4.5])
        self.TARGET_DIGIT = kwargs.get('target_digit',
                                       random.randint(*self.DIGIT_RANGE))
        self.NUM_DIGIT_SETS = kwargs.get('num_digit_sets', 25)
        self.MASK_TIME = kwargs.get('mask_time', 0.900)
        self.MASK_DIAMETER = kwargs.get('mask_diameter', 3.0)
        self.MAX_FAILS = kwargs.get('max_fails', 3)
        self.CORRECT_FREQ = kwargs.get('correct_freq', 440)
        self.WRONG_FREQ = kwargs.get('wrong_freq', 330)
        self.TONE_LENGTH = kwargs.get('tone_length', 0.5)
        self.SOUND_INIT_SAMPLES = kwargs.get('sound_init_samples', 48000)
        self.PRACTICE_DIGIT_SETS = kwargs.get('practice_digit_sets', 2)
        self.DATA_DIR = kwargs.get('data_dir', 'sart_data')
        self.MONITOR_RESOLUTION = kwargs.get('monitor_resolution', (1024, 768))
        self.FULLSCREEN = kwargs.get('fullscreen', True)

        # if the datadir doesn't exist, create it.
        if not os.path.isdir(self.DATA_DIR):
            try:
                os.mkdir(self.DATA_DIR)
            except Exception as e:
                print(e)
                print("Error: cannot create data directory: " + self.DATA_DIR)
                sys.exit(1)

        # then, collect the subject's ID and text number. If the file already exists, prompt to confirm overwrite
        while True:
            subject_info = self.get_subject_info(sys.argv[1:])
            self.log_file = os.path.join(self.DATA_DIR,
                                         '_'.join(subject_info) + '.csv')

            if os.path.isfile(self.log_file):
                rename_dialog = gui.Dlg(title='Error: Log File Exists')
                rename_dialog.addText(
                    'A log file with this subject id ({0}) and test number {1} already exists. Overwrite?'
                    .format(*subject_info))
                rename_dialog.show()

                if rename_dialog.OK:
                    break
                else:
                    break
            else:
                break

        #self.log_file = open(self.log_file, "w")

        self.data = []

        # this is the basic data output format (to CSV)
        self.Datum = namedtuple(
            'Datum', ['trial', 'target', 'digit', 'success', 'rt', 'note'])

        sound.init(self.SOUND_INIT_SAMPLES, buffer=128)

        # init components for rest of experiment
        self.sound_correct = sound.Sound(value=self.CORRECT_FREQ,
                                         secs=self.TONE_LENGTH)
        self.sound_incorrect = sound.Sound(value=self.WRONG_FREQ,
                                           secs=self.TONE_LENGTH)

        self.window = visual.Window(self.MONITOR_RESOLUTION,
                                    monitor='testMonitor',
                                    units='cm',
                                    fullscr=self.FULLSCREEN)
        self.mouse = event.Mouse(win=self.window)

        self.MASTER_CLOCK = core.Clock()  # this is never used, holdover from original code
        self.TIMER = core.Clock()
Code Example #26
from __future__ import division

from __future__ import print_function

import psychopy
from psychopy import visual, core, event, sound
from labjack import u3
import numpy, sys, platform

# setup window (can use for visual pulses)
win = visual.Window([800, 800], monitor='testMonitor')
win.recordFrameIntervals = False
stim = visual.GratingStim(win, color=-1, sf=0)

sound.init(rate=48000, buffer=48)
print('Using %s(with %s) for sounds' % (sound.audioLib, sound.audioDriver))
timeWithLabjack = True
maxReps = 100

# setup labjack U3
ports = u3.U3()
ports.__del__ = ports.close  # try to autoclose the ports if script crashes

# get zero value of FIO6
startVal = ports.getFIOState(6)  # is FIO6 high or low?
print('FIO6 is at', startVal, end='')
print('AIN0 is at', ports.getAIN(0))
if timeWithLabjack:
    print('OS\tOSver\taudioAPI\tPsychoPy\trate\tbuffer\tmean\tsd\tmin\tmax')
Code Example #27
File: testSoundLatency.py  Project: jgors/psychopy
(The PsychoPy team would be interested to hear how your measurements go)

"""

import psychopy
from psychopy import visual, core, event, sound
from labjack import u3
import numpy, sys, platform

#setup window (can use for visual pulses)
win = visual.Window([800,800], monitor='testMonitor')
win.setRecordFrameIntervals(False)
stim = visual.PatchStim(win, color=-1, sf=0)

sound.init(rate=44100, buffer=128)
timeWithLabjack=True
maxReps=500

#setup labjack U3
ports = u3.U3()
ports.__del__=ports.close#try to autoclose the ports if script crashes (not working?)

FIO4 = 6004 #use as trigger (to view on scope if desired)
FIO6 = 6006 #use to read in microphone
#get zero value of FIO6
startVal = ports.readRegister(FIO6)
print('FIO6 is at', startVal)

if timeWithLabjack:
    print('OS\tOSver\taudioAPI\tPsychoPy\trate\tbuffer\tmean\tsd\tmin\tmax')