Code Example #1
 def load(self, fileName=''):
     """read and return header + row x col data from a pickle file
     """
     if not fileName:
         fileName = self.defaultFileName
     if not os.path.isfile(fileName):
         _base = os.path.basename(fileName)
         fullPathList = gui.fileOpenDlg(tryFileName=_base,
                                        allowed="All files (*.*)|*.*")
         if fullPathList:
             fileName = fullPathList[0]  # wx.MULTIPLE -> list
     if os.path.isfile(fileName) and fileName.endswith('.pkl'):
         f = open(fileName, 'rb')
         # Converting newline characters.
         if PY3:
             # 'b' is necessary in Python3 because byte object is 
             # returned when file is opened in binary mode.
             buffer = f.read().replace(b'\r\n',b'\n').replace(b'\r',b'\n')
         else:
             buffer = f.read().replace('\r\n','\n').replace('\r','\n')
         contents = pickle.loads(buffer)
         f.close()
         if self.parent:
             self.parent.conditionsFile = fileName
         return contents
     elif not os.path.isfile(fileName):
         print('file %s not found' % fileName)
     else:
         print('only .pkl supported at the moment')
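All of the examples on this page follow the same basic contract: gui.fileOpenDlg() returns a list of full paths (even for a single selection), or None if the user cancels. A minimal sketch of that pattern; the prompt text and file filter here are illustrative, not taken from any of the projects listed:

from psychopy import gui, core

# fileOpenDlg returns a list of selected paths, or None on cancel
paths = gui.fileOpenDlg(tryFilePath='.',
                        prompt='Select a data file',           # illustrative prompt
                        allowed="Pickle files (*.pkl)|*.pkl")  # illustrative filter
if not paths:            # None (cancel) or empty selection
    core.quit()
fileName = paths[0]      # the dialog allows multiple selection, so take the first entry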
Code Example #2
File: _psychopyApp.py  Project: harmadillo/psychopy
    def csvFromPsydat(self, evt=None):
        from psychopy import gui
        from psychopy.tools.filetools import fromFile

        names = gui.fileOpenDlg(allowed='*.psydat',
                    prompt=_translate("Select .psydat file(s) to extract"))
        for name in names or []:
            filePsydat = os.path.abspath(name)
            print("psydat: {0}".format(filePsydat))

            exp = fromFile(filePsydat)
            if filePsydat.endswith('.psydat'):
                fileCsv = filePsydat[:-7]
            else:
                fileCsv = filePsydat
            fileCsv += '.csv'
            exp.saveAsWideText(fileCsv)
            print('   -->: {0}'.format(os.path.abspath(fileCsv)))
Code Example #3
File: dlgsConditions.py  Project: papr/psychopy
 def load(self, fileName=''):
     """read and return header + row x col data from a pickle file
     """
     if not fileName:
         fileName = self.defaultFileName
     if not os.path.isfile(fileName):
         fullPathList = gui.fileOpenDlg(tryFileName=os.path.basename(fileName),
                         allowed="All files (*.*)|*.*")
         if fullPathList:
             fileName = fullPathList[0] # wx.MULTIPLE -> list
     if os.path.isfile(fileName) and fileName.endswith('.pkl'):
         f = open(fileName, 'rb')  # binary mode so the pickled data is read unchanged on Windows
         contents = cPickle.load(f)
         f.close()
         if self.parent:
             self.parent.conditionsFile = fileName
         return contents
     elif not os.path.isfile(fileName):
         print('file %s not found' % fileName)
     else:
         print('only .pkl supported at the moment')
Code Example #4
File: resaveData.py  Project: Complex501/posner
"""This is not a particularly useful example of saving out
csv files from a set of psydat files
"""
from os import path
from psychopy import misc, gui

#select some files to use
filenames = gui.fileOpenDlg(allowed="*.psydat")

#loop through the files
for thisFilename in filenames:
    #get a new name
    fileNoExt, fileExt = path.splitext(thisFilename)
    newName = fileNoExt+"NEW.csv"
    #load and save
    dat = misc.fromFile(thisFilename)
    dat.saveAsWideText(newName)
    print('saved', newName)
Code Example #5
        'subject':'1', 
        'session': 1, 
        'skipPrompts':False, 
        'paramsFile':['DEFAULT','Load...']}
# overwrite params struct if you just saved a new parameter set
if saveParams:
    expInfo['paramsFile'] = [newParamsFilename,'Load...']

#present a dialogue to change select params
dlg = gui.DlgFromDict(expInfo, title=scriptName, order=['subject','session','skipPrompts','paramsFile'])
if not dlg.OK:
    core.quit() # the user hit cancel, so exit

# find parameter file
if expInfo['paramsFile'] == 'Load...':
    dlgResult = gui.fileOpenDlg(prompt='Select parameters file',tryFilePath=os.getcwd(),
        allowed="PICKLE files (.psydat)|.psydat|All files (.*)|")
    expInfo['paramsFile'] = dlgResult[0]
# load parameter file
if expInfo['paramsFile'] not in ['DEFAULT', None]: # otherwise, just use defaults.
    # load params file
    params = fromFile(expInfo['paramsFile'])


# transfer skipPrompts from expInfo (gui input) to params (logged parameters)
params['skipPrompts'] = expInfo['skipPrompts']

# print params to Output
print('params = {')
for key in sorted(params.keys()):
    print("   '%s': %s"%(key,params[key])) # print each value as-is (no quotes)
print('}')
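Examples #5, #8, and #20 assume that a parameter set was written to disk earlier (the saveParams branch) so that the 'Load...' option can find it and fromFile() can read it back. A hedged sketch of that save step using psychopy.tools.filetools.toFile; the dict contents and file name below are hypothetical:

from psychopy.tools.filetools import toFile

# Illustrative save step: pickle a params dict so that a later run can select
# it via 'Load...' in the dialog above and reload it with fromFile().
params = {'skipPrompts': False, 'imageDur': 2.0}   # hypothetical parameter set
newParamsFilename = 'myExperimentParams.psydat'    # hypothetical file name
toFile(newParamsFilename, params)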
Code Example #6
#!/usr/bin/env python

#This analysis script takes one or more staircase datafiles as input from a GUI
#It then plots the staircases on top of each other on the left 
#and a combined psychometric function from the same data
#on the right
#

from psychopy import data, gui, core
from psychopy.tools.filetools import fromFile
import pylab, scipy

files = gui.fileOpenDlg('.')
if not files:
    core.quit()

#get the data from all the files
allIntensities, allResponses = [],[]
for thisFileName in files:
    thisDat = fromFile(thisFileName)
    assert isinstance(thisDat, data.StairHandler)
    allIntensities.append( thisDat.intensities )
    allResponses.append( thisDat.data )
    
#plot each staircase
pylab.subplot(121)
colors = 'brgkcmbrgkcm'
lines, names = [],[]
for fileN, thisStair in enumerate(allIntensities):
    #lines.extend(pylab.plot(thisStair))
    #names = files[fileN]
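    # --- a hedged sketch (not the original continuation from this project) of
    # --- how the truncated plotting loop might be completed:
    lines.extend(pylab.plot(thisStair, colors[fileN], label=files[fileN]))
    names.append(files[fileN])
pylab.legend()
pylab.xlabel('trial number')
pylab.ylabel('intensity')
pylab.show()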
Code Example #7
        'set6_y':'G',
        'set1_n':'H',
        'set2_n':'I',
        'set3_n':'J',
        'set4_n':'K',
        'set5_n':'L',
        'set6_n':'M',
        }

#libs for handling excel files:
from openpyxl.workbook import Workbook
from openpyxl.writer.excel import ExcelWriter
from openpyxl.cell import get_column_letter

#use a file open dialog to choose the files to include
files = gui.fileOpenDlg(tryFilePath=".", allowed='*.psydat')
if not files:#user pressed cancel
    core.quit()
    
xlBook = Workbook()
xlsxWriter = ExcelWriter(workbook = xlBook)
xlSheet = xlBook.worksheets[0]#use the first worksheet (index 0)
xlSheet.title = groupName
#make a header row
for condition in outCols.keys():
    xlSheet.cell(outCols[condition]+'1').value = condition
    
outRow = 2#starting row for data
#do the actual analysis, file-by-file
condNames = []
for fileName in files:
Code Example #8
        'subject':'1', 
        'session': 1, 
        'skipPrompts':False, 
        'paramsFile':['DEFAULT','Load...']}
# overwrite params struct if you just saved a new parameter set
if saveParams:
    expInfo['paramsFile'] = [newParamsFilename,'Load...']

#present a dialogue to change select params
dlg = gui.DlgFromDict(expInfo, title=scriptName, order=['subject','session','skipPrompts','paramsFile'])
if not dlg.OK:
    core.quit() # the user hit cancel, so exit

# find parameter file
if expInfo['paramsFile'] == 'Load...':
    dlgResult = gui.fileOpenDlg(prompt='Select parameters file',tryFilePath=os.getcwd(),
        allowed="PICKLE files (.pickle)|.pickle|All files (.*)|")
    expInfo['paramsFile'] = dlgResult[0]
# load parameter file
if expInfo['paramsFile'] not in ['DEFAULT', None]: # otherwise, just use defaults.
    # load params file
    params = fromFile(expInfo['paramsFile'])


# transfer skipPrompts from expInfo (gui input) to params (logged parameters)
params['skipPrompts'] = expInfo['skipPrompts']

# print params to Output
print('params = {')
for key in sorted(params.keys()):
    print("   '%s': %s" % (key, params[key]))  # print each value as-is (no quotes)
print('}')
Code Example #9
            for s in range(0, diff):
                timewarp.append([verboseMatrix[-1][7], 0])

    stats = {'WeightedPercentageAgreement': WPA(timewarp, timewarp2), 'CohensKappa': cohensKappa(timewarp, timewarp2),
             'AverageObserverAgreement': avgObsAgree(timewarp, timewarp2), 'PearsonsR': pearsonR(verboseMatrix, verboseMatrix2)}
    return stats

ready = False
startDlg = gui.Dlg(title='PyHab reliability calculator')
startDlg.addText('Subject info')
startDlg.addField('Subject Number: ')
startDlg.addField('Subject ID: ')
startDlg.addText('Click OK to select the two verbose files (the file select window may take a while to load)')
startDlg.show()
if startDlg.OK:
    dlg1 = gui.fileOpenDlg()
    print(dlg1)
    if dlg1 is not None:
        dlg2 = gui.fileOpenDlg()
        if dlg2 is not None:
            ready = True
if ready:
    thisInfo = startDlg.data
    VF1 = csv.reader(open(dlg1[0], 'rU')) 
    VF2 = csv.reader(open(dlg2[0], 'rU'))
    Verb1=[]
    Verb2=[]
    for row in VF1:
        Verb1.append(row)
    for row in VF2:
        Verb2.append(row)
Code Example #10
# An output structure for aligning with acquired imaging frames and storing stimulus
# meta data
now = datetime.now()
dt_string = now.strftime("%Y%m%d_%H%M%S")
meta = {
    "nidaq" : use_nidaq,
    "proj_params" : proj_params,
    "date_time" : dt_string
}
outputObj = OutputInfo(meta=meta)



#%% Reading stimulus information & generating epochs
# Ask user where the stim input file is located
stim_fname = gui.fileOpenDlg(os.getcwd())[0]
# Pre-organizing the routine and epochs
routine = PyStimRoutine(stim_fname)
print('Stimulus routine with {eN} epochs is generated...'.format(\
    eN = routine.total_epoch_n))

#%% We need a monitor to present the stimulus
mon = monitors.Monitor(proj_params["monitorName"])
# these are methods; assigning to them would silently fail to configure the monitor
mon.setSizePix(proj_params["monitorSizePix"])
mon.setWidth(proj_params["monitorWidthcm"])
mon.setDistance(proj_params["observerDistancecm"])
refresh_rate = proj_params["monitorRefreshRate"]

win = visual.Window(
    size=(proj_params['win_size_pix'], 
    proj_params['win_size_pix']), fullscr=False, 
Code Example #11
# if viewing distance is large, use 768 ... if not, use 384
stimSize = 768
#stimSize = 384

################### Timing info ####################
stimDuration = 3  #seconds
fixationBaseTime = 0  #seconds
fixationDelta = 0.7  #seconds
responseTimeout = 7  #seconds

################### Monitor setup ######################
localMonitors = monitors.getAllMonitors()

################## Subject setup #################
#load previous responses
files = gui.fileOpenDlg(tryFilePath='response_data', allowed='*.txt')

if not files:  # fileOpenDlg returns None on cancel, so len() would raise
    #user cancelled--abort
    core.quit()
else:
    #get subject number from filename
    (oldFilePath, oldFile) = os.path.split(files[0])
    fileNameParts = oldFile.split('_')
    subject = fileNameParts[1]
    runNum = fileNameParts[2][3:]
    with open(os.path.join('response_data', oldFile), 'rt') as fileIn:
        #loop through lines and keep image numbers for which the subject responded "yes"
        inputObject = csv.reader(fileIn)
        keepImageNum = 0
        keepImagesList = []
Code Example #12
File: imagesBasic.py  Project: TEParsons/workshops
from os import path
from psychopy import gui
from PIL import Image
filenames = gui.fileOpenDlg(allowed="*.*")
for thisFilename in filenames:
    print(thisFilename)
    fileNoExt, fileExt = path.splitext(thisFilename)
    thisImg = Image.open(thisFilename)
    thisImg.save(fileNoExt + 'NEW.jpg')

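As with several other snippets on this page, Example #12 will raise a TypeError if the dialog is cancelled, because gui.fileOpenDlg() then returns None rather than a list. A minimal guard, following the pattern used in Example #2:

from psychopy import gui

filenames = gui.fileOpenDlg(allowed="*.*")
for thisFilename in filenames or []:  # an empty list on cancel, so the loop is skipped
    print(thisFilename)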
Code Example #13
from os import path
from psychopy import misc, gui
import pandas as pd
import scipy
from scipy import stats

#choose some data files to analyse
filenames = gui.fileOpenDlg(allowed="*.csv")

#loop through the files
for thisFilename in filenames:
    print(thisFilename)
    thisDat = pd.read_csv(thisFilename)

    #filter out bad data
    filtered = thisDat[thisDat['rt'] <= 1.0]
    filtered = filtered[filtered['corr'] == 1]

    #separate conflicting from congruent reaction times
    conflict = filtered[filtered.description == 'conflict']
    congruent = filtered[filtered.description != 'conflict']
    #get mean/std.dev
    meanConfl = scipy.mean(conflict.rt)
    semConfl = scipy.std(conflict.rt, ddof=1)
    meanCongr = scipy.mean(congruent.rt)
    semCongr = scipy.std(congruent.rt, ddof=1)
    print "Conflict = %.3f (std=%.3f)" % (meanConfl, semConfl)
    print "Congruent = %.3f (std=%.3f)" % (meanCongr, semCongr)

    #run a t-test
    t, p = stats.ttest_ind(conflict.rt, congruent.rt)
Code Example #14
import csv, codecs
from psychopy import gui

filename = gui.fileOpenDlg('.', allowed='*.csv')[0]
        
#use csv from python (not from numpy) due to handling newlines within quote char
with open(filename, 'rU') as csvFile:
    spamreader = csv.reader(csvFile, delimiter=',', quotechar='"', dialect=csv.excel)
    headers = spamreader.next()
    print 'headers:', type(headers), headers
    entries=[]
    for thisRow in spamreader:
        print thisRow
        thisEntry = {}
        for fieldN, thisFieldName in enumerate(headers):
            thisEntry[thisFieldName] = thisRow[fieldN]
        entries.append(thisEntry)

companHead="Your Company or Institution"
nameHead='Your name (or anon, but a name is nicer)'
testimHead='Your thoughts on PsychoPy'
posnHead = 'Your position'


with open('testimonialsText.html', 'wb') as outFile:
    for thisEntry in entries:
        outFile.write('    <hr>%s <p>\n' %(thisEntry[testimHead].replace('\n', '<br>')))
        nameStr = '    - <em>%s' %thisEntry[nameHead]
        if thisEntry[posnHead]:
            nameStr += ', %s' %thisEntry[posnHead]
        if thisEntry[companHead]:
Code Example #15
expInfo = {'participant': '', 'session': '001'}
dlg = gui.DlgFromDict(dictionary=expInfo, sortKeys=False, title=expName)
if dlg.OK == False:
    core.quit()  # user pressed cancel
    
expInfo['date'] = data.getDateStr()  # add a simple timestamp
expInfo['expName'] = expName
expInfo['psychopyVersion'] = psychopyVersion

# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + u'data/%s_%s_%s' % (expInfo['participant'], expName, expInfo['date'])


# user interface to open up a condition file
# fileOpenDlg returns a list of the selected path(s) (including the file name); conditionFile[0] gives the file name string
conditionFile = gui.fileOpenDlg(".", prompt = "Please Select the Condition File", allowed = "*.csv") 
if not conditionFile:  # if there is no selection, quit experiment
    core.quit()  # note the parentheses; a bare core.quit does nothing
    


# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
    extraInfo=expInfo, runtimeInfo=None,
    savePickle=True, saveWideText=True,
    dataFileName=filename)
# save a log file for detail verbose info
logFile = logging.LogFile(filename+'.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING)    # this outputs to the screen, not a file

endExpNow = False  # flag for 'escape' or other condition => quit the exp
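Example #15 selects a condition file but the excerpt stops before the file is used. A hedged sketch of the usual next step in a Builder-style script, building trials from the chosen .csv with data.importConditions and a TrialHandler, then registering it with the ExperimentHandler defined above (nReps, method, and the loop name are illustrative choices):

from psychopy import data

# hypothetical continuation: build a trial loop from the selected conditions file
trialList = data.importConditions(conditionFile[0])
trials = data.TrialHandler(trialList=trialList, nReps=1, method='random',
                           extraInfo=expInfo, originPath=-1, name='trials')
thisExp.addLoop(trials)  # register the loop so its data are saved with the experiment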
Code Example #16
from __future__ import print_function
from builtins import next
import csv, codecs
from psychopy import gui

filename = gui.fileOpenDlg('.', allowed='*.csv')[0]
        
#use csv from python (not from numpy) due to handling newlines within quote char
with open(filename, 'rU') as csvFile:
    spamreader = csv.reader(csvFile, delimiter=',', quotechar='"', dialect=csv.excel)
    headers = next(spamreader)
    print('headers:', type(headers), headers)
    entries=[]
    for thisRow in spamreader:
        print(thisRow)
        thisEntry = {}
        for fieldN, thisFieldName in enumerate(headers):
            thisEntry[thisFieldName] = thisRow[fieldN]
        entries.append(thisEntry)

companHead="Your Company or Institution"
nameHead='Your name (or anon, but a name is nicer)'
testimHead='Your thoughts on PsychoPy'
posnHead = 'Your position'


with open('testimonialsText.html', 'w') as outFile:  # text mode, since str (not bytes) is written below
    for thisEntry in entries:
        outFile.write('    <hr>%s <p>\n' %(thisEntry[testimHead].replace('\n', '<br>')))
        nameStr = '    - <em>%s' %thisEntry[nameHead]
        if thisEntry[posnHead]:
Code Example #17
import analysis_utils as ana

contrast = []
correct = []

if __name__=="__main__":

    bootstrap_n = 1000

    #Weibull params set in analysis_utils: The guessing rate is 0.25 for 4afc
    guess = 0.5
    flake = 0.01
    slope = 3.5

    file_names = gui.fileOpenDlg(tryFilePath='./data')

    for file_idx, file_name in enumerate(file_names):
        print(file_idx)
        if file_idx == 0:
            file_stem =  file_name.split('/')[-1].split('.')[0]
        else:
            file_stem = file_stem + file_name[-8]
        p, l, data_rec = ana.get_data(str(file_name))
        trials_per_condition = float(p[' trials_per_block'])*(float(p[' num_blocks'])/2.0)
        print(trials_per_condition)
        contrast = np.ones([len(file_names)*trials_per_condition,1])
        correct = np.ones([len(file_names)*trials_per_condition,1])
        data_rec = csv2rec(file_name)
        contrast_this_run = data_rec['annulus_target_contrast']
        correct_this_run = data_rec['correct']
Code Example #18
import pandas as pd
from random import choice
from numpy.random import choice as choice2
import numpy as np
import random
import csv
import pandas
import matplotlib
import matplotlib.pyplot as plt
from psychopy import gui  # needed for the file dialog below
from psychopy.tools.filetools import fromFile
import pylab
import os
from operator import truediv

#Open Dialog for file
files = gui.fileOpenDlg('./results/')
file = files[0]

full_filename = os.path.splitext(os.path.basename(file))
filename = full_filename[0]

save_dir = os.path.dirname(file)
data = pandas.read_csv(file)

clean_data = data[data['correct'] != 'Rest']
only_correct = clean_data[clean_data['correct'] == '1'].groupby(
    ['trial_type', 'strength']).count()
all_data = clean_data.groupby(['trial_type', 'strength']).count()

percentages_correct = only_correct / all_data
Code Example #19
def main(file_name=None):
    """ Run the analysis on data in a file"""

    # Define these two within the scope of main:
    def func(pars):
        a, b, c = pars
        return a * (x ** 2) + b * x + c

    def errfunc(pars):
        return y - func(pars)

    if file_name is None:
        # path_to_files = '/Volumes/Plata1/Shared/Ariel/texture_data/'
        file_name = fileOpenDlg()[0]

    p, l, data_rec = utils.get_data(file_name)

    # For backwards compatibility, check if this variable exists:
    if "eye_moved" in l:
        data_rec = data_rec[np.where(data_rec["eye_moved"] == 0)]

    neutral = data_rec[np.where(data_rec["neutral"])]
    peripheral = data_rec[np.where(data_rec["neutral"] == 0)]
    cond_str = ["Neutral", "Cued"]
    colors = ["b", "r"]
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)

    print("SOA used was: %s msec" % (1000 * p[" texture_dur"]))
    print("% correct: ")

    for cond_idx, cond_rec in enumerate([neutral, peripheral]):
        correct = cond_rec["correct"]
        ecc = cond_rec["target_ecc"]

        # Bin the eccentricities:
        a = np.floor(ecc)
        eccs_used = np.unique(a)

        # Initialize counters:
        b = np.zeros(len(eccs_used))
        c = np.zeros(len(eccs_used))

        # Loop over the trials and add the correct to one counter and the number of
        # trials to the other:
        for i in range(len(correct)):
            idx = np.where(eccs_used == np.floor(ecc[i]))
            b[idx] += correct[i]
            c[idx] += 1.0

        p_correct = b / c
        print("%s: %s " % (cond_str[cond_idx], np.mean(p_correct) * 100))

        for i, p in enumerate(p_correct):
            ax.plot(eccs_used[i], p, "o", color=colors[cond_idx], markersize=c[i])

        x = []
        y = []

        for i, this_ecc in enumerate(eccs_used):
            x = np.hstack([x, c[i] * [this_ecc]])
            y = np.hstack([y, c[i] * [p_correct[i]]])

        guess = 1, 1, 1
        fit, mesg = leastsq(errfunc, guess)
        x = np.arange(0, np.max(x), 0.01)
        ax.plot(x, func(fit), "--", color=colors[cond_idx], label=cond_str[cond_idx])

    ax.legend()
    ax.set_xlim([-1, 13])
    ax.set_ylim([0, 1.1])
    ax.set_xlabel("Eccentricity (degrees)")
    ax.set_ylabel("Proportion correct responses")

    fig_name = "figures/" + file_name.split(".")[0].split("/")[-1] + ".png"
    fig.savefig(fig_name)
    os.system("open %s" % fig_name)
Code Example #20
        'sendPortEvents': True,
        'startAtRun': ['1','2','3'],
        'paramsFile':['DEFAULT','Load...']}
# overwrite params struct if you just saved a new parameter set
if saveParams:
    expInfo['paramsFile'] = [newParamsFilename,'Load...']

#present a dialogue to change select params
dlg = gui.DlgFromDict(expInfo, title=scriptName, order=['subject','session','version','skipPrompts','sendPortEvents','paramsFile'])
if not dlg.OK:
    core.quit() # the user hit cancel, so exit

expInfo['paramsFile'] = ''.join(expInfo['paramsFile']) # convert from list of characters to string
# find parameter file
if expInfo['paramsFile'] == 'Load...':
    dlgResult = gui.fileOpenDlg(prompt='Select parameters file',tryFilePath=os.getcwd(),
        allowed="PSYDAT files (*.psydat);;All files (*.*)")
    expInfo['paramsFile'] = dlgResult[0]
# load parameter file
if expInfo['paramsFile'] not in ['DEFAULT', None]: # otherwise, just use defaults.
    # load params file
    print(expInfo)
    params = fromFile(expInfo['paramsFile'])


# transfer experimental flow items from expInfo (gui input) to params (logged parameters)
for flowItem in ['skipPrompts','sendPortEvents']:
    params[flowItem] = expInfo[flowItem]


# print params to Output
print('params = {')
Code Example #21
#!/usr/bin/env python

# This analysis script takes one or more staircase datafiles as input from a GUI
# It then plots the staircases on top of each other on the left
# and a combined psychometric function from the same data
# on the right
#

from psychopy import data, gui, misc, core, compatibility
import pylab, scipy
import numpy as np

files = gui.fileOpenDlg(".")
if not files:
    core.quit()

# get the data from all the files
allIntensities, allResponses = [], []
for thisFileName in files:
    thisDat = compatibility.fromFile(thisFileName)
    assert isinstance(thisDat, data.StairHandler)
    allIntensities.append(thisDat.intensities)
    allResponses.append(thisDat.data)

# plot each staircase
pylab.subplot(121)
colors = "brgkcmbrgkcm"
lines, names = [], []
for fileN, thisStair in enumerate(allIntensities):
    # lines.extend(pylab.plot(thisStair))
    # names = files[fileN]
Code Example #22
import pickle
from psychopy import gui

def load_subject():
    filename = gui.fileOpenDlg(tryFilePath='data', allowed='*.pkl')[0]
    with open(filename, 'rb') as fp:
        subject = pickle.load(fp)
    return subject
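load_subject in Example #22 reads a pickled subject object back from disk; for completeness, a hedged sketch of the matching save step (the function name and default path are illustrative):

import pickle

def save_subject(subject, filename='data/subject.pkl'):  # illustrative default path
    # hypothetical counterpart to load_subject: write the subject object back to
    # a .pkl file so a later session can reopen it via the file dialog above
    with open(filename, 'wb') as fp:
        pickle.dump(subject, fp)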
Code Example #23
def main(file_name=None):
    """ Run the analysis on data in a file"""

    # Define these two within the scope of main:
    def func(pars):
        a,b,c = pars
        return a*(x**2) + b*x + c 

    def errfunc(pars):
        return y-func(pars)

    if file_name is None: 
        #path_to_files = '/Volumes/Plata1/Shared/Ariel/texture_data/'
        file_name =  fileOpenDlg()[0]
    
    p,l,data_rec = utils.get_data(file_name)
    neutral = data_rec[np.where(data_rec['neutral'])]
    peripheral = data_rec[np.where(data_rec['neutral']==0)]
    cond_str = ['Neutral', 'Cued']
    colors = ['b','r']
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)

    print("SOA used was: %s msec"%(1000*p[' texture_dur']))
    print("% correct: ")

    for cond_idx,cond_rec in enumerate([neutral,peripheral]):
        correct = cond_rec['correct']
        ecc = cond_rec['target_ecc']

        #Bin the eccentricities: 
        a = np.floor(ecc)
        eccs_used = np.unique(a)

        #Initialize counters: 
        b = np.zeros(len(eccs_used))
        c = np.zeros(len(eccs_used))

        #Loop over the trials and add the correct to one counter and the number of
        #trials to the other: 
        for i in range(len(correct)):
                idx = np.where(eccs_used==np.floor(ecc[i]))
                b[idx]+=correct[i]
                c[idx]+=1.0

        p_correct = b/c
        print("%s: %s "%(cond_str[cond_idx], np.mean(p_correct)*100))
        
        for i,p in enumerate(p_correct):
                ax.plot(eccs_used[i],p,'o',color=colors[cond_idx],markersize=c[i])

        x = []
        y = []

        for i,this_ecc in enumerate(eccs_used):
            x = np.hstack([x,c[i]*[this_ecc]])    
            y = np.hstack([y,c[i]*[p_correct[i]]])

        guess = 1,1,1
        fit, mesg = leastsq(errfunc,guess)
        x = np.arange(0,np.max(x),0.01)        
        ax.plot(x,func(fit),'--',color=colors[cond_idx],
                label=cond_str[cond_idx])
        
    ax.legend()
    ax.set_xlim([-1,13])
    ax.set_ylim([0,1.1])
    ax.set_xlabel('Eccentricity (degrees)')
    ax.set_ylabel('Proportion correct responses')

    fig_name = 'figures/' + file_name.split('.')[0].split('/')[-1] + '.png'
    fig.savefig(fig_name)
    os.system('open %s'%fig_name)
Code Example #24
"""Use this script to analyse data from the gammaMotionNull.py
script. 

Instructions: From the dialogue box select multiple staircases (Cmd-click
or shift-click) to plot the results
"""

#analyse standard staircase data
import matplotlib

matplotlib.use('TKAgg')
from psychopy import data, gui, misc, core
import pylab
import numpy as num

files = gui.fileOpenDlg('.')
if not files:
    core.quit()

#get the data from all the files
allIntensities, allResponses = [], []
for thisFileName in files:
    thisDat = misc.fromFile(thisFileName)
    assert isinstance(thisDat, data.StairHandler)
    allIntensities.append(thisDat.intensities)
    allResponses.append(thisDat.data)

#plot each staircase
pylab.subplot(121)
lines, names = [], []
for fileN, thisStair in enumerate(allIntensities):
Code Example #25
File: analyse.py  Project: MWiechmann/posner
from os import path
from psychopy import misc, gui
import pandas as pd
import scipy
from scipy import stats

#choose some data files to analyse
filenames = gui.fileOpenDlg(allowed="*.csv")

#loop through the files
for thisFilename in filenames:
    print(thisFilename)
    thisDat = pd.read_csv(thisFilename)
    
    #filter out bad data
    filtered = thisDat[ thisDat['rt']<=1.0 ]
    filtered = filtered[ filtered['corr']==1 ]
    
    #separate conflicting from congruent reaction times
    conflict = filtered[filtered.description == 'conflict']
    congruent = filtered[filtered.description != 'conflict']
    #get mean/std.dev
    meanConfl = scipy.mean(conflict.rt)
    semConfl = scipy.std(conflict.rt, ddof=1)
    meanCongr = scipy.mean(congruent.rt)
    semCongr = scipy.std(congruent.rt, ddof=1)
    print "Conflict = %.3f (std=%.3f)" %(meanConfl, semConfl)
    print "Congruent = %.3f (std=%.3f)" %(meanCongr, semCongr)
    
    #run a t-test
    t, p = stats.ttest_ind(conflict.rt, congruent.rt)
Code Example #26
File: 2dfft.py  Project: RSharman/Experiments
# 2D Fast Fourier Transform on the lum, lm and s components of natural scenes - March 2011

#IF YOU DO NOT CHANGE THE DESTINATION FILE NAME, IT WILL OVERWRITE ANY
#EXISTING FILE WITH THE SAME NAME

from psychopy import misc, core, visual, event, data, gui
from scipy import fftpack
import scipy, Image, copy, pylab, glob
import numpy as np
import radialProfile, radial_data
from openpyxl.workbook import Workbook
from openpyxl.writer.excel import ExcelWriter
#
#files = glob.glob('C:\Documents and Settings\lpxrs\My Documents\Colour Data\Image Statistics\McGill Image Library/Textures/*.tif')
#files = gui.fileOpenDlg('.', allowed = "JPEG files (*.jpg) | *.jpg")
files = gui.fileOpenDlg('.', allowed = "TIFF files (*.tif) | *.tif")
if not files:
    core.quit()

counter = 2

#Name of file where data will be saved
#fName = "Textures2.xlsx"

#Create Excel File
wb = Workbook()
ew = ExcelWriter(workbook = wb)

ws = wb.get_active_sheet()
ws.title = "Raw Data"