Example #1
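# imports assumed by this snippet (get_config_file, open_csv and get_dataframes are
# project-local helpers, not shown here)
from os import listdir, path
import numpy as np
import pandas as pd
import matplotlib
from matplotlib.font_manager import FontProperties
from matplotlib.pyplot import figure, legend
from scipy.stats import sem
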
def ett(plot_ax="Y", source=False, id_list="final", make_tight=True, print_title=True, linewidth=0.8, fontscale=0.5, isspec=False, make_sem=True):
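    """
    Plot group-averaged gaze position over time (a sketch of the intent, inferred from
    the code below): plot_ax selects the gaze axis to plot ('X' or 'Y'), id_list selects
    the participant pool ("final" resolves to the l_dist list defined below), make_sem
    draws a standard-error band around each curve, and source overrides the default
    data-source key read from the config file.
    """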
    config = get_config_file(localpath=path.dirname(path.realpath(__file__))+'/')
	    
    #IMPORT VARIABLES
    if not source:
        source = config.get('Source', 'source')
    data_path = config.get('Addresses', source)
    reaction_times = config.get('Addresses', 'reaction_times')
    #END IMPORT VARIABLES
    
    if plot_ax == "X":
	plot_axis = 'GazePointX'
	direction = "(towards stimulus)"
    if plot_ax == "Y":
	plot_axis = 'GazePointY'
	direction = "(up)"
    data_path = path.expanduser(data_path)
    rt_path = data_path + reaction_times
    
    files = [lefile for lefile in listdir(rt_path) if lefile.endswith('.tsv')]
    ids = [t.split('_',2)[0]+'_'+t.split('_',2)[1] for t in files]
    ids = np.unique(ids)
    eye_data_total = pd.DataFrame([])
    stimulus_datas = pd.DataFrame([])
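    # hard-coded participant ID pools; id_list="final" resolves to l_dist below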
    spec = ['6245247_f']
    h_dist = ['1236345_f','6779353_f','7310001_f','7714775_m','7816097_m','7865828_m','7922847_m']
    l_dist = ['1975801_m','4724273_f','6268973_m','8963557_f','8286497_m','8963557_m','9651558_m','8240877_m','6887665_m','5559429_f','8582941_f','8582941_m','9302438_f','4276763_f','3878418_m','3537898_f','1247497_f','8717741_m','4744495_f','7117377_m']
    best = ['1975801_m','4724273_f','6268973_m','8963557_f','8286497_m','8963557_m','6887665_m','5559429_f','8582941_f','9302438_f','1247497_f','4744495_f','7117377_m']
    test = ['chr1_f','chr2_f']
    if id_list=="final":
	id_list = l_dist
    
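    # per participant: derive attractiveness cutoffs from the rating session, label each
    # trial by block, and average the gaze traces per condition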
    for fileidx, fileid in enumerate(id_list):
	ratings = open_csv(rt_path+fileid+'_p')
	ratings = pd.DataFrame(ratings[1:], columns=ratings[0], dtype=float)
	ratings = ratings.groupby('picture').mean()    
	# per-participant cutoffs: scores of the 20th-highest and 20th-lowest rated pictures
	sorted_scores = sorted(ratings['score'])
	score_top, score_bottom = sorted_scores[-20], sorted_scores[19]
	
	stimulus_data = pd.DataFrame.from_csv(rt_path+fileid+'_wm.csv')
	stimulus_data['rateL'] = stimulus_data['rateL'].astype(np.float64)
	stimulus_data['RTL'] = stimulus_data['RTL'].astype(np.float64)
	stimulus_data['orderL'] = stimulus_data['orderL'].astype(np.float64)
	stimulus_data['rateR'] = stimulus_data['rateR'].astype(np.float64)
	stimulus_data['RTR'] = stimulus_data['RTR'].astype(np.float64)
	stimulus_data['orderR'] = stimulus_data['orderR'].astype(np.float64)
	stimulus_data['RT'] = stimulus_data['RT'].astype(np.float64)
	stimulus_data['session'] = stimulus_data['session'].astype(np.float64)
	stimulus_data = stimulus_data[stimulus_data['RT'] >=0]
	stimulus_data['block'] = ''
	stimulus_data.ix[(stimulus_data['rateL'] >= score_top) & (stimulus_data['rateR'] >= score_top), 'block'] = 'aa'
	stimulus_data.ix[(stimulus_data['rateL'] >= score_top) & (stimulus_data['rateR'] <= score_bottom), 'block'] = 'au'
	stimulus_data.ix[(stimulus_data['rateL'] <= score_bottom) & (stimulus_data['rateR'] >= score_top), 'block'] = 'ua'
	stimulus_data.ix[(stimulus_data['rateL'] <= score_bottom) & (stimulus_data['rateR'] <= score_bottom), 'block'] = 'uu'
	
	aa_trials = list(stimulus_data[(stimulus_data['block'] == 'aa')]['session'])
	au_trials = list(stimulus_data[(stimulus_data['block'] == 'au')]['session'])
	ua_trials = list(stimulus_data[(stimulus_data['block'] == 'ua')]['session'])
	uu_trials = list(stimulus_data[(stimulus_data['block'] == 'uu')]['session'])
	
	stimleft_trials = list(stimulus_data[(stimulus_data['isstimleft'] == True)]['session'])
	stimright_trials = list(stimulus_data[(stimulus_data['isstimleft'] == False)]['session'])
	stimatt_trials = list(stimulus_data[(stimulus_data['isstimleft'] == True) & (stimulus_data['rateL'] >= score_top) & (stimulus_data['rateR'] <= score_bottom)]['session'])
	stimatt_trials = stimatt_trials + list(stimulus_data[(stimulus_data['isstimleft'] == False) & (stimulus_data['rateL'] <= score_bottom) & (stimulus_data['rateR'] >= score_top)]['session'])
	stimNatt_trials = list(stimulus_data[(stimulus_data['isstimleft'] == False) & (stimulus_data['rateL'] >= score_top) & (stimulus_data['rateR'] <= score_bottom)]['session'])
	stimNatt_trials = stimNatt_trials + list(stimulus_data[(stimulus_data['isstimleft'] == True) & (stimulus_data['rateL'] <= score_bottom) & (stimulus_data['rateR'] >= score_top)]['session'])
	
	# the raw eye-tracker export repeats this column-header row before every trial;
	# splitting the file on it yields one text chunk per trial
	pat = '\t'.join(['TimeStamp', 'GazePointXLeft', 'GazePointYLeft', 'ValidityLeft', 'GazePointXRight', 'GazePointYRight', 'ValidityRight', 'GazePointX', 'GazePointY', 'Event'])
	with open(rt_path+fileid+'_wmet.tsv') as infile:
	    eye_data = infile.read().split(pat)
	    eye_data = eye_data[1:] # remove header (describing date etc)
	    eye_data = [trial.split('\r\n') for trial in eye_data] # split each trial chunk into rows at '\r\n'
	    for idx, i in enumerate(eye_data): # crop to 447 ACTUAL time frames - the first one is empty
		eye_data[idx] = i[:448]
	    for idx, trial in enumerate(eye_data):
		trial = [row.split('\t') for row in trial]
		eye_data[idx] = trial
	    eye_data = [name[1:] for name in eye_data] # crop the first, empty line
    
	    eye_data = np.array(eye_data)
	    eye_data = eye_data[:,:,[0,3,6,7,8]].astype(np.float64) # convert to float, we don't need separate eye coordinates
	    eye_data[:,:,3:] = eye_data[:,:,3:] / 2 - 0.5 # the integrated left-right gaze coordinates are the sum of the per-eye screen percentages - divide by 2 (2 eyes) and normalize to: 50% = 0
    
	    for a in np.arange(np.shape(eye_data)[0]): # assume that when neither of the eyes is detected the subject looks at the fixation
		for i in np.arange(np.shape(eye_data)[1]):
		    if eye_data[a,i,1] == 4 and eye_data[a,i,2] == 4:
			eye_data[a,i,3] = 0
			eye_data[a,i,4] = 0
    
	for i in stimleft_trials: # invert stimleft trial coordinates - equates 'right' with 'stimside'
	    eye_data[i,:,3:] = -eye_data[i,:,3:]
	eye_data = eye_data[:,:,[0,3,4]]  # the validity columns can't go into the subsequent summation, discard them here
	eye_data_aa = eye_data[aa_trials,:,:]
	eye_data_uu = eye_data[uu_trials,:,:]
	eye_data_uas = eye_data[stimatt_trials,:,:]
	eye_data_aus = eye_data[stimNatt_trials,:,:]
    
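	# per-condition average across trials (sum over trials divided by the trial count)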
	eye_data_aa = np.sum(eye_data_aa, axis=0) / np.shape(eye_data_aa)[0]
	eye_data_uu = np.sum(eye_data_uu, axis=0) / np.shape(eye_data_uu)[0]
	eye_data_uas = np.sum(eye_data_uas, axis=0) / np.shape(eye_data_uas)[0]
	eye_data_aus = np.sum(eye_data_aus, axis=0) / np.shape(eye_data_aus)[0]
	
	eye_data_aa = pd.DataFrame(eye_data_aa, columns=['time','GazePointX','GazePointY'])
	eye_data_aa['stimuli'] = 'aa' 
	eye_data_uu = pd.DataFrame(eye_data_uu, columns=['time','GazePointX','GazePointY'])
	eye_data_uu['stimuli'] = 'uu' 
	eye_data_uas = pd.DataFrame(eye_data_uas, columns=['time','GazePointX','GazePointY'])
	eye_data_uas['stimuli'] = 'uas'
	eye_data_aus = pd.DataFrame(eye_data_aus, columns=['time','GazePointX','GazePointY'])
	eye_data_aus['stimuli'] = 'aus'
    
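	# stack the four condition averages into one long-format frame for this participant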
	eye_data = pd.concat([eye_data_aa, eye_data_uu, eye_data_uas, eye_data_aus])
	eye_data['ID'] = fileid
	eye_data["timepoint"] = eye_data.index
	if fileidx == 0:
	    eye_data_total = eye_data[["timepoint",'time','GazePointX','GazePointY','stimuli',"ID"]]
	else:
	    eye_data_total =  pd.concat([eye_data_total, eye_data[["timepoint",'time','GazePointX','GazePointY','stimuli',"ID"]]])
    
    # load reaction times (to plot as lines) here:
    conts = get_dataframes(id_list, rt_path)
    sa_reaction_time = conts[(conts['subblock'] == 'uas+sau')]['RT'].mean()*1000
    su_reaction_time = conts[(conts['subblock'] == 'aus+sua')]['RT'].mean()*1000
    aa_reaction_time = conts[(conts['block'] == 'aa')]['RT'].mean()*1000
    uu_reaction_time = conts[(conts['block'] == 'uu')]['RT'].mean()*1000
    # end load reaction times

    et_means = eye_data_total.groupby(["stimuli","timepoint"]).mean()
    if make_sem:
        et_sem = eye_data_total.groupby(["stimuli","timepoint"]).aggregate(sem)
     
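    # two stacked panels: the top compares aa vs. uu trials, the bottom compares trials
    # with the attractive vs. the unattractive face on the target side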
    fig = figure(figsize=(3, 4), dpi=300, facecolor='#eeeeee', tight_layout=make_tight)
    ax1=fig.add_subplot(2,1,1)
    matplotlib.rcParams.update({'font.size': 12*fontscale})
    
    ax1.set_xlim(0, et_means['time'].max())
    tc = et_means.ix["aa"]['time']
    v = et_means.ix["aa"][plot_axis]
    ax1.plot(tc, v, color='g')
    if make_sem:
        se = et_sem.ix["aa"][plot_axis]/2
        ax1.fill_between(tc, v+se, v-se, facecolor="g", edgecolor="none", alpha=0.1, zorder=0)
    tc = et_means.ix["uu"]['time']
    v = et_means.ix["uu"][plot_axis]
    ax1.plot(tc, v, color='m')
    if make_sem:
        se = et_sem.ix["uu"][plot_axis]/2
        ax1.fill_between(tc, v+se, v-se, facecolor="m", edgecolor="none", alpha=0.1, zorder=0)
    legend(('Attractive - Attractive','Unattractive - Unattractive'), bbox_to_anchor=(0.94, 0.99), shadow=False, frameon=False, prop=FontProperties(size=str(9*fontscale)))
    ax1.axhline(0, color='k', alpha = 0.1, linewidth=linewidth)
    ax1.axvline(aa_reaction_time, color='g', alpha = 0.3, linewidth=linewidth)
    ax1.axvline(uu_reaction_time, color='m', alpha = 0.3, linewidth=linewidth)
    ax1.set_ylabel(plot_ax+'-axis % '+direction)
    ax1.set_xlabel('Time [ms]')
    
    ax2 = fig.add_subplot(212)
    ax2.set_xlim(0, et_means['time'].max())
    tc = et_means.ix["uas"]['time']
    v = et_means.ix["uas"][plot_axis]
    ax2.plot(tc, v, color='g')
    if make_sem:
        se = et_sem.ix["uas"][plot_axis]/2
        ax2.fill_between(tc, v+se, v-se, facecolor="g", edgecolor="none", alpha=0.1, zorder=0)
    tc = et_means.ix["aus"]['time']
    v = et_means.ix["aus"][plot_axis]
    ax2.plot(tc, v, color='m')
    if make_sem:
        se = et_sem.ix["aus"][plot_axis]/2
        ax2.fill_between(tc, v+se, v-se, facecolor="m", edgecolor="none", alpha=0.1, zorder=0)
    legend(('Attractive on Target Side','Unattractive on Target Side'), bbox_to_anchor=(0.94, 0.99), shadow=False, frameon=False, prop=FontProperties(size=str(9*fontscale)))
    ax2.axhline(0, color='k', alpha = 0.1, linewidth=linewidth)
    ax2.axvline(sa_reaction_time, color='g', alpha = 0.3, linewidth=linewidth)
    ax2.axvline(su_reaction_time, color='m', alpha = 0.3, linewidth=linewidth)
    ax2.set_ylabel(plot_ax+'-axis % '+direction)
    ax2.set_xlabel('Time [ms]')
Example #2
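# imports assumed by this snippet (open_csv is a project-local CSV helper, not shown here)
from os import listdir, path
import numpy as np
import pandas as pd
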
globalpath = '~/Data/shared/2att/' #root of results
bh_results = 'bh/' # behavioural test results
cq_results = 'cq/' # questionnaire results

globalpath = path.expanduser(globalpath)
bhpath = globalpath + bh_results
cqpath = globalpath + cq_results

files = [lefile for lefile in listdir(bhpath) if lefile.endswith('.csv')]
ids = [t.split('_',2)[0]+'_'+t.split('_',2)[1] for t in files]
ids = np.unique(ids)
conts = pd.DataFrame([])

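# per participant: load the picture-rating log and coerce the numeric columns to float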
for i in ids:
	print(i)
	cont = open_csv(bhpath+i+'_p')
	cont = pd.DataFrame(cont[1:], columns=cont[0])
	cont['score'] = cont['score'].astype(np.float64)
	cont['RT'] = cont['RT'].astype(np.float64)
	cont['session'] = cont['session'].astype(np.float64)
	#cont.index = cont['picture']
	#cont = cont.pivot(index='session', columns='picture', values='score')
	#cont = pd.DataFrame({'count':cont.groupby('picture').size()}).reset_index()
	#lala = lambda x: (x - x.mean()) / x.std()
	#cont = cont.groupby(['picture'], as_index=True).mean()
	cont = cont.set_index(['picture', 'session']).sort_index()
	meas_id = [1,2,3] * int(len(cont.index) / 3) # assumes three repeated ratings per picture
	cont['measurement'] = ''
	cont = cont.reset_index().set_index(['measurement', meas_id])
	lemeans = pd.DataFrame([])
	lemeans['picture'] = ''
Example #3
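# imports assumed by this snippet (get_config_file and open_csv are project-local helpers,
# not shown here)
from os import listdir, path
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import axis
from matplotlib.font_manager import FontProperties
from matplotlib.pyplot import errorbar, figure, legend
from scipy.stats import sem
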
def coni(source=False, make_tight=True, print_title = True, elinewidth=2, fontscale=1, isspec = False):
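    """
    Bar plot of mean reaction times per participant (a sketch of the intent, inferred from
    the code below): green bars for blocks with only attractive faces ('aa'), magenta bars
    for blocks with only unattractive faces ('uu'), plus group totals with SEM error bars.
    Returns the per-participant, per-block mean table.
    """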
    config = get_config_file(localpath=path.dirname(path.realpath(__file__))+'/')
	    
    #IMPORT VARIABLES
    if not source:
        source = config.get('Source', 'source')
    data_path = config.get('Addresses', source)
    reaction_times = config.get('Addresses', 'reaction_times')
    #END IMPORT VARIABLES
    
    data_path = path.expanduser(data_path)
    rt_path = data_path + reaction_times
    
    files = [lefile for lefile in listdir(rt_path) if lefile.endswith('.csv')]
    ids = [t.split('_',2)[0]+'_'+t.split('_',2)[1] for t in files]
    ids = np.unique(ids)
    conts = pd.DataFrame([])
    
    # iterate over every participant ID found in the reaction-time directory
    for i in ids:
	ratings = open_csv(rt_path+i+'_p')
	ratings = pd.DataFrame(ratings[1:], columns=ratings[0], dtype=float)
	ratings = ratings.groupby('picture').mean()
	# per-participant cutoffs: scores of the 20th-highest and 20th-lowest rated pictures
	sorted_scores = sorted(ratings['score'])
	score_top, score_bottom = sorted_scores[-20], sorted_scores[19]

	cont = open_csv(rt_path+i+'_wm')
	cont = pd.DataFrame(cont[1:], columns=cont[0])
	cont['rateL'] = cont['rateL'].astype(np.float64)
	cont['RTL'] = cont['RTL'].astype(np.float64)
	cont['orderL'] = cont['orderL'].astype(np.float64)
	cont['rateR'] = cont['rateR'].astype(np.float64)
	cont['RTR'] = cont['RTR'].astype(np.float64)
	cont['orderR'] = cont['orderR'].astype(np.float64)
	cont['RT'] = cont['RT'].astype(np.float64)
	cont['session'] = cont['session'].astype(np.float64)
	cont = cont[cont['RT'] >=0]
	cont.ix[cont['isstimleft'] == 'False', 'isstimleft'] = False
	cont.ix[cont['isstimleft'] == 'True', 'isstimleft'] = True
	cont['ID'] = i	
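	# keep only trials where the keypress is on the same side as the stimulus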
	cont1 = cont[(cont['isstimleft'] == False) & (cont['keypress'] == 'right')]
	cont2 = cont[(cont['isstimleft'] == True) & (cont['keypress'] == 'left')]
	cont = pd.concat([cont1,cont2])
	cont['block'] = ''
	cont.ix[(cont['rateL'] >= score_top) & (cont['rateR'] >= score_top), 'block'] = 'aa'
	cont.ix[(cont['rateL'] >= score_top) & (cont['rateR'] <= score_bottom), 'block'] = 'au'
	cont.ix[(cont['rateL'] <= score_bottom) & (cont['rateR'] >= score_top), 'block'] = 'ua'
	cont.ix[(cont['rateL'] <= score_bottom) & (cont['rateR'] <= score_bottom), 'block'] = 'uu'
	conts = pd.concat([conts, cont], ignore_index=True)
	#cat1 = cont[cont['block']=='aa']
	#cat2 = cont[cont['block']=='uu']
	#print ttest_ind(cat1['RT'], cat2['RT'])
    
    ids = sorted(list(set(conts.set_index('ID').index)))
    pos_ids = np.arange(len(ids))
    
    meanscont = conts.groupby(['ID','block']).mean()
    meanscont = meanscont.reset_index()
    
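    # per-participant means/SEMs feed the individual bars; the across-participant means of
    # those per-participant means feed the 'TOTAL' bars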
    aa_means = conts[(conts['block'] == 'aa')].groupby('ID')['RT'].mean()
    aa_std = conts[(conts['block'] == 'aa')].groupby('ID')['RT'].aggregate(sem)
    aa_t_means = meanscont[(meanscont['block'] == 'aa')]['RT'].mean()
    aa_t_std = sem(meanscont[(meanscont['block'] == 'aa')]['RT'])
    
    uu_means = conts[(conts['block'] == 'uu')].groupby('ID')['RT'].mean()
    uu_std = conts[(conts['block'] == 'uu')].groupby('ID')['RT'].aggregate(sem)
    uu_t_means = meanscont[(meanscont['block'] == 'uu')]['RT'].mean()
    uu_t_std = sem(meanscont[(meanscont['block'] == 'uu')]['RT'])
    
    fig = figure(figsize=(pos_ids.max()*3, 4), dpi=300, facecolor='#eeeeee', tight_layout=make_tight)
    ax=fig.add_subplot(1,1,1)
    matplotlib.rcParams.update({'font.size': 12*fontscale})
    width = 0.3
    ax.yaxis.grid(True, linestyle='-', which='major', color='#dddddd',alpha=0.5, zorder=1)
    
    aa_bar = plt.bar(pos_ids, aa_means, width ,color='g', alpha=0.4, zorder=1, linewidth=0)
    aa_err = errorbar(pos_ids+(width/2), aa_means, yerr=aa_std, ecolor='0.55', elinewidth=elinewidth, capsize=0, linestyle='None', zorder=2)
    aa_t_bar = plt.bar(pos_ids[-1]+1, aa_t_means, width ,color='g', alpha=0.8, zorder=1, linewidth=0)
    aa_err = errorbar(pos_ids[-1]+1+(width/2), aa_t_means, yerr=aa_t_std, ecolor='0.1', elinewidth=elinewidth, capsize=0, linestyle='None', zorder=2)
    
    uu_bar = plt.bar(pos_ids+width, uu_means, width ,color='m', alpha=0.4, zorder=1, linewidth=0)
    uu_err = errorbar(pos_ids+(width*3/2), uu_means, yerr=uu_std, ecolor='0.55', elinewidth=elinewidth, capsize=0, linestyle='None', zorder=2)
    uu_t_bar = plt.bar(pos_ids[-1]+1+width, uu_t_means, width ,color='m', alpha=0.8, zorder=1, linewidth=0)
    uu_err = errorbar(pos_ids[-1]+1+(width*3/2), uu_t_means, yerr=uu_t_std, ecolor='0.1', elinewidth=elinewidth, capsize=0, linestyle='None', zorder=2)
    
    ids=ids+['TOTAL  '] # blank space at the end so that it doesn't overlap with the x-axis
    pos_ids = np.arange(len(ids))
    ax.set_xlim(0, pos_ids.max())
    ax.set_ylim(0, 1.1)
    ax.set_ylabel(r'$\mathsf{\overline{RT}}$ [s]')
    ax.set_xlabel('Participant ID')
    ax.set_xticks(pos_ids + width)
    ax.set_xticklabels(ids,fontsize=8*fontscale,rotation=90)
    for tick in ax.axes.get_xticklines():
        tick.set_visible(False)
    axis.Axis.zoom(ax.xaxis, -0.5)
    legend((aa_t_bar,uu_t_bar),('Only attractive faces','Only unattractive faces'), bbox_to_anchor=(0.92, 1), shadow=False, frameon=False, prop=FontProperties(size=str(11*fontscale)))
    return meanscont