Example #1
0
def test_io():

    """Round-trip a reference DataMatrix through csv, pickle, and xlsx."""
    refdm = DataMatrix(length=3)
    refdm[u'tést'] = 1, 2, u''
    refdm.B = u'mathôt', u'b', u'x'
    refdm.C = u'a,\\b"\'c', 8, u''

    # Read the canned csv, then write it out and read it back.
    testdm = io.readtxt('testcases/data/data.csv')
    check_dm(refdm, testdm)
    io.writetxt(testdm, 'tmp.csv')
    testdm = io.readtxt('tmp.csv')
    check_dm(refdm, testdm)

    # Alternative line endings should parse to the same data.
    for path in (
        'testcases/data/line-ending-cr.csv',
        'testcases/data/line-ending-crlf.csv',
    ):
        refdm = io.readtxt(path)
        check_dm(refdm, testdm)

    # pickle round trip
    io.writepickle(testdm, 'tmp.pickle')
    testdm = io.readpickle('tmp.pickle')
    check_dm(refdm, testdm)

    # xlsx round trip
    io.writexlsx(testdm, 'tmp.xlsx')
    testdm = io.readxlsx('tmp.xlsx')
    check_dm(refdm, testdm)
Example #2
0
def test_io():

    """Round-trip a reference DataMatrix through csv, pickle, and xlsx."""
    refdm = DataMatrix(length=3)
    refdm[u'tést'] = 1, 2, u''
    refdm.B = u'mathôt', u'b', u'x'
    refdm.C = u'a,\\b"\'c', 8, u''

    # Read the canned csv, then write it out and read it back.
    testdm = io.readtxt('testcases/data/data.csv')
    check_dm(refdm, testdm)
    io.writetxt(testdm, 'tmp.csv')
    testdm = io.readtxt('tmp.csv')
    check_dm(refdm, testdm)

    # Different line endings and a BOM should all load identically.
    for path in (
        'testcases/data/line-ending-cr.csv',
        'testcases/data/line-ending-crlf.csv',
        'testcases/data/data-with-bom.csv',
    ):
        refdm = io.readtxt(path)
        check_dm(refdm, testdm)

    # pickle round trip
    io.writepickle(testdm, 'tmp.pickle')
    testdm = io.readpickle('tmp.pickle')
    check_dm(refdm, testdm)

    # Two consecutive xlsx round trips: the second pass re-exports the
    # matrix that was just read back from xlsx.
    for _ in range(2):
        io.writexlsx(testdm, 'tmp.xlsx')
        with pytest.warns(UserWarning):  # Not all rows have column C
            testdm = io.readxlsx('tmp.xlsx')
        check_dm(refdm, testdm)
Example #3
0
    def _convert_results(self):

        """Ask the user for a JATOS results file, parse it into a
        DataMatrix, and export it to xlsx or csv.
        """
        from osweb import data
        from datamatrix import io

        results_path = QFileDialog.getOpenFileName(
            self.main_window,
            _(u'Select JATOS results file…'),
            filter=u'JATOS results (*.txt)')
        # Some Qt bindings return a (path, selected_filter) tuple.
        if isinstance(results_path, tuple):
            results_path = results_path[0]
        if not results_path:
            # Cancelled by the user
            return

        self.main_window.set_busy(True)
        try:
            dm = data.parse_jatos_results(results_path)
        finally:
            self.main_window.set_busy(False)
        save_path = QFileDialog.getSaveFileName(
            self.main_window,
            _(u'Save as…'),
            filter=u'Excel (*.xlsx);;CSV (*.csv)')
        if isinstance(save_path, tuple):
            save_path = save_path[0]
        if not save_path:
            # Cancelled by the user
            return
        # The chosen extension determines the output format.
        writer = (
            io.writexlsx
            if save_path.lower().endswith(u'.xlsx')
            else io.writetxt
        )
        writer(dm, save_path)
def process_video(run, start_frame=1):

    """Process one video run frame by frame and write one csv file per
    subject with the per-frame luminance/change measures.
    """
    kernel = smoothing_kernel()
    capture = cv2.VideoCapture(SRC_VIDEO.format(run - 1))
    dm = DataMatrix()
    prev_img = None
    frame_nr = start_frame
    while True:
        ok, img = capture.read()
        # Stop at the end of the stream or at the frame cap.
        if not ok or frame_nr >= MAX_FRAME:
            print('Done!')
            break
        dm <<= process_frame(
            run,
            frame_nr,
            luminance_map(img, kernel),
            change_map(img, prev_img, kernel),
        )
        prev_img = img
        frame_nr += 1
    # One output file per subject.
    for sub, sdm in ops.split(dm.sub):
        io.writetxt(sdm, DST.format(subject=sub, run=run))
Example #5
0
def word_summary(dm):

    """
    desc:
        Plots the mean pupil size for dark and bright words as a bar plot. The
        time window is indicated by the PEAKWIN constant. This data is also
        written to a .csv file.

    arguments:
        dm:
            type: DataMatrix
    """

    dm = (dm.type == "light") | (dm.type == "dark")
    sm = DataMatrix(length=len(dm.word.unique))
    sm.word = 0
    sm.type = 0
    sm.pupil_win = FloatColumn
    sm.pupil_win_se = FloatColumn
    sm.pupil_full = FloatColumn
    sm.pupil_full_se = FloatColumn
    for i, w in enumerate(dm.word.unique):
        _dm = dm.word == w
        sm.word[i] = w
        # Reuse the already-filtered matrix instead of filtering dm a
        # second time (the original recomputed `dm.word == w` here).
        sm.type[i] = _dm.type[0]
        sm.pupil_win[i], sm.pupil_win_se[i] = size_se(_dm, PEAKWIN[0], PEAKWIN[1])
        sm.pupil_full[i], sm.pupil_full_se[i] = size_se(_dm)
    sm = operations.sort(sm, sm.pupil_win)
    io.writetxt(sm, "%s/word_summary.csv" % OUTPUT_FOLDER)

    plot.new(size=(4, 3))
    for color, type_ in ((orange[1], "light"), (blue[1], "dark")):
        sm_ = sm.type == type_
        x = np.arange(len(sm_))
        plt.plot(sm_.pupil_win, "o-", color=color)
        # Asymmetric error bars: dark errors extend upward, light downward.
        if type_ == "dark":
            yerr = (np.zeros(len(sm_)), sm_.pupil_win_se)
        else:
            yerr = (sm_.pupil_win_se, np.zeros(len(sm_)))
        plt.errorbar(x, sm_.pupil_win, yerr=yerr, linestyle="", color=color, capsize=0)
    plt.xlim(-1, 33)
    plt.ylabel("Pupil size (normalized)")
    plt.xlabel("Word")
    plt.xticks([])
    plot.save("word_summary")
Example #6
0
def _launchr(dm, cmd):

	"""Feed a DataMatrix and a set of R commands to an R subprocess, then
	read the resulting output file back into a DataMatrix."""
	dm = dm[:]
	# SeriesColumns cannot be saved to a csv file, so we delete those first.
	for name, col in dm.columns:
		if isinstance(col, _SeriesColumn):
			del dm[name]
	# Write the data to an input file for R to read
	io.writetxt(dm, u'.r-in.csv')
	# Start R and send it the commands, prefixed by code that loads the
	# input file and attaches its columns.
	proc = subprocess.Popen(['R', '--vanilla'], stdin=subprocess.PIPE)
	script = u'data <- read.csv(".r-in.csv")\nattach(data)\n%s' % cmd
	proc.communicate(safe_encode(script, u'ascii'))
	# Poll until R has produced the output file, then read it back
	while not os.path.exists(u'.r-out.csv'):
		time.sleep(.5)
	return io.readtxt(u'.r-out.csv')
Example #7
0
def test_io():

	"""Round-trip a reference DataMatrix through csv, pickle, and xlsx."""
	refdm = DataMatrix(length=3)
	refdm[u'tést'] = 1, 2, u''
	refdm.B = u'mathôt', u'b', u'x'
	refdm.C = u'a,\\b"\'c', 8, u''

	# Read the canned csv and compare against the reference.
	testdm = io.readtxt('testcases/data/data.csv')
	check_dm(refdm, testdm)

	# Write/read cycle through each supported format.
	for write, read, path in (
		(io.writetxt, io.readtxt, 'tmp.csv'),
		(io.writepickle, io.readpickle, 'tmp.pickle'),
		(io.writexlsx, io.readxlsx, 'tmp.xlsx'),
	):
		write(testdm, path)
		testdm = read(path)
		check_dm(refdm, testdm)
def merge_pupil(subject_run):

    """Merge eye-gaze, pupil, and video-derived luminance/change data for
    one subject/run into a downsampled DataMatrix and write it to csv.

    arguments:
        subject_run: a (subject, run) tuple
    """
    subject, run = subject_run
    print(SRC_EYEGAZE.format(subject=subject, run=run))
    # Skip runs that were already processed, unless --clean is passed.
    if '--clean' not in sys.argv and os.path.exists(
        DST.format(subject=subject, run=run)
    ):
        print('already done ...')
        return
    print('\treading ...')
    a = np.genfromtxt(
        SRC_EYEGAZE.format(subject=subject, run=run),
        delimiter='\t'
    )
    n_eyegaze = a.shape[0] // DOWNSAMPLE
    dm = DataMatrix(length=n_eyegaze)
    gazex = a[:, 0]
    gazey = a[:, 1]
    pupil = a[:, 2]
    print('\treconstructing blinks ...')
    pupil = srs.blinkreconstruct(pupil)
    # Zero pupil samples indicate missing data; make them nan so that the
    # nan-aware downsampling functions ignore them.
    pupil[pupil == 0] = np.nan
    print('\tgetting average luminance ...')
    luminance, change = video_timeseries(
        subject=subject,
        run=run,
        frames=a[:, 3]
    )
    print('\tdownsampling ...')
    frame = a[:, 3]
    dm.pupil_size = srs.downsample(pupil, DOWNSAMPLE, fnc=np.nanmedian)
    dm.luminance = srs.downsample(luminance, DOWNSAMPLE, fnc=np.nanmedian)
    dm.change = srs.downsample(change, DOWNSAMPLE, fnc=np.nanmedian)
    # Bug fix: sdgazex was computed from the y coordinate and sdgazey from
    # the x coordinate; each axis now uses its own gaze signal.
    dm.sdgazex = srs.downsample(gazex, DOWNSAMPLE, fnc=np.nanstd)
    dm.sdgazey = srs.downsample(gazey, DOWNSAMPLE, fnc=np.nanstd)
    dm.start_frame = srs.downsample(frame, DOWNSAMPLE, fnc=np.nanmin)
    dm.end_frame = srs.downsample(frame, DOWNSAMPLE, fnc=np.nanmax)
    print('\twriting {}'.format(DST.format(subject=subject, run=run)))
    io.writetxt(dm, DST.format(subject=subject, run=run))
    def _convert_results(self):

        """Ask the user for a JATOS results file, parse it into a
        DataMatrix, and export it to xlsx or csv depending on the chosen
        file name.
        """
        from osweb import data
        from datamatrix import io

        jatos_results_path = QFileDialog.getOpenFileName(
            self.main_window,
            _(u'Select JATOS results file…'),
            filter=u'JATOS results (*.*)')
        # Some Qt bindings return a (path, selected_filter) tuple.
        if isinstance(jatos_results_path, tuple):
            jatos_results_path = jatos_results_path[0]
        if not jatos_results_path:
            # Cancelled by the user
            return

        self.main_window.set_busy(True)
        try:
            dm = data.parse_jatos_results(
                jatos_results_path,
                include_context=cfg.oswebext_include_context)
        except UnicodeDecodeError:
            # Results files must be utf-8; notify the user and bail out.
            # The finally clause below still clears the busy state before
            # this return takes effect.
            self.extension_manager.fire('notify',
                                        message=_('File is not utf-8 encoded'),
                                        category='warning')
            return
        finally:
            self.main_window.set_busy(False)
        export_path = QFileDialog.getSaveFileName(
            self.main_window,
            _(u'Save as…'),
            filter=u'Excel (*.xlsx);;CSV (*.csv)')
        if isinstance(export_path, tuple):
            export_path = export_path[0]
        if not export_path:
            # Cancelled by the user
            return
        # The extension of the chosen file name determines the format.
        if export_path.lower().endswith(u'.xlsx'):
            io.writexlsx(dm, export_path)
        else:
            io.writetxt(dm, export_path)
Example #10
0
def subject_summary(dm):

    """
    desc:
        Plots the mean difference in pupil size between dark and bright trials
        for each participant as a bar plot. The time window is indicated by the
        PEAKWIN constant. This data is also written to a .csv file.

    arguments:
        dm:
            type: DataMatrix
    """

    subjects = dm.subject_nr.unique
    x = np.arange(len(subjects))
    sm = DataMatrix(length=len(subjects))
    sm.subject_nr = 0
    sm.effect_win = FloatColumn
    sm.effect_win_se = FloatColumn
    sm.effect_full = FloatColumn
    sm.effect_full_se = FloatColumn
    # One summary row per participant.
    for row_nr, subject in enumerate(subjects):
        sdm = dm.subject_nr == subject
        sm.subject_nr[row_nr] = subject
        sm.effect_win[row_nr], sm.effect_win_se[row_nr] = \
            effect_se(sdm, PEAKWIN[0], PEAKWIN[1])
        sm.effect_full[row_nr], sm.effect_full_se[row_nr] = effect_se(sdm)
    sm = operations.sort(sm, by=sm.effect_win)
    plot.new(size=(4, 3))
    plt.axhline(0, color="black")
    plt.plot(sm.effect_win, "o-", color=green[-1])
    plt.errorbar(x, sm.effect_win, yerr=sm.effect_win_se, linestyle="",
                 color=green[-1], capsize=0)
    plt.xlim(-1, 30)
    plt.ylabel("Pupil-size difference (normalized)")
    plt.xlabel("Participant")
    plt.xticks([])
    plot.save("subject_summary")
    io.writetxt(sm, "%s/subject_summary.csv" % OUTPUT_FOLDER)
Example #11
0
def _launchr(dm, cmd):

    """Pipe a DataMatrix and a set of R commands into an R subprocess, then
    read the resulting output file back into a DataMatrix."""
    dm = dm[:]
    # SeriesColumns cannot be saved to a csv file, so we delete those first.
    for name, col in dm.columns:
        if isinstance(col, _SeriesColumn):
            del dm[name]
    # Write the data to an input file
    io.writetxt(dm, u'.r-in.csv')
    # Launch R; capture (and thereby suppress) its output unless running
    # verbosely.
    popen_kwargs = {'stdin': subprocess.PIPE}
    if not verbose:
        popen_kwargs['stdout'] = subprocess.PIPE
        popen_kwargs['stderr'] = subprocess.PIPE
    proc = subprocess.Popen(['R', '--vanilla'], **popen_kwargs)
    # Prefix the commands with code that loads and attaches the input data.
    script = u'data <- read.csv(".r-in.csv")\nattach(data)\n%s' % cmd
    proc.communicate(safe_encode(script, u'ascii'))
    # Wait until the output file has been generated and return it
    while not os.path.exists(u'.r-out.csv'):
        time.sleep(.5)
    return io.readtxt(u'.r-out.csv')
Example #12
0
        # NOTE(review): this fragment starts mid-function; src_exp,
        # src_fillers, block, pp_ID and dst are presumably bound earlier in
        # the enclosing function — confirm against the full source.
        main_dm = io.readtxt(src_exp)

        # Read in fillers:
        dm = io.readtxt(src_fillers)
        # splitFillers presumably separates fillers that must come first,
        # last, and anywhere in the list — TODO confirm against
        # helpers.splitFillers.
        dm_first, dm_last, dm_remaining = helpers.splitFillers(dm)

        # Merge the remaining fillers with the main dm:
        merged_dm = main_dm << dm_remaining

        # Shuffle the merged dm:
        merged_dm = ops.shuffle(merged_dm)

        # Create an Enforce object, and add constraint
        ef = Enforce(merged_dm)
        # At most 3 consecutive rows with the same Emotion ...
        ef.add_constraint(MaxRep, cols=[merged_dm.Emotion], maxrep=3)
        # ... and at least 2 rows between repetitions of the same Trial_ID.
        ef.add_constraint(MinDist, cols=[merged_dm.Trial_ID], mindist=2)

        # Enforce the constraints
        merged_dm = ef.enforce()

        # Add the first fillers:
        merged_dm = dm_first << merged_dm
        # Add the last fillers:
        merged_dm = merged_dm << dm_last

        # Add exp ID to the dm:
        merged_dm["Exp_ID"] = block
        io.writetxt(merged_dm, os.path.join(dst, "blockloop_%s_PP_%s.csv" \
            % (block, pp_ID)))
Example #13
0
# Aggregate the per-trial ratings into one summary row per word.
rm = DataMatrix(length=101)
rm.word = ''
rm._type = ''
rm.rating_brightness = FloatColumn
rm.rating_valence = FloatColumn
rm.rating_intensity = FloatColumn
for row, word in zip(rm, dm.word.unique):
	word_dm = dm.word == word
	emotion_dm = word_dm.rating_type == 'emotion'
	brightness_dm = word_dm.rating_type == 'brightness'
	# Pénombre was accidentally rated twice.
	assert len(brightness_dm) == 30 or word == 'pénombre'
	assert len(emotion_dm) == 30 or word == 'pénombre'
	row.word = word
	row._type = emotion_dm.type[0]
	row.rating_brightness = brightness_dm.rating.mean
	row.rating_valence = emotion_dm.rating.mean
	# The intensity is just the deviation from the middle score (2). In the
	# initial analysis, the deviation from the mean valence was taken. But
	# taking the per-trial deviation from the middle score, and then averaging
	# that seems to make more sense.
	row.rating_intensity = np.abs(emotion_dm.rating - 2).mean()
io.writetxt(rm, 'ratings.csv')

# Determine the correlations
print(plot.regress(rm.rating_brightness, rm.rating_valence))
plot.save('regress.brightness.valence')
print(plot.regress(rm.rating_brightness, rm.rating_intensity))
plot.save('regress.brightness.intensity')
Example #14
0
    # NOTE(review): fragment of a larger function (its def is not visible
    # here); dm, neg_dm, neu_dm, L_NEG_DIST1 and combine are presumably
    # defined above — confirm against the full source.
    # and negative distractor 2
    L_NEG_DIST2 = list(neg_dm.Scene)

    # Make a list of potential scenes for neutral distractor 1
    L_NEU_DIST1 = list(neu_dm.Scene)
    # and 2
    L_NEU_DIST2 = list(neu_dm.Scene)

    # Copies of the candidate lists are passed so combine can consume them
    # without exhausting the originals, allowing a retry below.
    new_dm = combine(dm, L_NEG_DIST1[:], L_NEG_DIST2[:], L_NEU_DIST1[:],
                     L_NEU_DIST2[:])

    # Make sure target, distractor 1 and distractor 2 are different scenes:
    # rejection sampling — keep recombining until no row repeats a scene.
    while any(row.distractor_scene_1 == row.distractor_scene_2
              or row.distractor_scene_1 == row.Scene
              or row.distractor_scene_2 == row.Scene for row in new_dm):
        # If not, try again:
        print("try again")
        new_dm = combine(dm, L_NEG_DIST1[:], L_NEG_DIST2[:], L_NEU_DIST1[:],
                         L_NEU_DIST2[:])

    return new_dm


if __name__ == "__main__":

    # Read dm containing stimuli without distractors:
    dm_exp = io.readtxt('dm_exp.csv')
    # Add distractor scenes and write out the complete trial list.
    dm_final = addDistractors(dm_exp)
    io.writetxt(dm_final, "./trial_list.csv")
Example #15
0
        # NOTE(review): fragment of a larger function; dm_fillers, final_dm
        # and list_dms are presumably defined above — confirm against the
        # full source. The fillers after index 10 go at the end of the list.
        dm_fillers_slice_last = dm_fillers[10:]
        final_dm = final_dm << dm_fillers_slice_last
        
        # Append to the dm list:
        list_dms.append(final_dm)

    # Unpack the list so we can return two separate dms
    dm1, dm2 = list_dms

    return dm1, dm2


if __name__ == "__main__":

    src_path = "trial list prescan"
    dst_path = "trial list prescan"

    # Experimental trials (without distractors)
    dm_exp = io.readtxt(os.path.join(src_path, 'dm_exp.csv'))
    # Read dm containing fillers (without distractors)
    dm_fillers = io.readtxt(os.path.join(src_path, 'dm_fillers.csv'))

    # Generate one pair of constrained trial lists per participant.
    for subject_ID in range(1, 71):
        dm1, dm2 = applyConstraints(dm_exp, dm_fillers)
        io.writetxt(dm1, os.path.join(
            dst_path, "block_loop_test_feedback_PP_%s.csv" % subject_ID))
        io.writetxt(dm2, os.path.join(
            dst_path, "block_loop_criterion_test_PP_%s.csv" % subject_ID))
        
        
    # NOTE(review): fragment of a larger function; dm_fillers, main_dm and
    # dm_fillers_slice_first are presumably defined above — confirm against
    # the full source. First fillers are prepended to the main dm here.
    final_dm = dm_fillers_slice_first << main_dm

    # And to the end of the final dm:
    dm_fillers_slice_last = dm_fillers[6:]
    final_dm = final_dm << dm_fillers_slice_last

    return final_dm


if __name__ == "__main__":

    src_file = "trial list postscan"
    dst_file = "trial list postscan/block loops IMDF postscan"

    # Experimental trials (without distractors), tagged with the
    # experiment ID.
    dm_exp = io.readtxt(os.path.join(src_file, 'IMDF_pairs_exp.csv'))
    dm_exp = getBlockLoopsScanner.addValenceColumn(dm_exp)
    dm_exp["Exp_ID"] = "IMDF_postscan"

    # Fillers (without distractors), tagged the same way.
    dm_fillers = io.readtxt(os.path.join(src_file, 'IMDF_pairs_fillers.csv'))
    dm_fillers["Exp_ID"] = "IMDF_postscan"

    # One constrained block loop per participant.
    for subject_ID in range(1, 71):
        pp_dm = applyConstraints(dm_exp, dm_fillers)
        out_path = os.path.join(
            dst_file, "block_loop_IMDF_postscan_PP_%s.csv" % subject_ID)
        io.writetxt(pp_dm, out_path)
Example #17
0
    # NOTE(review): fragment of a larger function or guarded block; the
    # enclosing def/if is not visible here. splitFillers, ops, Enforce,
    # MaxRep and MinDist are presumably imported/defined above.
    src_exp = "trial list postscan/TNT_pairs_exp.csv"
    src_fillers = "trial list postscan/TNT_pairs_fillers.csv"
    # NOTE(review): dst is never used below ("FINAL.csv" is written
    # instead) and spells "trial lists" where the sources spell
    # "trial list" — confirm whether this path is used elsewhere.
    dst = "trial lists postscan/block loops TNT postscan"

    main_dm = io.readtxt(src_exp)

    # Read in fillers:
    dm = io.readtxt(src_fillers)
    # splitFillers presumably separates fillers that must come first, last,
    # and anywhere in the list — TODO confirm.
    dm_first, dm_last, dm_remaining = splitFillers(dm)

    # Merge the remaining fillers with the main dm:
    merged_dm = main_dm << dm_remaining

    # Shuffle the merged dm:
    merged_dm = ops.shuffle(merged_dm)

    # Create an Enforce object, and add constraint
    ef = Enforce(merged_dm)
    # At most 3 consecutive rows with the same Emotion ...
    ef.add_constraint(MaxRep, cols=[merged_dm.Emotion], maxrep=3)
    # ... and at least 2 rows between repetitions of the same Trial_ID.
    ef.add_constraint(MinDist, cols=[merged_dm.Trial_ID], mindist=2)

    # Enforce the constraints
    merged_dm = ef.enforce()

    # Add the first fillers:
    merged_dm = dm_first << merged_dm
    # Add the last fillers:
    merged_dm = merged_dm << dm_last

    io.writetxt(merged_dm, "FINAL.csv")