Example #1
0
def test_bids_eventsfile():
    """Check that the BIDS events file sourced from the database matches the reference CSV."""
    from labbookdb.report.tracking import bids_eventsfile
    import pandas as pd

    df = bids_eventsfile(DB_PATH, 'chr_longSOA')
    # Use a distinct name for the reference path so it does not shadow the
    # imported `bids_eventsfile` function.
    reference_file = path.join(DATA_DIR, 'bids_eventsfile.csv')
    df_ = pd.read_csv(reference_file, index_col=0)

    # Compare only the columns the database sourcing is expected to reproduce.
    assert df[['onset', 'duration']].equals(df_[['onset', 'duration']])
Example #2
0
def write_bids_events_file(scan_dir,
	db_path="~/syncdata/meta.db",
	metadata_file='',
	out_file="events.tsv",
	prefer_labbookdb=False,
	timecourse_file='',
	task='',
	forced_dummy_scans=0.,
	):
	"""Adjust a BIDS event file to reflect delays introduced after the trigger and before the scan onset.

	Parameters
	----------

	scan_dir : str
		ParaVision scan directory path.
	db_path : str, optional
		LabbookDB database file path from which to source the events profile for the identifier assigned to the `task` parameter.
	metadata_file : str, optional
		Path to a BIDS metadata (JSON sidecar) file.
	out_file : str, optional
		Path to which to write the adjusted events file.
	prefer_labbookdb : bool, optional
		Whether to query the events file in the LabbookDB database file first (rather than look for the events file in the scan directory).
	timecourse_file : str, optional
		Path to a NIfTI file; its repetition time is used to convert dummy-scan counts into seconds.
	task : str, optional
		Task identifier from a LabbookDB database.
	forced_dummy_scans : float, optional
		If nonzero, shift onsets by this many repetition times, overriding any metadata-derived delay.

	Returns
	-------

	str : Path to which the adjusted events file was saved ('/dev/null' if no events source was available).
	"""

	import json
	import os
	import pandas as pd
	import nibabel as nib

	def _events_from_files():
		# Prefer a "*sequence*tsv" file in the scan directory; otherwise look
		# for a sibling "*_events.tsv" file derived from the timecourse name.
		scan_dir_contents = os.listdir(scan_dir)
		sequence_files = [i for i in scan_dir_contents if ("sequence" in i and "tsv" in i)]
		if sequence_files:
			sequence_file = os.path.join(scan_dir, sequence_files[0])
		else:
			timecourse_dir = os.path.dirname(timecourse_file)
			timecourse_name = os.path.basename(timecourse_file)
			stripped_name = timecourse_name.split('.', 1)[0].rsplit('_', 1)[0]
			sequence_file = os.path.join(timecourse_dir, stripped_name + '_events.tsv')
		# Raw string fixes the invalid "\s" escape; separator semantics unchanged.
		return pd.read_csv(sequence_file, sep=r"\s", engine='python')

	def _events_from_db():
		# Returns None when the database file does not exist; raises
		# ImportError when labbookdb is not installed.
		if not os.path.isfile(db_path):
			return None
		from labbookdb.report.tracking import bids_eventsfile
		return bids_eventsfile(db_path, task)

	out_file = os.path.abspath(os.path.expanduser(out_file))
	scan_dir = os.path.abspath(os.path.expanduser(scan_dir))
	db_path = os.path.abspath(os.path.expanduser(db_path))

	if not prefer_labbookdb:
		try:
			mydf = _events_from_files()
		except Exception:
			# Best-effort fallback: if file-based sourcing fails for any
			# reason, try the database instead.
			mydf = _events_from_db()
			if mydf is None:
				return '/dev/null'
	else:
		try:
			mydf = _events_from_db()
			if mydf is None:
				return '/dev/null'
		except ImportError:
			# labbookdb is not installed; fall back to file-based sourcing.
			mydf = _events_from_files()

	# The repetition time is the last zoom entry of the NIfTI header.
	timecourse_file = os.path.abspath(os.path.expanduser(timecourse_file))
	timecourse = nib.load(timecourse_file)
	tr = float(timecourse.header.get_zooms()[-1])

	delay = 0.
	if forced_dummy_scans:
		delay = forced_dummy_scans * tr
	elif metadata_file:
		metadata_file = os.path.abspath(os.path.expanduser(metadata_file))
		with open(metadata_file) as metadata_fh:
			metadata = json.load(metadata_fh)
		# Missing metadata keys simply contribute no delay.
		delay += metadata.get('NumberOfVolumesDiscardedByScanner', 0) * tr
		delay += metadata.get('DelayAfterTrigger', 0)
	mydf['onset'] = mydf['onset'] - delay

	mydf.to_csv(out_file, sep='\t', index=False)

	return out_file
Example #3
0
def write_bids_events_file(
    scan_dir,
    db_path="~/syncdata/meta.db",
    metadata_file='',
    out_file="events.tsv",
    prefer_labbookdb=False,
    timecourse_file='',
    task='',
    forced_dummy_scans=0.,
):
    """Adjust a BIDS event file to reflect delays introduced after the trigger and before the scan onset.

    Parameters
    ----------

    scan_dir : str
        ParaVision scan directory path.
    db_path : str, optional
        LabbookDB database file path from which to source the events profile for the identifier assigned to the `task` parameter.
    metadata_file : str, optional
        Path to a BIDS metadata (JSON sidecar) file.
    out_file : str, optional
        Path to which to write the adjusted events file.
    prefer_labbookdb : bool, optional
        Whether to query the events file in the LabbookDB database file first (rather than look for the events file in the scan directory).
    timecourse_file : str, optional
        Path to a NIfTI file; its repetition time is used to convert dummy-scan counts into seconds.
    task : str, optional
        Task identifier from a LabbookDB database.
    forced_dummy_scans : float, optional
        If nonzero, shift onsets by this many repetition times, overriding any metadata-derived delay.

    Returns
    -------

    str : Path to which the adjusted events file was saved ('/dev/null' if no events source was available).
    """

    import json
    import os
    import pandas as pd
    import nibabel as nib

    def _events_from_files():
        # Prefer a "*sequence*tsv" file in the scan directory; otherwise look
        # for a sibling "*_events.tsv" file derived from the timecourse name.
        scan_dir_contents = os.listdir(scan_dir)
        sequence_files = [
            i for i in scan_dir_contents
            if ("sequence" in i and "tsv" in i)
        ]
        if sequence_files:
            sequence_file = os.path.join(scan_dir, sequence_files[0])
        else:
            timecourse_dir = os.path.dirname(timecourse_file)
            timecourse_name = os.path.basename(timecourse_file)
            stripped_name = timecourse_name.split('.', 1)[0].rsplit('_', 1)[0]
            sequence_file = os.path.join(timecourse_dir,
                                         stripped_name + '_events.tsv')
        # Raw string fixes the invalid "\s" escape; separator semantics unchanged.
        return pd.read_csv(sequence_file, sep=r"\s", engine='python')

    def _events_from_db():
        # Returns None when the database file does not exist; raises
        # ImportError when labbookdb is not installed.
        if not os.path.isfile(db_path):
            return None
        from labbookdb.report.tracking import bids_eventsfile
        return bids_eventsfile(db_path, task)

    out_file = os.path.abspath(os.path.expanduser(out_file))
    scan_dir = os.path.abspath(os.path.expanduser(scan_dir))
    db_path = os.path.abspath(os.path.expanduser(db_path))

    if not prefer_labbookdb:
        try:
            mydf = _events_from_files()
        except Exception:
            # Best-effort fallback: if file-based sourcing fails for any
            # reason, try the database instead.
            mydf = _events_from_db()
            if mydf is None:
                return '/dev/null'
    else:
        try:
            mydf = _events_from_db()
            if mydf is None:
                return '/dev/null'
        except ImportError:
            # labbookdb is not installed; fall back to file-based sourcing.
            mydf = _events_from_files()

    # The repetition time is the last zoom entry of the NIfTI header.
    timecourse_file = os.path.abspath(os.path.expanduser(timecourse_file))
    timecourse = nib.load(timecourse_file)
    tr = float(timecourse.header.get_zooms()[-1])

    delay = 0.
    if forced_dummy_scans:
        delay = forced_dummy_scans * tr
    elif metadata_file:
        metadata_file = os.path.abspath(os.path.expanduser(metadata_file))
        with open(metadata_file) as metadata_fh:
            metadata = json.load(metadata_fh)
        # Missing metadata keys simply contribute no delay.
        delay += metadata.get('NumberOfVolumesDiscardedByScanner', 0) * tr
        delay += metadata.get('DelayAfterTrigger', 0)
    mydf['onset'] = mydf['onset'] - delay

    mydf.to_csv(out_file, sep='\t', index=False)

    return out_file