def process_phantom_session( interface, project, subject, session, label, force_updates=False ):
    """Run ADNI phantom QA for one XNAT session unless results already exist.

    Looks for a T1-weighted scan (mprage/t1spgr) in the session, extracts its
    DICOM directory from the scan's XML representation, and hands it to
    run_phantom_qa. The work is skipped when all three QA output files are
    already present, unless force_updates is set.
    """
    # Get the experiment object
    experiment = interface.select.experiment( session )

    # First, see if the QA files are already there
    files = experiment.resources().files().get()
    if force_updates or not (('t1.nii.gz' in files) and ('phantom.xml' in files) and ('phantom.nii.gz' in files)):
        dicom_path=''

        # Get list of all scans in the session
        scans = experiment.scans().get()
        for scan in scans:
            # Check only 'usable' scans with the proper name
            # NOTE(review): 'quality' is fetched but never tested below, so the
            # "usable" filter promised by this comment is not actually applied
            # - confirm whether unusable scans should be skipped here.
            [scan_type,quality] = experiment.scan( scan ).attrs.mget( ['type', 'quality'] )
            if ('mprage' in scan_type) or ('t1spgr' in scan_type):
                # Extract the DICOM file directory from the XML representation
                match = re.match( '.*(/fs/storage/XNAT/.*)scan_.*_catalog.xml.*', experiment.scan( scan ).get(), re.DOTALL )
                if match:
                    # If several scans qualify, the last match wins
                    dicom_path = match.group(1)

        if dicom_path:
            # If we found a matching scan, run the QA
            run_phantom_qa( interface, project, subject, session, label, dicom_path )
        else:
            # If there was no matching scan in the session, print a warning
            warning = "WARNING: ADNI phantom session: {}, experiment: {}, subject: {} does not have \
                       a usable T1-weighted scan".format(session, experiment, subject)
            sibis.logging(hashlib.sha1('t1_qa_functions').hexdigest()[0:6], warning,
                          script='t1_qa_functions')
def import_stroop_to_redcap( xnat, stroop_eid, stroop_resource, stroop_file, redcap_token, redcap_key, verbose=False, no_upload=False ):
    if verbose:
        print "Importing Stroop data from file %s:%s" % ( stroop_eid, stroop_file )

    # Download Stroop file from XNAT into temporary directory
    experiment = xnat.select.experiment( stroop_eid )
    tempdir = tempfile.mkdtemp()
    stroop_file_path = experiment.resource( stroop_resource ).file( stroop_file ).get_copy( os.path.join( tempdir, stroop_file ) )

    # Convert downloaded Stroop file to CSV scores file
    added_files = []
    try:
        added_files = subprocess.check_output( [ os.path.join( import_bindir, "stroop2csv" ), '--mr-session', '--record', redcap_key[0], '--event', redcap_key[1], stroop_file_path, tempdir ] )
    except:
        pass

    if len( added_files ):
        if not no_upload:
            # Upload CSV file(s) (should only be one anyway)
            for file in added_files.split( '\n' ):
                if re.match( '.*\.csv$', file ):
                    if verbose:
                        print "Uploading ePrime Stroop scores",file
                    subprocess.call( [ os.path.join( bindir, 'csv2redcap' ), file ] )
            # Upload original ePrime file for future reference
            if verbose:
                print "Uploading ePrime Stroop file",stroop_file_path
            subprocess.check_output( [ os.path.join( import_bindir, "eprime2redcap" ), "--api-key", redcap_token, '--record', redcap_key[0], '--event', redcap_key[1], stroop_file_path, 'mri_stroop_log_file' ] )
    else:
        error = "ERROR: could not convert Stroop file %s:%s" % ( xnat_eid, stroop_file )
        sibis.logging(xnat_eid,error,
                      stroop_file = stroop_file)

    shutil.rmtree( tempdir )
def check_xml_file( xml_file, project, session, label ):
    # Parse a phantom QA XML report and collect quality warnings; any warnings
    # found are reported in one batch via sibis.logging at the end.
    #
    # Parameters:
    #   xml_file - path to the XML report produced by cmtk detect_adni_phantom
    #   project  - XNAT project ID (logging context)
    #   session  - XNAT session ID (logging context)
    #   label    - session label, used as the sibis logging key
    xml = open( xml_file, 'r' )

    warnings = []
    try:
        for line in xml:
            # Check fallbacks triggered
            if 'fallbackOrientationCNR' in line:
                warnings.append( "CNR spheres used for orientation - problem detecting 15mm spheres?" )
            if 'fallbackCentroidCNR' in line:
                match = re.match( '^.*distance="([0-9]+\.[0-9]+)".*$', line )
                # BUGFIX: guard against a missing "distance" attribute; the
                # unconditional match.group(1) raised AttributeError, which the
                # blanket except below mis-reported as a file-open failure.
                if match:
                    distance = float( match.group(1) )
                    if distance > 3.0:
                        warnings.append( "CNR spheres used for centroid location (distance to SNR center = %f mm) - problem with the SNR sphere?" % distance )

            # Check number of landmarks
            match = re.match( '<landmarkList.*count="([0-9]+)">', line )
            if match:
                count = int( match.group(1) )
                if ( count < 165 ):
                    # BUGFIX: this format string takes a single value; passing
                    # (project,session,count) raised TypeError at runtime.
                    warnings.append( "Landmark count=%d" % count )

            # Check SNR
            match = re.match( '<snr>([0-9]*\.[0-9]*)</snr>', line )
            if match:
                snr = float( match.group(1) )
                if ( snr < 50 ):
                    # BUGFIX: same mismatched-argument problem as above.
                    warnings.append( "Low SNR=%f" % snr )

            # Check scale
            match = re.match( '<scale>([0-9]*\.[0-9]*)\s+([0-9]*\.[0-9]*)\s+([0-9]*\.[0-9]*)</scale>', line )
            if match:
                for idx in [0,1,2]:
                    scale = float( match.group( idx+1 ) )
                    if ( (scale < 0.99) or (scale > 1.01) ):
                        # BUGFIX: format expects (idx, scale) only.
                        warnings.append( "Non-unit scale[%d]=%f" % (idx,scale) )

            # Check nonlinearity
            match = re.match( '<nonlinear>([0-9]*\.[0-9]*)\s+([0-9]*\.[0-9]*)\s+([0-9]*\.[0-9]*)</nonlinear>', line )
            if match:
                for idx in [0,1,2]:
                    nonlinear = float( match.group( idx+1 ) )
                    if ( (nonlinear > 0.5) ):
                        # BUGFIX: format expects (idx, nonlinear) only.
                        warnings.append( "Nonlinearity[%d]=%f" % (idx,nonlinear) )
    except:
         # NOTE(review): any parsing error ends up reported with this
         # open-failure message; kept unchanged for log compatibility.
         error='Could not open XML file for experiment.'
         sibis.logging(session,error,
                     project_id=project)


    finally:
        xml.close()

    # Print warnings if there were any
    if len( warnings ) > 0:
        warning = " ".join(warnings)
        sibis.logging(label, warning,
                      session_id=session,
                      project=project,
                      script='t1_qa_functions')
Exemple #4
0
def check_for_stroop(xnat, xnat_eid_list, verbose=False):
    """Search the given XNAT experiments for the Stroop task .txt file.

    Returns a (eid, resource, file_path) triple for the unique match, or
    (None, None, None) when there is no match or the match is ambiguous.
    """
    name_pattern = '^NCANDAStroopMtS_3cycles_7m53stask_.*.txt$'
    matches = []
    for eid in xnat_eid_list:
        exp = xnat.select.experiment(eid)

        # Walk every resource of this experiment looking for Stroop logs
        for res in exp.resources().get():
            json_uri = ('/data/experiments/%s/resources/%s/files?format=json'
                        % (eid, res))
            for entry in xnat._get_json(json_uri):
                if re.match(name_pattern, entry['Name']):
                    matches.append(
                        (eid, res, re.sub('.*\/files\/', '', entry['URI'])))

    # Nothing found - caller treats this as "no Stroop data"
    if not matches:
        return (None, None, None)

    # Ambiguous: complain about every experiment, then bail out
    if len(matches) > 1:
        error = "ERROR: experiment have/has more than one Stroop .txt file. Please make sure there is exactly one per session."
        for eid in xnat_eid_list:
            sibis.logging(eid, error)
        return (None, None, None)

    return matches[0]
def export_series( xnat, session_and_scan_list, to_directory, filename_pattern, verbose=False ):
    """Export one or more DICOM scan series from XNAT as image files.

    session_and_scan_list is a space-separated list of "SESSION/SCAN" tokens.
    Returns True when the cmtk dcm2image conversion ran and the EID marker
    file was written (or write was attempted), False when there was nothing
    to do, the export is already up to date, or conversion failed.
    """
    # List should have at least one "SESSION/SCAN" entry
    if not '/' in session_and_scan_list:
        return False

    # Put together target directory and filename pattern
    to_path_pattern = os.path.join( to_directory, filename_pattern )

    # If filename is a pattern with substitution, check whether entire directory exists
    if '%' in filename_pattern:
        eid_file_path = os.path.join( to_directory, 'eid' )
        if os.path.exists( to_directory ):
            # Skip export when the existing 'eid' marker matches this request
            if check_eid_file( eid_file_path, session_and_scan_list ):
                return False
    else:
        # Single-file target: derive the .eid marker name from the file name
        eid_file_path = re.sub( '\.[^/]*', '.eid', to_path_pattern )
        if os.path.exists( to_path_pattern ) or os.path.exists( to_path_pattern + '.gz' ):
            if check_eid_file( eid_file_path, session_and_scan_list ):
                return False

    dicom_path_list = []
    for session_and_scan in session_and_scan_list.split( ' ' ):
        [ session, scan ] = session_and_scan.split( '/' )
        # Pull the DICOM directory out of the scan's XML catalog reference
        match = re.match( '.*(/fs/storage/XNAT/.*)scan_.*_catalog.xml.*', xnat.select.experiment( session ).scan( scan ).get(), re.DOTALL )
        if match:
            dicom_path = match.group(1)
            if not os.path.exists( dicom_path ):
                # Fall back to the alternate mount point of the XNAT archive
                dicom_path = re.sub( 'storage/XNAT', 'ncanda-xnat', dicom_path )

            dicom_path_list.append( dicom_path )

    dcm2image_output = None
    if len( dicom_path_list ):
        try:
            # Convert all collected DICOM directories in a single cmtk call
            dcm2image_command = 'cmtk dcm2image --tolerance 1e-3 --write-single-slices --no-progress -rxO %s %s 2>&1' % ( to_path_pattern, ' '.join( dicom_path_list ) )

            if ( verbose ):
                print dcm2image_command

            dcm2image_output = subprocess.check_output( dcm2image_command, shell=True )
        except:
            # On conversion failure, preserve the converter's output in a .log file
            if dcm2image_output:
                output_file = open( to_path_pattern + '.log' , 'w' )
                try:
                    output_file.writelines( dcm2image_output )
                except:
                    print dcm2image_output
                finally:
                    output_file.close()
            return False

        try:
            # Record which session/scan list produced this export
            open( eid_file_path, 'w' ).writelines( session_and_scan_list )
        except:
            error = "ERROR: unable to write EID file"
            sibis.logging(eid_file_path,error)

        return True
    return False
Exemple #6
0
def do_export_spiral_files(xnat, resource_location, to_directory, spiral_nifti_out, tmpdir, verbose=None):
    """Download a spiral P-file tarball from XNAT and convert it to NIfTI.

    resource_location is an "EID/RESOURCE_ID/FILENAME" string. Returns True
    on success; on any failure logs the problem via sibis and returns False.
    """
    # Do the actual export using a temporary directory that is managed by the caller
    # (simplifies its removal regardless of exit taken)
    [xnat_eid, resource_id, resource_file_bname] = resource_location.split('/')
    tmp_file_path = xnat.select.experiment(xnat_eid).resource(resource_id).file(resource_file_bname).get_copy(os.path.join(tmpdir, "pfiles.tar.gz"))

    errcode, stdout, stderr = untar_to_dir(tmp_file_path, tmpdir)
    if errcode != 0:
        error="ERROR: Unable to un-tar resource file. File is likely corrupt."
        sibis.logging(xnat_eid,error,
                     tempfile_path=tmp_file_path,
                     resource_location=resource_location)
        if verbose:
            print "StdErr:\n{}".format(stderr)
            print "StdOut:\n{}".format(stdout)
        return False

    # Exactly one spiral data file (E*P*.7) is expected in the archive
    spiral_E_files = glob_for_files_recursive(tmpdir, pattern="E*P*.7")
    if len(spiral_E_files) > 1:
        error = "ERROR: more than one E file found"
        sibis.logging(xnat_eid,error,
                      spiral_e_files = ', '.join(spiral_E_files))
        return False

    # An optional physio recording may accompany the spiral data
    physio_files = glob_for_files_recursive(tmpdir, pattern="P*.physio")
    if len(physio_files) > 1:
        error = 'More than one physio file found.'
        sibis.logging(xnat_eid,error,
                     tmp_file_path=tmp_file_path,
                     physio_files=physio_files)
        return False

    if len(spiral_E_files) == 1:
        # Make directory first
        spiral_dir_out = os.path.join(to_directory, 'native')
        if not os.path.exists(spiral_dir_out):
            os.makedirs(spiral_dir_out)
        # Now try to make the NIfTI
        errcode, stdout, stderr = make_nifti_from_spiral(spiral_E_files[0], spiral_nifti_out)
        if not os.path.exists(spiral_nifti_out):
            error="Unable to make NIfTI from resource file, please try running makenifti manually"
            sibis.logging(xnat_eid, error,
                         spiral_file=spiral_E_files[0])
            if verbose:
                print "StdErr:\n{}".format(stderr)
                print "StdOut:\n{}".format(stdout)
            return False
    else:
        error = "ERROR: no spiral data file found"
        sibis.logging(xnat_eid,error,
                      resource_location=resource_location)
        return False

    if len(physio_files) == 1:
        spiral_physio_out = os.path.join(to_directory, 'native', 'physio')
        shutil.copyfile(physio_files[0], spiral_physio_out)
        # NOTE(review): "gzip" here is presumably a project helper that
        # compresses the file in place - the stdlib gzip module is not
        # callable; confirm against the file's imports.
        gzip(spiral_physio_out)
    return True
def do_export_spiral_files(xnat, resource_location, to_directory, spiral_nifti_out, tmpdir, verbose=None):
    # Do the actual export using a temporary directory that is managed by the caller
    # (simplifies its removal regardless of exit taken)
    [xnat_eid, resource_id, resource_file_bname] = resource_location.split('/')
    tmp_file_path = xnat.select.experiment(xnat_eid).resource(resource_id).file(resource_file_bname).get_copy(os.path.join(tmpdir, "pfiles.tar.gz"))

    errcode, stdout, stderr = untar_to_dir(tmp_file_path, tmpdir)
    if errcode != 0:
        error="ERROR: Unable to un-tar resource file. File is likely corrupt."
        sibis.logging(xnat_eid,error,
                     tempfile_path=tmp_file_path,
                     resource_location=resource_location)
        if verbose:
            print "StdErr:\n{}".format(stderr)
            print "StdOut:\n{}".format(stdout)
        return False

    spiral_E_files = glob_for_files_recursive(tmpdir, pattern="E*P*.7")
    if len(spiral_E_files) > 1:
        error = "ERROR: more than one E file found"
        sibis.logging(xnat_eid,error,
                      spiral_e_files = ', '.join(spiral_E_files))
        return False

    physio_files = glob_for_files_recursive(tmpdir, pattern="P*.physio")
    if len(physio_files) > 1:
        error = 'More than one physio file found.'
        sibis.logging(xnat_eid,error,
                     tmp_file_path=tmp_file_path,
                     physio_files=physio_files)
        return False

    if len(spiral_E_files) == 1:
        # Make directory first
        spiral_dir_out = os.path.join(to_directory, 'native')
        if not os.path.exists(spiral_dir_out):
            os.makedirs(spiral_dir_out)
        # Now try to make the NIfTI
        errcode, stdout, stderr = make_nifti_from_spiral(spiral_E_files[0], spiral_nifti_out)
        if not os.path.exists(spiral_nifti_out):
            error="Unable to make NIfTI from resource file, please try running makenifti manually"
            sibis.logging(xnat_eid, error,
                         spiral_file=spiral_E_files[0])
            if verbose:
                print "StdErr:\n{}".format(stderr)
                print "StdOut:\n{}".format(stdout)
            return False
    else:
        error = "ERROR: no spiral data file found"
        sibis.logging(xnat_eid,error,
                      resource_location=resource_location)
        return False

    if len(physio_files) == 1:
        spiral_physio_out = os.path.join(to_directory, 'native', 'physio')
        shutil.copyfile(physio_files[0], spiral_physio_out)
        gzip(spiral_physio_out)
    return True
def run_phantom_qa( interface, project, subject, session, label, dicom_path ):
    # Make a temporary directory
    temp_dir = tempfile.mkdtemp()

    # Switch to temp directory
    original_wd = os.getcwd()
    os.chdir( temp_dir )

    # Create NIFTI file from the DICOM files
    nii_file = 't1.nii.gz'
    subprocess.call( 'cmtk dcm2image --tolerance 1e-3 -rO %s %s >& /dev/null' % ( nii_file, dicom_path ), shell=True )
    if not os.path.exists( nii_file ):
        error = "ERROR: NIFTI file was not created from DICOM files experiment"
        sibis.logging('{}/{}'.format(project,session),error,
                         session = session,
                         project = project,
                         nii_file = nii_file,
                         dicom_path = dicom_path)
        return

    # Upload NIFTI file
    try:
        file = interface.select.project( project ).subject( subject ).experiment( session ).resource('QA').file( nii_file )
        file.insert( nii_file, format='nifti_gz', tags='qa,adni,nifti_gz', content='ADNI Phantom QA File', overwrite=True )
    except:
        print "Something bad happened uploading file %s to Experiment %s/%s/%s" % (fname,project,session,label)

    # Run the PERL QA script and capture its output
    xml_file = 'phantom.xml'
    lbl_file = 'phantom.nii.gz'
    subprocess.call( 'cmtk detect_adni_phantom --tolerant --refine-xform --erode-snr 15 --write-labels %s %s %s' % ( lbl_file, nii_file, xml_file ), shell=True )
    if not os.path.exists( xml_file ) or not os.path.exists( lbl_file ):
        error = "ERROR: mandatory output file (either xml or label image) was not created from file %s, experiment %s/%s/%s" % ( nii_file,project,session,label )
        sibis.logging('{}/{}/{}'.format(project,session,label),error,
                       nii_file=nii_file,
                       project = project,
                       session = session,
                       label= label)
        return

    # Upload phantom files to XNAT
    for (fname,fmt) in [ (xml_file, 'xml'), (lbl_file, 'nifti_gz') ]:
        try:
            file = interface.select.project( project ).subject( subject ).experiment( session ).resource('QA').file( fname )
            file.insert( fname, format=fmt, tags='qa,adni,%s' % fmt, content='ADNI Phantom QA File', overwrite=True )
        except:
            print "Something bad happened uploading file %s to Experiment %s/%s" % (fname,project,session)

    # Read and evaluate phantom XML file
    check_xml_file( xml_file, project, session, label )

    # Clean up - remove temp directory
    os.chdir( original_wd )
    shutil.rmtree( temp_dir )
def verify_image_count( session, session_label, scan, scantype, manufacturer, images_created ):
    # Warn (via sibis.logging) when the image count produced for a scan does
    # not fall in the expected range for this manufacturer and scan type.
    # Unknown manufacturers or scan types pass silently.
    if manufacturer not in expected_images:
        return
    if scantype not in expected_images[manufacturer]:
        return
    imgrange = expected_images[manufacturer][scantype]
    if images_created not in imgrange:
        error = 'WARNING: Scan found more images than expected.'
        sibis.logging(session_label, error,
            session = session,
            scan = scan,
            scan_type = scantype,
            images_created = images_created,
            expected_images = expected_images)
Exemple #10
0
def import_stroop_to_redcap(xnat,
                            stroop_eid,
                            stroop_resource,
                            stroop_file,
                            redcap_token,
                            redcap_key,
                            verbose=False,
                            no_upload=False):
    if verbose:
        print "Importing Stroop data from file %s:%s" % (stroop_eid,
                                                         stroop_file)

    # Download Stroop file from XNAT into temporary directory
    experiment = xnat.select.experiment(stroop_eid)
    tempdir = tempfile.mkdtemp()
    stroop_file_path = experiment.resource(stroop_resource).file(
        stroop_file).get_copy(os.path.join(tempdir, stroop_file))

    # Convert downloaded Stroop file to CSV scores file
    added_files = []
    try:
        added_files = subprocess.check_output([
            os.path.join(import_bindir,
                         "stroop2csv"), '--mr-session', '--record',
            redcap_key[0], '--event', redcap_key[1], stroop_file_path, tempdir
        ])
    except:
        pass

    if len(added_files):
        if not no_upload:
            # Upload CSV file(s) (should only be one anyway)
            for file in added_files.split('\n'):
                if re.match('.*\.csv$', file):
                    if verbose:
                        print "Uploading ePrime Stroop scores", file
                    subprocess.call([os.path.join(bindir, 'csv2redcap'), file])
            # Upload original ePrime file for future reference
            if verbose:
                print "Uploading ePrime Stroop file", stroop_file_path
            subprocess.check_output([
                os.path.join(import_bindir, "eprime2redcap"), "--api-key",
                redcap_token, '--record', redcap_key[0], '--event',
                redcap_key[1], stroop_file_path, 'mri_stroop_log_file'
            ])
    else:
        error = "ERROR: could not convert Stroop file %s:%s" % (xnat_eid,
                                                                stroop_file)
        sibis.logging(xnat_eid, error, stroop_file=stroop_file)

    shutil.rmtree(tempdir)
Exemple #11
0
def verify_image_count(session, session_label, scan, scantype, manufacturer,
                       images_created):
    # Compare the number of images created for a scan against the expected
    # range for this manufacturer / scan-type pair; log a warning on mismatch.
    # Unknown manufacturers or scan types are accepted without comment.
    if manufacturer not in expected_images:
        return
    ranges_for_make = expected_images[manufacturer]
    if scantype not in ranges_for_make:
        return
    if images_created not in ranges_for_make[scantype]:
        error = 'WARNING: Scan found more images than expected.'
        sibis.logging(session_label,
                      error,
                      session=session,
                      scan=scan,
                      scan_type=scantype,
                      images_created=images_created,
                      expected_images=expected_images)
Exemple #12
0
def run_subject_qa(interface, project, subject, session, scan_number,
                   dicom_path):
    # Make a temporary directory
    temp_dir = tempfile.mkdtemp()

    # Make XML file as wrapper for the DICOM files
    bxh_file = '%s/dicoms.bxh' % temp_dir
    subprocess.call('dicom2bxh %s/* %s >& /dev/null' % (dicom_path, bxh_file),
                    shell=True)
    if not os.path.exists(bxh_file):
        error = "ERROR: BXH file was not created from DICOM files"
        sibis.logging(dicom_path, error, bxh_file=bxh_file)
    return

    # Run the PERL QA script and capture its output
    html_dir = '%s/html' % temp_dir
    script_output = os.popen('fmriqa_generate.pl %s %s 2> /dev/null' %
                             (bxh_file, html_dir)).readlines()
    if not os.path.exists(html_dir):
        error = "ERROR: html directory was not created from BXH file"
        sibis.logging(html_dir, error, bxh_file=bxh_file)
        return

    # Convert QA results from html to pdf
    summary_file_path = '%s/QA-Summary.pdf' % temp_dir
    subprocess.call(
        'htmldoc --webpage --browserwidth 1024 --no-title --no-toc --compression=9 --outfile %s %s/index.html >& /dev/null'
        % (summary_file_path, html_dir),
        shell=True)
    if os.path.exists(summary_file_path):
        # Upload QA files to XNAT as file resources
        try:
            qa_file = interface.select.project(project).subject(
                subject).experiment(session).resource('QA').file(
                    'QA-%s-Summary.pdf' % scan_number)
            qa_file.insert(summary_file_path,
                           format='pdf',
                           tags='qa,fbirn,pdf',
                           content='QA Analysis Summary',
                           overwrite=True)
        except:
            print "Something bad happened uploading QA summary file to Experiment %s" % session
    else:
        print "Unable to create PDF QA summary file %s" % summary_file_path

    # Clean up - remove temp directory
    shutil.rmtree(temp_dir)
Exemple #13
0
def main(args):
    # Run all per-visit REDCap QC checks; write the collected errors to a CSV
    # file when args.csvdir is given, otherwise log each one through sibis.
    project_entry = get_project_entry()
    project_df = data_entry_fields(fields, project_entry, args.visit)
    error = []

    for idx, row in project_df.iterrows():
        for f in form_fields:
            check(missing_form(idx, row, f[0], f[1]), error)
        for np in np_gpeg_fields:
            check(np_groove_check(idx, row, 'np_gpeg_missing', np[0], np[1]),
                  error)
        check(fourteen_days_mri_report(idx, row), error)
        check(cnp_dob(idx, row), error)
        check(missing_mri_stroop(idx, row), error)
        for s in saliva_fields:
            check(missing_saliva_sample(idx, row, s[0], s[1]), error)
        check(visit_data_missing(idx, row), error)
        check(wais_score_verification(idx, row), error)
        for f in fields_sex:
            check(youth_report_sex(idx, row, f[0], f[1]), error)

    if args.csvdir:
        # BUGFIX: the original removed entries from `error` while iterating
        # over it, which skips the element following each removal; build a
        # filtered list instead.
        error = [e for e in error if e != 'null']
        f = csv.writer(open(args.csvdir, "wb+"))
        f.writerow(["subject_site_id", "visit_date", "event_name", "error"])
        for x in error:
            f.writerow([
                x["subject_site_id"], x["visit_date"], x["event_name"],
                x["error"]
            ])
    else:
        for e in error:
            if e != 'null':
                sibis.logging("{}-{}".format(e['subject_site_id'],
                                             e['visit_date']),
                              e['error'],
                              e_dictionary=e)
def check_for_stroop( xnat, xnat_eid_list, verbose=False ):
    # Find the unique Stroop task .txt file among the given experiments.
    # Returns (eid, resource, file_path) or (None, None, None) when there is
    # no match or the match is ambiguous.
    stroop_files = []
    for xnat_eid in xnat_eid_list:
        experiment = xnat.select.experiment( xnat_eid )

        # Get list of resource files that match the Stroop file name pattern
        for resource in  experiment.resources().get():
            resource_files = xnat._get_json( '/data/experiments/%s/resources/%s/files?format=json' % ( xnat_eid, resource ) );
            stroop_files += [ (xnat_eid, resource, re.sub( '.*\/files\/', '', file['URI']) ) for file in resource_files if re.match( '^NCANDAStroopMtS_3cycles_7m53stask_.*.txt$', file['Name'] ) ]

    # No matching files - nothing to do
    if len( stroop_files ) == 0:
        return (None, None, None)

    # Get first file from list, warn if more files
    if len( stroop_files ) > 1:
        error = "ERROR: experiment have/has more than one Stroop .txt file. Please make sure there is exactly one per session."
        for xnat_eid in xnat_eid_list:
            sibis.logging(xnat_eid,error)
        # BUGFIX: this line was indented with a literal tab, a SyntaxError
        # under Python 3 and ambiguous mixed indentation under Python 2; it
        # belongs to the ambiguity branch, after all experiments are logged.
        return (None, None, None)

    return stroop_files[0]
def main(args):
    # Run all per-visit REDCap QC checks and either write the collected
    # errors to a CSV file (args.csvdir) or log them through sibis.
    project_entry = get_project_entry()
    project_df = data_entry_fields(fields,project_entry,args.visit)
    error = []

    for idx, row in project_df.iterrows():
        for f in form_fields:
            check(missing_form(idx,row,f[0],f[1]),error)
        for np in np_gpeg_fields:
            check(np_groove_check(idx,row,'np_gpeg_missing',np[0],np[1]),error)
        check(fourteen_days_mri_report(idx,row),error)
        check(cnp_dob(idx, row),error)
        check(missing_mri_stroop(idx, row),error)
        for s in saliva_fields:
            check(missing_saliva_sample(idx,row,s[0],s[1]),error)
        check(visit_data_missing(idx,row),error)
        check(wais_score_verification(idx,row),error)
        for f in fields_sex:
            check(youth_report_sex(idx,row,f[0],f[1]),error)

    if args.csvdir:
        # BUGFIX: the original removed items from `error` while iterating over
        # the same list, which skips the element after each removal; filter
        # into a new list instead.
        error = [e for e in error if e != 'null']
        with open(args.csvdir, 'wb+') as fi:
            f = csv.writer(fi)
            f.writerow(["subject_site_id", "visit_date", "event_name", "error"])
            for x in error:
               f.writerow([x["subject_site_id"],
                           x["visit_date"],
                           x["event_name"],
                           x["error"]])
    else:
        for e in error:
            if e != 'null':
                sibis.logging("{}-{}".format(e['subject_site_id'], e['visit_date']), e['error'],e_dictionary=e)
def get_all_gradients(session_label, dti_stack, decimals=None):
    """
    Parses a list of dti sidecar files for subject.

    Failures to extract a gradient table from an individual sidecar are
    logged via sibis and that frame is skipped.

    Returns
    =======
    np.ndarray of the per-frame gradient tables
    """
    gradients_per_frame = list()
    for xml_path in dti_stack:
        xml_sidecar = read_xml_sidecar(xml_path)
        try:
            gradients_per_frame.append(get_gradient_table(xml_sidecar,
                                                          decimals=decimals))
        except Exception as e:
            sibis.logging(session_label,
                          'ERROR: Could not get gradient table from xml sidecar',
                          script='xnat/check_gradient_tables.py',
                          sidecar=str(xml_sidecar),
                          xml_path=xml_path,
                          error=str(e))
    # BUGFIX: convert once, after the loop. The original converted inside the
    # try on every iteration (O(n^2)) and left gradients_as_array unbound
    # (NameError on return) when dti_stack was empty or the first sidecar
    # failed.
    return np.asanyarray(gradients_per_frame)
def run_subject_qa( interface, project, subject, session, scan_number, dicom_path ):
    # Make a temporary directory
    temp_dir = tempfile.mkdtemp()

    # Make XML file as wrapper for the DICOM files
    bxh_file = '%s/dicoms.bxh' % temp_dir
    subprocess.call( 'dicom2bxh %s/* %s >& /dev/null' % ( dicom_path, bxh_file ), shell=True )
    if not os.path.exists( bxh_file ):
        error = "ERROR: BXH file was not created from DICOM files"
        sibis.logging(dicom_path,error,
                  bxh_file = bxh_file)
    return

    # Run the PERL QA script and capture its output
    html_dir = '%s/html' % temp_dir
    script_output = os.popen( 'fmriqa_generate.pl %s %s 2> /dev/null' % (bxh_file,html_dir) ).readlines()
    if not os.path.exists( html_dir ):
        error = "ERROR: html directory was not created from BXH file"
        sibis.logging(html_dir,error,
                      bxh_file = bxh_file)
        return

    # Convert QA results from html to pdf
    summary_file_path = '%s/QA-Summary.pdf' % temp_dir
    subprocess.call( 'htmldoc --webpage --browserwidth 1024 --no-title --no-toc --compression=9 --outfile %s %s/index.html >& /dev/null' % (summary_file_path,html_dir), shell=True )
    if os.path.exists( summary_file_path ):
        # Upload QA files to XNAT as file resources
        try:
            qa_file = interface.select.project( project ).subject( subject ).experiment( session ).resource('QA').file( 'QA-%s-Summary.pdf' % scan_number )
            qa_file.insert( summary_file_path, format='pdf', tags='qa,fbirn,pdf', content='QA Analysis Summary', overwrite=True )
        except:
            print "Something bad happened uploading QA summary file to Experiment %s" % session
    else:
        print "Unable to create PDF QA summary file %s" % summary_file_path

    # Clean up - remove temp directory
    shutil.rmtree( temp_dir )
def run_phantom_qa( interface, project, subject, session, label, dicom_path ):
    """Run fBIRN phantom QA on one scan and upload the results to XNAT.

    Wraps the DICOM series in a BXH file, runs fmriqa_phantomqa.pl, uploads
    the raw script output (QA-Details.txt) and an htmldoc-rendered PDF
    summary (QA-Summary.pdf) to the experiment's 'QA' resource, and reports
    (via sibis.logging) any QA metric exceeding the module-level
    QA_thresholds table.

    Parameters:
        interface  -- pyxnat interface object
        project    -- XNAT project ID
        subject    -- XNAT subject ID
        session    -- XNAT experiment/session ID
        label      -- session label, used only in log/error messages
        dicom_path -- directory holding the phantom scan's DICOM files

    Returns None; all failures are reported via sibis.logging or stdout.
    """
    # Make a temporary directory
    temp_dir = tempfile.mkdtemp()

    # Switch to temp directory (the QA tools below use relative paths)
    original_wd = os.getcwd()
    os.chdir(temp_dir)

    # Make XML file as wrapper for the DICOM files
    # NOTE(review): session[7:] presumably strips a fixed-width site/project
    # prefix from the session ID -- confirm against real session IDs.
    bxh_file = '%s.bxh' % session[7:]
    # NOTE(review): '>&' is a csh/bash redirection; with shell=True this runs
    # under /bin/sh, where it may not behave as intended -- verify.
    subprocess.call('dicom2bxh %s/* %s >& /dev/null' % ( dicom_path, bxh_file ), shell=True)
    if not os.path.exists( bxh_file ):
        error = "ERROR: BXH file was not created from DICOM files"
        sibis.logging(session,error,
                  bxh_file = bxh_file,
                  dicom_path = dicom_path)
        # NOTE(review): early return leaves the process chdir'ed into
        # temp_dir and never removes it.
        return

    # Run the PERL QA script and capture its output
    html_dir = './html'
    script_output = os.popen( 'fmriqa_phantomqa.pl %s %s 2> /dev/null' % (bxh_file,html_dir) ).readlines()
    if not os.path.exists( '%s/index.html' % html_dir ):
        error =  "ERROR: html file %s/index.html was not created from BXH file %s (session %s/%s)" % ( html_dir, bxh_file, session, label )
        qa_script_output = '\n'.join( script_output )
        sibis.logging('session {}/{}'.format(session, label),error,
                      html_dir = html_dir,
                      bxh_file = bxh_file,
                      qa_script_output = qa_script_output)
        # NOTE(review): same early-return cleanup gap as above.
        return

    # Copy the entire output to a text file for upload to XNAT
    details_file_path = '%s/QA-Details.txt' % temp_dir
    script_output_file = open( details_file_path, 'w' )
    script_output_file.writelines( script_output )
    script_output_file.close()

    # Upload detail file to XNAT
    try:
        qa_file = interface.select.project( project ).subject( subject ).experiment( session ).resource('QA').file( 'QA-Details.txt' )
        qa_file.insert( details_file_path, format='text', tags='qa,fbirn,txt', content='QA Analysis Details', overwrite=True )
    except:
        # Upload failure is non-fatal; QA continues with the local output.
        print "Something bad happened uploading QA details file to Experiment %s/%s/%s" % (project,session,label)

    # Step through QA output, line by line, and check for measures for which we have thresholds defined.
    for line in script_output:
        # Parse current line into key=value pairs (lines look like '#key=value')
        match = re.match( '^#([A-Za-z]+)=(.*)$', line )

        # Is this key in the list of thresholds?
        if match and (match.group(1) in QA_thresholds.keys()):
            value = float( match.group(2) )
            metric = QA_thresholds[match.group(1)]
            if metric.exceeds(value):
                error = 'QA metric fails to meet threshhold.'
                sibis.logging(session,error,
                              metric_name=metric._name,
                              metric_key=match.group(1),
                              metric_value=value,
                              metric_threshold=metric._thresh)

    # Convert QA results from html to pdf
    summary_file_path = '%s/QA-Summary.pdf' % temp_dir
    subprocess.call( 'htmldoc --quiet --webpage --no-title --no-toc --compression=9 --outfile %s %s/index.html' % (summary_file_path,html_dir), shell=True )
    if os.path.exists( summary_file_path ):
        # Upload QA files to XNAT as file resources
        try:
            qa_file = interface.select.project( project ).subject( subject ).experiment( session ).resource('QA').file( 'QA-Summary.pdf' )
            qa_file.insert( summary_file_path, format='pdf', tags='qa,fbirn,pdf', content='QA Analysis Summary', overwrite=True )
        except:
            print "Something bad happened uploading QA summary file to Experiment %s/%s" % (session,label)
    else:
        print "Unable to create PDF QA summary file %s from DICOMs in %s (session %s/%s)" % (summary_file_path, dicom_path, session, label )

    # Clean up - remove temp directory
    os.chdir( original_wd )
    shutil.rmtree( temp_dir )
def export_to_nifti(interface, project, subject, session, session_label,
                    manufacturer, scan, scantype, verbose=False):
    """Convert one XNAT scan's DICOM series to NIfTI and upload it to XNAT.

    Skips all work if the scan's dcm2image.log already exists in the
    experiment's 'nifti' resource. Otherwise locates the DICOM directory
    from the scan's XML catalog, runs 'cmtk dcm2image', zips and uploads
    the output, and -- for Siemens dti60b1000 scans -- compares gradient
    tables against ground truth and checks the phase-encode direction sign
    recorded in the XML sidecars (expected 'NEG').

    Returns a list of error-message strings (empty on success); some
    problems are instead reported directly via sibis.logging.
    """
    if verbose:
        print("Starting export of nifti files...")
    error_msg = []

    # Presence of the dcm2image log on XNAT marks this scan as already exported.
    logfile_resource = '%s_%s/dcm2image.log' % (scan, scantype)
    xnat_log = interface.select.project(project).subject(subject).experiment(
        session).resource('nifti').file(logfile_resource)
    if not xnat_log.exists():
        # Extract the on-disk DICOM directory from the scan's XML catalog.
        match = re.match('.*(/fs/storage/XNAT/.*)scan_.*_catalog.xml.*',
                         interface.select.experiment(session).scan(scan).get(),
                         re.DOTALL)
        if match:
            dicom_path = match.group(1)
            if not os.path.exists(dicom_path):
                # Fall back to the archive mount when the primary path is gone.
                dicom_path = re.sub('storage/XNAT', 'ncanda-xnat', dicom_path)

            if not os.path.exists(dicom_path):
                error_msg.append(
                    "Path %s does not exist - export_to_nifti failed for SID:"
                    "%s EID:%s Label: %s!" % (dicom_path, subject, session,
                                              session_label))

            else:
                temp_dir = tempfile.mkdtemp()
                zip_path = '%s/%s_%s.zip' % (temp_dir, scan, scantype)

                log_filename = '%s/%s_%s/dcm2image.log' % (
                    temp_dir, scan, scantype)
                dcm2image_command = 'cmtk dcm2image --tolerance 1e-3 ' \
                                    '--write-single-slices --no-progress ' \
                                    '-rvxO %s/%s_%s/image%%n.nii %s 2>&1' % (
                                        temp_dir, scan, scantype, dicom_path)

                try:
                    output = subprocess.check_output(dcm2image_command,
                                                     shell=True)
                except:
                    error_msg.append(
                        "The following command failed: %s" % dcm2image_command)

                if len(error_msg) == 0:
                    # Save the converter's combined stdout/stderr as the log.
                    output_file = open(log_filename, 'w')
                    try:
                        output_file.writelines(output)
                    finally:
                        output_file.close()

                    # Zip everything below temp_dir with paths relative to it.
                    try:
                        fzip = zipfile.ZipFile(zip_path, 'w')
                        for src in sorted(glob.glob('%s/*/*' % temp_dir)):
                            fzip.write(src, re.sub('%s/' % temp_dir, '', src))
                        fzip.close()
                    except:
                        error_msg.append("Could not zip %s" % zip_path)

                if os.path.exists(zip_path):
                    try:
                        interface.select.project(project).subject(
                            subject).experiment(session).resource(
                            'nifti').put_zip(zip_path, overwrite=True,
                                             extract=True)
                    except:
                        error_msg.append(
                            "Unable to upload ZIP file %s to experiment %s" % (
                                zip_path, session))

                # Verify image counts for various series
                images_created = len(glob.glob('%s/*/*.nii.gz' % temp_dir))

                if images_created > 0:
                    manufacturer_u = manufacturer.upper()
                    verify_image_count(session, session_label, scan, scantype,
                                       manufacturer_u, images_created)
                    # cgt: project-local gradient-table helper (imported
                    # elsewhere in this file).
                    truth_gradient_map = cgt.get_ground_truth_gradients(session_label)

                    if manufacturer_u == 'SIEMENS':
                        truth_gradients_siemens = truth_gradient_map.get('Siemens')
                        # NOTE(review): the result of this numpy.array(...)
                        # call is discarded -- likely dead code or a missing
                        # assignment; confirm intent.
                        numpy.array(truth_gradients_siemens)
                        if 'dti60b1000' in scantype:
                            xml_file_list = glob.glob(
                                os.path.join(temp_dir,
                                             '%s_%s' % (
                                                 scan,
                                                 scantype), 'image*.nii.xml'))
                            xml_file_list.sort()
                            errorsFrame = list()
                            errorsExpected = list()
                            errorsActual = list()
                            try:
                                evaluated_gradients = cgt.get_all_gradients(session_label,
                                                                            xml_file_list, decimals=3)
                                if len(evaluated_gradients) == len(truth_gradients_siemens):
                                    for idx, frame in enumerate(evaluated_gradients):
                                        # if there is a frame that doesn't match,
                                        # report it.
                                        if not (truth_gradients_siemens[idx] == frame).all():
                                            errorsFrame.append(idx)
                                            errorsActual.append(frame)
                                            errorsExpected.append(truth_gradients_siemens[idx])
                                else:
                                    sibis.logging(
                                        session_label,
                                        "ERROR: Incorrect number of frames.",
                                        case_gradients=str(evaluated_gradients),
                                        expected=str(truth_gradients_siemens),
                                        session=session)
                            except AttributeError as error:
                                sibis.logging(
                                    session_label,
                                    "Error: parsing XML files failed.",
                                    xml_file_list=str(xml_file_list),
                                    error=str(error),
                                    session=session)
                            if errorsFrame:
                                # key = os.path.join(case, args.arm, args.event
                                # , 'diffusion/native/dti60b1000')
                                sibis.logging(session_label,
                                    "Errors in dti601000 gradients for new sessions after comparing with ground_truth.",
                                    frames=str(errorsFrame),
                                    actualGradients=str(errorsActual),
                                    expectedGradients=str(errorsExpected))


                            # NOTE(review): assumes at least one XML sidecar
                            # exists; xml_file_list[0] raises IndexError
                            # (uncaught) otherwise.
                            xml_file = open(xml_file_list[0], 'r')
                            try:
                                for line in xml_file:
                                    match = re.match(
                                        '.*<phaseEncodeDirectionSign>(.+)'
                                        '</phaseEncodeDirectionSign>.*',
                                        line)
                                    if match and match.group(
                                            1).upper() != 'NEG':
                                        error_msg.append(
                                            "Scan %s of type %s in session %s "
                                            "has wrong PE sign %s (expected "
                                            "NEG)" % (scan,
                                                      scantype,
                                                      session_label,
                                                      match.group(1).upper()))

                            except:
                                error_msg.append(
                                    "Cannot read XML sidecar file for scan %s "
                                    "of session %s" % (scan, session_label))

                            finally:
                                xml_file.close()

                # Clean up - remove temp directory
                shutil.rmtree(temp_dir)

    return error_msg
# Exemple #20
# 0
def export_to_nifti(interface,
                    project,
                    subject,
                    session,
                    session_label,
                    manufacturer,
                    scan,
                    scantype,
                    dry_run=False,
                    verbose=False):
    """Convert one XNAT scan's DICOM series to NIfTI and upload it to XNAT.

    Skips all work if the scan's dcm2image.log already exists in the
    experiment's 'nifti' resource. Otherwise locates the DICOM directory
    from the scan's XML catalog, runs 'cmtk dcm2image', zips and uploads
    the output, and -- for Siemens diffusion scans -- compares gradient
    tables and checks the XML sidecar's phase-encode direction sign
    ('POS' for dti6b500pepolar, 'NEG' for dti60b1000).

    Returns a list of error-message strings (empty on success).

    NOTE(review): near-duplicate of other export_to_nifti definitions in
    this file; 'dry_run' is accepted but never used in this body.
    """

    images_created = 0
    errorMSG = []

    # Presence of the dcm2image log on XNAT marks this scan as already exported.
    logfile_resource = '%s_%s/dcm2image.log' % (scan, scantype)
    xnat_log = interface.select.project(project).subject(subject).experiment(
        session).resource('nifti').file(logfile_resource)
    if not xnat_log.exists():
        # Extract the on-disk DICOM directory from the scan's XML catalog.
        match = re.match('.*(/fs/storage/XNAT/.*)scan_.*_catalog.xml.*',
                         interface.select.experiment(session).scan(scan).get(),
                         re.DOTALL)
        if match:
            dicom_path = match.group(1)
            if not os.path.exists(dicom_path):
                # Fall back to the archive mount when the primary path is gone.
                dicom_path = re.sub('storage/XNAT', 'ncanda-xnat', dicom_path)

            if not os.path.exists(dicom_path):
                errorMSG.append(
                    "Path %s does not exist - export_to_nifti failed for SID:%s EID:%s Label: %s!"
                    % (dicom_path, subject, session, session_label))

            else:
                temp_dir = tempfile.mkdtemp()
                zip_path = '%s/%s_%s.zip' % (temp_dir, scan, scantype)

                logFileName = '%s/%s_%s/dcm2image.log' % (temp_dir, scan,
                                                          scantype)
                dcm2image_command = 'cmtk dcm2image --tolerance 1e-3 --write-single-slices --no-progress -rvxO %s/%s_%s/image%%n.nii %s 2>&1' % (
                    temp_dir, scan, scantype, dicom_path)

                try:
                    output = subprocess.check_output(dcm2image_command,
                                                     shell=True)
                except:
                    errorMSG.append("The following command failed: %s" %
                                    dcm2image_command)

                if len(errorMSG) == 0:
                    # Save the converter's combined stdout/stderr as the log.
                    output_file = open(logFileName, 'w')
                    try:
                        output_file.writelines(output)
                    finally:
                        output_file.close()

                    # Zip everything below temp_dir with paths relative to it.
                    try:
                        fzip = zipfile.ZipFile(zip_path, 'w')
                        for src in sorted(glob.glob('%s/*/*' % temp_dir)):
                            fzip.write(src, re.sub('%s/' % temp_dir, '', src))
                        fzip.close()
                    except:
                        errorMSG.append("Could not zip %s" % zip_path)

                if os.path.exists(zip_path):
                    try:
                        interface.select.project(project).subject(
                            subject).experiment(session).resource(
                                'nifti').put_zip(zip_path,
                                                 overwrite=True,
                                                 extract=True)
                    except:
                        errorMSG.append(
                            "Unable to upload ZIP file %s to experiment %s" %
                            (zip_path, session))

                # Verify image counts for various series
                images_created = len(glob.glob('%s/*/*.nii.gz' % temp_dir))

                if images_created > 0:
                    manufacturer_u = manufacturer.upper()
                    verify_image_count(session, session_label, scan, scantype,
                                       manufacturer_u, images_created)

                    if manufacturer_u == 'SIEMENS':
                        if 'dti6b500pepolar' in scantype:
                            xml_file_list = glob.glob(
                                os.path.join(temp_dir,
                                             '%s_%s' % (scan, scantype),
                                             'image*.nii.xml'))
                            case_gradients = check_gradient_tables.get_all_gradients(
                                xml_file_list, decimals=3)
                            errors = list()
                            # NOTE(review): 'gradients' is not defined in this
                            # function or visible module scope -- presumably a
                            # module-level ground-truth table; as written this
                            # loop raises NameError. Confirm.
                            for idx, frame in enumerate(case_gradients):
                                # if there is a frame that doesn't match, report it.
                                if not (gradients[idx] == frame).all():
                                    errors.append(idx)
                            if errors:
                                #key = os.path.join(case, args.arm, args.event, 'diffusion/native/dti60b1000')
                                key = session_label
                                sibis.logging(
                                    key,
                                    "ERROR: Gradient tables do not match for frames.",
                                    frames=errors,
                                    session=session)
                            # NOTE(review): xml_file_list[0] raises IndexError
                            # (uncaught) if no XML sidecars were produced.
                            xml_file = open(xml_file_list[0], 'r')
                            try:
                                for line in xml_file:
                                    match = re.match(
                                        '.*<phaseEncodeDirectionSign>(.+)</phaseEncodeDirectionSign>.*',
                                        line)
                                    if match and match.group(
                                            1).upper() != 'POS':
                                        errorMSG.append(
                                            "Scan %s of type %s in session %s has wrong PE sign %s (expected POS)"
                                            % (scan, scantype, session_label,
                                               match.group(1).upper()))

                            except:
                                errorMSG.append(
                                    "Cannot read XML sidecar file for scan %s of session %s"
                                    % (scan, session_label))

                            finally:
                                xml_file.close()
                        elif 'dti60b1000' in scantype:
                            xml_file_list = glob.glob(
                                os.path.join(temp_dir,
                                             '%s_%s' % (scan, scantype),
                                             'image*.nii.xml'))
                            case_gradients = check_gradient_tables.get_all_gradients(
                                xml_file_list, decimals=3)
                            errors = list()
                            # NOTE(review): same undefined 'gradients' issue as
                            # in the dti6b500pepolar branch above.
                            for idx, frame in enumerate(case_gradients):
                                # if there is a frame that doesn't match, report it.
                                if not (gradients[idx] == frame).all():
                                    errors.append(idx)
                            if errors:
                                #key = os.path.join(case, args.arm, args.event, 'diffusion/native/dti60b1000')
                                key = session_label
                                sibis.logging(
                                    key,
                                    "ERROR: Gradient tables do not match for frames.",
                                    frames=errors,
                                    session=session)
                            xml_file = open(xml_file_list[0], 'r')
                            try:
                                for line in xml_file:
                                    match = re.match(
                                        '.*<phaseEncodeDirectionSign>(.+)</phaseEncodeDirectionSign>.*',
                                        line)
                                    if match and match.group(
                                            1).upper() != 'NEG':
                                        errorMSG.append(
                                            "Scan %s of type %s in session %s has wrong PE sign %s (expected NEG)"
                                            % (scan, scantype, session_label,
                                               match.group(1).upper()))

                            except:
                                errorMSG.append(
                                    "Cannot read XML sidecar file for scan %s of session %s"
                                    % (scan, session_label))

                            finally:
                                xml_file.close()

                # Clean up - remove temp directory
                shutil.rmtree(temp_dir)

    #for MSG in errorMSG :
    #    sibis.logging(subject, "ERROR: {}".format(MSG),
    #                  interface=interface,
    #                  project=project,
    #                  session=session,
    #                  session_label=session_label,
    #                  manufacturer=manufacturer,
    #                  scan=scan,
    #                  scantype=scantype)

    return errorMSG
def export_series(xnat,
                  session_and_scan_list,
                  to_directory,
                  filename_pattern,
                  verbose=False):
    """Export one or more XNAT scan series to image files on disk.

    session_and_scan_list is a space-separated string of 'SESSION/SCAN'
    entries; all resolved DICOM directories are passed to a single
    'cmtk dcm2image' invocation. An '.eid' marker file records which
    session/scan list produced the existing output so unchanged series are
    skipped on re-runs (via check_eid_file).

    Returns True if a new export was performed, False if the target already
    exists for the same EID list, the input is malformed, or the conversion
    failed.
    """
    # List should have at least one "SESSION/SCAN" entry
    if not '/' in session_and_scan_list:
        return False

    # Put together target directory and filename pattern
    to_path_pattern = os.path.join(to_directory, filename_pattern)

    # If filename is a pattern with substitution, check whether entire directory exists
    if '%' in filename_pattern:
        eid_file_path = os.path.join(to_directory, 'eid')
        if os.path.exists(to_directory):
            if check_eid_file(eid_file_path, session_and_scan_list):
                return False
    else:
        # Single-file target: derive the '.eid' marker path by swapping the
        # file extension. NOTE(review): pattern should be a raw string
        # (r'\.[^/]*') -- '\.' is a deprecated escape in a plain literal.
        eid_file_path = re.sub('\.[^/]*', '.eid', to_path_pattern)
        if os.path.exists(to_path_pattern) or os.path.exists(to_path_pattern +
                                                             '.gz'):
            if check_eid_file(eid_file_path, session_and_scan_list):
                return False

    dicom_path_list = []
    for session_and_scan in session_and_scan_list.split(' '):
        [session, scan] = session_and_scan.split('/')
        # Extract the on-disk DICOM directory from the scan's XML catalog.
        match = re.match('.*(/fs/storage/XNAT/.*)scan_.*_catalog.xml.*',
                         xnat.select.experiment(session).scan(scan).get(),
                         re.DOTALL)
        if match:
            dicom_path = match.group(1)
            if not os.path.exists(dicom_path):
                # Fall back to the archive mount when the primary path is gone.
                dicom_path = re.sub('storage/XNAT', 'ncanda-xnat', dicom_path)

            dicom_path_list.append(dicom_path)

    dcm2image_output = None
    if len(dicom_path_list):
        try:
            dcm2image_command = 'cmtk dcm2image --tolerance 1e-3 --write-single-slices --no-progress -rxO %s %s 2>&1' % (
                to_path_pattern, ' '.join(dicom_path_list))

            if (verbose):
                print dcm2image_command

            dcm2image_output = subprocess.check_output(dcm2image_command,
                                                       shell=True)
        except:
            # On failure, preserve whatever output we captured in a .log file
            # next to the intended target for post-mortem inspection.
            if dcm2image_output:
                output_file = open(to_path_pattern + '.log', 'w')
                try:
                    output_file.writelines(dcm2image_output)
                except:
                    print dcm2image_output
                finally:
                    output_file.close()
            return False

        # Record which session/scan list produced this export.
        # NOTE(review): the file handle is never explicitly closed (relies on
        # CPython refcount finalization).
        try:
            open(eid_file_path, 'w').writelines(session_and_scan_list)
        except:
            error = "ERROR: unable to write EID file"
            sibis.logging(eid_file_path, error)

        return True
    return False
def gzip_physio(physio_file_path):
    """Compress a physio file in place with maximum gzip compression.

    Runs 'gzip -9f', which replaces the file with '<path>.gz' and
    overwrites any existing compressed copy. Failures are reported via
    sibis.logging rather than raised.

    Parameters:
        physio_file_path -- path to the uncompressed physio file.
    """
    try:
        subprocess.check_call(['gzip', '-9f', physio_file_path])
    except (subprocess.CalledProcessError, OSError):
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit are
        # no longer swallowed; gzip failure (non-zero exit) and a missing
        # gzip binary are still caught and logged.
        error = "ERROR: unable to compress physio file"
        sibis.logging(physio_file_path, error)
def gzip_physio( physio_file_path ):
    """Gzip-compress the given physio file in place (gzip -9f, maximum
    compression, overwriting any existing .gz); log failures via sibis."""
    gzip_command = [ 'gzip', '-9f', physio_file_path ]
    try:
        subprocess.check_call( gzip_command )
    except:
        sibis.logging( physio_file_path, "ERROR: unable to compress physio file" )
def check_excluded_subjects(excluded_subjects, pipeline_root_dir):
    """Report (via sibis.logging) any pipeline directory that still exists
    for a subject on the exclusion list."""
    for excluded in excluded_subjects:
        excluded_dir = os.path.join(pipeline_root_dir, excluded)
        if not os.path.exists(excluded_dir):
            continue
        sibis.logging(
            excluded_dir,
            "ERROR: pipeline directory is from an *excluded* subject and should probable be deleted")
def check_excluded_subjects( excluded_subjects, pipeline_root_dir ):
    """Flag leftover pipeline directories belonging to excluded subjects."""
    candidate_dirs = [ os.path.join( pipeline_root_dir, name ) for name in excluded_subjects ]
    for candidate in candidate_dirs:
        if os.path.exists( candidate ):
            # An excluded subject should have no pipeline data left on disk.
            sibis.logging( candidate, "ERROR: pipeline directory is from an *excluded* subject and should probable be deleted" )
def export_to_nifti( interface, project, subject, session, session_label, manufacturer, scan, scantype, dry_run=False, verbose=False ):
    """Convert one XNAT scan's DICOM series to NIfTI and upload it to XNAT.

    Skips all work if the scan's dcm2image.log already exists in the
    experiment's 'nifti' resource. Otherwise locates the DICOM directory
    from the scan's XML catalog, runs 'cmtk dcm2image', zips and uploads
    the output, and -- for Siemens diffusion scans -- compares gradient
    tables and checks the XML sidecar's phase-encode direction sign
    ('POS' for dti6b500pepolar, 'NEG' for dti60b1000).

    Returns a list of error-message strings (empty on success).

    NOTE(review): near-duplicate of other export_to_nifti definitions in
    this file; 'dry_run' is accepted but never used in this body.
    """

    images_created = 0
    errorMSG = []

    # Presence of the dcm2image log on XNAT marks this scan as already exported.
    logfile_resource = '%s_%s/dcm2image.log' % (scan,scantype)
    xnat_log = interface.select.project( project ).subject( subject ).experiment( session ).resource('nifti').file( logfile_resource )
    if not xnat_log.exists():
        # Extract the on-disk DICOM directory from the scan's XML catalog.
        match = re.match( '.*(/fs/storage/XNAT/.*)scan_.*_catalog.xml.*', interface.select.experiment( session ).scan( scan ).get(), re.DOTALL )
        if match:
            dicom_path = match.group(1)
            if not os.path.exists( dicom_path ):
                # Fall back to the archive mount when the primary path is gone.
                dicom_path = re.sub( 'storage/XNAT', 'ncanda-xnat', dicom_path )

            if not os.path.exists( dicom_path ):
                errorMSG.append("Path %s does not exist - export_to_nifti failed for SID:%s EID:%s Label: %s!" % (dicom_path,  subject, session, session_label))

            else :
                temp_dir = tempfile.mkdtemp()
                zip_path = '%s/%s_%s.zip' % (temp_dir, scan, scantype)

                logFileName='%s/%s_%s/dcm2image.log' % (temp_dir,scan,scantype)
                dcm2image_command = 'cmtk dcm2image --tolerance 1e-3 --write-single-slices --no-progress -rvxO %s/%s_%s/image%%n.nii %s 2>&1' % ( temp_dir, scan, scantype, dicom_path )

                try:
                    output = subprocess.check_output( dcm2image_command, shell=True )
                except:
                    errorMSG.append("The following command failed: %s" % dcm2image_command)

                if len(errorMSG)==0:
                    # Save the converter's combined stdout/stderr as the log.
                    output_file = open( logFileName , 'w' )
                    try:
                        output_file.writelines( output )
                    finally:
                        output_file.close()

                    # Zip everything below temp_dir with paths relative to it.
                    try:
                        fzip = zipfile.ZipFile( zip_path, 'w' )
                        for src in sorted( glob.glob( '%s/*/*' % temp_dir ) ):
                           fzip.write( src, re.sub( '%s/' % temp_dir, '', src ) )
                        fzip.close()
                    except:
                        errorMSG.append("Could not zip %s" % zip_path )


                if os.path.exists( zip_path ):
                    try:
                        interface.select.project( project ).subject( subject ).experiment( session ).resource('nifti').put_zip( zip_path, overwrite=True, extract=True )
                    except:
                        errorMSG.append("Unable to upload ZIP file %s to experiment %s" % (zip_path,session))

                # Verify image counts for various series
                images_created = len( glob.glob( '%s/*/*.nii.gz' % temp_dir ) )

                if images_created > 0:
                    manufacturer_u = manufacturer.upper()
                    verify_image_count( session, session_label, scan, scantype, manufacturer_u, images_created )

                    if manufacturer_u == 'SIEMENS':
                        if 'dti6b500pepolar' in scantype:
                            xml_file_list = glob.glob(os.path.join( temp_dir, '%s_%s' % (scan,scantype), 'image*.nii.xml' ))
                            case_gradients = check_gradient_tables.get_all_gradients(xml_file_list, decimals=3)
                            errors = list()
                            # NOTE(review): 'gradients' is not defined in this
                            # function or visible module scope -- presumably a
                            # module-level ground-truth table; as written this
                            # loop raises NameError. Confirm.
                            for idx, frame in enumerate(case_gradients):
                                # if there is a frame that doesn't match, report it.
                                if not (gradients[idx]==frame).all():
                                    errors.append(idx)
                            if errors:
                                #key = os.path.join(case, args.arm, args.event, 'diffusion/native/dti60b1000')
                                key = session_label
                                sibis.logging(key,"ERROR: Gradient tables do not match for frames.",
                                              frames=errors,
                                              session=session)
                            # NOTE(review): xml_file_list[0] raises IndexError
                            # (uncaught) if no XML sidecars were produced.
                            xml_file = open( xml_file_list[0], 'r' )
                            try:
                                for line in xml_file:
                                    match = re.match( '.*<phaseEncodeDirectionSign>(.+)</phaseEncodeDirectionSign>.*', line )
                                    if match and match.group(1).upper() != 'POS':
                                        errorMSG.append("Scan %s of type %s in session %s has wrong PE sign %s (expected POS)" % (scan, scantype, session_label, match.group(1).upper()))

                            except:
                                errorMSG.append("Cannot read XML sidecar file for scan %s of session %s" % (scan, session_label))

                            finally:
                                xml_file.close()
                        elif 'dti60b1000' in scantype:
                            xml_file_list = glob.glob(os.path.join( temp_dir, '%s_%s' % (scan,scantype), 'image*.nii.xml' ))
                            case_gradients = check_gradient_tables.get_all_gradients(xml_file_list, decimals=3)
                            errors = list()
                            # NOTE(review): same undefined 'gradients' issue as
                            # in the dti6b500pepolar branch above.
                            for idx, frame in enumerate(case_gradients):
                                # if there is a frame that doesn't match, report it.
                                if not (gradients[idx]==frame).all():
                                    errors.append(idx)
                            if errors:
                                #key = os.path.join(case, args.arm, args.event, 'diffusion/native/dti60b1000')
                                key = session_label
                                sibis.logging(key,"ERROR: Gradient tables do not match for frames.",
                                              frames=errors,
                                              session=session)
                            xml_file = open( xml_file_list[0], 'r' )
                            try:
                                for line in xml_file:
                                    match = re.match( '.*<phaseEncodeDirectionSign>(.+)</phaseEncodeDirectionSign>.*', line )
                                    if match and match.group(1).upper() != 'NEG':
                                        errorMSG.append("Scan %s of type %s in session %s has wrong PE sign %s (expected NEG)" % (scan, scantype, session_label, match.group(1).upper()))

                            except:
                                errorMSG.append("Cannot read XML sidecar file for scan %s of session %s" % (scan, session_label))

                            finally:
                                xml_file.close()

                # Clean up - remove temp directory
                shutil.rmtree( temp_dir )

    #for MSG in errorMSG :
    #    sibis.logging(subject, "ERROR: {}".format(MSG),
    #                  interface=interface,
    #                  project=project,
    #                  session=session,
    #                  session_label=session_label,
    #                  manufacturer=manufacturer,
    #                  scan=scan,
    #                  scantype=scantype)

    return errorMSG
# Exemple #27
# 0
def run_phantom_qa(interface, project, subject, session, label, dicom_path):
    # Make a temporary directory
    temp_dir = tempfile.mkdtemp()

    # Switch to temp directory
    original_wd = os.getcwd()
    os.chdir(temp_dir)

    # Make XML file as wrapper for the DICOM files
    bxh_file = '%s.bxh' % session[7:]
    subprocess.call('dicom2bxh %s/* %s >& /dev/null' % (dicom_path, bxh_file),
                    shell=True)
    if not os.path.exists(bxh_file):
        error = "ERROR: BXH file was not created from DICOM files"
        sibis.logging(session, error, bxh_file=bxh_file, dicom_path=dicom_path)
        return

    # Run the PERL QA script and capture its output
    html_dir = './html'
    script_output = os.popen('fmriqa_phantomqa.pl %s %s 2> /dev/null' %
                             (bxh_file, html_dir)).readlines()
    if not os.path.exists('%s/index.html' % html_dir):
        error = "ERROR: html file %s/index.html was not created from BXH file %s (session %s/%s)" % (
            html_dir, bxh_file, session, label)
        qa_script_output = '\n'.join(script_output)
        sibis.logging('session {}/{}'.format(session, label),
                      error,
                      html_dir=html_dir,
                      bxh_file=bxh_file,
                      qa_script_output=qa_script_output)
        return

    # Copy the entire output to a text file for upload to XNAT
    details_file_path = '%s/QA-Details.txt' % temp_dir
    script_output_file = open(details_file_path, 'w')
    script_output_file.writelines(script_output)
    script_output_file.close()

    # Upload detail file to XNAT
    try:
        qa_file = interface.select.project(project).subject(
            subject).experiment(session).resource('QA').file('QA-Details.txt')
        qa_file.insert(details_file_path,
                       format='text',
                       tags='qa,fbirn,txt',
                       content='QA Analysis Details',
                       overwrite=True)
    except:
        print "Something bad happened uploading QA details file to Experiment %s/%s/%s" % (
            project, session, label)

    # Step through QA output, line by line, and check for measures for which we have thresholds defined.
    for line in script_output:
        # Parse current line into key=value pairs
        match = re.match('^#([A-Za-z]+)=(.*)$', line)

        # Is this key in the list of thresholds?
        if match and (match.group(1) in QA_thresholds.keys()):
            value = float(match.group(2))
            metric = QA_thresholds[match.group(1)]
            if metric.exceeds(value):
                error = 'QA metric fails to meet threshhold.'
                sibis.logging(session,
                              error,
                              metric_name=metric._name,
                              metric_key=match.group(1),
                              metric_value=value,
                              metric_threshold=metric._thresh)

    # Convert QA results from html to pdf
    summary_file_path = '%s/QA-Summary.pdf' % temp_dir
    subprocess.call(
        'htmldoc --quiet --webpage --no-title --no-toc --compression=9 --outfile %s %s/index.html'
        % (summary_file_path, html_dir),
        shell=True)
    if os.path.exists(summary_file_path):
        # Upload QA files to XNAT as file resources
        try:
            qa_file = interface.select.project(project).subject(
                subject).experiment(session).resource('QA').file(
                    'QA-Summary.pdf')
            qa_file.insert(summary_file_path,
                           format='pdf',
                           tags='qa,fbirn,pdf',
                           content='QA Analysis Summary',
                           overwrite=True)
        except:
            print "Something bad happened uploading QA summary file to Experiment %s/%s" % (
                session, label)
    else:
        print "Unable to create PDF QA summary file %s from DICOMs in %s (session %s/%s)" % (
            summary_file_path, dicom_path, session, label)

    # Clean up - remove temp directory
    os.chdir(original_wd)
    shutil.rmtree(temp_dir)
def _check_siemens_dti_scan(temp_dir, scan, scantype, session, session_label,
                            gradients, expected_pe_sign, error_msg):
    # Verify one Siemens DTI scan against ground truth: compare its per-frame
    # gradient tables to the expected table and check the phase-encode
    # direction sign recorded in the first XML sidecar file.
    #
    #   gradients        - ground-truth gradient table for this scanner
    #   expected_pe_sign - 'POS' or 'NEG', depending on the scan type
    #   error_msg        - list of error strings; PE-sign/sidecar problems
    #                      are appended here (gradient problems go to
    #                      sibis.logging, matching the original behavior)
    xml_file_list = glob.glob(
        os.path.join(temp_dir, "%s_%s" % (scan, scantype), "image*.nii.xml")
    )
    case_gradients = cgt.get_all_gradients(xml_file_list, decimals=3)
    errors = list()
    if len(case_gradients) == len(gradients):
        for idx, frame in enumerate(case_gradients):
            # if there is a frame that doesn't match, report it.
            if not (gradients[idx] == frame).all():
                errors.append(idx)
    else:
        sibis.logging(
            session_label,
            "ERROR: Incorrect number of frames.",
            case_gradients=case_gradients,
            expected=gradients,
            session=session,
        )
    if errors:
        sibis.logging(
            session_label,
            "ERROR: Gradient tables do not match for frames.",
            frames=errors,
            session=session,
        )

    # Guard against a missing sidecar - indexing an empty list previously
    # raised an unhandled IndexError here.
    if not xml_file_list:
        error_msg.append(
            "Cannot read XML sidecar file for scan %s "
            "of session %s" % (scan, session_label)
        )
        return

    xml_file = open(xml_file_list[0], "r")
    try:
        for line in xml_file:
            match = re.match(
                ".*<phaseEncodeDirectionSign>(.+)" "</phaseEncodeDirectionSign>.*", line
            )
            if match and match.group(1).upper() != expected_pe_sign:
                error_msg.append(
                    "Scan %s of type %s in session %s "
                    "has wrong PE sign %s (expected "
                    "%s)" % (scan, scantype, session_label,
                             match.group(1).upper(), expected_pe_sign)
                )

    except Exception:
        error_msg.append(
            "Cannot read XML sidecar file for scan %s " "of session %s" % (scan, session_label)
        )

    finally:
        xml_file.close()


def export_to_nifti(interface, project, subject, session, session_label, manufacturer, scan, scantype, verbose=False):
    # Convert one XNAT scan's DICOM files to NIfTI (via cmtk dcm2image),
    # upload the images as a zipped 'nifti' resource, and sanity-check the
    # result (image counts, and for Siemens DTI scans the gradient tables
    # and phase-encode sign).
    #
    # The conversion is skipped entirely if the scan's dcm2image.log already
    # exists in the experiment's 'nifti' resource on XNAT.
    #
    # Returns a list of error-message strings (empty on success).
    if verbose:
        print("Starting export of nifti files...")
    error_msg = []

    logfile_resource = "%s_%s/dcm2image.log" % (scan, scantype)
    xnat_log = (
        interface.select.project(project).subject(subject).experiment(session).resource("nifti").file(logfile_resource)
    )
    if not xnat_log.exists():
        # Extract the DICOM directory from the scan's XML catalog
        match = re.match(
            ".*(/fs/storage/XNAT/.*)scan_.*_catalog.xml.*",
            interface.select.experiment(session).scan(scan).get(),
            re.DOTALL,
        )
        if match:
            dicom_path = match.group(1)
            if not os.path.exists(dicom_path):
                # Fall back to the alternate mount point of the same archive
                dicom_path = re.sub("storage/XNAT", "ncanda-xnat", dicom_path)

            if not os.path.exists(dicom_path):
                error_msg.append(
                    "Path %s does not exist - export_to_nifti failed for SID:"
                    "%s EID:%s Label: %s!" % (dicom_path, subject, session, session_label)
                )

            else:
                temp_dir = tempfile.mkdtemp()
                zip_path = "%s/%s_%s.zip" % (temp_dir, scan, scantype)

                log_filename = "%s/%s_%s/dcm2image.log" % (temp_dir, scan, scantype)
                dcm2image_command = (
                    "cmtk dcm2image --tolerance 1e-3 "
                    "--write-single-slices --no-progress "
                    "-rvxO %s/%s_%s/image%%n.nii %s 2>&1" % (temp_dir, scan, scantype, dicom_path)
                )

                try:
                    output = subprocess.check_output(dcm2image_command, shell=True)
                except Exception:
                    error_msg.append("The following command failed: %s" % dcm2image_command)

                if len(error_msg) == 0:
                    # Preserve the converter's output as a log next to the images
                    output_file = open(log_filename, "w")
                    try:
                        output_file.writelines(output)
                    finally:
                        output_file.close()

                    try:
                        fzip = zipfile.ZipFile(zip_path, "w")
                        for src in sorted(glob.glob("%s/*/*" % temp_dir)):
                            # Archive members are relative to temp_dir
                            fzip.write(src, re.sub("%s/" % temp_dir, "", src))
                        fzip.close()
                    except Exception:
                        error_msg.append("Could not zip %s" % zip_path)

                if os.path.exists(zip_path):
                    try:
                        interface.select.project(project).subject(subject).experiment(session).resource(
                            "nifti"
                        ).put_zip(zip_path, overwrite=True, extract=True)
                    except Exception:
                        error_msg.append("Unable to upload ZIP file %s to experiment %s" % (zip_path, session))

                # Verify image counts for various series
                images_created = len(glob.glob("%s/*/*.nii.gz" % temp_dir))

                if images_created > 0:
                    manufacturer_u = manufacturer.upper()
                    verify_image_count(session, session_label, scan, scantype, manufacturer_u, images_created)
                    gradient_map = cgt.get_ground_truth_gradients()

                    if manufacturer_u == "SIEMENS":
                        gradients = gradient_map.get("Siemens")
                        # The two DTI series differ only in the expected
                        # phase-encode sign; the shared check lives in
                        # _check_siemens_dti_scan().
                        if "dti6b500pepolar" in scantype:
                            _check_siemens_dti_scan(
                                temp_dir, scan, scantype, session,
                                session_label, gradients, "POS", error_msg)
                        elif "dti60b1000" in scantype:
                            _check_siemens_dti_scan(
                                temp_dir, scan, scantype, session,
                                session_label, gradients, "NEG", error_msg)

                # Clean up - remove temp directory
                shutil.rmtree(temp_dir)

    return error_msg
def export(redcap_project, site, subject, event, subject_data, visit_age,
           visit_data, arm_code, visit_code, subject_code, subject_datadir,
           forms_this_event, select_exports=None, verbose=False):

    # Mark subjects/visits that have QA completed by creating a hidden marker
    # file
    qafile_path = os.path.join(subject_datadir, '.qacomplete')
    if visit_data['mri_qa_completed'] == '1':
        try:
            if not os.path.exists(qafile_path):
                qafile = open(qafile_path, 'w')
                qafile.close()
        except IOError as error:
            print("ERROR: unable to open QA marker file in {}. {}".format(
                subject_datadir, error))
    else:
        try:
            if os.path.exists(qafile_path):
                os.remove(qafile_path)
        except OSError as error:
            print("ERROR: unable to remove QA marker file {}. {}".format(
                qafile_path, error))

    # Check if the "measures" subdirectory already exists - this is where all
    # the csv files go. Create it if necessary.
    measures_dir = os.path.join(subject_datadir, 'measures')
    if not os.path.exists(measures_dir):
        os.makedirs(measures_dir)

    # Export demographics (if selected)
    if not select_exports or 'demographics' in select_exports:
        # Create "demographics" file "by hand" - this has some data not (yet)
        # in REDCap.

        # Latino and race coding arrives here as floating point numbers; make
        # int strings from that (cannot use "int()" because it would fail for
        # missing data
        hispanic_code = re.sub('(.0)|(nan)', '', str(subject_data['hispanic']))
        race_code = re.sub('(.0)|(nan)', '', str(subject_data['race']))

        # scanner manufacturer map
        mfg = dict(A='siemens', B='ge', C='ge', D='siemens', E='ge')
        scanner_mfg = ''
        scanner_model = ''
        mri_scanner = str(visit_data['mri_scanner'])
        if mri_scanner != 'nan' :
            mri_scanner= mri_scanner.upper()

            if 'DISCOVERY MR750' in mri_scanner :
                scanner_mfg = 'ge' 
                scanner_model = 'MR750'
            elif 'PRISMA_FIT' in mri_scanner :
                scanner_mfg = 'siemens' 
                scanner_model = 'Prisma_Fit'
            elif 'TRIOTRIM' in mri_scanner or 'TRIOTIM' in mri_scanner:
                scanner_mfg = 'siemens' 
                scanner_model = 'TrioTim'
            else :
                sibis.logging(subject, "Error: Do not know scanner type",
                              script='export_redcap_to_pipeline.py',
                              mri_scanner = visit_data['mri_scanner'],
                              subject = subject_code,
                              arm = arm_code,
                              visit = visit_code)

        demographics = [
            ['subject', subject_code],
            ['arm', arm_code],
            ['visit', visit_code],
            ['site', site],
            ['sex', subject[8]],
            ['visit_age', truncate_age(visit_age)],
            ['mri_structural_age', truncate_age(visit_data['mri_t1_age'])],
            ['mri_diffusion_age', truncate_age(visit_data['mri_dti_age'])],
            ['mri_restingstate_age',
             truncate_age(visit_data['mri_rsfmri_age'])],
            ['exceeds_bl_drinking',
             'NY'[int(subject_data['enroll_exception___drinking'])]],
            ['siblings_enrolled_yn',
             'NY'[int(subject_data['siblings_enrolled___true'])]],
            ['siblings_id_first', subject_data['siblings_id1']],
            ['hispanic', code_to_label_dict['hispanic'][hispanic_code][0:1]],
            ['race', race_code],
            ['race_label', code_to_label_dict['race'][race_code]],
            ['participant_id', subject],
            ['scanner', scanner_mfg], 
            ['scanner_model', scanner_model],
        ]

        if race_code == '6':
            # if other race is specified, mark race label with manually curated
            # race code
            demographics[14] = ('race_label', subject_data['race_other_code'])

        series = pandas.Series()
        for (key, value) in demographics:
            series = series.set_value(key, value)

        safe_csv_export(pandas.DataFrame(series).T,
                        os.path.join(measures_dir, 'demographics.csv'),
                        verbose=verbose)

    # First get data for all fields across all forms in this event - this
    # speeds up transfers over getting each form separately
    all_fields = ['study_id']
    export_list = []
    for export_name in export_forms.keys():
        if (import_forms[export_name] in forms_this_event) \
                and (not select_exports or export_name in select_exports):
            all_fields += [re.sub('___.*', '', field_name) for field_name in
                           export_forms[export_name]]
            export_list.append(export_name)

    all_records = redcap_project.export_records(fields=all_fields,
                                                records=[subject],
                                                events=[event],
                                                format='df')

    # Now go form by form and export data
    for export_name in export_list:
        # Remove the complete field from the list of forms
        complete = '{}_complete'.format(import_forms.get(export_name))
        fields = [column for column in export_forms.get(export_name)
                  if column != complete]

        # Select data for this form - "reindex_axis" is necessary to put
        # fields in listed order - REDCap returns them lexicographically sorted
        fields = [i for i in fields if i not in ['subject', 'arm', 'visit']]
        record = all_records[fields].reindex_axis(fields, axis=1)

        if len(record) == 1:
            # First, add the three index columns
            record.insert(0, 'subject', subject_code)
            record.insert(1, 'arm', arm_code)
            record.insert(2, 'visit', visit_code)

            field_idx = 0
            output_fields = []
            for field in record.columns:
                # Rename field for output if necessary
                if field in export_rename[export_name].keys():
                    output_field = export_rename[export_name][field]
                else:
                    output_field = field
                output_fields.append(output_field)

                # If this is an "age" field, truncate to 2 digits for privacy
                if re.match('.*_age$', field):
                    record[field] = record[field].apply(truncate_age)

                # If this is a radio or dropdown field
                # (except "FORM_[missing_]why"), add a separate column for the
                # coded label
                if field in code_to_label_dict.keys() and not re.match(
                        '.*_why$', field):
                    code = str(record[field].ix[0])
                    label = ''
                    if code in code_to_label_dict[field].keys():
                        label = code_to_label_dict[field][code]
                    field_idx += 1
                    record.insert(field_idx, output_field + '_label', label)
                    output_fields.append(output_field + '_label')

                field_idx += 1

            # Apply renaming to columns
            record.columns = output_fields

            # Figure out path for CSV file and export this record
            safe_csv_export(record,
                            os.path.join(measures_dir, export_name + '.csv'),
                            verbose=verbose)