Code example #1
def process_phantom_session( interface, project, subject, session, label, force_updates=False ):
    # Get the experiment object
    experiment = interface.select.experiment( session )
    # First, see if the QA files are already there
    files = experiment.resources().files().get()
    if force_updates or not (('t1.nii.gz' in files) and ('phantom.xml' in files) and ('phantom.nii.gz' in files)):
        dicom_path=''

        # Get list of all scans in the session
        scans = experiment.scans().get()
        for scan in scans:
            # Check scans with the proper type ('quality' is retrieved here but not otherwise used)
            [scan_type,quality] = experiment.scan( scan ).attrs.mget( ['type', 'quality'] )
            if ('mprage' in scan_type) or ('t1spgr' in scan_type):
                # Extract the DICOM file directory from the XML representation
                match = re.match( '.*(/fs/storage/XNAT/.*)scan_.*_catalog.xml.*', experiment.scan( scan ).get(), re.DOTALL )
                if match:
                    dicom_path = match.group(1)

        if dicom_path:
            # If we found a matching scan, run the QA
            run_phantom_qa( interface, project, subject, session, label, dicom_path )
        else:
            # If there was no matching scan in the session, print a warning
            warning = "WARNING: ADNI phantom session: {}, experiment: {}, subject: {} does not have \
                       a usable T1-weighted scan".format(session, experiment, subject)
            slog.info(hashlib.sha1('t1_qa_functions').hexdigest()[0:6], warning,
                          script='t1_qa_functions')
Code example #2
def check_xml_file( xml_file, project, session, label ):
    xml = open( xml_file, 'r' )

    warnings = []
    try:
        for line in xml:
            # Check fallbacks triggered
            if 'fallbackOrientationCNR' in line:
                warnings.append( "CNR spheres used for orientation - problem detecting 15mm spheres?" )
            if 'fallbackCentroidCNR' in line:
                match = re.match( '^.*distance="([0-9]+\.[0-9]+)".*$', line )
                if match:
                    distance = float( match.group(1) )
                    if distance > 3.0:
                        warnings.append( "CNR spheres used for centroid location (distance to SNR center = %f mm) - problem with the SNR sphere?" % distance )

            # Check number of landmarks
            match = re.match( '<landmarkList.*count="([0-9]+)">', line )
            if match:
                count = int( match.group(1) )
                if ( count < 165 ):
                    warnings.append( "Landmark count=%d" % (project,session,count) )

            # Check SNR
            match = re.match( '<snr>([0-9]*\.[0-9]*)</snr>', line )
            if match:
                snr = float( match.group(1) )
                if ( snr < 50 ):
                    warnings.append( "Low SNR=%f" % (project,session,snr) )

            # Check scale
            match = re.match( '<scale>([0-9]*\.[0-9]*)\s+([0-9]*\.[0-9]*)\s+([0-9]*\.[0-9]*)</scale>', line )
            if match:
                for idx in [0,1,2]:
                    scale = float( match.group( idx+1 ) )
                    if ( (scale < 0.99) or (scale > 1.01) ):
                        warnings.append( "Non-unit scale[%d]=%f" % (project,session,idx,scale) )

            # Check nonlinearity
            match = re.match( '<nonlinear>([0-9]*\.[0-9]*)\s+([0-9]*\.[0-9]*)\s+([0-9]*\.[0-9]*)</nonlinear>', line )
            if match:
                for idx in [0,1,2]:
                    nonlinear = float( match.group( idx+1 ) )
                    if nonlinear > 0.5:
                        warnings.append( "Nonlinearity[%d]=%f" % (idx,nonlinear) )
    except Exception as err_msg:
        error = 'Could not parse XML file for experiment.'
        slog.info(session, error,
                  err_msg=str(err_msg),
                  project_id=project)


    finally:
        xml.close()

    # Print warnings if there were any
    if len( warnings ) > 0:
        warning = " ".join(warnings)
        slog.info(label, warning,
                      session_id=session,
                      project=project,
                      script='t1_qa_functions')
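Each metric above is pulled out of the phantom XML with a line-anchored regular expression. A small, self-contained illustration of that pattern (the sample line is invented):

import re

line = '<snr>47.25</snr>'
match = re.match(r'<snr>([0-9]*\.[0-9]*)</snr>', line)
if match:
    snr = float(match.group(1))
    if snr < 50:
        print("Low SNR=%f" % snr)  # the string check_xml_file would append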
Code example #3
def check_for_stroop(xnat, xnat_eid_list, verbose=False):
    stroop_files = []
    if verbose:
        print "check_for_stroop: " + str(xnat_eid_list)

    for xnat_eid in xnat_eid_list:
        experiment = xnat.select.experiment(xnat_eid)

        # Get list of resource files that match the Stroop file name pattern
        for resource in experiment.resources().get():
            resource_files = xnat._get_json(
                '/data/experiments/%s/resources/%s/files?format=json' %
                (xnat_eid, resource))
            stroop_files += [
                (xnat_eid, resource, re.sub('.*\/files\/', '', file['URI']))
                for file in resource_files if re.match(
                    '^NCANDAStroopMtS_3cycles_7m53stask_.*.txt$', file['Name'])
            ]

    # No matching files - nothing to do
    if len(stroop_files) == 0:
        if verbose:
            print "check_for_stroop: no stroop"
        return (None, None, None)

    # Get first file from list, warn if more files
    if len(stroop_files) > 1:
        error = "ERROR: experiment have/has more than one Stroop .txt file. Please make sure there is exactly one per session."
        for xnat_eid in xnat_eid_list:
            slog.info(xnat_eid, error)
        return (None, None, None)
    if verbose:
        print "check_for_stroop: Stroop File: " + str(stroop_files[0])

    return stroop_files[0]
Code example #4
File: session.py Project: sibis-platform/sibis
    def connect_server(self,api_type, timeFlag=False):
        """
        Connect to servers, setting each property.
        """
        if api_type not in self.api :
            slog.info('session.connect_server','api type ' + api_type + ' not defined !',
                      api_types = str(list(self.api.keys())))
            return None

        if timeFlag : 
            slog.startTimer2() 

        if api_type == 'xnat' :
            connectionPtr = self.__connect_xnat__()
        elif api_type == 'xnat_http' :
            connectionPtr = self.__connect_xnat_http__()
        elif api_type == 'browser_penncnp' : 
            connectionPtr = self.__connect_penncnp__()
        elif api_type == 'svn_laptop' : 
            connectionPtr = self.__connect_svn_laptop__()
        elif api_type == 'redcap_mysql_db' : 
            connectionPtr = self.__connect_redcap_mysql__()
        else :
            connectionPtr = self.__connect_redcap_project__(api_type)
            
        if timeFlag : 
            slog.takeTimer2('connect_' + api_type) 

        return connectionPtr
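The if/elif chain in connect_server is a dispatch on api_type. A dictionary-based sketch of the same idea, with illustrative names that are not from the sibis codebase:

def connect(api_type, connectors):
    # look up the connector for this API; None signals an unknown type
    connector = connectors.get(api_type)
    if connector is None:
        print('api type %s not defined!' % api_type)
        return None
    return connector()

connectors = {'xnat': lambda: 'xnat-connection'}
print(connect('xnat', connectors))     # -> xnat-connection
print(connect('unknown', connectors))  # -> None, after the diagnostic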
Code example #6
File: session.py Project: sibis-platform/sibis
    def xnat_export_general(self,form, fields, conditions, time_label = None): 
        xnat_api = self.__get_xnat_api__()
        if not xnat_api: 
            return None

        if time_label:
            slog.startTimer2() 
        try:
            # Capture any console output from the query so it can be logged if the server connection drops
            with Capturing() as xnat_output: 
                xnat_data = list(xnat_api.search(form, fields).where(conditions).items())
        
        except Exception as err_msg:
            if xnat_output : 
                slog.info("session.xnat_export_general","ERROR: querying XNAT failed most likely due disconnect to server ({})".format(time.asctime()),
                          xnat_api_output = str(xnat_output),
                          form = str(form),
                          fields = str(fields),
                          conditions = str(conditions),
                          err_msg = str(err_msg))
            else :                
                slog.info("session.xnat_export_general","ERROR: querying XNAT failed at {}".format(time.asctime()),
                      form = str(form),
                      fields = str(fields),
                      conditions = str(conditions),
                      err_msg = str(err_msg))
            return None

        if time_label:
            slog.takeTimer2("xnat_export_" + time_label) 
        
        return xnat_data
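Capturing is assumed to be a small context manager that collects anything printed to stdout while the XNAT query runs; a minimal Python 3 sketch of such a helper:

import sys
from io import StringIO

class Capturing(list):
    def __enter__(self):
        # redirect stdout into an in-memory buffer
        self._stdout = sys.stdout
        sys.stdout = self._stringio = StringIO()
        return self

    def __exit__(self, *args):
        # keep the captured lines on the list itself, then restore stdout
        self.extend(self._stringio.getvalue().splitlines())
        sys.stdout = self._stdout

with Capturing() as output:
    print('hello from the query')
print(output)  # -> ['hello from the query']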
Code example #7
File: session.py Project: sibis-platform/sibis
 def __get_analysis_dir(self) :
     analysis_dir = self.__config_usr_data.get_value('analysis_dir')
     if analysis_dir is None :
         slog.info("session.__get_analysis_dir-" + hashlib.sha1(str(self.__config_usr_data.get_config_file()).encode('utf-8')).hexdigest()[0:6],"ERROR: 'analysis_dir' is not defined in config file !",
                   config_file = self.__config_usr_data.get_config_file())
         
     return  analysis_dir
Code example #8
File: session.py Project: sibis-platform/sibis
    def redcap_import_record_to_api(self, records, api_type, error_label, time_label = None): 
        if len(records) == 0 : 
            return None

        if api_type is None :
            api_type = self.__active_redcap_project__

        if api_type in self.api :
            red_api = self.api[api_type]
        else :
            return None
            
        if not red_api: 
            return None

        if time_label:
            slog.startTimer2() 
        try:
            import_response = red_api.import_records(records, overwrite='overwrite')

        except requests.exceptions.RequestException as e:
            error = 'session:redcap_import_record:Failed to import into REDCap' 
            err_list = ast.literal_eval(str(e))['error'].split('","')  # parsed REDCap error detail (currently unused)
            error_label  += '-' + hashlib.sha1(str(e).encode('utf-8')).hexdigest()[0:6] 

            slog.info(error_label, error,
                      requestError=str(e), 
                      red_api = api_type)
            return None

        if time_label:
            slog.takeTimer2("redcap_import_" + time_label, str(import_response)) 
        
        return import_response
Code example #9
File: session.py Project: sibis-platform/sibis
    def redcap_export_records_from_api(self, time_label, api_type, **selectStmt):
        if api_type is None :
            red_api =  self.__get_active_redcap_api__()
        else :
            if api_type in self.api :
                red_api = self.api[api_type]
            else :
                return None
            
        if not red_api: 
            return None

        if time_label:
            slog.startTimer2() 
        try:
            with warnings.catch_warnings(record=True) as w:
                redcap_data = red_api.export_records(**selectStmt)
            if len(w):
                w_str = str(w[-1])
                if "Specify dtype option on import or set low_memory=False" not in w_str :
                    slog.info("session.redcap_export_records","Warning: exporting data from REDCap caused warning at {}".format(time.asctime()),
                              warning_msg = w_str,
                              **selectStmt)

        except Exception as err_msg:
            slog.info("session.redcap_export_records","ERROR: exporting data from REDCap failed at {}".format(time.asctime()),
                      err_msg = str(err_msg),
                      **selectStmt)
            return None

        if time_label:
            slog.takeTimer2("redcap_export_" + time_label) 
        
        return redcap_data
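The record=True pattern above turns warnings into inspectable objects instead of console noise. A self-contained illustration (the warning text is made up to match the filter):

import warnings

def noisy():
    warnings.warn("Specify dtype option on import or set low_memory=False")
    return 42

with warnings.catch_warnings(record=True) as w:
    warnings.simplefilter("always")  # ensure the warning is recorded
    value = noisy()

if len(w) and "low_memory" not in str(w[-1].message):
    print("unexpected warning: %s" % w[-1].message)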
Code example #10
def copy_sanitize(redcap_visit_id, eprime_in, eprime_out):
    # List of "banned" ePrime log file keys - these are removed from the file while copying
    banned_keys = [
        'name', 'age', 'sessiondate', 'sessiontimeutc', 'subject', 'session',
        'clock.information'
    ]

    try:
        infile = codecs.open(eprime_in, 'r', 'utf-16')  # 'U' mode is deprecated and removed in modern Python
        try:
            outfile = open(eprime_out, 'w')

            for line in infile.readlines():
                match = re.match('^\s*([^:]+):.*$', line)
                if not (match and (match.group(1).lower() in banned_keys)):
                    outfile.write(line)

            outfile.close()
        except:
            slog.info(redcap_visit_id,
                      "ERROR: failed to open output file " + str(eprime_out))

        infile.close()

    except:
        slog.info(redcap_visit_id,
                  "ERROR: failed to open input file " + str(eprime_in))
Code example #11
    def __check_all_forms__(self):
        # Filter each form
        text_list = list()
        non_redcap_list = list()
        for export_name in list(self.__export_forms.keys()):
            (form_text_list, form_non_redcap_list)  = self.__check_form__(export_name)
            if form_text_list :
                text_list += form_text_list
            if form_non_redcap_list:
                non_redcap_list += form_non_redcap_list

        if text_list:
            slog.info('redcap_to_casesdir.__check_all_forms__.' + hashlib.sha1(str(text_list).encode()).hexdigest()[0:6], "ERROR: The txt file(s) in '" + str(self.__forms_dir) + "' list non-numeric redcap variable names!",
                      form_variable_list = str(text_list),
                      info = "Remove it from form file or modify definition in REDCap")

        if non_redcap_list :
            slog.info('redcap_to_casesdir.__check_all_forms__.' + hashlib.sha1(str(non_redcap_list).encode()).hexdigest()[0:6], "ERROR: The txt file(s) in '" + str(self.__forms_dir) + "' list variables that do not exist in redcap!",
                      form_variable_list = str(non_redcap_list),
                      info = "Remove it from the form file or modify the definition in REDCap")

        if non_redcap_list or text_list:
            return False

        return True
Code example #12
    def schedule_cluster_job(self,job_script, job_title,submit_log=None, job_log=None, verbose=False):
        qsub_cmd= '/opt/sge/bin/lx-amd64/qsub'
        if not os.path.exists(qsub_cmd):
            slog.info(job_title + "-" +hashlib.sha1(str(job_script).encode('utf-8')).hexdigest()[0:6],"ERROR: Failed to schedule job as '" + qsub_cmd + "' cannot be found!", job_script = str(job_script))
            return False

        sge_env = os.environ.copy()
        sge_env['SGE_ROOT'] = '/opt/sge'
        sge_param = self.__sibis_defs['cluster_parameters'].split(',')
        if job_log :
            sge_param += ['-o', job_log]
        else :
            sge_param += ['-o','/dev/null']

        qsub_args= [ qsub_cmd ] + sge_param + ['-N', '%s' % (job_title) ]
        #stderr=subprocess.STDOUT
        qsub_process = subprocess.Popen( qsub_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr= subprocess.PIPE, env=sge_env)
        (stdoutdata, stderrdata) = qsub_process.communicate(str(job_script).encode('utf-8'))

        cmd_str='echo "%s" | %s\n' % (job_script," ".join(qsub_args))
        if stderrdata :
            slog.info(job_title + "-" + hashlib.sha1(str(stderrdata).encode('utf-8')).hexdigest()[0:6],"ERROR: Failed to schedule job !", cmd = cmd_str, err_msg = str(stderrdata))
            return False

        if verbose:
            print(cmd_str)
            if stdoutdata:
                print(stdoutdata.decode('utf-8'))

        if submit_log:
            with open(submit_log, "a") as myfile:
               myfile.write(cmd_str)
               myfile.write(stdoutdata.decode('utf-8'))

        return True
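The submission above pipes the job script to the scheduler on stdin. A runnable sketch of the same plumbing, with cat standing in for qsub so it works without SGE installed:

import subprocess

proc = subprocess.Popen(['cat'], stdin=subprocess.PIPE,
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdoutdata, stderrdata) = proc.communicate('echo hello'.encode('utf-8'))
if stderrdata:
    print("scheduler reported: %s" % stderrdata.decode('utf-8'))
print(stdoutdata.decode('utf-8'))  # cat echoes the submitted script back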
Code example #13
def get_all_gradients(session_label, dti_stack, decimals=None):
    """
    Parses a list of dti sidecar files for subject.

    Returns
    =======
    list of np.array
    """
    gradients_per_frame = list()
    gradients_as_array = np.asanyarray([])

    error_xml_path_list=[] 
    error_msg=""

    for xml_path in dti_stack:
        xml_sidecar = read_xml_sidecar(xml_path)
        try:
            gradients_per_frame.append(get_gradient_table(xml_sidecar,
                                                     decimals=decimals))
            gradients_as_array = np.asanyarray(gradients_per_frame)
        except Exception as e:
            error_xml_path_list.append(xml_path)
            error_msg=str(e)

    if error_xml_path_list:
        slog.info(session_label,
                      'ERROR: Could not get gradient table from xml sidecar',
                      script='xnat/check_gradient_tables.py',
                      sidecar=str(xml_sidecar),
                      error_xml_path_list=str(error_xml_path_list),
                      error_msg=error_msg)
    return gradients_as_array
Code example #14
    def mail_user( self, uid, msglist ):
        # Get user full name and email address
        try:
            user_firstname = self._interface.manage.users.firstname( uid )
            user_lastname = self._interface.manage.users.lastname( uid )
            user_email = self._interface.manage.users.email( uid )
        except:
            slog.info('xnat_email',"ERROR: failed to get detailed information for user " + str(uid))
            return

        problem_list = [ '<ol>' ]
        for m in msglist:
            problem_list.append( '<li>%s</li>' % m )
        problem_list.append( '</ol>' )
            
        # Create the body of the message (a plain-text and an HTML version).
        html = '<html>\n\
<head></head>\n\
<body>\n\
<p>Dear %s %s:<br><br>\n\
We have detected the following problem(s) with data you uploaded to the <a href="%s">%s XNAT image repository</a>:</br>\n\
%s\n\
Please address these issues as soon as possible (direct links to the respective data items are provided above for your convenience).\n\
You may want to consult the <a href="http://www.nitrc.org/docman/view.php/672/1206/N-CANDA%%20MRI%%20and%%20Image%%20Management%%20Manual">NCANDA MRI and Image Management Manual</a> for instructions.<br></br>\n\
If you have further questions, feel free to contact the <a href="mailto:%s">NCANDA support</a>\n\
</p>\n\
</body>\n\
</html>' % (user_firstname, user_lastname, self._site_url, self._site_name, '\n'.join( problem_list ), self._xnat_admin_email)
    
        self.send( "NCANDA XNAT: problems with your uploaded data", self._xnat_admin_email, [ user_email ], html )
Code example #15
File: session.py Project: sibis-platform/sibis
    def svn_client(self):
        svn_laptop = self.api['svn_laptop']
        if not svn_laptop:
            slog.info('session.svn_client',"ERROR: svn api is not defined")
            return None

        client = svn_laptop['client']
        return client
Code example #16
File: session.py Project: sibis-platform/sibis
 def xnat_get_classes(self):
     xnat_api = self.__get_xnat_api__()
     if not xnat_api:
         error_msg = "XNAT API is not defined! Cannot retrieve classes!",
         slog.info(eid,error_msg,
                   function = "session.xnat_get_classes")
         return None
     return xnat_api.client.classes
Code example #17
    def translate_subject_and_event( self, subject_code, event_label):
        if event_label in list(self.__event_dict.keys()):
            (arm_code,visit_code) = self.__event_dict[event_label]
        else:
            slog.info(str(subject_code),"ERROR: Cannot determine study Arm and Visit from event %s" % event_label )
            return (None, None, None)

        pipeline_workdir_rel = os.path.join( subject_code, arm_code, visit_code )
        return (arm_code,visit_code,pipeline_workdir_rel)
Code example #18
    def create_datadict(self, export_name, datadict_dir):
         if export_name not in self.__export_forms.keys() : 
             slog.info('redcap_to_casesdir.create_datadict',"ERROR: could not create data dictionary for form " + export_name)
             return None 

         export_form_entry_list = self.__export_forms[export_name]
         size_entry_list = len(export_form_entry_list)
         export_form_list = [export_name] * size_entry_list
         return self.__create_datadicts_general__(datadict_dir, export_name, export_form_list,export_form_entry_list)
Code example #19
    def __load_ground_truth_gradients(self,gt_dti_path):
        dti_stack = glob.glob(gt_dti_path)
        if not len(dti_stack): 
            slog.info("__load_ground_truth_gradients","Error: Cannot find " + gt_dti_path)
            return []

        dti_stack.sort()
        # Parse the xml files to get scanner specific gradients per frame
        (gradients, errorFlag) = self.__get_all_gradients(gt_dti_path, "", "",dti_stack) 
        return np.array(gradients)
Code example #21
def delete_workdir(workdir, redcap_visit_id, verbose=False):
    if os.path.exists(workdir):
        if verbose:
            print "Deleting " + workdir
        try:
            shutil.rmtree(workdir)
        except Exception as err_msg:
            slog.info(redcap_visit_id,
                      "Error: Could not delete directory " + workdir,
                      err_msg=str(err_msg))
Code example #22
File: session.py Project: sibis-platform/sibis
    def __get_active_redcap_api__(self):
        project = self.__active_redcap_project__
        if not project :
            slog.info('__get_active_redcap_api__','Error: an active redcap project is currently not defined ! Most likely redcap api was not initialized correctly')  
            return None

        if not self.api[project]: 
            slog.info('__get_active_redcap_api__','Error: ' + str(project) + ' api not defined')  
            return None
            
        return self.api[project]
Code example #23
    def __get_scanner_mfg_and_model__(self, mri_scanner, expid):
        if mri_scanner == 'nan' :
            return ["",""]

        mri_scanner= mri_scanner.upper()
        for TYPE in list(self.__scanner_dict.keys()) :
            if TYPE in mri_scanner :
                return self.__scanner_dict[TYPE]

        slog.info(expid, "Error: Do not know scanner type", script='redcap_to_casesdir.py', mri_scanner = mri_scanner)
        return ["",""]
Code example #24
def run_phantom_qa( interface, project, subject, session, label, dicom_path ):
    # Make a temporary directory
    temp_dir = tempfile.mkdtemp()

    # Switch to temp directory
    original_wd = os.getcwd()
    os.chdir( temp_dir )

    # Create NIFTI file from the DICOM files
    nii_file = 't1.nii.gz'
    subprocess.call( 'cmtk dcm2image --tolerance 1e-3 -rO %s %s > /dev/null 2>&1' % ( nii_file, dicom_path ), shell=True )
    if not os.path.exists( nii_file ):
        error = "ERROR: NIFTI file was not created from DICOM files experiment"
        slog.info('{}/{}'.format(project,session),error,
                         session = session,
                         project = project,
                         nii_file = nii_file,
                         dicom_path = dicom_path)
        return

    # Upload NIFTI file
    try:
        file = interface.select.project( project ).subject( subject ).experiment( session ).resource('QA').file( nii_file )
        file.insert( nii_file, format='nifti_gz', tags='qa,adni,nifti_gz', content='ADNI Phantom QA File', overwrite=True )
    except:
        print "Something bad happened uploading file %s to Experiment %s/%s/%s" % (nii_file,project,session,label)

    # Run the PERL QA script and capture its output
    xml_file = 'phantom.xml'
    lbl_file = 'phantom.nii.gz'
    subprocess.call( 'cmtk detect_adni_phantom --tolerant --refine-xform --erode-snr 15 --write-labels %s %s %s' % ( lbl_file, nii_file, xml_file ), shell=True )
    if not os.path.exists( xml_file ) or not os.path.exists( lbl_file ):
        error = "ERROR: mandatory output file (either xml or label image) was not created from file %s, experiment %s/%s/%s" % ( nii_file,project,session,label )
        slog.info('{}/{}/{}'.format(project,session,label),error,
                       nii_file=nii_file,
                       project = project,
                       session = session,
                       label= label)
        return

    # Upload phantom files to XNAT
    for (fname,fmt) in [ (xml_file, 'xml'), (lbl_file, 'nifti_gz') ]:
        try:
            file = interface.select.project( project ).subject( subject ).experiment( session ).resource('QA').file( fname )
            file.insert( fname, format=fmt, tags='qa,adni,%s' % fmt, content='ADNI Phantom QA File', overwrite=True )
        except:
            print "Something bad happened uploading file %s to Experiment %s/%s" % (fname,project,session)

    # Read and evaluate phantom XML file
    check_xml_file( xml_file, project, session, label )

    # Clean up - remove temp directory
    os.chdir( original_wd )
    shutil.rmtree( temp_dir )
Code example #25
File: session.py Project: sibis-platform/sibis
    def get_penncnp_export_report(self,wait) :
        from selenium.webdriver.support import expected_conditions as EC
        from selenium.webdriver.common.by import By
        try: 
            report = wait.until(EC.element_to_be_clickable((By.NAME,'Export Report')))
        except Exception as e:
            slog.info('session.get_penncnp_export', "ERROR: Timeout, could not find Export Report",
                      info = "Try increasing wait time at WebDriverWait",
                      msg=str(e))
            return None

        return report
Code example #26
File: session.py Project: sibis-platform/sibis
    def __connect_svn_laptop__(self):
        # Check that config file is correctly defined 
        if "svn_laptop" not in list(self.__config_usr_data.keys()):
            slog.info("session.__connnect_svn_laptop__","ERROR: svn laptop user info not defined!")
            return None
        usr_data = self.__config_usr_data.get_category('svn_laptop')

        svnDir = self.get_laptop_svn_dir()
        client = SibisSvnClient(svnDir, username=usr_data['user'], password=usr_data['password'])
        self.api['svn_laptop'] = {"client": client, "user" : usr_data['user'], "password": usr_data['password']}         

        return client
Code example #27
    def __create_datadicts_general__(self,datadict_dir, datadict_base_file,export_forms_list, variable_list):
        redcap_datadict_columns = ["Variable / Field Name", "Form Name",
                                   "Section Header", "Field Type", "Field Label",
                                   "Choices, Calculations, OR Slider Labels",
                                   "Field Note",
                                   "Text Validation Type OR Show Slider Number",
                                   "Text Validation Min", "Text Validation Max",
                                   "Identifier?",
                                   "Branching Logic (Show field only if...)",
                                   "Required Field?", "Custom Alignment",
                                   "Question Number (surveys only)",
                                   "Matrix Group Name", "Matrix Ranking?"]

        # Insert standard set of data elements into each datadict.
        for i in range(3):
            elements = ['subject', 'arm', 'visit']
            export_forms_list.insert(i, export_forms_list[0])
            variable_list.insert(i, elements[i])

        if not os.path.exists(datadict_dir):
            os.makedirs(datadict_dir)

        ddict = pandas.DataFrame(index=variable_list,columns=redcap_datadict_columns)

        for name_of_form, var in zip(export_forms_list, variable_list):
            field_name = re.sub('___.*', '', var)
            ddict["Variable / Field Name"][var] = field_name
            ddict["Form Name"][var] = name_of_form

            # Check if var is in data dict ('FORM_complete' fields are NOT)
            if field_name in list(self.__metadata_dict.keys()):
                ddict["Field Type"][var] = self.__metadata_dict[field_name][0]
                # encode as utf-8, since other encodings can create problems
                # when writing the dictionary to file; it is just a text field,
                # so the change should not matter
                ddict["Field Label"][var] = self.__metadata_dict[field_name][2].encode('utf-8')
                ddict["Text Validation Type OR Show Slider Number"][var] = self.__metadata_dict[field_name][1]
                ddict["Text Validation Min"][var] = self.__metadata_dict[field_name][3]
                ddict["Text Validation Max"][var] = self.__metadata_dict[field_name][4]
                # encode as utf-8, since other encodings can create problems
                # when writing the dictionary to file; it is just a choice
                # field, so the change should not matter
                ddict["Choices, Calculations, OR Slider Labels"][var] = self.__metadata_dict[field_name][5].encode('utf-8')

        # Finally, write the data dictionary to a CSV file
        dicFileName = os.path.join(datadict_dir,datadict_base_file + '_datadict.csv')
        try:
            ddict.to_csv(dicFileName, index=False)
            return dicFileName
        except Exception as err_msg:
            slog.info('redcap_to_casesdir.__create_datadicts_general__',"ERROR: could not export dictionary " + dicFileName,
                      err_msg = str(err_msg))
            return None
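The ddict["column"][var] assignments above rely on pandas chained indexing, which works here but draws SettingWithCopy warnings on newer pandas; the warning-free equivalent uses .loc, sketched on a toy frame:

import pandas

ddict = pandas.DataFrame(index=['subject'], columns=['Form Name'])
ddict.loc['subject', 'Form Name'] = 'demographics'
print(ddict)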
Code example #28
    def __transform_dict_string_into_tuple__(self,dict_name):
        dict_str = self.__sibis_defs[dict_name]
        dict_keys = list(dict_str.keys())
        if not len(dict_keys):
            slog.info('redcap_to_casesdir.configure',"ERROR: Cannot find '" + dict_name + "'' in config file!")
            return None

        dict_tup = dict()
        for key in dict_keys:
            # turn string into tuple
            dict_tup[key] = make_tuple("(" + dict_str[key] +")")

        return dict_tup
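make_tuple is assumed to be the common alias for ast.literal_eval (from ast import literal_eval as make_tuple); it safely evaluates the parenthesized string from the config file into a tuple:

from ast import literal_eval as make_tuple

dict_str = {'GE': "'GE MEDICAL SYSTEMS', 'DISCOVERY MR750'"}  # hypothetical config entry
print(make_tuple("(" + dict_str['GE'] + ")"))  # -> ('GE MEDICAL SYSTEMS', 'DISCOVERY MR750')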
Code example #29
def import_stroop_to_redcap( xnat, stroop_eid, stroop_resource, stroop_file, \
                             redcap_key, verbose=False, no_upload=False, post_to_github=False, time_log_dir=None):
    if verbose:
        print "Importing Stroop data from file %s:%s" % ( stroop_eid, stroop_file )

    # Download Stroop file from XNAT into temporary directory
    experiment = xnat.select.experiment( stroop_eid )
    tempdir = tempfile.mkdtemp()
    stroop_file_path = experiment.resource( stroop_resource ).file( stroop_file ).get_copy( os.path.join( tempdir, stroop_file ) )

    # Convert downloaded Stroop file to CSV scores file
    added_files = []
    try:
        added_files = subprocess.check_output( [ os.path.join( import_bindir, "stroop2csv" ), '--mr-session', '--record', redcap_key[0], '--event', redcap_key[1], stroop_file_path, tempdir ] ).decode('utf-8')
    except:
        # conversion failed - added_files stays empty, so the error branch below fires
        pass

    if len( added_files ):
        if not no_upload:
            # Upload CSV file(s) (should only be one anyway)
            for file in added_files.split( '\n' ):
                if re.match( '.*\.csv$', file ):
                    if verbose:
                        print "Uploading ePrime Stroop scores",file
                    command_array = [ os.path.join( bindir, 'csv2redcap' ) ]
                    if post_to_github:
                        command_array += ["-p"]
                    if time_log_dir:
                        command_array += ["-t", time_log_dir]

                    command_array += [ file ]
                    subprocess.call( command_array )
            # Upload original ePrime file for future reference
            cmd_array = [ os.path.join( import_bindir, "eprime2redcap" ) ]
            if post_to_github: 
                cmd_array += ["-p"]

            cmd_array += ['--project', 'data_entry', '--record' , redcap_key[0], '--event', redcap_key[1], stroop_file_path, 'mri_stroop_log_file' ] 
                
            if verbose:
                print "Uploading ePrime Stroop file",stroop_file_path
                # print " ".join(cmd_array)

            subprocess.check_output(cmd_array)
    else:
        error = "ERROR: could not convert Stroop file %s:%s" % ( redcap_key[0], stroop_file )
        slog.info(redcap_key[0], error,
                      stroop_file = stroop_file)

    shutil.rmtree( tempdir )
Code example #30
def verify_image_count(session, session_label, scan, scantype, manufacturer,
                       images_created):
    if manufacturer in expected_images.keys():
        if scantype in expected_images[manufacturer].keys():
            imgrange = expected_images[manufacturer][scantype]
            if images_created not in imgrange:
                error = 'WARNING: Number of frames in archive differ from the standard'
                slog.info(session_label, error,
                              session=session,
                              scan_number=scan,
                              scan_type=scantype,
                              actual_frame_number=images_created,
                              manufacturer=manufacturer,
                              expected_frame_number=str(expected_images[manufacturer][scantype]))
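expected_images is assumed to be a nested dictionary mapping manufacturer and scan type to the acceptable range of frame counts. A hypothetical illustration of the lookup verify_image_count performs:

expected_images = {'SIEMENS': {'ncanda-t1spgr-v1': range(160, 193)}}

images_created = 120  # invented frame count
if images_created not in expected_images['SIEMENS']['ncanda-t1spgr-v1']:
    print('WARNING: Number of frames in archive differ from the standard')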
Code example #31
def export_and_queue( redcap_visit_id, xnat, session_data, redcap_key, pipeline_root_dir, xnat_dir, stroop=(None,None,None), run_pipeline_script=None, verbose=False, timerFlag = False ):
    (subject_label, event_label) = redcap_key
    # Put together pipeline work directory for this subject and visit
    subject_code = session_data['mri_xnat_sid']
    (arm_code,visit_code,pipeline_workdir_rel) = (None, None, None)
    try:
        (arm_code,visit_code,pipeline_workdir_rel) = translate_subject_and_event( subject_code, event_label )
    except:
        print "Event",event_label,"is not supported yet."

    if arm_code is not None:
        pipeline_workdir = os.path.join( pipeline_root_dir, pipeline_workdir_rel )

        if verbose:
            print(subject_label, '/', subject_code, '/', event_label, 'to', pipeline_workdir)

        new_files_created = export_to_workdir(redcap_visit_id,xnat, session_data, pipeline_workdir, redcap_key, xnat_dir, stroop=stroop, verbose=verbose, timerFlag= timerFlag)

        if new_files_created and run_pipeline_script:
            if verbose:
                print('Submitting script', run_pipeline_script, 'to process', pipeline_workdir)
            just_pipeline_script=os.path.basename(run_pipeline_script)

            sge_env = os.environ.copy()
            sge_env['SGE_ROOT'] = '/opt/sge' 
            qsub_args= [ '/opt/sge/bin/lx-amd64/qsub','-S','/bin/bash','-o','/dev/null','-j','y','-pe','smp','4','-l','h_vmem=32G','-N', '%s-%s-%s-Nightly' % (subject_code,visit_code,just_pipeline_script) ]
            qsub_exe = 'cd %s; %s %s' % ( pipeline_root_dir,run_pipeline_script,pipeline_workdir_rel)
            cmd_str='echo "%s" | %s\n' % (qsub_exe," ".join(qsub_args)) 
            qsub_command = subprocess.Popen( qsub_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=sge_env)
            (stdoutdata, stderrdata) = qsub_command.communicate(qsub_exe.encode('utf-8'))

            if verbose and (stdoutdata is not None):
                print(stdoutdata.decode('utf-8'))
            
            # keep a log to make sure it is working 
            if verbose:
               print(cmd_str)

            with open("/tmp/ncanda_test_nightly.txt", "a") as myfile:
               myfile.write(cmd_str)
               myfile.write(stdoutdata.decode('utf-8'))
            
        # It is very important to clear the PyXNAT cache, lest we run out of disk space and shut down all databases in the process
        try:
            xnat.cache.clear()
        except:
            slog.info("export_mr_sessions_pipeline","WARNING: clearing PyXNAT cache threw an exception - are you running multiple copies of this script?")

        return new_files_created
Code example #32
def compute_scores(instrument,input_data,demographics):
    try:
        scoresDF = functions[instrument](input_data, demographics)   
    except Exception as e:
        error = "ERROR: scoring failed for instrument", instrument
        slog.info(instrument + "-" + hashlib.sha1(str(e)).hexdigest()[0:6], error, exception=str(e))
        return pandas.DataFrame()

    # remove nan entries, as they corrupt the data ingest (REDCap cannot handle them correctly), as well as superfluous zeros;
    # a plain fillna only works for float values, hence the cast to object first
    if len(scoresDF) :
        # Only execute it not empty 
        return scoresDF.astype(object).fillna('')   
            
    return scoresDF
Code example #33
File: sibis_email.py Project: sibis-platform/sibis
    def send( self, subject, from_email, to_email, html, sendToAdminFlag=True ):
        if not self._smtp_server : 
            slog.info("sibis_email.send","ERROR: smtp server not defined - email will not be sent!")
            return False

        if not to_email :
            slog.info("sibis_email.send","ERROR: no email address for recipient defined - email will not be sent!")
            return False

        # Create message container - the correct MIME type is multipart/alternative.
        msg = MIMEMultipart('alternative')
        msg['Subject'] = subject
        msg['From'] = from_email
        msg['To'] = ', '.join( to_email )

        # Record the MIME types of both parts - text/plain and text/html.
        text = ''
        part1 = MIMEText(text, 'plain')
        part2 = MIMEText(html, 'html')
    
        # Attach parts into message container.
        # According to RFC 2046, the last part of a multipart message, in this case
        # the HTML message, is best and preferred.
        msg.attach(part1)
        msg.attach(part2)
    
        # Send the message via local SMTP server.
        try : 
            s = smtplib.SMTP( self._smtp_server )
        except Exception as err_msg:
            slog.info("sibis_email.send","ERROR: failed to connect to SMTP server at {} ".format(time.asctime()),
                    err_msg = str(err_msg),
                    smtp_server = self._smtp_server) 
            return False
 
        try : 
            # sendmail function takes 3 arguments: sender's address, recipient's address
            # and message to send - here it is sent as one string.
            s.sendmail( from_email, to_email, msg.as_string() )
        
            # Send email also to sibis admin if defined
            # avoid double-sending when the admin is already a recipient (to_email is a list)
            if sendToAdminFlag and self._sibis_admin_email and self._sibis_admin_email not in to_email :
                s.sendmail( from_email, self._sibis_admin_email, msg.as_string() )

        except Exception as err_msg:
            slog.info("sibis_email.send","ERROR: failed to send email at {} ".format(time.asctime()),
                      err_msg = str(err_msg),
                      email_from = from_email, 
                      email_to = to_email,
                      sibis_admin_email = self._sibis_admin_email, 
                      email_msg = msg.as_string(),
                      smtp_server = self._smtp_server)
            s.quit()
            return False


        s.quit()
        return True
Code example #34
def run_subject_qa(interface, project, subject, session, scan_number,
                   dicom_path):
    # Make a temporary directory
    temp_dir = tempfile.mkdtemp()

    # Make XML file as wrapper for the DICOM files
    bxh_file = '%s/dicoms.bxh' % temp_dir
    if (not sutils.dicom2bxh(dicom_path,
                             bxh_file)) or (not os.path.exists(bxh_file)):
        error = "ERROR: BXH file was not created from DICOM files"
        slog.info(session, error, bxh_file=bxh_file, dicom_path=dicom_path)
        return

    # Run the PERL QA script and capture its output
    html_dir = '%s/html' % temp_dir
    script_output = os.popen('fmriqa_generate.pl %s %s 2> /dev/null' %
                             (bxh_file, html_dir)).readlines()
    if not os.path.exists(html_dir):
        error = "ERROR: html directory was not created from BXH file"
        slog.info(html_dir, error, bxh_file=bxh_file)
        return

    # Convert QA results from html to pdf
    summary_file_path = '%s/QA-Summary.pdf' % temp_dir
    if sutils.htmldoc(
            '--webpage --browserwidth 1024 --no-title --no-toc --compression=9 --outfile %s %s/index.html >& /dev/null'
            %
        (summary_file_path, html_dir)) and os.path.exists(summary_file_path):
        # Upload QA files to XNAT as file resources
        try:
            qa_file = interface.select.project(project).subject(
                subject).experiment(session).resource('QA').file(
                    'QA-%s-Summary.pdf' % scan_number)
            qa_file.insert(summary_file_path,
                           format='pdf',
                           tags='qa,fbirn,pdf',
                           content='QA Analysis Summary',
                           overwrite=True)
        except:
            print("Something bad happened uploading QA summary file to Experiment %s" % session)
    else:
        print("Unable to create PDF QA summary file %s" % summary_file_path)

    # Clean up - remove temp directory
    shutil.rmtree(temp_dir)
Code example #35
def process_phantom_session(interface,
                            project,
                            subject,
                            session,
                            label,
                            xnat_dir,
                            force_updates=False):
    # Get the experiment object
    experiment = interface.select.experiment(session)
    # First, see if the QA files are already there
    files = experiment.resources().files().get()
    if force_updates or not (('t1.nii.gz' in files) and
                             ('phantom.xml' in files) and
                             ('phantom.nii.gz' in files)):
        dicom_path = ''

        # Get list of all scans in the session
        scans = experiment.scans().get()
        for scan in scans:
            # Check scans with the proper type ('quality' is retrieved here but not otherwise used)
            [scan_type,
             quality] = experiment.scan(scan).attrs.mget(['type', 'quality'])
            if ('mprage' in scan_type) or ('t1spgr' in scan_type):
                # Extract the DICOM file directory from the XML representation
                match = re.match(
                    '.*(' + xnat_dir + '/.*)scan_.*_catalog.xml.*',
                    experiment.scan(scan).get(), re.DOTALL)
                if match:
                    dicom_path = match.group(1)

        if dicom_path:
            # If we found a matching scan, run the QA
            run_phantom_qa(interface, project, subject, session, label,
                           dicom_path)
        else:
            # If there was no matching scan in the session, print a warning
            warning = "WARNING: ADNI phantom session: {}, experiment: {}, subject: {} does not have \
                       a usable T1-weighted scan".format(
                session, experiment, subject)
            slog.info(hashlib.sha1('t1_qa_functions').hexdigest()[0:6],
                      warning,
                      script='t1_qa_functions')
Code example #36
def export_to_nifti(experiment,
                    subject,
                    session,
                    session_label,
                    scan,
                    scantype,
                    xnat_dir,
                    verbose=False):
    if verbose:
        print "Starting export of nifti files for ", subject, session, session_label, scan, scantype, xnat_dir

    error_msg = []

    # logfile_resource = '%s_%s/dcm2image.log' % (scan, scantype)
    # xnat_log = interface.select.project(project).subject(subject).experiment(session).resource('nifti').file(logfile_resource)
    # To test gradient directions without having to delete nifti files in xnat just uncomment this line
    # and comment out the proceeding one
    # if not xnat_log.exists() or 'dti60b1000' in scantype:
    # if not xnat_log.exists():
    match = re.match('.*(' + xnat_dir + '/.*)scan_.*_catalog.xml.*',
                     experiment.scan(scan).get(), re.DOTALL)
    if not match:
        error_msg.append(
            "XNAT scan info fails to contain catalog.xml location! SID:"
            "%s EID:%s Label: %s SCAN: %s" %
            (subject, session, session_label, scan))
        return error_msg, 0

    dicom_path = match.group(1)
    if not os.path.exists(dicom_path):
        # try the alternate storage location
        dicom_path = re.sub('storage/XNAT', 'ncanda-xnat', dicom_path)

        if not os.path.exists(dicom_path):
            error_msg.append(
                "Path %s does not exist - export_to_nifti failed for SID:%s EID:%s Label: %s!"
                % (dicom_path, subject, session, session_label))
            return error_msg, 0

    nifti_log_search = glob.glob(
        re.sub('/DICOM/', '_%s/dcm2image.log' % (scantype),
               re.sub('/SCANS/', '/RESOURCES/nifti/', dicom_path)))

    # if nifti files were created make sure that they are newer than dicom file otherwise recreate them
    if nifti_log_search != []:
        # we changed code here from *.* to avoid getting errors of not finding dicom files for sessions such as
        # NCANDA_E01386 / B-00454-M-9-20140214 - scan 1 /ncanda-localizer-v1
        dicom_file_pattern = dicom_path + '*'
        dicom_file_list = glob.glob(dicom_file_pattern)
        # omit the xml file so that only dicom files are left - the xml file is
        # updated every time somebody changes something in the gui for that
        # session, which has no bearing on the dicom data
        dicom_file_list = [x for x in dicom_file_list if '.xml' not in x]

        # if dicom file is not there something odd is going on
        if dicom_file_list == []:
            slog.info(session_label,
                      "Error: could not find dicom files ",
                      session=session,
                      subject=subject,
                      scan_number=scan,
                      scan_type=scantype,
                      dicom_log_file=dicom_file_pattern)
            return error_msg, 0

        # check time stamp - if newer than there is nothing to do
        nifti_time = time.strftime(
            '%Y-%m-%d %H:%M:%S',
            time.gmtime(os.path.getmtime(nifti_log_search[0])))
        dicom_time = time.strftime(
            '%Y-%m-%d %H:%M:%S',
            time.gmtime(os.path.getmtime(dicom_file_list[0])))

        if nifti_time > dicom_time:
            if verbose:
                print("... nothing to do as nifti files are up to date")
            return error_msg, 0

        slog.info(
            session_label + "_" + scan,
            "Warning: nifti seem outdated (dicom > nifti time) so they are recreated!",
            session=session,
            subject=subject,
            check_nifti=str(nifti_time) + " " + str(nifti_log_search[0]),
            check_dicom=str(dicom_time) + " " + str(dicom_file_list[0]),
            info=
            "If the issue reappears then simply open up the session in  XNAT, go to 'Manage Files', delete the directory 'Resources/nifti/"
            + scantype +
            "'. If the pop-up window does not say that it is deleting 'dicom.log' then most likely you will have to manually delete the directory from the hard drive. To find out, simply run the script again. If the error message still reappears then repeat the previous procedure and afterwards delete the directory that the log file in check_nifti is located!"
        )

    #
    # Turn dicoms into niftis
    #
    temp_dir = tempfile.mkdtemp()

    args = '--tolerance 1e-3 --write-single-slices --no-progress -rvxO %s/%s_%s/image%%n.nii %s 2>&1' % (
        temp_dir, scan, scantype, dicom_path)
    (ecode, sout, eout) = sutils.dcm2image(args)
    if ecode:
        error_msg.append(
            "The following command failed: %s" %
            (sutils.dcm2image_cmd + args + " ! msg : " + str(eout)))
        # Clean up - remove temp directory
        shutil.rmtree(temp_dir)
        return error_msg, 0

    # Needed as we check for dcm2image.log when rerunning the case - should come up with better mechanism
    log_filename = '%s/%s_%s/dcm2image.log' % (temp_dir, scan, scantype)
    output_file = open(log_filename, 'w')
    try:
        output_file.writelines(sout)
    finally:
        output_file.close()

    # Zipping directory with nifti files
    zip_path = '%s/%s_%s.zip' % (temp_dir, scan, scantype)
    try:
        fzip = zipfile.ZipFile(zip_path, 'w')
        for src in sorted(glob.glob('%s/*/*' % temp_dir)):
            fzip.write(src, re.sub('%s/' % temp_dir, '', src))
        fzip.close()
    except Exception as e:
        error_msg.append("Could not zip %s - err_msg: %s" % (zip_path, str(e)))
        # Clean up - remove temp directory
        shutil.rmtree(temp_dir)
        return error_msg, 0

    if not os.path.exists(zip_path):
        error_msg.append("Could not zip %s - does not exists !" % (zip_path))
        # Clean up - remove temp directory
        shutil.rmtree(temp_dir)
        return error_msg, 0

    try:
        experiment.resource('nifti').put_zip(zip_path,
                                             overwrite=True,
                                             extract=True)
    except:
        error_msg.append("Unable to upload ZIP file %s to experiment %s" %
                         (zip_path, session))
        # Clean up - remove temp directory
        shutil.rmtree(temp_dir)
        return error_msg, 0

    # Verify image counts for various series
    # images_created = len(glob.glob('%s/*/*.nii.gz' % temp_dir))
    shutil.rmtree(temp_dir)
    return error_msg, 1
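An aside on the freshness check above: because both mtimes are formatted with the same fixed-width pattern, the string comparison sorts chronologically, but the raw timestamps can also be compared directly. A tiny self-contained demonstration:

import os, tempfile, time

with tempfile.NamedTemporaryFile(delete=False) as old_file:
    old_path = old_file.name
time.sleep(0.05)  # ensure a measurable mtime gap
with tempfile.NamedTemporaryFile(delete=False) as new_file:
    new_path = new_file.name

print(os.path.getmtime(new_path) > os.path.getmtime(old_path))  # True
os.remove(old_path)
os.remove(new_path)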
Code example #37
def import_stroop_to_redcap( xnat, stroop_eid, stroop_resource, stroop_file, \
                             redcap_key, verbose=False, no_upload=False, post_to_github=False, time_log_dir=None):
    if verbose:
        print "Importing Stroop data from file %s:%s" % (stroop_eid,
                                                         stroop_file)

    # Download Stroop file from XNAT into temporary directory
    experiment = xnat.select.experiment(stroop_eid)
    tempdir = tempfile.mkdtemp()
    stroop_file_path = experiment.resource(stroop_resource).file(
        stroop_file).get_copy(os.path.join(tempdir, stroop_file))

    # Convert downloaded Stroop file to CSV scores file
    cmd = str(os.path.join(
        import_bindir, "stroop2csv")) + ' --mr-session --record ' + redcap_key[
            0] + ' --event ' + redcap_key[1] + " " + str(
                stroop_file_path) + ' ' + str(tempdir)
    (ecode, sout, serr) = sutils.call_shell_program(cmd)
    if ecode:
        slog.info(str(redcap_key[0]) + "-" + str(redcap_key[1]),
                  "Error: import_stroop_to_redcap: failed to run stroop2csv!",
                  cmd=str(cmd),
                  stderr=str(serr),
                  stdout=str(sout))

    added_files = sout

    if len(added_files):
        if not no_upload:
            # Upload CSV file(s) (should only be one anyway)
            for file in added_files.split('\n'):
                if re.match('.*\.csv$', file):
                    if verbose:
                        print "Uploading ePrime Stroop scores", file
                    cmd = str(os.path.join(bindir, 'csv2redcap'))
                    if post_to_github:
                        cmd += " -p"
                    if time_log_dir:
                        cmd += " -t " + str(time_log_dir)

                    cmd += " " + str(file)
                    (ecode, sout, serr) = sutils.call_shell_program(cmd)
                    if ecode:
                        slog.info(
                            str(redcap_key[0]) + "-" + str(redcap_key[1]),
                            "Error: import_stroop_to_redcap: failed to run csv2redcap!",
                            cmd=str(cmd),
                            stderr=str(serr),
                            stdout=str(sout))

            # Upload original ePrime file for future reference
            cmd = str(os.path.join(import_bindir, "eprime2redcap"))
            if post_to_github:
                cmd += " -p"

            cmd += ' --project data_entry --record ' + str(
                redcap_key[0]) + ' --event ' + str(redcap_key[1]) + ' ' + str(
                    stroop_file_path) + ' mri_stroop_log_file'

            if verbose:
                print "Uploading ePrime Stroop file", stroop_file_path
                # print " ".join(cmd_array)

            (ecode, sout, serr) = sutils.call_shell_program(cmd)
            if ecode:
                slog.info(
                    str(redcap_key[0]) + "-" + str(redcap_key[1]),
                    "Error: import_stroop_to_redcap: failed to run eprime2redcap!",
                    cmd=str(cmd),
                    stderr=str(serr),
                    stdout=str(sout))

    else:
        error = "ERROR: could not convert Stroop file %s:%s" % (redcap_key[0],
                                                                stroop_file)
        slog.info(str(redcap_key[0]) + '-' + str(redcap_key[1]),
                  error,
                  stroop_file=stroop_file)

    shutil.rmtree(tempdir)
Code example #38
def run_phantom_qa(interface, project, subject, session, label, dicom_path):
    # Make a temporary directory
    temp_dir = tempfile.mkdtemp()

    # Switch to temp directory
    original_wd = os.getcwd()
    os.chdir(temp_dir)

    # Make XML file as wrapper for the DICOM files
    bxh_file = '%s.bxh' % session[7:]
    if (not sutils.dicom2bxh(dicom_path,
                             bxh_file)) or (not os.path.exists(bxh_file)):
        error = "ERROR: BXH file was not created from DICOM files"
        slog.info(session, error, bxh_file=bxh_file, dicom_path=dicom_path)
        return

    # Run the PERL QA script and capture its output
    html_dir = './html'
    script_output = os.popen('fmriqa_phantomqa.pl %s %s 2> /dev/null' %
                             (bxh_file, html_dir)).readlines()
    if not os.path.exists('%s/index.html' % html_dir):
        error = "ERROR: html file %s/index.html was not created from BXH file %s (session %s/%s)" % (
            html_dir, bxh_file, session, label)
        qa_script_output = '\n'.join(script_output)
        slog.info('session {}/{}'.format(session, label),
                  error,
                  html_dir=html_dir,
                  bxh_file=bxh_file,
                  qa_script_output=qa_script_output)
        return

    # Copy the entire output to a text file for upload to XNAT
    details_file_path = '%s/QA-Details.txt' % temp_dir
    script_output_file = open(details_file_path, 'w')
    script_output_file.writelines(script_output)
    script_output_file.close()

    # Upload detail file to XNAT
    try:
        qa_file = interface.select.project(project).subject(
            subject).experiment(session).resource('QA').file('QA-Details.txt')
        qa_file.insert(details_file_path,
                       format='text',
                       tags='qa,fbirn,txt',
                       content='QA Analysis Details',
                       overwrite=True)
    except:
        print("Something bad happened uploading QA details file to Experiment %s/%s/%s" % (
            project, session, label))

    # Step through QA output, line by line, and check for measures for which we have thresholds defined.
    for line in script_output:
        # Parse current line into key=value pairs
        match = re.match('^#([A-Za-z]+)=(.*)$', line)

        # Is this key in the list of thresholds?
        if match and (match.group(1) in QA_thresholds.keys()):
            value = float(match.group(2))
            metric = QA_thresholds[match.group(1)]
            if metric.exceeds(value):
                error = 'QA metric fails to meet threshold.'
                slog.info(session,
                          error,
                          project_id=project,
                          experiment_site_id=label,
                          metric_name=metric._name,
                          metric_key=match.group(1),
                          metric_value=value,
                          metric_threshold=metric._thresh)

    # Convert QA results from html to pdf
    summary_file_path = '%s/QA-Summary.pdf' % temp_dir
    if sutils.htmldoc(
            '--quiet --webpage --no-title --no-toc --compression=9 --outfile %s %s/index.html'
            %
        (summary_file_path, html_dir)) and os.path.exists(summary_file_path):
        # Upload QA files to XNAT as file resources
        try:
            qa_file = interface.select.project(project).subject(
                subject).experiment(session).resource('QA').file(
                    'QA-Summary.pdf')
            qa_file.insert(summary_file_path,
                           format='pdf',
                           tags='qa,fbirn,pdf',
                           content='QA Analysis Summary',
                           overwrite=True)
        except:
            print "Something bad happened uploading QA summary file to Experiment %s/%s" % (
                session, label)
    else:
        print "Unable to create PDF QA summary file %s from DICOMs in %s (session %s/%s)" % (
            summary_file_path, dicom_path, session, label)

    # Clean up - remove temp directory
    os.chdir(original_wd)
    shutil.rmtree(temp_dir)
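
run_phantom_qa above assumes a module-level QA_thresholds dictionary whose values expose exceeds(), _name, and _thresh; its definition is not part of the snippet. A minimal sketch of a compatible structure (metric names and cut-offs below are illustrative assumptions, not the project's actual thresholds):

class QAMetric(object):
    # One named QA measure; exceeds() is True when the value fails QA.
    def __init__(self, name, thresh, smaller_is_worse=True):
        self._name = name
        self._thresh = thresh
        self._smaller_is_worse = smaller_is_worse

    def exceeds(self, value):
        if self._smaller_is_worse:
            return value < self._thresh
        return value > self._thresh

# Keys must match the '#key=value' lines printed by fmriqa_phantomqa.pl;
# the entries here are hypothetical examples.
QA_thresholds = {
    'snr': QAMetric('Signal-to-Noise Ratio', 50.0),
    'percentFluc': QAMetric('Percent Fluctuation', 0.2,
                            smaller_is_worse=False),
}
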
Code example #39
def gzip_physio(physio_file_path):
    (ecode, sout, eout) = sutils.gzip('-9f ' + str(physio_file_path))
    if ecode:
        error = "ERROR: unable to compress physio file"
        slog.info(physio_file_path, error, err_msg=str(eout))
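
sutils.gzip is presumably a thin wrapper around the system gzip binary. A sketch under that assumption, reusing the call_shell_program sketch from earlier:

def gzip(args):
    # Hypothetical: delegate to the system gzip and return
    # (exit_code, stdout, stderr), matching the tuple unpacked above.
    return call_shell_program('gzip ' + args)
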
Code example #40
def run_phantom_qa(interface, project, subject, session, label, dicom_path):
    # Make a temporary directory
    temp_dir = tempfile.mkdtemp()

    # Switch to temp directory
    original_wd = os.getcwd()
    os.chdir(temp_dir)

    # Create NIFTI file from the DICOM files
    nii_file = 't1.nii.gz'
    (ecode, sout, eout) = sutils.dcm2image(
        '--tolerance 1e-3 -rO %s %s >& /dev/null' % (nii_file, dicom_path))
    if ecode or (not os.path.exists(nii_file)):
        error = "ERROR: NIFTI file was not created from DICOM files experiment"
        slog.info('{}/{}'.format(project, session),
                  error,
                  session=session,
                  project=project,
                  nii_file=nii_file,
                  err_msg=str(eout),
                  dicom_path=dicom_path)
        return

    # Upload NIFTI file
    try:
        file = interface.select.project(project).subject(subject).experiment(
            session).resource('QA').file(nii_file)
        file.insert(nii_file,
                    format='nifti_gz',
                    tags='qa,adni,nifti_gz',
                    content='ADNI Phantom QA File',
                    overwrite=True)
    except:
        print "Something bad happened uploading file %s to Experiment %s/%s/%s" % (
            nii_file, project, session, label)

    # Run the PERL QA script and capture its output
    xml_file = 'phantom.xml'
    lbl_file = 'phantom.nii.gz'
    if (sutils.detect_adni_phantom(
            '--tolerant --refine-xform --erode-snr 15 --write-labels %s %s %s'
            %
        (lbl_file, nii_file, xml_file))) or (not os.path.exists(xml_file)) or (
            not os.path.exists(lbl_file)):
        error = "ERROR: mandatory output file (either xml or label image) was not created from file %s, experiment %s/%s/%s" % (
            nii_file, project, session, label)
        slog.info('{}/{}/{}'.format(project, session, label),
                  error,
                  nii_file=nii_file,
                  project=project,
                  session=session,
                  label=label)
        return

    # Upload phantom files to XNAT
    for (fname, fmt) in [(xml_file, 'xml'), (lbl_file, 'nifti_gz')]:
        try:
            file = interface.select.project(project).subject(
                subject).experiment(session).resource('QA').file(fname)
            file.insert(fname,
                        format=fmt,
                        tags='qa,adni,%s' % fmt,
                        content='ADNI Phantom QA File',
                        overwrite=True)
        except:
            print "Something bad happened uploading file %s to Experiment %s/%s" % (
                fname, project, session)

    # Read and evaluate phantom XML file
    check_xml_file(xml_file, project, session, label)

    # Clean up - remove temp directory
    os.chdir(original_wd)
    shutil.rmtree(temp_dir)
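
A hedged usage sketch for this ADNI variant of run_phantom_qa, assuming a pyxnat Interface; every identifier and path below is a placeholder:

import pyxnat

interface = pyxnat.Interface(server='https://xnat.example.org',
                             user='username', password='password')
run_phantom_qa(interface, 'example_project', 'EXAMPLE_SUBJ',
               'EXAMPLE_E00001', 'example_label',
               '/fs/storage/XNAT/archive/example/SCANS/1/DICOM/')
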
Code example #41
def check_xml_file(xml_file, project, session, label):
    xml = open(xml_file, 'r')

    warnings = []
    try:
        for line in xml:
            # Check fallbacks triggered
            if 'fallbackOrientationCNR' in line:
                warnings.append(
                    "CNR spheres used for orientation - problem detecting 15mm spheres?"
                )
            if 'fallbackCentroidCNR' in line:
                match = re.match('^.*distance="([0-9]+\.[0-9]+)".*$', line)
                distance = float(match.group(1))
                if distance > 3.0:
                    warnings.append(
                        "CNR spheres used for centroid location (distance to SNR center = %f mm) - problem with the SNR sphere?"
                        % distance)

            # Check number of landmarks
            match = re.match('<landmarkList.*count="([0-9]+)">', line)
            if match:
                count = int(match.group(1))
                if (count < 165):
                    warnings.append("Landmark count=%d" %
                                    (project, session, count))

            # Check SNR
            match = re.match('<snr>([0-9]*\.[0-9]*)</snr>', line)
            if match:
                snr = float(match.group(1))
                if (snr < 50):
                    warnings.append("Low SNR=%f" % (project, session, snr))

            # Check scale
            match = re.match(
                '<scale>([0-9]*\.[0-9]*)\s+([0-9]*\.[0-9]*)\s+([0-9]*\.[0-9]*)</scale>',
                line)
            if match:
                for idx in [0, 1, 2]:
                    scale = float(match.group(idx + 1))
                    if ((scale < 0.99) or (scale > 1.01)):
                        warnings.append("Non-unit scale[%d]=%f" %
                                        (project, session, idx, scale))

            # Check nonlinearity
            match = re.match(
                '<nonlinear>([0-9]*\.[0-9]*)\s+([0-9]*\.[0-9]*)\s+([0-9]*\.[0-9]*)</nonlinear>',
                line)
            if match:
                for idx in [0, 1, 2]:
                    nonlinear = float(match.group(idx + 1))
                    if ((nonlinear > 0.5)):
                        warnings.append("Nonlinearity[%d]=%f" %
                                        (project, session, idx, nonlinear))
    except:
        error = 'Could not read or parse XML file for experiment.'
        slog.info(session, error, project_id=project)

    finally:
        xml.close()

    # Print warnings if there were any
    if len(warnings) > 0:
        warning = " ".join(warnings)
        slog.info(label,
                  warning,
                  session_id=session,
                  project=project,
                  script='t1_qa_functions')
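
For reference, the regular expressions in check_xml_file expect report lines of roughly this shape (the sample content is hypothetical, not actual detect_adni_phantom output):

import re

sample_lines = ['<snr>48.2</snr>',
                '<scale>1.003 0.998 1.012</scale>',
                '<nonlinear>0.12 0.64 0.09</nonlinear>']
for line in sample_lines:
    match = re.match(r'<scale>([0-9]*\.[0-9]*)\s+([0-9]*\.[0-9]*)'
                     r'\s+([0-9]*\.[0-9]*)</scale>', line)
    if match:
        print([float(match.group(idx + 1)) for idx in [0, 1, 2]])
        # prints [1.003, 0.998, 1.012]
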
Code example #42
def export_series(redcap_visit_id,
                  xnat,
                  redcap_key,
                  session_and_scan_list,
                  to_directory,
                  filename_pattern,
                  xnat_dir,
                  verbose=False,
                  timer_label=None):
    (subject_label, event_label) = redcap_key
    # List should have at least one "SESSION/SCAN" entry
    if '/' not in session_and_scan_list:
        return False

    # Put together target directory and filename pattern
    to_path_pattern = os.path.join(to_directory, filename_pattern)

    # If filename is a pattern with substitution, check whether entire directory exists
    if '%' in filename_pattern:
        pipeline_file_pattern = re.sub(
            '%T%N', '*', re.sub('%n', '*', to_path_pattern)) + ".xml"
        eid_file_path = os.path.join(to_directory, 'eid')
    else:
        pipeline_file_pattern = to_path_pattern + ".xml"
        eid_file_path = re.sub('\.[^/]*', '.eid', to_path_pattern)

    # Check if EID is still the same
    eid_unchanged_flag = check_eid_file(eid_file_path, session_and_scan_list)

    # Check if files are already created
    pipeline_file_list = glob.glob(pipeline_file_pattern)

    dicom_path_list = []
    CreateDicomFlag = False
    for session_and_scan in session_and_scan_list.split(' '):
        [session, scan] = session_and_scan.split('/')
        match = re.match('.*(' + xnat_dir + '/.*)scan_.*_catalog.xml.*',
                         xnat.select.experiment(session).scan(scan).get(),
                         re.DOTALL)
        if match:
            dicom_path = match.group(1)
            if not os.path.exists(dicom_path):
                dicom_path = re.sub('storage/XNAT', 'ncanda-xnat', dicom_path)
            dicom_path_list.append(dicom_path)

            # If the pipeline has already created the file, compare its date
            # against the XNAT file. This assumes check_new_sessions always runs
            # before this script; otherwise the pipeline is run twice! If the
            # file was never created, or the EID changed, create the DICOMs.
            if eid_unchanged_flag and len(pipeline_file_list):
                # Look for xnat file
                xnat_file_pattern = re.sub(
                    '/DICOM/', '_*/image*.nii.xml',
                    re.sub('/SCANS/', '/RESOURCES/nifti/', dicom_path))
                xnat_file_search = glob.glob(xnat_file_pattern)

                # If date of xnat file is newer than in pipeline then update
                if xnat_file_search != [] and not check_file_date(
                        pipeline_file_list[0], xnat_file_search[0]):
                    CreateDicomFlag = True
            else:
                CreateDicomFlag = True

    if not CreateDicomFlag:
        return False

    if len(pipeline_file_list):
        [session, scan] = session_and_scan_list.split(' ')[0].split('/')
        slog.info(redcap_visit_id + "_" + scan,
                  "Warning: existing MR images of the pipeline are updated",
                  file=to_path_pattern,
                  experiment_xnat_id=session,
                  session_scan_list=session_and_scan_list)

        # Remove existing files of that type to make sure we start with a clean slate
        for xml_file in pipeline_file_list:
            os.remove(xml_file)
            nii_file = re.sub(r'\.nii\.xml$', '.nii', xml_file)
            if os.path.exists(nii_file):
                os.remove(nii_file)
            nii_file += ".gz"
            if os.path.exists(nii_file):
                os.remove(nii_file)

    if len(dicom_path_list):
        temp_dir = tempfile.mkdtemp()
        # to_path_pattern = os.path.join( to_directory, filename_pattern )
        tmp_path_pattern = os.path.join(temp_dir, filename_pattern)
        if timer_label:
            slog.startTimer2()

        args = '--tolerance 1e-3 --write-single-slices --no-progress -rxO %s %s 2>&1' % (
            tmp_path_pattern, ' '.join(dicom_path_list))
        (ecode, sout, eout) = sutils.dcm2image(args)
        if ecode:
            slog.info(redcap_visit_id + "_" + scan,
                      "Error: Unable to create dicom file",
                      experiment_site_id=session,
                      cmd=sutils.dcm2image_cmd + " " + args,
                      err_msg=str(eout))
            shutil.rmtree(temp_dir)
            return False

        if timer_label:
            slog.takeTimer2('convert_dicom_to_nifti', timer_label)

        try:
            if not os.path.exists(to_directory):
                os.makedirs(to_directory)

            with open(eid_file_path, 'w') as eid_file:
                eid_file.write(session_and_scan_list)
        except:
            error = "ERROR: unable to write EID file"
            slog.info(redcap_visit_id + "_" + scan,
                      error,
                      experiment_site_id=session,
                      eid_file_path=eid_file_path)

        try:
            for f in os.listdir(temp_dir):
                shutil.move(os.path.join(temp_dir, f), to_directory)

        except Exception as err_msg:
            error = "ERROR: unable to move files"
            slog.info(redcap_visit_id + "_" + scan,
                      error,
                      experiment_site_id=session,
                      src_dir=temp_dir,
                      dest_dir=to_directory,
                      err_msg=str(err_msg))
            shutil.rmtree(temp_dir)
            return False

        shutil.rmtree(temp_dir)
        return True
    return False
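
export_series depends on two helpers not shown here, check_eid_file and check_file_date. A minimal sketch consistent with how they are used above (the real implementations may differ):

import os

def check_eid_file(eid_file_path, session_and_scan_list):
    # Hypothetical: True if the stored session/scan list matches the
    # current one, i.e. the EID is unchanged.
    if not os.path.exists(eid_file_path):
        return False
    with open(eid_file_path, 'r') as eid_file:
        return eid_file.read().strip() == session_and_scan_list.strip()

def check_file_date(pipeline_file, xnat_file):
    # Hypothetical: True if the pipeline file is at least as new as the
    # corresponding XNAT file, i.e. no update is needed.
    return os.path.getmtime(pipeline_file) >= os.path.getmtime(xnat_file)
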
Code example #43
def export_and_queue(red2cas,
                     redcap_visit_id,
                     xnat,
                     session_data,
                     redcap_key,
                     pipeline_root_dir,
                     xnat_dir,
                     stroop=(None, None, None),
                     run_pipeline_script=None,
                     verbose=False,
                     timerFlag=False):
    (subject_label, event_label) = redcap_key
    # Put together pipeline work directory for this subject and visit
    subject_code = session_data['mri_xnat_sid']
    (arm_code, visit_code, pipeline_workdir_rel) = (None, None, None)
    try:
        (arm_code, visit_code,
         pipeline_workdir_rel) = red2cas.translate_subject_and_event(
             subject_code, event_label)
    except:
        slog.info(redcap_visit_id,
                  "ERROR: Event " + event_label + "is not supported yet.")
        return None

    if not arm_code:
        return None

    pipeline_workdir = os.path.join(pipeline_root_dir, pipeline_workdir_rel)

    if verbose:
        print subject_label, '/', subject_code, '/', event_label, 'to', pipeline_workdir

    new_files_created = export_to_workdir(redcap_visit_id,
                                          xnat,
                                          session_data,
                                          pipeline_workdir,
                                          redcap_key,
                                          xnat_dir,
                                          stroop=stroop,
                                          verbose=verbose,
                                          timerFlag=timerFlag)

    if (new_files_created and run_pipeline_script):
        if verbose:
            print 'Submitting script', run_pipeline_script, 'to process', pipeline_workdir
        just_pipeline_script = os.path.basename(run_pipeline_script)
        qsub_exe = 'cd %s; %s %s' % (pipeline_root_dir, run_pipeline_script,
                                     pipeline_workdir_rel)
        red2cas.schedule_cluster_job(
            qsub_exe,
            '%s-%s-%s-Nightly' %
            (subject_code, visit_code, just_pipeline_script),
            submit_log='/tmp/ncanda_test_nightly.txt',
            verbose=verbose)

    # It is very important to clear the PyXNAT cache, lest we run out of disk space and shut down all databases in the process
    try:
        xnat.cache.clear()
    except:
        slog.info(
            "export_mr_sessions_pipeline",
            "WARNING: clearing PyXNAT cache threw an exception - are you running multiple copies of this script?"
        )

    return new_files_created
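
For orientation, the cluster job assembled by export_and_queue is just a cd into the pipeline root followed by the pipeline script on the relative work directory. With hypothetical placeholder values:

# All paths and IDs below are placeholders for illustration only.
pipeline_root_dir = '/fs/example-share/pipeline/cases'
run_pipeline_script = '/opt/example/bin/run_pipeline.sh'
pipeline_workdir_rel = 'EXAMPLE_S00001/standard/baseline'
qsub_exe = 'cd %s; %s %s' % (pipeline_root_dir, run_pipeline_script,
                             pipeline_workdir_rel)
# -> cd /fs/example-share/pipeline/cases; /opt/example/bin/run_pipeline.sh EXAMPLE_S00001/standard/baseline
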
Code example #44
def do_export_spiral_files(redcap_visit_id,
                           xnat,
                           redcap_key,
                           resource_location,
                           to_directory,
                           spiral_nifti_out,
                           tmpdir,
                           verbose=None):
    # Do the actual export using a temporary directory that is managed by the caller
    # (simplifies its removal regardless of exit taken)
    # print "do_export_spiral_files" , str(xnat), str(resource_location), str(to_directory), str(spiral_nifti_out), xnat_eid, str(resource_id), str(resource_file_bname)
    [xnat_eid, resource_id, resource_file_bname] = resource_location.split('/')
    try:
        tmp_file_path = xnat.select.experiment(xnat_eid).resource(
            resource_id).file(resource_file_bname).get_copy(
                os.path.join(tmpdir, "pfiles.tar.gz"))
    except Exception as err_msg:
        slog.info(xnat_eid + "_" + resource_id,
                  "Error: failed to download from xnat " + resource_file_bname,
                  err_msg=str(err_msg))
        return False

    errcode, stdout, stderr = sutils.untar(tmp_file_path, tmpdir)
    if errcode != 0:
        error = "ERROR: Unable to un-tar resource file. File is likely corrupt."
        slog.info(redcap_visit_id,
                  error,
                  tempfile_path=tmp_file_path,
                  xnat_eid=xnat_eid,
                  spiral_tar_file=resource_location,
                  err_msg=str(stderr),
                  err_cod=str(errcode))
        return False

    spiral_E_files = glob_for_files_recursive(tmpdir, pattern="E*P*.7")
    if len(spiral_E_files) > 1:
        error = "ERROR: more than one E file found"
        slog.info(redcap_visit_id,
                  error,
                  xnat_eid=xnat_eid,
                  spiral_e_files=', '.join(spiral_E_files))
        return False

    physio_files = glob_for_files_recursive(tmpdir, pattern="P*.physio")
    if len(physio_files) > 1:
        error = 'More than one physio file found in spiral tar file.'
        slog.info(redcap_visit_id,
                  error,
                  xnat_eid=xnat_eid,
                  tmp_file_path=tmp_file_path,
                  physio_files=physio_files,
                  spiral_tar_file=resource_location)
        return False

    if len(spiral_E_files) == 1:
        # Make directory first
        spiral_dir_out = os.path.join(to_directory, 'native')
        if not os.path.exists(spiral_dir_out):
            os.makedirs(spiral_dir_out)
        # Now try to make the NIfTI
        errcode, stdout, stderr = sutils.make_nifti_from_spiral(
            spiral_E_files[0], spiral_nifti_out)
        if not os.path.exists(spiral_nifti_out):
            error = "Unable to make NIfTI from resource file, please try running makenifti manually"
            slog.info(redcap_visit_id,
                      error,
                      xnat_eid=xnat_eid,
                      spiral_file=spiral_E_files[0],
                      ecode=str(errcode),
                      eout=str(stderr),
                      sout=str(stdout))
            return False
    else:
        error = "ERROR: no spiral data file found"
        slog.info(redcap_visit_id,
                  error,
                  xnat_eid=xnat_eid,
                  spiral_tar_file=resource_location)
        return False

    if len(physio_files) == 1:
        spiral_physio_out = os.path.join(to_directory, 'native', 'physio')
        shutil.copyfile(physio_files[0], spiral_physio_out)
        (ecode, sout, eout) = sutils.gzip('-9 ' + str(spiral_physio_out))
        return not ecode

    return True
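
do_export_spiral_files relies on glob_for_files_recursive, which is not defined in this snippet. A minimal sketch, assuming it walks a directory tree and returns the files whose names match an fnmatch-style pattern:

import fnmatch
import os

def glob_for_files_recursive(top_dir, pattern='*'):
    # Hypothetical: collect all file paths under top_dir whose basename
    # matches the given shell-style pattern.
    matches = []
    for root, dirs, files in os.walk(top_dir):
        for fname in fnmatch.filter(files, pattern):
            matches.append(os.path.join(root, fname))
    return matches
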
Code example #45
def check_excluded_subjects(excluded_subjects, pipeline_root_dir):
    for subject in excluded_subjects:
        subject_dir = os.path.join(pipeline_root_dir, subject)
        if os.path.exists(subject_dir):
            error = "ERROR: pipeline directory is from an *excluded* subject and should probable be deleted"
            slog.info(subject_dir, error)
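
A short usage sketch for check_excluded_subjects; the subject ID and pipeline path below are placeholders:

excluded_subjects = ['EXAMPLE_S99999']
check_excluded_subjects(excluded_subjects, '/fs/example-share/pipeline/cases')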