Example #1
class Sorting:
    def __init__(self):
        self.tableName = 'Sorting'
        self.DBClient = DbUtils()
        self.sqlBuilder = SQLBuilder()

    def getObjectFromTuple(self, tuple):
        valuesDict = dict(record_id=tuple[0], study=tuple[1], rid=tuple[2], scan_type=tuple[3],
                          scan_date=tuple[4].strftime("%Y-%m-%d"), scan_time=str(tuple[5]),
                          s_identifier=tuple[6], i_identifier=tuple[7], file_type=tuple[8], download_folder=tuple[9],
                          raw_folder=tuple[10], moved=tuple[11])
        return SortingObject(valuesDict)

    def insertToTable(self, objList):
        for obj in objList:
            self.DBClient.executeNoResult(
                self.sqlBuilder.getSQL_AddNewEntryToSortingTable(obj.sqlInsert()))

    def getUnmovedFilesPerStudy(self, study):
        unmovedList = self.DBClient.executeAllResults(
            self.sqlBuilder.getSQL_getUnmovedFilesFromSortingTable(study, tuple(sc.ProcessingModalityAndPipelineTypePerStudy[study].keys())))
        return [self.getObjectFromTuple(t) for t in unmovedList]

    def setMovedTrue(self, sortingObj):
        sortingObj.moved = 1
        self.saveObj(sortingObj)

    def saveObj(self, sortingObj):
        self.DBClient.executeNoResult(self.sqlBuilder.getSQL_saveObjSortingTable(sortingObj))
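
A minimal usage sketch for the Sorting wrapper above, assuming a configured DbUtils backend; the import path below is hypothetical and would need to match the project layout.

# Hypothetical import path; adjust to wherever Sorting lives in the project.
# from DataHandling.Sorting import Sorting

sortingTable = Sorting()
for sortingObj in sortingTable.getUnmovedFilesPerStudy('ADNI'):
    # ... move the files on disk here ...
    sortingTable.setMovedTrue(sortingObj)  # sets moved = 1 and persists the row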
    def __init__(self):
        self.processingPPDict = {
            'ADNI': {
                'V1': {
                    'T1': ADNI_V1_T1(),
                    'FMRI': ADNI_V1_FMRI(),
                    'AV45': ADNI_V1_AV45(),
                    'FDG': ADNI_V1_FDG(),
                    'AV1451': ADNI_V1_AV1451()
                },
                'V2': {
                    'T1': ADNI_V1_T1(),
                    'FMRI': ADNI_V1_FMRI(),
                    'AV45': ADNI_V2_AV45(),
                    'FDG': ADNI_V2_FDG(),
                    'AV1451': ADNI_V2_AV1451()
                }
            },
            'DIAN': {
                'V1': {
                    'T1': DIAN_V1_T1(),
                    'FDG': DIAN_V1_FDG(),
                    'PIB': DIAN_V1_PIB()
                }
            }
        }
        self.DBClient = DbUtils()
        self.QCH = QCHandler()
class Processing:
    def __init__(self):
        self.DBClient = DbUtils()
        self.sqlBuilder = SQLBuilder()

    def getObjectFromTuple(self, tuple):
        valuesDict = dict(record_id=tuple[0], study=tuple[1], rid=tuple[2], modality=tuple[3],
                          scan_date=tuple[4].strftime("%Y-%m-%d"), scan_time=str(tuple[5]),
                          s_identifier=tuple[6], i_identifier=tuple[7], root_folder=tuple[8], converted_folder=tuple[9], version=tuple[10],
                          processed=tuple[12])
        return ProcessingObject(valuesDict)

    def insertToTable(self, objList):
        for obj in objList:
            self.DBClient.executeNoResult(
                self.sqlBuilder.getSQL_AddNewEntryToProcessingTable(obj.sqlInsert()))

    def insertFromConvertionObj(self, convertionObj):
        convertionValues = convertionObj.getValuesDict()
        convertionValues['modality'] = sc.ProcessingModalityAndPipelineTypePerStudy[convertionObj.study][convertionObj.scan_type]
        convertionValues['root_folder'] = '/'.join(convertionObj.converted_folder.split('/')[0:-2])  # Drop the last two path components
        self.insertToTable([ProcessingObject(convertionValues)])

    def getToProcessListPerStudy(self, study):
        toProcessList = self.DBClient.executeAllResults(
            self.sqlBuilder.getSQL_getToBeProcessedFromProcessingTable(study))
        return [self.getObjectFromTuple(t) for t in toProcessList]
class CoregHandler:
    def __init__(self):
        self.DBClient = DbUtils()

    def requestCoreg(self, study, rid, type, pet_folder, t1_folder, petScanType, t1ScanType, xfm_name):
        regsql = "INSERT IGNORE INTO Coregistration VALUES (Null, '{0}', '{1}', '{2}', '{3}', '{4}', '{5}', '{6}', '{7}', 0, 0, 0, Null)".format(study.upper(), rid,
                                                                                                                         type.upper(),
                                                                                                                         pet_folder,
                                                                                                                         t1_folder, petScanType, t1ScanType, xfm_name)

        self.DBClient.executeNoResult(regsql)
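
requestCoreg interpolates raw values into the INSERT, so a path containing a quote would break the statement. Since INSERT IGNORE is MySQL syntax, a parameterized sketch using mysql.connector might look like this; the connection settings and values are placeholders, not the project's actual configuration.

import mysql.connector  # assumes a MySQL-compatible backend

conn = mysql.connector.connect(host='localhost', database='pipeline',
                               user='user', password='secret')  # placeholder credentials
cur = conn.cursor()
regsql = ("INSERT IGNORE INTO Coregistration "
          "VALUES (Null, %s, %s, %s, %s, %s, %s, %s, %s, 0, 0, 0, Null)")
# The driver escapes each value, so quotes in paths cannot break the statement.
cur.execute(regsql, ('ADNI', '0021', 'AV45', '/data/pet', '/data/t1',
                     'AV45', 'MPRAGE', 'pet_to_t1.xfm'))
conn.commit()
conn.close()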
Example #5
class CoregHandler:
    def __init__(self):
        self.DBClient = DbUtils()

    def requestCoreg(self, study, rid, type, pet_folder, t1_folder,
                     petScanType, t1ScanType, xfm_name):
        regsql = "INSERT IGNORE INTO Coregistration VALUES (Null, '{0}', '{1}', '{2}', '{3}', '{4}', '{5}', '{6}', '{7}', 0, 0, 0, Null)".format(
            study.upper(), rid, type.upper(), pet_folder, t1_folder,
            petScanType, t1ScanType, xfm_name)

        self.DBClient.executeNoResult(regsql)
    def __init__(self, inputFolder, database_location):
        # Initiate Database Client
        self.DbClient = DbUtils(database=database_location)

        # For each csv file, import it into the SQL database
        for inputFile in glob.glob(inputFolder + '/*.csv'):
            if inputFile in CSVconfig.AdniIgnored:
                continue
            sqlLocation = os.path.basename(inputFile).replace('.csv', '')
            with open(inputFile, 'r') as csvFile:
                csvToDatabase(self.DbClient, csvFile, sqlLocation)

        #  Close the connection to the database
        self.DbClient.close()
class AdniCsvImport:
    def __init__(self, inputFolder, database_location):
        # Initiate Database Client
        self.DbClient = DbUtils(database=database_location)

        # For each csv file, import it into the SQL database
        for inputFile in glob.glob(inputFolder + "/*.csv"):
            if inputFile in CSVconfig.AdniIgnored:
                continue
            sqlLocation = os.path.basename(inputFile).replace(".csv", "")
            with open(inputFile, "r") as csvFile:
                csvToDatabase(self.DbClient, csvFile, sqlLocation)

        #  Close the connection to the database
        self.DbClient.close()
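
A usage sketch for AdniCsvImport: the constructor performs the whole import (one SQL table per CSV, named after the file) and closes the connection itself. The folder and database name are placeholders.

# One call does everything; there is nothing to invoke afterwards.
AdniCsvImport('/data/adni/csv_exports', 'ADNI_Study_Data')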
    def __init__(self, studyList, version):
        self.DBClient = DbUtils()
        self.studyList = [i.upper() for i in studyList]
        self.version = version
        self.recursorList = []
        self._getRecursorList(studyList)
        self.sortingDataList = []
        self.sqlBuilder = SQLBuilder()

        self.moveSortingObjListDict = {}
        self.toConvertObjListDict = {}

        self.sortingTable = Sorting()
        self.conversionTable = Conversion()

        self.raw2mincConverter = Raw2MINCConverter()
        self.pool = Pool()
        self.qsubJobHandler = QSubJobHandler()
        self.qsubJobHandler.start()

        self.convertedListDict = {}

        self.processingTable = Processing()

        self.toProcessListDict = {}
        self.pipelineHanlder = PipelineHandler()
        self.QCHandler = QCHandler()

        self.MongoManger = MongoDBManager()
        self.MongoXMLManager = MongoScanXMLManager()
        self.MongoXMLManager.processXMLs()
Example #9
class Processing:
    def __init__(self):
        self.DBClient = DbUtils()
        self.sqlBuilder = SQLBuilder()

    def getObjectFromTuple(self, tuple):
        valuesDict = dict(record_id=tuple[0],
                          study=tuple[1],
                          rid=tuple[2],
                          modality=tuple[3],
                          scan_date=tuple[4].strftime("%Y-%m-%d"),
                          scan_time=str(tuple[5]),
                          s_identifier=tuple[6],
                          i_identifier=tuple[7],
                          root_folder=tuple[8],
                          converted_folder=tuple[9],
                          version=tuple[10],
                          processed=tuple[12])
        return ProcessingObject(valuesDict)

    def insertToTable(self, objList):
        for obj in objList:
            self.DBClient.executeNoResult(
                self.sqlBuilder.getSQL_AddNewEntryToProcessingTable(
                    obj.sqlInsert()))

    def insertFromConvertionObj(self, convertionObj):
        convertionValues = convertionObj.getValuesDict()
        convertionValues[
            'modality'] = sc.ProcessingModalityAndPipelineTypePerStudy[
                convertionObj.study][convertionObj.scan_type]
        convertionValues['root_folder'] = '/'.join(
            convertionObj.converted_folder.split('/')
            [0:-2])  # Drop the last two path components
        self.insertToTable([ProcessingObject(convertionValues)])

    def getToProcessListPerStudy(self, study):
        toProcessList = self.DBClient.executeAllResults(
            self.sqlBuilder.getSQL_getToBeProcessedFromProcessingTable(study))
        return [self.getObjectFromTuple(t) for t in toProcessList]
Example #10
class QCHandler:
    def __init__(self):
        self.DBClient = DbUtils()

    def requestQC(self, study, modal_table, modal_tableId, qcField, qctype, qcFolder):
        qcsql = "INSERT IGNORE INTO QC VALUES (Null, '{0}', '{1}', '{2}', '{3}', '{4}','{5}' , 0, 0, 0, 0, Null)".format(study.upper(), modal_table,
                                                                                                                         modal_tableId,
                                                                                                                         qcField,
                                                                                                                         qctype, qcFolder)

        self.DBClient.executeNoResult(qcsql)

    def checkQCJobs(self, study, modality):
        sql = "SELECT * FROM {0}_{1}_Pipeline WHERE QC = 1 AND FINISHED = 1".format(study, modality)
        res = self.DBClient.executeAllResults(sql)
        if len(res) < 1:
            return 0
        else:
            for result in res:
                proc_id = result[1]
                setProcessedSQL = "UPDATE Processing SET PROCESSED = 1, QCPASSED = 1 WHERE RECORD_ID = {0}".format(proc_id)
                self.DBClient.executeNoResult(setProcessedSQL)
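
A sketch of the QC round trip implied by QCHandler, assuming a configured DbUtils backend; the row id and QC folder are placeholders.

qc = QCHandler()
# Queue a CIVET QC entry for row 42 of the ADNI T1 pipeline table.
qc.requestQC('ADNI', 'ADNI_T1_Pipeline', 42, 'QC', 'civet', '/data/ADNI/T1/0021/civet')
# Later, flag every finished-and-QCed T1 row as processed in the Processing table.
qc.checkQCJobs('ADNI', 'T1')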
Example #11
class Conversion:
    def __init__(self):
        self.tableName = 'Conversion'
        self.DBClient = DbUtils()
        self.sqlBuilder = SQLBuilder()

    def getObjectFromTuple(self, tuple):
        valuesDict = dict(record_id=tuple[0], study=tuple[1], rid=tuple[2], scan_type=tuple[3],
                          scan_date=tuple[4].strftime("%Y-%m-%d"), scan_time=str(tuple[5]),
                          s_identifier=tuple[6], i_identifier=tuple[7], file_type=tuple[8], raw_folder=tuple[9],
                          converted_folder=tuple[10], version=tuple[11], converted=tuple[12])
        return ConversionObject(valuesDict)

    def insertToTable(self, objList):
        for obj in objList:
            self.DBClient.executeNoResult(
                self.sqlBuilder.getSQL_AddNewEntryToConversionTable(obj.sqlInsert()))

    def get_version(self, sortingObj, versionDict):
        if sortingObj.study == 'ADNI':
            dl_path = sortingObj.download_folder
            if 'Uniform' in dl_path:
                return 'V2'
            else:
                return versionDict[sc.ProcessingModalityAndPipelineTypePerStudy[sortingObj.study][sortingObj.scan_type]] if sc.ProcessingModalityAndPipelineTypePerStudy[sortingObj.study][sortingObj.scan_type] in versionDict else 'V1'
        else:
            return versionDict[sc.ProcessingModalityAndPipelineTypePerStudy[sortingObj.study][sortingObj.scan_type]] if sc.ProcessingModalityAndPipelineTypePerStudy[sortingObj.study][sortingObj.scan_type] in versionDict else 'V1'


    def insertFromSortingObj(self, sortingObj, versionDict):
        sortingValues = sortingObj.getValuesDict()
        version = self.get_version(sortingObj, versionDict)
        sortingValues['converted_folder'] = '{0}/{1}/{2}/{3}/{4}_{5}_{6}/{7}/converted/final'.format(sc.studyDatabaseRootDict[sortingObj.study],
                                                                        sortingObj.study, sortingObj.scan_type, sortingObj.rid,
                                                                        sortingObj.scan_date, sortingObj.s_identifier, sortingObj.i_identifier, version)
        sortingValues['version'] = version
        sortingValues['converted'] = 0
        self.insertToTable([ConversionObject(sortingValues)])

    def gettoBeConvertedPerStudy(self, study):
        toConvertList = self.DBClient.executeAllResults(
            self.sqlBuilder.getSQL_getToBeConvertedFileFromConversionTable(study))
        return [self.getObjectFromTuple(t) for t in toConvertList]

    def setConvertedTrue(self, convertionObj):
        convertionObj.converted = 1
        self.saveObj(convertionObj)

    def setConvertedFailed(self, convertionObj):
        convertionObj.skip = 1
        self.saveObj(convertionObj)

    def saveObj(self, convertionObj):
        self.DBClient.executeNoResult(self.sqlBuilder.getSQL_saveObjConversionTable(convertionObj))

    def getConvertedListPerStudy(self, study):
        convertedList = self.DBClient.executeAllResults(self.sqlBuilder.getSQL_getAllConvertedFromConvertionTable(study))
        return [self.getObjectFromTuple(t) for t in convertedList]
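
get_version defaults to 'V1' unless the scan's modality is pinned in versionDict, with one special case: ADNI downloads whose path contains 'Uniform' always resolve to 'V2'. A small illustration, assuming a configured DbUtils backend and that sc.ProcessingModalityAndPipelineTypePerStudy['ADNI'] maps this scan type to 'AV45'.

from types import SimpleNamespace

# Stand-in for a SortingObject row; real ones come from Sorting.getUnmovedFilesPerStudy.
sortingObj = SimpleNamespace(study='ADNI', scan_type='AV45',
                             download_folder='/data/ADNI/Uniform/0021')
conversionTable = Conversion()
print(conversionTable.get_version(sortingObj, {}))               # 'V2': ADNI + 'Uniform' path
sortingObj.download_folder = '/data/ADNI/Raw/0021'
print(conversionTable.get_version(sortingObj, {}))               # 'V1': the default
print(conversionTable.get_version(sortingObj, {'AV45': 'V2'}))   # 'V2': pinned in versionDict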
Example #12
class QCHandler:
    def __init__(self):
        self.DBClient = DbUtils()

    def requestQC(self, study, modal_table, modal_tableId, qcField, qctype,
                  qcFolder):
        qcsql = "INSERT IGNORE INTO QC VALUES (Null, '{0}', '{1}', '{2}', '{3}', '{4}','{5}' , 0, 0, 0, 0, Null)".format(
            study.upper(), modal_table, modal_tableId, qcField, qctype,
            qcFolder)

        self.DBClient.executeNoResult(qcsql)

    def checkQCJobs(self, study, modality):
        sql = "SELECT * FROM {0}_{1}_Pipeline WHERE QC = 1 AND FINISHED = 1".format(
            study, modality)
        res = self.DBClient.executeAllResults(sql)
        if len(res) < 1:
            return 0
        else:
            for result in res:
                proc_id = result[1]
                setProcessedSQL = "UPDATE Processing SET PROCESSED = 1, QCPASSED = 1 WHERE RECORD_ID = {0}".format(
                    proc_id)
                self.DBClient.executeNoResult(setProcessedSQL)
class QSubJobStatusReporter:
    def __init__(self):
        self.DBClient = DbUtils()
        self.QCHandler = QCHandler()

    def setStatus(self, job, status):
        if job.jobType == 'beast':
            nestedJob = job.job
            table = '{0}_{1}_Pipeline'.format(nestedJob.study, nestedJob.modality)
            table_id = nestedJob.table_id
            if status == 'Success':
                setSql = 'UPDATE {0} SET BEAST_MASK = 1 WHERE RECORD_ID = {1}'.format(table, table_id)
            elif status == 'Fail':
                setSql = 'UPDATE {0} SET BEAST_MASK = -1, BEAST_SKIP = 1 WHERE RECORD_ID = {1}'.format(table, table_id)
            self.DBClient.executeNoResult(setSql)
            if status == 'Fail':
                PipelineLogger.log('manager', 'error','QSUB job Status Failed: - {0} - Processing Table ID : {1} - Modality Table ID : {2}'.format(job.jobType, nestedJob.processing_rid, nestedJob.table_id))

        if job.jobType == 'av45':
            nestedJob = job.job
            table = '{0}_{1}_Pipeline'.format(nestedJob.study, nestedJob.modality)
            table_id = nestedJob.table_id
            if status == 'Success':
                setSql = "UPDATE {0} SET FINISHED = 1, PROC_Failed = Null WHERE RECORD_ID = {1}".format(table, table_id)
                self.requestQC(nestedJob, 'av45')
            elif status == 'Fail':
                setSql = "UPDATE {0} SET PROC_Failed = 'Failed' , SKIP = 1 WHERE RECORD_ID = {1}".format(table, table_id)
            self.DBClient.executeNoResult(setSql)
            if status == 'Fail':
                PipelineLogger.log('manager', 'error','QSUB job Status Failed: - {0} - Processing Table ID : {1} - Modality Table ID : {2}'.format(job.jobType, nestedJob.processing_rid, nestedJob.table_id))

        if job.jobType == 'fdg':
            nestedJob = job.job
            table = '{0}_{1}_Pipeline'.format(nestedJob.study, nestedJob.modality)
            table_id = nestedJob.table_id
            if status == 'Success':
                setSql = "UPDATE {0} SET FINISHED = 1, PROC_Failed = Null WHERE RECORD_ID = {1}".format(table, table_id)
                self.requestQC(nestedJob, 'fdg')
            elif status == 'Fail':
                setSql = "UPDATE {0} SET PROC_Failed = 'Failed' , SKIP = 1 WHERE RECORD_ID = {1}".format(table, table_id)
            self.DBClient.executeNoResult(setSql)
            if status == 'Fail':
                PipelineLogger.log('manager', 'error','QSUB job Status Failed: - {0} - Processing Table ID : {1} - Modality Table ID : {2}'.format(job.jobType, nestedJob.processing_rid, nestedJob.table_id))


    def requestQC(self, processingItemObj, qctype):
        qcFieldDict = dict(civet='QC', beast='BEAST_QC', av45='QC', fdg='QC')
        qcFolderDict = { 'civet' : '{0}/civet'.format(processingItemObj.root_folder),
                         'beast' : '{0}/beast'.format(processingItemObj.root_folder),
                         'av45' : '{0}/processed'.format(processingItemObj.root_folder),
                         'fdg' : '{0}/processed'.format(processingItemObj.root_folder)}
        self.QCHandler.requestQC(processingItemObj.study, '{0}_{1}_Pipeline'.format(processingItemObj.study,
                                                                                    processingItemObj.modality),
                                 processingItemObj.table_id, qcFieldDict[qctype], qctype, qcFolderDict[qctype])
    def __init__(self):
        self.DBClient = DbUtils()
        self.QCHandler = QCHandler()
Example #15
    def __init__(self):
        self.DBClient = DbUtils()
Example #16
    def __init__(self):
        self.DBClient = DbUtils()
        self.sqlBuilder = SQLBuilder()
__author__ = 'sulantha'
import datetime
from Utils.DbUtils import DbUtils
DBClient = DbUtils()
with open('/data/data03/sulantha/Downloads/av45_list.csv', 'r') as file:
    next(file)
    for line in file:
        row = line.split(',')
        rid = row[0]
        date = row[1].strip()
        dateT = datetime.datetime.strptime(date, '%m/%d/%Y')
        dateS = dateT.strftime('%Y-%m-%d')
        findSQL = "SELECT * FROM Processing WHERE RID = {0} AND MODALITY = 'AV45' AND SCAN_DATE = '{1}'".format(rid, dateS)
        res = DBClient.executeAllResults(findSQL)
        if len(res) == 0:
            print('{0}-{1} {2}'.format(rid, len(res), '############'))
        processingSQL = "UPDATE Processing SET SKIP = 0 WHERE RID = {0} AND MODALITY = 'AV45' AND SCAN_DATE = '{1}'".format(rid, dateS)
        DBClient.executeNoResult(processingSQL)



    def __init__(self):
        self.processingPPDict = {'ADNI': {'V1': {'T1': ADNI_V1_T1(), 'FMRI': ADNI_V1_FMRI(), 'AV45': ADNI_V1_AV45(), 'FDG': ADNI_V1_FDG(), 'AV1451': ADNI_V1_AV1451()},
                                          'V2': {'T1': ADNI_V1_T1(), 'FMRI': ADNI_V1_FMRI(), 'AV45': ADNI_V2_AV45(), 'FDG': ADNI_V2_FDG(), 'AV1451': ADNI_V2_AV1451()}}}
        self.DBClient = DbUtils()
        self.QCH = QCHandler()
class DIAN_T1_Helper:
    def __init__(self):
        self.DBClient = DbUtils()
        self.MatchDBClient = DbUtils(database=pc.DIAN_dataMatchDBName)

    def getMatchingT1(self, processingItemObj):
        modalityID = '{0}{1}{2}{3}{4}{5}{6}'.format(
            processingItemObj.study, processingItemObj.version,
            processingItemObj.subject_rid, processingItemObj.modality,
            processingItemObj.scan_date.replace('-', ''),
            processingItemObj.s_identifier, processingItemObj.i_identifier)
        getFromMatchTableSQL = "SELECT * FROM MatchT1 WHERE MODALITY_ID = '{0}'".format(
            modalityID)
        existingMatchedRec = self.DBClient.executeAllResults(
            getFromMatchTableSQL)
        if len(existingMatchedRec) == 1:
            getConvSQL = "SELECT * FROM Conversion WHERE RECORD_ID = '{0}'".format(
                existingMatchedRec[0][3])
            return self.DBClient.executeAllResults(getConvSQL)[0]
        else:

            if processingItemObj.modality == 'FMRI':
                PipelineLogger.log(
                    'root', 'error',
                    'FMRI T1 Matching not implemented. {0} - {1} - {2}'.format(
                        processingItemObj.subject_rid,
                        processingItemObj.s_identifier.replace('S', ''),
                        processingItemObj.i_identifier.replace('I', '')))
                return None

            else:  # By Default, for PET images
                date_str = processingItemObj.scan_date.replace('-', '')
                name_and_Mod = '{0}{1}'.format(processingItemObj.subject_rid,
                                               processingItemObj.modality)
                visit = processingItemObj.i_identifier.split('x')[0].replace(
                    date_str, '').replace(name_and_Mod, '')
                pet_label = '{0}_{1}_{2}'.format(
                    processingItemObj.subject_rid, visit,
                    processingItemObj.modality.lower())
                getRecordSQL = "SELECT * FROM PET_MRI_Proc_Match WHERE Label LIKE '{0}'".format(
                    pet_label)

            petrecord = self.MatchDBClient.executeAllResults(getRecordSQL)
            if not petrecord:
                PipelineLogger.log(
                    'root', 'error',
                    'Cannot find PET record : {0} - {1} - {2}'.format(
                        processingItemObj.subject_rid,
                        processingItemObj.s_identifier.replace('S', ''),
                        processingItemObj.i_identifier.replace('I', '')))
                return None

            mr_name = petrecord[0][5]
            if mr_name == '':
                ### Processed with MR entry not found. Have to switch to date based matching.
                PipelineLogger.log(
                    'root', 'error',
                    'Processed with MR entry not found. : {0} - {1} - {2} - Searching based on scan date. +/- 60 days from PET date'
                    .format(processingItemObj.subject_rid,
                            processingItemObj.modality, visit))
                return None

            mr_fid = petrecord[0][6]
            mr_visit = mr_name.split('_')[1]

            matchedT1withScanDescriptions = []

            for t1_type in ['MPRAGE', 'IRFSPGR', 'MPR', 'FSPGR']:
                mr_DB_iid = '{0}{3}{1}%x{2}'.format(
                    processingItemObj.subject_rid, mr_visit, mr_fid, t1_type)
                getScanFromConversionSQL = "SELECT * FROM Conversion WHERE STUDY = '{0}' AND I_IDENTIFIER LIKE '{1}' AND SKIP = 0".format(
                    processingItemObj.study, mr_DB_iid)
                t1_conversion = self.DBClient.executeAllResults(
                    getScanFromConversionSQL)
                if len(t1_conversion) > 0:
                    matchedT1withScanDescriptions.append(t1_conversion[0])
            if len(matchedT1withScanDescriptions) < 1:
                PipelineLogger.log(
                    'root', 'error',
                    'Matched T1s are not in the database. : Subject, visit and FID - {0} {1} {2}'
                    .format(processingItemObj.subject_rid, mr_visit, mr_fid))
                return None
            else:
                if len(matchedT1withScanDescriptions) == 1:
                    ## ONLY ONE MATCHED T1. GOOD> CHECK IF THE T1 is a good scan type and not a bluff !!!
                    self.addToMatchT1Table(processingItemObj, modalityID,
                                           matchedT1withScanDescriptions[0])
                    return matchedT1withScanDescriptions[0]

                else:
                    #### MORE THAN ONE FOUND. Very weird for DIAN.
                    PipelineLogger.log(
                        'root', 'error',
                        'MORE THAN ONE T1 Match FOUND. Very weird for DIAN. : Subject and visit - {0} {1}'
                        .format(processingItemObj.subject_rid, mr_visit))
                    return None

    def checkProcessed(self, t1Record):
        subject_id = t1Record[2]
        version = t1Record[11]
        s_id = t1Record[6]
        i_id = t1Record[7]
        checkProcessedSQL = "SELECT * FROM Processing WHERE RID = '{0}' AND VERSION = '{1}' AND S_IDENTIFIER = '{2}' AND I_IDENTIFIER = '{3}'".format(
            subject_id, version, s_id, i_id)
        result = self.DBClient.executeAllResults(checkProcessedSQL)
        if len(result) < 1:
            PipelineLogger.log(
                'root', 'error',
                'Matched T1 is not added to the processing table. {0} - {1} - {2}'
                .format(subject_id, s_id, i_id))
            return False
        else:
            row = result[0]
            if row[12] == 1 and row[13] == 1:
                return row[8]
            else:
                PipelineLogger.log(
                    'root', 'error',
                    'Matched T1 is not processed or QC failed. {0} - {1} - {2}'.
                    format(subject_id, s_id, i_id))
                self.startProcessOFT1(row)
                return False

    def addToMatchT1Table(self, processingItemObj, modalityID, t1Record):
        pet_date = datetime.strptime(processingItemObj.scan_date, '%Y-%m-%d')
        mri_date = datetime.combine(t1Record[4], datetime.min.time())
        date_diff = abs(mri_date - pet_date)
        t1ID = '{0}{1}{2}_x_{3}_x_{4}{5}{6}'.format(
            t1Record[1], t1Record[11], t1Record[2], t1Record[3],
            t1Record[4].strftime('%Y-%m-%d').replace('-', ''), t1Record[6],
            t1Record[7])
        conversionID = t1Record[0]
        sql = "INSERT IGNORE INTO MatchT1 VALUES (Null, '{0}', '{1}', '{2}', {3}, Null)".format(
            modalityID, t1ID, conversionID, date_diff.days)
        self.DBClient.executeNoResult(sql)

    def startProcessOFT1(self, processTableEntry):
        recordId = processTableEntry[0]
        study = processTableEntry[1]
        sql = "UPDATE {0}_T1_Pipeline SET SKIP = 0 WHERE PROCESSING_TID = {1}".format(
            study, recordId)
        self.DBClient.executeNoResult(sql)
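
The call sequence the helper above is built for, sketched with a placeholder item (processingItemObj stands for whatever object the pipeline hands to a modality's process() method).

helper = DIAN_T1_Helper()
t1Record = helper.getMatchingT1(processingItemObj)  # a Conversion row, or None if unmatched
if t1Record is not None:
    root_folder = helper.checkProcessed(t1Record)   # ROOT_FOLDER if processed and QC passed, else False
    if root_folder:
        pass  # safe to request PET-to-T1 coregistration from here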
Example #20
import csv
from Utils.DbUtils import DbUtils
inputFile = '/home/sulantha/reRUNAv45.csv'
DBC = DbUtils()
with open(inputFile, 'r') as inputFile:
    csvFile = csv.reader(inputFile)
    for line in csvFile:
        RID = line[0].split('/')[6]
        IID = line[0].split('/')[7].split('_')[-1]
        sql = "UPDATE ADNI_AV45_Pipeline SET FINISHED = 0, SKIP = 0 WHERE PROCESSING_TID IN (SELECT RECORD_ID FROM Processing WHERE RID = {0} AND I_IDENTIFIER = '{1}')".format(
            RID, IID)
        DBC.executeNoResult(sql)
Example #21
from datetime import datetime
from Utils.DbUtils import DbUtils
import glob, itertools

DBClient = DbUtils()
outLines = []
date_col = 3
type = 'AV1451'
with open('/home/sulantha/Downloads/22_Aug_TAU_AMY.csv', 'r') as file:
    next(file)
    for line in file:
        row = line.split(',')
        rid = row[0].split('_')[-1]
        date = row[date_col].strip()
        if date == '':
            continue
        dateT = datetime.strptime(date, '%m/%d/%Y')
        dateS = dateT.strftime('%Y-%m-%d')
        findSQLV2 = "SELECT CONVERTED_FOLDER FROM Conversion WHERE RID = {0} AND SCAN_TYPE = '{3}' AND SCAN_DATE = '{1}' AND VERSION = '{2}' LIMIT 1".format(
            rid, dateS, 'V2', type.upper())
        resv2 = DBClient.executeSomeResults(findSQLV2, 1)

        if len(resv2) == 0:
            print(rid)
            v2Path = ''
        elif len(resv2) == 1:
            v2Path = '{0}/*_{1}.mnc'.format(resv2[0][0], type.upper())
            v2Path = glob.glob(v2Path)[0]

        findanyT1 = "SELECT ROOT_FOLDER FROM Processing WHERE RID = {0} AND MODALITY = '{1}' AND PROCESSED = 1 AND QCPASSED = 1 ORDER BY SCAN_DATE DESC LIMIT 1".format(
            rid, 'T1')
Example #22
    def __init__(self):
        self.DBClient = DbUtils()
        self.QCHandler = QCHandler()
__author__ = 'Sulantha'

from Utils.DbUtils import DbUtils
import glob, os, sys, fileinput

if __name__ == '__main__':
    DBClient = DbUtils()
    sql1 = "SELECT * FROM Processing WHERE PROCESSED = 1 AND MODALITY ='T1'"
    res = DBClient.executeAllResults(sql1)
    for result in res:
        proc_id = result[0]
        #sql2 = "SELECT * FROM Processing WHERE RECORD_ID = {0}".format(proc_id)
        #process_rec = DBClient.executeAllResults(sql2)[0]

        T1Path = result[8]
        try:
            civet_nl_xfm_name = '{0}/civet/transforms/nonlinear/*nlfit_It.xfm'.format(T1Path)
            civet_nl_xfm_file = glob.glob(civet_nl_xfm_name)[0]

            civet_nl_mnc_name = '{0}/civet/transforms/nonlinear/*nlfit_It_grid_0.mnc'.format(T1Path)
            civet_nl_mnc_file = glob.glob(civet_nl_mnc_name)[0]
            civet_nl_mnc_name_base = os.path.basename(civet_nl_mnc_file)

            for line in fileinput.input(civet_nl_xfm_file, inplace=True):
                if 'Displacement_Volume' in line:
                    line = 'Displacement_Volume = {0};'.format(civet_nl_mnc_name_base)
                sys.stdout.write(line)
        except:
            s = "UPDATE Processing SET QCPASSED = 0 WHERE RECORD_ID = {0}".format(proc_id)
            DBClient.executeNoResult(s)
            print('Files not found - {0} - {1}'.format(proc_id, T1Path))
Example #24
class ADNI_V1_T1:
    def __init__(self):
        self.DBClient = DbUtils()
        self.QCHandler = QCHandler()

    def process(self, processingItem):
        processingItemObj = ProcessingItemObj(processingItem)

        if processingItemObj.beast_skip and processingItemObj.manual_skip and not processingItemObj.civet:
            self.runCivet(processingItemObj, 'N')
        elif processingItemObj.manual_mask and not processingItemObj.manual_skip and not processingItemObj.civet:
            self.runCivet(processingItemObj, 'M')
        elif processingItemObj.beast_mask == 0 and not processingItemObj.beast_skip and processingItemObj.beast_qc == 0 and not processingItemObj.manual_mask:
            self.runBeast(processingItemObj)
        elif processingItemObj.beast_skip and not processingItemObj.manual_mask and not processingItemObj.manual_skip:
            PipelineLogger.log(
                'manager', 'error',
                '$$$$$$$$$$$$$$$$$ Manual Mask Requested $$$$$$$$$$$$$$$$$$ - {0}'
                .format(processingItem))
            pass
        elif processingItemObj.beast_mask == 1 and not processingItemObj.beast_skip and processingItemObj.beast_qc == 1 and not processingItemObj.manual_mask and not processingItemObj.civet:
            self.runCivet(processingItemObj, 'B')
        elif processingItemObj.beast_mask == 1 and not processingItemObj.beast_skip and processingItemObj.beast_qc == 0 and not processingItemObj.manual_mask and not processingItemObj.manual_skip:
            self.requestQC(processingItemObj, 'beast')
        elif processingItemObj.civet == 1 and processingItemObj.civet_qc == 0:
            self.requestQC(processingItemObj, 'civet')
        else:
            if processingItemObj.civet_qc == -1:
                PipelineLogger.log(
                    'manager', 'error',
                    'Civet QC failed. Skipping. - {0}'.format(processingItem))
            PipelineLogger.log(
                'manager', 'error',
                'Error handling obj for processing - {0}'.format(
                    processingItem))
            return 0

    def getScanType(self, processingItemObj):
        r = self.DBClient.executeAllResults(
            "SELECT SCAN_TYPE FROM Conversion WHERE STUDY = '{0}' AND RID = '{1}' "
            "AND SCAN_DATE = '{2}' AND S_IDENTIFIER = '{3}' "
            "AND I_IDENTIFIER = '{4}'".format(processingItemObj.study,
                                              processingItemObj.subject_rid,
                                              processingItemObj.scan_date,
                                              processingItemObj.s_identifier,
                                              processingItemObj.i_identifier))
        return r[0][0]

    def checkNative(self, processingItemObj):
        orig_ScanType = self.getScanType(processingItemObj)
        converted_file = '{0}/{1}_{2}{3}{4}{5}_{6}.mnc'.format(
            processingItemObj.converted_folder, processingItemObj.study,
            processingItemObj.subject_rid,
            processingItemObj.scan_date.replace('-', ''),
            processingItemObj.s_identifier, processingItemObj.i_identifier,
            orig_ScanType)
        nativeFolder = '{0}/native'.format(processingItemObj.root_folder)
        nativeFileName = '{0}/{1}_{2}{3}{4}{5}_{6}.mnc'.format(
            nativeFolder, processingItemObj.study,
            processingItemObj.subject_rid,
            processingItemObj.scan_date.replace('-', ''),
            processingItemObj.s_identifier, processingItemObj.i_identifier,
            processingItemObj.modality.lower())
        if not os.path.exists(nativeFileName):
            try:
                distutils.dir_util.mkpath(nativeFolder)
                shutil.copyfile(converted_file, nativeFileName)
            except Exception as e:
                PipelineLogger.log(
                    'manager', 'error',
                    'Error in creating folders or copying native file. \n {0}'.
                    format(e))
                PipelineLogger.log(
                    'manager', 'error',
                    'Setting to restart conversion. \n {0}'.format(e))
                sql = "UPDATE Conversion SET CONVERTED = 0, SKIP = 0 WHERE S_IDENTIFIER = '{0}' AND I_IDENTIFIER = '{1}'".format(
                    processingItemObj.s_identifier,
                    processingItemObj.i_identifier)
                self.DBClient.executeNoResult(sql)
                return None
        return nativeFileName

    def runBeast(self, processingItemObj):
        nativeFileName = self.checkNative(processingItemObj)
        if not nativeFileName:
            return 0
        beastFolder = '{0}/beast'.format(processingItemObj.root_folder)
        logDir = '{0}/logs'.format(processingItemObj.root_folder)
        PipelineLogger.log('manager', 'info',
                           'BeAST starting for {0}'.format(nativeFileName))
        PipelineLogger.log('manager', 'info',
                           'Current working folder : {0}'.format(os.getcwd()))
        try:
            distutils.dir_util.mkpath(logDir)
        except Exception as e:
            PipelineLogger.log('manager', 'error',
                               'Error in creating log folder \n {0}'.format(e))
            return 0

        id = '{0}{1}{2}{3}'.format(
            processingItemObj.subject_rid,
            processingItemObj.scan_date.replace('-', ''),
            processingItemObj.s_identifier, processingItemObj.i_identifier)
        beastCMD = 'source /opt/minc-1.9.15/minc-toolkit-config.sh; Pipelines/ADNI_T1/ADNI_V1_T1_BeAST {0} {1} {2} {3} {4} {5}'.format(
            id, nativeFileName, beastFolder, logDir, socket.gethostname(),
            50500)
        try:
            shutil.rmtree(beastFolder)
        except:
            pass
        try:
            distutils.dir_util.mkpath(beastFolder)
        except Exception as e:
            PipelineLogger.log(
                'manager', 'error',
                'Error in creating BeAST folder. \n {0}'.format(e))
            return 0

        PipelineLogger.log('manager', 'debug',
                           'Command : {0}'.format(beastCMD))
        os.chdir(pc.SourcePath)
        p = subprocess.Popen(beastCMD,
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             executable='/bin/bash')
        out, err = p.communicate()
        PipelineLogger.log(
            'manager', 'debug',
            'Beast Log Output : \n{0}'.format(out.decode("utf-8")))
        PipelineLogger.log('manager', 'debug',
                           'Beast Log Err : \n{0}'.format(err.decode("utf-8")))

        QSubJobHandler.submittedJobs[id] = QSubJob(id, '02:00:00',
                                                   processingItemObj, 'beast')
        return 1

    def runCivet(self, processingItemObj, maskStatus):
        nativeFileName = self.checkNative(processingItemObj)
        if not nativeFileName:
            return 0
        copyFolder = pc.T1TempDirForCIVETProcessing
        subjectFileName_base = '{0}_{1}{2}{3}{4}_{5}'.format(
            processingItemObj.study, processingItemObj.subject_rid,
            processingItemObj.scan_date.replace('-', ''),
            processingItemObj.s_identifier, processingItemObj.i_identifier,
            processingItemObj.modality.lower())
        jobId = '{0}_{1}_{2}_{3}{4}{5}{6}_CIVETRUN'.format(
            processingItemObj.study, processingItemObj.modality,
            processingItemObj.table_id, processingItemObj.subject_rid,
            processingItemObj.scan_date.replace('-', ''),
            processingItemObj.s_identifier, processingItemObj.i_identifier)
        checkJobPresentSql = "SELECT * FROM externalWaitingJobs WHERE JOB_ID = '{0}'".format(
            jobId)
        if len(self.DBClient.executeAllResults(checkJobPresentSql)) == 0:
            beastFileName = '{0}/beast/mask/{1}_skull_mask_native.mnc'.format(
                processingItemObj.root_folder, subjectFileName_base)
            beastMaskName_base = '{0}_{1}{2}{3}{4}_mask'.format(
                processingItemObj.study, processingItemObj.subject_rid,
                processingItemObj.scan_date.replace('-', ''),
                processingItemObj.s_identifier, processingItemObj.i_identifier)
            beastMaskName = '{0}/{1}.mnc'.format(copyFolder,
                                                 beastMaskName_base)
            manualFileName = '{0}/manual/mask/{1}_skull_mask_native.mnc'.format(
                processingItemObj.root_folder, subjectFileName_base)
            manualMaskName_base = '{0}_{1}{2}{3}{4}_mask'.format(
                processingItemObj.study, processingItemObj.subject_rid,
                processingItemObj.scan_date.replace('-', ''),
                processingItemObj.s_identifier, processingItemObj.i_identifier)
            manualMaskName = '{0}/{1}.mnc'.format(copyFolder,
                                                  manualMaskName_base)
            try:
                distutils.file_util.copy_file(nativeFileName, copyFolder)
                if maskStatus == 'B':
                    distutils.file_util.copy_file(beastFileName, beastMaskName)
                elif maskStatus == 'M':
                    distutils.file_util.copy_file(manualFileName,
                                                  manualMaskName)
                elif maskStatus == 'N':
                    pass
                else:
                    PipelineLogger.log(
                        'manager', 'error',
                        'Unknown mask status - {0} Entry : Processing ID - {1}, Table ID - {2}'
                        .format(maskStatus, processingItemObj.processing_rid,
                                processingItemObj.table_id))

                addExternalJobSQL = "INSERT INTO externalWaitingJobs VALUES ('{0}', '{1}', '{2}', NULL, NULL, NULL)".format(
                    jobId,
                    '{0}_{1}_Pipeline'.format(processingItemObj.study,
                                              processingItemObj.modality),
                    'CIVET')
                self.DBClient.executeNoResult(addExternalJobSQL)
            except Exception as e:
                PipelineLogger.log(
                    'manager', 'error',
                    'Error copying for CIVET input. Rolling back... - Processing Table ID -> {0} Table ID -> {1}'
                    .format(processingItemObj.processing_rid,
                            processingItemObj.table_id))
                PipelineLogger.log('manager', 'exception', e)
                nativeFileOnCopyFolder = '{0}/{1}'.format(
                    copyFolder, os.path.basename(nativeFileName))
                os.remove(nativeFileOnCopyFolder) if os.path.exists(
                    nativeFileOnCopyFolder) else None
                os.remove(beastMaskName) if os.path.exists(
                    beastMaskName) else None
                os.remove(manualMaskName) if os.path.exists(
                    manualMaskName) else None

    def requestQC(self, processingItemObj, qctype):
        qcFieldDict = dict(civet='QC', beast='BEAST_QC')
        qcFolderDict = {
            'civet': '{0}/civet'.format(processingItemObj.root_folder),
            'beast': '{0}/beast'.format(processingItemObj.root_folder)
        }
        tablename = '{0}_{1}_Pipeline'.format(processingItemObj.study,
                                              processingItemObj.modality)
        self.QCHandler.requestQC(processingItemObj.study, tablename,
                                 processingItemObj.table_id,
                                 qcFieldDict[qctype], qctype,
                                 qcFolderDict[qctype])
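
process() above is effectively a state machine over the pipeline-table flags: BeAST runs first, its mask goes to QC, CIVET runs against the approved mask, and the CIVET output goes to QC in turn. A driver sketch, where toProcessRows is a placeholder for the rows a pipeline manager fetched from the modality table.

t1Handler = ADNI_V1_T1()
for processingItem in toProcessRows:   # placeholder: rows from the ADNI_T1_Pipeline table
    t1Handler.process(processingItem)  # wraps the row in ProcessingItemObj, advances one step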
__author__ = 'sulantha'
from Utils.DbUtils import DbUtils
CSVFile = '/data/data03/sulantha/Downloads/missing_list_preprocessed.csv'
dbc = DbUtils()
with open(CSVFile, 'r') as csv_file:
    for line in csv_file:
        lin = line.strip()

        rid = lin.split('/')[6]
        print(rid)
        s_id = lin.split('/')[7].split('_')[-2]
        i_id = lin.split('/')[7].split('_')[-1]
        sql = "UPDATE ADNI_AV45_Pipeline SET SKIP = 0, QC = 0, FINISHED = 0, PROC_Failed = NULL, MANUAL_XFM = 'Req_man_reg' WHERE PROCESSING_TID IN (SELECT RECORD_ID FROM Processing WHERE RID = '{0}' AND MODALITY = 'AV45' AND VERSION = 'V2' AND S_IDENTIFIER = '{1}' )".format(rid, s_id)
        dbc.executeNoResult(sql)
Example #26
class Niak:
    def __init__(self):
        self.DBClient = DbUtils()

    def getScanType(self, processingItemObj):
        r = self.DBClient.executeAllResults(
            "SELECT SCAN_TYPE FROM Conversion WHERE STUDY = '{0}' AND RID = '{1}' "
            "AND SCAN_DATE = '{2}' AND S_IDENTIFIER = '{3}' "
            "AND I_IDENTIFIER = '{4}'".format(processingItemObj.study,
                                              processingItemObj.subject_rid,
                                              processingItemObj.scan_date,
                                              processingItemObj.s_identifier,
                                              processingItemObj.i_identifier))
        return r[0][0]

    def process(self, processingItemObj):
        try:
            matlabScript, nativeFileName, niakFolder = self.readTemplateFile(
                processingItemObj)
            PipelineLogger.log('manager', 'info',
                               'NIAK starting for {0}'.format(nativeFileName))
        except:
            return 0

        # Delete the PIPE.lock file, if it exists
        if os.path.isfile("%s/preprocessing/logs/PIPE.lock" % niakFolder):
            os.remove("%s/preprocessing/logs/PIPE.lock" % niakFolder)

        success = self.executeScript(processingItemObj, matlabScript,
                                     niakFolder)

        #### After, if Niak succeeded, concatenate all runs together using combiningRuns
        if False:
            if success:
                self.combiningRuns(processingItemObj)
            else:
                PipelineLogger.log()
                #### Report error

    def readTemplateFile(self, processingItemObj):
        niakTemplateFile = os.path.dirname(
            __file__) + '/MatlabScripts/niakPreprocessingTemplate.m'

        niakFolder = '{0}/niak'.format(processingItemObj.root_folder)
        logDir = '{0}/logs'.format(processingItemObj.root_folder)

        # Get the corresponding subject-space MRI path
        correspondingMRI = self.findCorrespondingMRI(processingItemObj)
        if not correspondingMRI:  # If there is no corresponding MRI file
            return 0
        else:
            anat = correspondingMRI + '/civet/native/*t1.mnc'  # correspondingMRI is the root folder of the matched T1
            anat = glob.glob(anat)[0]

        # Get all subjects
        patientInfo = "files_in.subject1.anat = '%s';" % (anat)
        for fmri in glob.glob(processingItemObj.converted_folder + '/*.mnc*'):
            iteration = fmri[fmri.rindex('_run') + 4:fmri.rindex('.mnc')]
            patientInfo = patientInfo + "\nfiles_in.subject1.fmri.session1{%s} = '%s'" % (
                iteration, fmri)

        # Read templateFileWithInformation
        with open(niakTemplateFile, 'r') as templateFile:
            templateFileWithInformation = templateFile.read()
            templateFile.close()

        # Replacing template placeholders with information
        replacing_dict = {
            '%{patient_information}': patientInfo,
            '%{opt.folder_out}': niakFolder,
            '%{niak_location}': config.niak_location,
            '%{nu_correct}': processingItemObj.parameters['nu_correct']
        }
        templateFileWithInformation = self.replaceString(
            templateFileWithInformation, replacing_dict)

        return templateFileWithInformation, fmri, niakFolder

    def findCorrespondingMRI(self, processingItemObj):
        # Find Matching T1
        matching_t1 = ADNI_T1_Fmri_Helper().getMatchingT1(processingItemObj)
        if not matching_t1:
            return 0

        # Find out whether T1 has been processed
        processed = ADNI_T1_Fmri_Helper().checkProcessed(matching_t1)
        if not processed:
            PipelineLogger.log(
                'root', 'error',
                'FMRI cannot be processed due to matching T1 not being processed.'
            )
            return 0
        else:
            return processed

    def replaceString(self, templateText, replacing_dict):
        for query, replacedInto in replacing_dict.items():
            templateText = templateText.replace(query, replacedInto)
        return templateText

    def createMatlabFile(self, matlabScript, niakFolder):
        matlab_file_path = niakFolder + '/preprocessing_script.m'
        if not os.path.exists(niakFolder):
            os.makedirs(niakFolder)
        with open(
                matlab_file_path, 'w'
        ) as matlab_file:  # Overwrite previous matlab script file if it already existed
            matlab_file.write(matlabScript)
        return matlab_file_path

    def executeScript(self, processingItemObj, matlabScript, niakFolder):

        # Create a matlab file to be called later on
        matlabFile = self.createMatlabFile(matlabScript, niakFolder)

        # Prepare matlab command
        #matlabCommand = '%s run %s;exit"' % (config.matlab_call, matlabFile)
        matlabCommand = '%s' % (matlabFile)
        # Creating log folder
        logDir = '{0}/logs'.format(processingItemObj.root_folder)
        try:
            distutils.dir_util.mkpath(logDir)
        except Exception as e:
            PipelineLogger.log('manager', 'error',
                               'Error in creating log folder \n {0}'.format(e))
            return 0

        # Create list of files that should be present
        fmri_file = niakFolder + '/fmri/fmri_subject1_session1_run1.mnc'
        anat_ln_file = niakFolder + '/anat/anat_subject1_nuc_stereolin.mnc'
        anat_nl_file = niakFolder + '/anat/anat_subject1_nuc_stereonl.mnc'
        fmri_mean_file = niakFolder + '/anat/func_subject1_mean_stereonl.mnc'
        func_coregister = niakFolder + '/quality_control/group_coregistration/func_tab_qc_coregister_stereonl.csv'
        anat_ln_coregister = niakFolder + '/quality_control/group_coregistration/anat_tab_qc_coregister_stereolin.csv'
        anat_nl_coregister = niakFolder + '/quality_control/group_coregistration/anat_tab_qc_coregister_stereonl.csv'
        func_motion = niakFolder + '/quality_control/group_motion/qc_scrubbing_group.csv'
        outputFiles = ' '.join([
            fmri_file, anat_ln_file, anat_nl_file, fmri_mean_file,
            func_coregister, anat_ln_coregister, anat_nl_coregister,
            func_motion
        ])

        # Prepare bash command
        id = '{0}{1}{2}{3}'.format(
            processingItemObj.subject_rid,
            processingItemObj.scan_date.replace('-', ''),
            processingItemObj.s_identifier, processingItemObj.i_identifier)
        command = 'Pipelines/ADNI_Fmri/MatlabScripts/ADNI_V1_startMatlabScript.sh %s %s %s %s %s %s %s' % \
                  (id, matlabCommand, niakFolder, logDir, socket.gethostname(), '50500', outputFiles)

        # Create NIAK folder
        if not os.path.exists(niakFolder):
            os.makedirs(niakFolder)

        # Run converter command
        PipelineLogger.log('converter', 'debug',
                           'Command : {0}'.format(command))
        p = subprocess.Popen(command,
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             executable='/bin/bash')
        out, err = p.communicate()
        PipelineLogger.log('converter', 'debug',
                           'Conversion Log Output : \n{0}'.format(out))
        PipelineLogger.log('converter', 'debug',
                           'Conversion Log Err : \n{0}'.format(err))

        QSubJobHandler.submittedJobs[id] = QSubJob(id, '01:00:00',
                                                   processingItemObj, 'niak')
        return 1

    def combiningRuns(self, processingItemObj):
        #### Needs to improve it a lot more
        command = "%s combiningRuns('%s', '%s', %s, %s, %s)" % \
                  (config.matlab_call, config.fmristat_location, config.emma_tools_location,
                   processingItemObj.root_folder, processingItemObj.subject_rid, '1')

        # Run matlab command
        PipelineLogger.log('processing', 'debug',
                           'Command : {0}'.format(command))
        p = subprocess.Popen(command,
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             executable='/bin/bash')
        out, err = p.communicate()
        PipelineLogger.log('processing', 'debug',
                           'combiningRuns Log Output : \n{0}'.format(out))
        PipelineLogger.log('processing', 'debug',
                           'combiningRuns Log Err : \n{0}'.format(err))

        return out
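
Niak.process above drives the whole flow: readTemplateFile fills the MATLAB template with the matched T1 and the fMRI runs, createMatlabFile writes it under the niak folder, and executeScript submits it through the qsub wrapper. A sketch with a placeholder item:

niak = Niak()
niak.process(processingItemObj)  # placeholder: a ProcessingItemObj for an FMRI scan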
__author__ = 'sulantha'
from Utils.DbUtils import DbUtils
CSVFile = '/data/data03/sulantha/Downloads/missing_list_preprocessed.csv'
dbc = DbUtils()
with open(CSVFile, 'r') as csv_file:
    for line in csv_file:
        lin = line.strip()

        rid = lin.split('/')[6]
        print(rid)
        s_id = lin.split('/')[7].split('_')[-2]
        i_id = lin.split('/')[7].split('_')[-1]
        sql = "UPDATE ADNI_AV45_Pipeline SET SKIP = 0, QC = 0, FINISHED = 0, PROC_Failed = NULL, MANUAL_XFM = 'Req_man_reg' WHERE PROCESSING_TID IN (SELECT RECORD_ID FROM Processing WHERE RID = '{0}' AND MODALITY = 'AV45' AND VERSION = 'V2' AND S_IDENTIFIER = '{1}' )".format(
            rid, s_id)
        dbc.executeNoResult(sql)
Example #28
from datetime import datetime
from Utils.DbUtils import DbUtils
import glob
import itertools

DBClient = DbUtils()
outLines = []
with open('/home/sulantha/Downloads/Av1451_V2.csv', 'r') as file:
    next(file)
    for line in file:
        row = line.split(',')
        rid = row[2]
        date = row[4].strip()
        dateT = datetime.strptime(date, '%Y-%m-%d')
        dateS = dateT.strftime('%Y-%m-%d')
        closestAV = [''] * 20
        closestFD = [''] * 20
        findAV45 = "SELECT * FROM Processing WHERE RID = {0} AND MODALITY = 'AV45' AND PROCESSED = 1 AND QCPASSED = 1 AND VERSION = 'V2'".format(
            rid)
        resvav45 = DBClient.executeAllResults(findAV45)
        if len(resvav45) > 0:
            sortedRecs = sorted(resvav45,
                                key=lambda x: abs(datetime.strptime(x[4].strftime('%Y-%m-%d'), '%Y-%m-%d') - dateT))
            closestDate = [k for k, g in itertools.groupby(sortedRecs, key=lambda x: abs(
                datetime.strptime(x[4].strftime('%Y-%m-%d'), '%Y-%m-%d') - dateT))][0]
            closestMatchedRecs = [list(g) for k, g in itertools.groupby(sortedRecs, key=lambda x: abs(
                datetime.strptime(x[4].strftime('%Y-%m-%d'), '%Y-%m-%d') - dateT))][0]
            closestAV = closestMatchedRecs[0]
        findFDG = "SELECT * FROM Processing WHERE RID = {0} AND MODALITY = 'FDG' AND PROCESSED = 1 AND QCPASSED = 1 AND VERSION = 'V2'".format(
            rid)
        resvFDG = DBClient.executeAllResults(findFDG)
__author__ = 'sulantha'
import datetime
from Utils.DbUtils import DbUtils
csvFile = '/data/data03/sulantha/Downloads/av45_list.csv'
MatchDBClient = DbUtils(database='Study_Data.ADNI')
DBClient = DbUtils()
with open(csvFile, 'r') as csv:
    next(csv)
    for line in csv:
        row = line.split(',')
        rid = row[0].strip()
        date = row[1].strip()
        dateT = datetime.datetime.strptime(date, '%m/%d/%Y')
        #dateT = datetime.datetime.strptime(date, '%Y-%m-%d')
        dateS = dateT.strftime('%Y-%m-%d')
        sql = "SELECT DISTINCT subject, visit, seriesid, imageid FROM PET_META_LIST WHERE subject like '%_%_{0}' and scandate = '{1}' and origproc = 'Original'".format(
            rid, dateS)
        result = MatchDBClient.executeAllResults(sql)
        checkDBSQL = "SELECT * FROM Conversion WHERE RID = '{0}' AND S_IDENTIFIER = '{1}' AND I_IDENTIFIER = '{2}'".format(
            rid, 'S{0}'.format(result[0][2]), 'I{0}'.format(result[0][3]))
        #print(checkDBSQL)
        resultN = DBClient.executeAllResults(checkDBSQL)
        if len(resultN) == 0:
            print('########################### Not in DB - {0} - {1}'.format(
                rid, date))
        else:
            pass
Example #30
class Conversion:
    def __init__(self):
        self.tableName = 'Conversion'
        self.DBClient = DbUtils()
        self.sqlBuilder = SQLBuilder()

    def getObjectFromTuple(self, tuple):
        valuesDict = dict(record_id=tuple[0],
                          study=tuple[1],
                          rid=tuple[2],
                          scan_type=tuple[3],
                          scan_date=tuple[4].strftime("%Y-%m-%d"),
                          scan_time=str(tuple[5]),
                          s_identifier=tuple[6],
                          i_identifier=tuple[7],
                          file_type=tuple[8],
                          raw_folder=tuple[9],
                          converted_folder=tuple[10],
                          version=tuple[11],
                          converted=tuple[12])
        return ConversionObject(valuesDict)

    def insertToTable(self, objList):
        for obj in objList:
            self.DBClient.executeNoResult(
                self.sqlBuilder.getSQL_AddNewEntryToConversionTable(
                    obj.sqlInsert()))

    def get_version(self, sortingObj, versionDict):
        if sortingObj.study == 'ADNI':
            dl_path = sortingObj.download_folder
            if 'Uniform' in dl_path:
                return 'V2'
            else:
                return versionDict[
                    sc.ProcessingModalityAndPipelineTypePerStudy[
                        sortingObj.study]
                    [sortingObj.
                     scan_type]] if sc.ProcessingModalityAndPipelineTypePerStudy[
                         sortingObj.study][
                             sortingObj.scan_type] in versionDict else 'V1'
        else:
            return versionDict[
                sc.ProcessingModalityAndPipelineTypePerStudy[sortingObj.study]
                [sortingObj.
                 scan_type]] if sc.ProcessingModalityAndPipelineTypePerStudy[
                     sortingObj.study][
                         sortingObj.scan_type] in versionDict else 'V1'

    def insertFromSortingObj(self, sortingObj, versionDict):
        sortingValues = sortingObj.getValuesDict()
        version = self.get_version(sortingObj, versionDict)
        sortingValues[
            'converted_folder'] = '{0}/{1}/{2}/{3}/{4}_{5}_{6}/{7}/converted/final'.format(
                sc.studyDatabaseRootDict[sortingObj.study], sortingObj.study,
                sortingObj.scan_type, sortingObj.rid, sortingObj.scan_date,
                sortingObj.s_identifier, sortingObj.i_identifier, version)
        sortingValues['version'] = version
        sortingValues['converted'] = 0
        self.insertToTable([ConversionObject(sortingValues)])

    def gettoBeConvertedPerStudy(self, study):
        toConvertList = self.DBClient.executeAllResults(
            self.sqlBuilder.getSQL_getToBeConvertedFileFromConversionTable(
                study))
        return [self.getObjectFromTuple(t) for t in toConvertList]

    def setConvertedTrue(self, convertionObj):
        convertionObj.converted = 1
        self.saveObj(convertionObj)

    def setConvertedFailed(self, convertionObj):
        convertionObj.skip = 1
        self.saveObj(convertionObj)

    def saveObj(self, convertionObj):
        self.DBClient.executeNoResult(
            self.sqlBuilder.getSQL_saveObjConversionTable(convertionObj))

    def getConvertedListPerStudy(self, study):
        convertedList = self.DBClient.executeAllResults(
            self.sqlBuilder.getSQL_getAllConvertedFromConvertionTable(study))
        return [self.getObjectFromTuple(t) for t in convertedList]
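
A minimal driver sketch for the Conversion class above, using only the methods shown here; convertScan is a hypothetical placeholder for the actual converter, which is not part of this example.

# Hypothetical driver loop; convertScan is an assumed callable, not pipeline code.
def convertPendingScans(study, convertScan):
    conversion = Conversion()
    for convertionObj in conversion.gettoBeConvertedPerStudy(study):
        try:
            convertScan(convertionObj)  # would populate convertionObj.converted_folder
            conversion.setConvertedTrue(convertionObj)
        except Exception:
            conversion.setConvertedFailed(convertionObj)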
class PipelineHandler:
    def __init__(self):
        self.processingPPDict = {'ADNI':{'V1':{'T1':ADNI_V1_T1(), 'FMRI':ADNI_V1_FMRI(), 'AV45':ADNI_V1_AV45(), 'FDG':ADNI_V1_FDG(), 'AV1451': ADNI_V1_AV1451()},
                                         'V2':{'T1':ADNI_V1_T1(), 'FMRI':ADNI_V1_FMRI(), 'AV45':ADNI_V2_AV45(), 'FDG':ADNI_V2_FDG(), 'AV1451': ADNI_V2_AV1451()}}}
        self.DBClient = DbUtils()
        self.QCH = QCHandler()

    def checkExternalJobs(self, study, modality):
        getExtJobSql = "SELECT * FROM externalWaitingJobs WHERE JOB_ID LIKE '{0}_{1}_%'".format(study, modality)
        extJobs = self.DBClient.executeAllResults(getExtJobSql)
        for job in extJobs:
            # JOB_ID layout: <STUDY>_<MODALITY>_<TABLE_ID>_<SUBJECT_SCAN_ID>_<JOB_TYPE>
            jobType = job[0].split('_')[-1]
            reportTable = job[1]
            tableID = job[0].split('_')[2]
            reportField = job[2]
            subjectScanID = job[0].split('_')[3]
            success = 0
            if jobType == 'CIVETRUN':
                if glob.glob('{0}/{1}_{2}_*'.format(PipelineConfig.T1TempDirForCIVETDownload, study, subjectScanID)):
                    getProccessRecSql = "SELECT * FROM Processing WHERE RECORD_ID IN (SELECT PROCESSING_TID FROM {0}_T1_Pipeline WHERE RECORD_ID = {1})".format(study, tableID)
                    processingEntry = self.DBClient.executeAllResults(getProccessRecSql)[0]

                    civetFolder = '{0}/civet'.format(processingEntry[8])

                    if os.path.exists(civetFolder):
                        shutil.rmtree(civetFolder)
                    try:
                        PipelineLogger.log('manager', 'info', 'Copying - {0} -> {1}'.format(glob.glob('{0}/{1}_{2}_*'.format(PipelineConfig.T1TempDirForCIVETDownload, study, subjectScanID))[0], civetFolder))
                        dir_util.copy_tree(glob.glob('{0}/{1}_{2}_*'.format(PipelineConfig.T1TempDirForCIVETDownload, study, subjectScanID))[0], civetFolder)
                        success = 1
                    except Exception:  # copy failed; leave success = 0 so the job is retried on the next pass
                        success = 0
                else:
                    continue
            else:
                PipelineLogger.log('manager', 'error', 'Unknown external job type - {}'.format(jobType))

            if success:
                updateSQL = "UPDATE {0} SET {1} = 1 WHERE RECORD_ID = {2}".format(reportTable, reportField, tableID)
                self.DBClient.executeNoResult(updateSQL)

                if jobType == 'CIVETRUN':
                    finishSQL = "UPDATE {0} SET FINISHED = 1 WHERE RECORD_ID = {1}".format(reportTable, tableID)
                    self.DBClient.executeNoResult(finishSQL)
                    modal_table = reportTable
                    modal_tableId = tableID
                    qcField = 'QC'
                    qctype = 'civet'
                    qcFolder = civetFolder
                    self.QCH.requestQC(study, modal_table, modal_tableId, qcField, qctype, qcFolder)


                rmSql = "DELETE FROM externalWaitingJobs WHERE JOB_ID LIKE '{0}_{1}_{2}_{3}_%'".format(study, modality, tableID, subjectScanID)
                self.DBClient.executeNoResult(rmSql)


    def process(self, study, modality):
        os.environ['PATH'] = ':'.join(libpath.PATH)
        os.environ['LD_LIBRARY_PATH'] = ':'.join(libpath.LD_LIBRARY_PATH)
        os.environ['LD_LIBRARYN32_PATH'] = ':'.join(libpath.LD_LIBRARYN32_PATH)
        os.environ['PERL5LIB'] = ':'.join(libpath.PERL5LIB)
        os.environ['MNI_DATAPATH'] = ':'.join(libpath.MNI_DATAPATH)
        os.environ['ROOT'] = ';'.join(libpath.ROOT)
        os.environ['MINC_TOOLKIT_VERSION'] = libpath.MINC_TOOLKIT_VERSION
        os.environ['MINC_COMPRESS'] = libpath.MINC_COMPRESS
        os.environ['MINC_FORCE_V2'] = libpath.MINC_FORCE_V2

        toProcessinModalityPerStudy = self.DBClient.executeAllResults("SELECT * FROM Processing INNER JOIN (SELECT * FROM {0}_{1}_Pipeline WHERE NOT (FINISHED OR SKIP)) as TMP ON Processing.RECORD_ID=TMP.PROCESSING_TID".format(study, modality))
        for processingItem in toProcessinModalityPerStudy:
            version = processingItem[10]
            # Dispatch to the pipeline object registered for this study, version, and modality
            self.processingPPDict[study][version][modality].process(processingItem)

        return 0


    def addToPipelineTable(self, processingObj):
        study = processingObj.study
        version = processingObj.version
        modality = processingObj.modality
        r_id = processingObj.record_id

        addToTableDict = dict(T1="INSERT IGNORE INTO {0}_T1_Pipeline VALUES (NULL, {1}, \"{2}\", 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL, NULL)".format(study, r_id, PipelineConfig.defaultT1config),
                              AV45="INSERT IGNORE INTO {0}_AV45_Pipeline VALUES (NULL, {1}, \"{2}\", '{3}', 0, 0, 0, NULL, NULL)".format(study, r_id, PipelineConfig.defaultAV45config, ''),
                              AV1451="INSERT IGNORE INTO {0}_AV1451_Pipeline VALUES (NULL, {1}, \"{2}\", '{3}', 0, 0, 0, NULL, NULL)".format(study, r_id, PipelineConfig.defaultAV1451config, ''),
                              FDG="INSERT IGNORE INTO {0}_FDG_Pipeline VALUES (NULL, {1}, \"{2}\", '{3}', 0, 0, 0, NULL, NULL)".format(study, r_id, PipelineConfig.defaultFDGconfig, ''),
                              FMRI="INSERT IGNORE INTO {0}_FMRI_Pipeline VALUES (NULL, {1}, \"{2}\", '{3}', 0, 0, 0, NULL, NULL)".format(study, r_id, PipelineConfig.defaultFMRIconfig, 'NIAK_STH_COMESHERE'))

        self.DBClient.executeNoResult(addToTableDict[modality])
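
A sketch of how a manager loop might drive PipelineHandler, based only on the methods defined above; the (study, modality) pairs are illustrative.

# Illustrative manager loop; the (study, modality) list is an assumption.
handler = PipelineHandler()
for study, modality in [('ADNI', 'T1'), ('ADNI', 'AV45')]:
    handler.checkExternalJobs(study, modality)  # fold finished external (CIVET) jobs back in
    handler.process(study, modality)            # dispatch unfinished Processing rows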
Example #32
 def __init__(self):
     self.tableName = 'Conversion'
     self.DBClient = DbUtils()
     self.sqlBuilder = SQLBuilder()
 def __init__(self):
     self.DBClient = DbUtils()
     self.MatchDBClient = DbUtils(database=pc.DIAN_dataMatchDBName)
Example #34
from Utils.DbUtils import DbUtils
DBClient = DbUtils()

getAllTodoSQL = "SELECT JOB_ID FROM externalWaitingJobs WHERE `JOB_ID` NOT LIKE '%CIVETRUN'"
res = DBClient.executeAllResults(getAllTodoSQL)

for job_id in res:
    job_id = job_id[0]
    job_type = job_id.split('_')[-1]
    old_job_id = job_id
    # Replace only the trailing job-type token; str.replace() would also hit an
    # earlier occurrence of the same substring inside the ID.
    new_job_id = job_id[:-len(job_type)] + 'CIVETRUN'
    ins_sql = "UPDATE externalWaitingJobs SET `JOB_ID` = '{0}' WHERE `JOB_ID` = '{1}'".format(
        new_job_id, old_job_id)
    try:
        DBClient.executeNoResult(ins_sql)
    except Exception as e:
        print(new_job_id, e)
Example #35
class ADNI_T1_Fmri_Helper:
    def __init__(self):
        self.DBClient = DbUtils()
        self.MatchDBClient = DbUtils(database=pc.ADNI_dataMatchDBName)

    def getMatchingT1(self, processingItemObj):
        modalityID = '{0}{1}{2}{3}{4}{5}{6}'.format(
            processingItemObj.study, processingItemObj.version,
            processingItemObj.subject_rid, processingItemObj.modality,
            processingItemObj.scan_date.replace('-', ''),
            processingItemObj.s_identifier, processingItemObj.i_identifier)
        getFromMatchTableSQL = "SELECT * FROM MatchT1 WHERE MODALITY_ID = '{0}'".format(
            modalityID)

        # Find matching record in matching T1 table
        existingMatchedRec = self.DBClient.executeAllResults(
            getFromMatchTableSQL)
        if len(existingMatchedRec) == 1:
            getConvSQL = "SELECT * FROM Conversion WHERE RECORD_ID = '{0}'".format(
                existingMatchedRec[0][3])
            return self.DBClient.executeAllResults(getConvSQL)[0]
        else:
            # If can't find them, look into MRIList to find an equivalent
            getFmriRecordSQL = "SELECT * FROM MRILIST WHERE subject LIKE '%_%_{0}' AND seriesid = {1} AND imageuid = {2}".format(
                processingItemObj.subject_rid,
                processingItemObj.s_identifier.replace('S', ''),
                processingItemObj.i_identifier.replace('I', ''))
            FmriRecord = self.MatchDBClient.executeAllResults(getFmriRecordSQL)
            if not FmriRecord:
                PipelineLogger.log(
                    'root', 'error',
                    'Cannot find Fmri record : {0} - {1} - {2}'.format(
                        processingItemObj.subject_rid,
                        processingItemObj.s_identifier.replace('S', ''),
                        processingItemObj.i_identifier.replace('I', '')))
                return None

            visit_code = pc.ADNI_visitCode_Dict[FmriRecord[0][2]]
            getMRIRecordsSQL = "SELECT * FROM MPRAGEMETA WHERE subjectid LIKE '%_%_{0}'".format(
                processingItemObj.subject_rid)

            mrirecords = self.MatchDBClient.executeAllResults(getMRIRecordsSQL)
            if not mrirecords:
                PipelineLogger.log(
                    'root', 'error',
                    '################################  - Error !!!!! Cannot find any MRI records : {0} - Please check ADNI recs. ################################'
                    .format(processingItemObj.subject_rid))
                return None

            # getMRISecondarySQL = "SELECT * FROM MRILIST WHERE subject LIKE '%_%_{0}'".format(processingItemObj.subject_rid)
            # mriSecondaryRecords = self.MatchDBClient.executeAllResults(getMRISecondarySQL)
            # t_mrirecords = mrirecords
            # for record in mriSecondaryRecords:
            #     distint = 1
            #     for i in t_mrirecords:
            #         if record[7] == i[7] and record[8] == i[8]:
            #             distint = 0
            #     if distint:
            #         mrirecords.append(record)

            matchedT1Recs = []
            for rec in mrirecords:
                if pc.ADNI_visitCode_Dict[rec[2]] == visit_code:
                    matchedT1Recs.append(rec)
            if len(matchedT1Recs) == 0:
                PipelineLogger.log(
                    'root', 'error',
                    'Cannot match visit codes for : {0} - {1} - {2} - Searching based on scan date. +/- 60 days from PET date'
                    .format(processingItemObj.subject_rid,
                            processingItemObj.modality, visit_code))
                pet_date = datetime.strptime(processingItemObj.scan_date,
                                             '%Y-%m-%d')
                sortedRecs = sorted(
                    mrirecords,
                    key=lambda x: abs(
                        datetime.strptime(x[5], '%Y-%m-%d') - pet_date))
                closestDate = [
                    k for k, g in itertools.groupby(
                        sortedRecs,
                        key=lambda x: abs(
                            datetime.strptime(x[5], '%Y-%m-%d') - pet_date))
                ][0]
                PipelineLogger.log(
                    'root', 'error',
                    'PET MRI Matching based on dates - match visit codes for : {0} - {1} - {2} - Distance between MRI/PET : {3} days.'
                    .format(processingItemObj.subject_rid,
                            processingItemObj.modality, visit_code,
                            closestDate))
                closestMatchedRecs = [
                    list(g) for k, g in itertools.groupby(
                        sortedRecs,
                        key=lambda x: abs(
                            datetime.strptime(x[5], '%Y-%m-%d') - pet_date))
                ][0]
                matchedT1Recs = closestMatchedRecs
            if len(matchedT1Recs) == 0:
                PipelineLogger.log(
                    'root', 'error',
                    'Cannot match visit codes for : {0} - {1} - {2}'.format(
                        processingItemObj.subject_rid,
                        processingItemObj.modality, visit_code))
                return None

            matchedT1withScanDescriptions = []
            for rec in matchedT1Recs:
                getScanFromConversionSQL = "SELECT * FROM Conversion WHERE STUDY = '{0}' AND S_IDENTIFIER = '{1}' AND I_IDENTIFIER = '{2}' AND SKIP = 0".format(
                    processingItemObj.study, 'S{0}'.format(rec[7]),
                    'I{0}'.format(rec[8]))
                t1_conversion = self.DBClient.executeAllResults(
                    getScanFromConversionSQL)
                if len(t1_conversion) > 0:
                    matchedT1withScanDescriptions.append(t1_conversion[0])
                else:
                    PipelineLogger.log(
                        'root', 'error',
                        'Corresponding MRI was not found in the system : {0} - {1} - {2}'
                        .format(processingItemObj.subject_rid,
                                'S{0}'.format(rec[7]), 'I{0}'.format(rec[8])))
                    continue
            if len(matchedT1withScanDescriptions) < 1:
                PipelineLogger.log(
                    'root', 'error',
                    'Matched T1s are not in the database. : Matched T1 s - \n {0}'
                    .format(matchedT1Recs))
                return None
            else:
                if len(matchedT1withScanDescriptions) == 1:
                    ## ONLY ONE MATCHED T1. GOOD> CHECK IF THE T1 is a good scan type and not a bluff !!!
                    if matchedT1withScanDescriptions[0][
                            3] in pc.ADNI_T1_match_accepted_scantypes:
                        self.addToMatchT1Table(
                            processingItemObj, modalityID,
                            matchedT1withScanDescriptions[0])
                        return matchedT1withScanDescriptions[0]
                    else:
                        PipelineLogger.log(
                            'root', 'error',
                            'Matched T1s is not accepted scan type. : Matched T1 s - \n {0}'
                            .format(matchedT1withScanDescriptions[0]))
                        return None

                else:
                    #### MORE THAN ONE FOUND. SELECT ONE BASED ON SCAN TYPE PRIORITY
                    sortedList = sorted(matchedT1withScanDescriptions,
                                        key=lambda x:
                                        (pc.ADNI_T1_match_scantype_priorityList
                                         .index(x[3]), -x[5]))
                    self.addToMatchT1Table(processingItemObj, modalityID,
                                           sortedList[0])
                    return sortedList[0]

    def checkProcessed(self, t1Record):
        subject_id = t1Record[2]
        version = t1Record[11]
        s_id = t1Record[6]
        i_id = t1Record[7]
        checkProcessedSQL = "SELECT * FROM Processing WHERE RID = '{0}' AND VERSION = '{1}' AND S_IDENTIFIER = '{2}' AND I_IDENTIFIER = '{3}'".format(
            subject_id, version, s_id, i_id)
        results = self.DBClient.executeAllResults(checkProcessedSQL)
        if len(results) < 1:  # indexing [0] directly would raise IndexError when no row exists
            PipelineLogger.log(
                'root', 'error',
                'Matched T1 is not added to the processing table. {0} - {1} - {2}'
                .format(subject_id, s_id, i_id))
            return False
        else:
            result = results[0]
            if result[12] == 1 and result[13] == 1:
                PipelineLogger.log(
                    'root', 'debug',
                    'Matched T1 is processed and QC passed. {0} - {1} - {2}'.
                    format(subject_id, s_id, i_id))
                return result[8]
            else:
                PipelineLogger.log(
                    'root', 'error',
                    'Matched T1 is not processed or QC failed. {0} - {1} - {2}'.
                    format(subject_id, s_id, i_id))
                self.startProcessOFT1(result)
                return False

    def addToMatchT1Table(self, processingItemObj, modalityID, t1Record):
        pet_date = datetime.strptime(processingItemObj.scan_date, '%Y-%m-%d')
        mri_date = datetime.combine(t1Record[4], datetime.min.time())
        date_diff = abs(mri_date - pet_date)
        t1ID = '{0}{1}{2}_x_{3}_x_{4}{5}{6}'.format(
            t1Record[1], t1Record[11], t1Record[2], t1Record[3],
            t1Record[4].strftime('%Y-%m-%d').replace('-', ''), t1Record[6],
            t1Record[7])
        conversionID = t1Record[0]
        sql = "INSERT IGNORE INTO MatchT1 VALUES (Null, '{0}', '{1}', '{2}', {3})".format(
            modalityID, t1ID, conversionID, date_diff.days)
        self.DBClient.executeNoResult(sql)

    def startProcessOFT1(self, processTableEntry):
        recordId = processTableEntry[0]
        study = processTableEntry[1]
        sql = "UPDATE {0}_T1_Pipeline SET SKIP = 0 WHERE PROCESSING_TID = {1}".format(
            study, recordId)
        self.DBClient.executeNoResult(sql)
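
A hedged sketch of the intended call sequence for ADNI_T1_Fmri_Helper (the same sequence Niak.findCorrespondingMRI uses further below); the wrapper function is hypothetical.

# Hypothetical wrapper: match an fMRI item to its T1 and check the T1's state.
def matchAndCheckT1(processingItemObj):
    helper = ADNI_T1_Fmri_Helper()
    t1_record = helper.getMatchingT1(processingItemObj)  # Conversion row, or None if unmatched
    if not t1_record:
        return None
    # Root folder of the processed T1 if processed and QC-passed, else False
    return helper.checkProcessed(t1_record)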
__author__ = 'sulantha'
from Utils.DbUtils import DbUtils
DBClient = DbUtils()
RIDList = ['4225','4746','4799','4136','4142','4192','4713','4960','4387','0021','4827','4579','4580','4616','4668','4696','4809','4549','4680','5012','5019','4674','4757','4385','4721','4947','4714','4715','4736','4706','4720','4661','4728','4767','4739','4089','4379','0382','4732','0230','4586','4653','4671','4742','4369','4589','4730','4676','4689','4722','4723','4587','4631','4632','4672','4678','4756','4711','4764']

for rid in RIDList:
    sql1 = "DELETE FROM Sorting WHERE RID = {0} AND SCAN_TYPE NOT IN ('AV45', 'FDG')".format(rid)
    DBClient.executeNoResult(sql1)
    sql2 = "DELETE FROM Conversion WHERE RID = {0} AND SCAN_TYPE NOT IN ('AV45', 'FDG')".format(rid)
    DBClient.executeNoResult(sql2)
    sql3 = "SELECT RECORD_ID FROM Processing WHERE RID = {0} AND MODALITY NOT IN ('AV45', 'FDG')".format(rid)
    recs = DBClient.executeAllResults(sql3)
    for rec in recs:
        sql4 = "DELETE FROM ADNI_T1_Pipeline WHERE PROCESSING_TID = {0}".format(rec[0])
        DBClient.executeNoResult(sql4)
    sql5 = "DELETE FROM Processing WHERE RID = {0} AND MODALITY NOT IN ('AV45', 'FDG')".format(rid)
    DBClient.executeNoResult(sql5)
Example #37
 def __init__(self):
     self.DBClient = DbUtils()
     self.sqlBuilder = SQLBuilder()
__author__ = 'sulantha'
import datetime
from Utils.DbUtils import DbUtils
csvFile = '/data/data03/sulantha/Downloads/av45_list.csv'
MatchDBClient = DbUtils(database='Study_Data.ADNI')
DBClient = DbUtils()
with open(csvFile, 'r') as csvf:  # renamed from 'csv' to avoid shadowing the csv module
    next(csvf)  # skip the header row
    for line in csvf:
        row = line.split(',')
        rid = row[0].strip()
        date = row[1].strip()
        dateT = datetime.datetime.strptime(date, '%m/%d/%Y')
        dateS = dateT.strftime('%Y-%m-%d')
        sql = "SELECT DISTINCT subject, visit, seriesid, imageid FROM PET_META_LIST WHERE subject like '%_%_{0}' and scandate = '{1}' and origproc = 'Original'".format(rid, dateS)
        result = MatchDBClient.executeAllResults(sql)
        if not result:  # guard: no PET meta record, so indexing result[0] would raise IndexError
            print('########################### Not in PET_META_LIST - {0} - {1}'.format(rid, date))
            continue
        checkDBSQL = "SELECT * FROM Conversion WHERE RID = '{0}' AND S_IDENTIFIER = '{1}' AND I_IDENTIFIER = '{2}'".format(rid, 'S{0}'.format(result[0][2]), 'I{0}'.format(result[0][3]))
        resultN = DBClient.executeAllResults(checkDBSQL)
        if len(resultN) == 0:
            print('########################### Not in DB - {0} - {1}'.format(rid, date))
__author__ = 'sulantha'
import datetime
from Utils.DbUtils import DbUtils
import glob
DBClient = DbUtils()
outLines = []
with open('/data/data03/sulantha/MarinaAnalysis/AV45_list_with_dates.csv',
          'r') as file:
    next(file)
    for line in file:
        row = line.split(',')
        rid = row[0]
        date = row[1].strip()
        dateT = datetime.datetime.strptime(date, '%Y-%m-%d')
        dateS = dateT.strftime('%Y-%m-%d')
        findSQLV1 = "SELECT * FROM Processing WHERE RID = {0} AND MODALITY = 'AV45' AND SCAN_DATE = '{1}' AND VERSION = '{2}'".format(
            rid, dateS, 'V1')
        resv1 = DBClient.executeAllResults(findSQLV1)
        findSQLV2 = "SELECT * FROM Processing WHERE RID = {0} AND MODALITY = 'AV45' AND SCAN_DATE = '{1}' AND VERSION = '{2}'".format(
            rid, dateS, 'V2')
        resv2 = DBClient.executeAllResults(findSQLV2)

        if len(resv1) == 0:  # 'is' compares identity, not value; use == for the count
            v1Path = ''
        elif len(resv1) == 1:
            if resv1[0][12] == 1:
                v1Path = '{0}/processed/final/*tal_nlin_pbavg_ref_cerGM_wmnorm_085.mnc'.format(
                    resv1[0][8])
                v1Path = glob.glob(v1Path)[0]
            else:
                v1Path = ''
Example #40
 def __init__(self):
     self.tableName = 'Conversion'
     self.DBClient = DbUtils()
     self.sqlBuilder = SQLBuilder()
 def __init__(self):
     self.DBClient = DbUtils()
     self.MatchDBClient = DbUtils(database=pc.ADNI_dataMatchDBName)
     self.PETHelper = PETHelper()
Example #42
import sys
import argparse
sys.path.append('/home/sulantha/PycharmProjects/Processing_Pipeline')
from Utils.DbUtils import DbUtils
from Config import QCConfig
import os, subprocess, signal
import getpass
import hashlib
import psutil

parser = argparse.ArgumentParser()
parser.add_argument('-s','--study', help='Study name. ', choices=['adni'])
parser.add_argument('-t','--type', help='The type of qc. ', choices=['civet', 'av45', 'beast', 'fdg', 'fmri', 'av1451'])
parser.add_argument('-u','--user', help='Username ')
parser.add_argument('--createUser', help=argparse.SUPPRESS)
args = parser.parse_args()

DBClient = DbUtils()
currentRec = None

def kill(proc_pid):
    process = psutil.Process(proc_pid)
    for proc_ in process.children(recursive=True):
        proc_.kill()
    process.kill()

def runCIVETQC(study, username):
    while 1:
        getEntrySql = "SELECT * FROM QC WHERE QC_TYPE = 'civet' AND STUDY = '{0}' AND SKIP = 0 AND START = 0 AND END = 0 LIMIT 1".format(study)
        resT = DBClient.executeAllResults(getEntrySql)
        if len(resT) < 1:
            print('No files to QC. ')
            break
Example #43
class Niak:
    def __init__(self):
        self.DBClient = DbUtils()

    def getScanType(self, processingItemObj):
        r = self.DBClient.executeAllResults("SELECT SCAN_TYPE FROM Conversion WHERE STUDY = '{0}' AND RID = '{1}' "
                                        "AND SCAN_DATE = '{2}' AND S_IDENTIFIER = '{3}' "
                                        "AND I_IDENTIFIER = '{4}'".format(processingItemObj.study,
                                                                          processingItemObj.subject_rid,
                                                                          processingItemObj.scan_date,
                                                                          processingItemObj.s_identifier,
                                                                          processingItemObj.i_identifier))
        return r[0][0]

    def process(self, processingItemObj):
        try:
            matlabScript, nativeFileName, niakFolder = self.readTemplateFile(processingItemObj)
            PipelineLogger.log('manager', 'info', 'NIAK starting for {0}'.format(nativeFileName))
        except Exception:  # template could not be read (e.g. no matching T1); nothing to run
            return 0

        # Delete the PIPE.lock file if it exists
        if os.path.isfile("%s/preprocessing/logs/PIPE.lock" % niakFolder):
            os.remove("%s/preprocessing/logs/PIPE.lock" % niakFolder)

        success = self.executeScript(processingItemObj, matlabScript, niakFolder)

        #### After NIAK succeeds, all runs could be concatenated using combiningRuns.
        #### This block is intentionally disabled (if False); note the bare
        #### PipelineLogger.log() call below is a stub and would need real arguments.
        if False:
            if success:
                self.combiningRuns(processingItemObj)
            else:
                PipelineLogger.log()
                #### Report error

    def readTemplateFile(self, processingItemObj):
        niakTemplateFile = os.path.dirname(__file__) + '/MatlabScripts/niakPreprocessingTemplate.m'

        niakFolder = '{0}/niak'.format(processingItemObj.root_folder)
        logDir = '{0}/logs'.format(processingItemObj.root_folder)

        # Get the corresponding subject-space MRI path
        correspondingMRI = self.findCorrespondingMRI(processingItemObj)
        if not correspondingMRI: # If there is no corresponding MRI file
            return 0
        else:
            # checkProcessed returns the root folder of the processed T1; the CIVET native T1 lives under it
            anat = correspondingMRI + '/civet/native/*t1.mnc'
            anat = glob.glob(anat)[0]

        # Get all subjects
        patientInfo = "files_in.subject1.anat = '%s';" % (anat)
        for fmri in glob.glob(processingItemObj.converted_folder + '/*.mnc*'):
            iteration = fmri[fmri.rindex('_run') + 4 : fmri.rindex('.mnc')]
            patientInfo = patientInfo + "\nfiles_in.subject1.fmri.session1{%s} = '%s'" % (iteration, fmri)

        # Read the template file (the with-block closes it automatically)
        with open(niakTemplateFile, 'r') as templateFile:
            templateFileWithInformation = templateFile.read()

        # Replacing template placeholders with information
        replacing_dict = {'%{patient_information}': patientInfo,
                          '%{opt.folder_out}': niakFolder,
                          '%{niak_location}': config.niak_location,
                          '%{nu_correct}': processingItemObj.parameters['nu_correct']
                          }
        templateFileWithInformation = self.replaceString(templateFileWithInformation, replacing_dict)

        return templateFileWithInformation, fmri, niakFolder

    def findCorrespondingMRI(self, processingItemObj):
        # Find the matching T1
        matching_t1 = ADNI_T1_Fmri_Helper().getMatchingT1(processingItemObj)
        if not matching_t1:
            return 0

        # Find out whether the T1 has been processed
        processed = ADNI_T1_Fmri_Helper().checkProcessed(matching_t1)
        if not processed:
            PipelineLogger.log('root', 'error', 'FMRI cannot be processed due to matching T1 not being processed.')
            return 0
        else:
            return processed

    def replaceString(self, templateText, replacing_dict):
        for query, replacedInto in replacing_dict.items():
            templateText = templateText.replace(query, replacedInto)
        return templateText


    def createMatlabFile(self, matlabScript, niakFolder):
        matlab_file_path = niakFolder + '/preprocessing_script.m'
        if not os.path.exists(niakFolder):
            os.makedirs(niakFolder)
        with open(matlab_file_path, 'w') as matlab_file:  # Overwrite previous matlab script file if it already existed
            matlab_file.write(matlabScript)
        return matlab_file_path


    def executeScript(self, processingItemObj, matlabScript, niakFolder):

        # Create a matlab file to be called later on
        matlabFile = self.createMatlabFile(matlabScript, niakFolder)

        # Prepare matlab command
        matlabCommand = '%s run %s;exit"' % (config.matlab_call, matlabFile)

        # Creating log folder
        logDir = '{0}/logs'.format(processingItemObj.root_folder)
        try:
            distutils.dir_util.mkpath(logDir)
        except Exception as e:
            PipelineLogger.log('manager', 'error', 'Error in creating log folder \n {0}'.format(e))
            return 0

        # Create list of files that should be present
        fmri_file = niakFolder + '/fmri/fmri_subject1_session1_run1.mnc'
        anat_ln_file = niakFolder + '/anat/anat_subject1_nuc_stereolin.mnc'
        anat_nl_file = niakFolder + '/anat/anat_subject1_nuc_stereonl.mnc'
        fmri_mean_file = niakFolder + '/anat/func_subject1_mean_stereonl.mnc'
        func_coregister = niakFolder + '/quality_control/group_coregistration/func_tab_qc_coregister_stereonl.csv'
        anat_ln_coregister = niakFolder + '/quality_control/group_coregistration/anat_tab_qc_coregister_stereolin.csv'
        anat_nl_coregister = niakFolder + '/quality_control/group_coregistration/anat_tab_qc_coregister_stereonl.csv'
        func_motion = niakFolder + '/quality_control/group_motion/qc_scrubbing_group.csv'
        outputFiles = ' '.join([fmri_file, anat_ln_file, anat_nl_file, fmri_mean_file, func_coregister,
                                anat_ln_coregister, anat_nl_coregister, func_motion])

        # Prepare bash command
        id = '{0}{1}{2}{3}'.format(processingItemObj.subject_rid, processingItemObj.scan_date.replace('-', ''),
                                   processingItemObj.s_identifier, processingItemObj.i_identifier)
        command = '%s; Pipelines/ADNI_Fmri/MatlabScripts/startMatlabScript.sh %s %s %s %s %s %s %s' % \
                  (config.sourcing, id, matlabCommand, niakFolder, logDir, socket.gethostname(), '50500', outputFiles)

        # Create NIAK folder
        if not os.path.exists(niakFolder):
            os.makedirs(niakFolder)

        # Run converter command
        PipelineLogger.log('converter', 'debug', 'Command : {0}'.format(command))
        p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, executable='/bin/bash')
        out, err = p.communicate()
        PipelineLogger.log('converter', 'debug', 'Conversion Log Output : \n{0}'.format(out))
        PipelineLogger.log('converter', 'debug', 'Conversion Log Err : \n{0}'.format(err))

        QSubJobHandler.submittedJobs[id] = QSubJob(id, '01:00:00', processingItemObj, 'niak')
        return 1

    def combiningRuns(self, processingItemObj):
        #### Needs to improve it a lot more
        command = "%s combiningRuns('%s', '%s', %s, %s, %s)" %\
                  (config.matlab_call, config.fmristat_location, config.emma_tools_location,
                   processingItemObj.root_folder, processingItemObj.subject_rid, '1')

        # Run matlab command
        PipelineLogger.log('processing', 'debug', 'Command : {0}'.format(command))
        p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, executable='/bin/bash')
        out, err = p.communicate()
        PipelineLogger.log('processing', 'debug', 'combiningRuns Log Output : \n{0}'.format(out))
        PipelineLogger.log('processing', 'debug', 'combiningRuns Log Err : \n{0}'.format(err))

        return out
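
readTemplateFile fills the MATLAB template through plain string substitution; below is a minimal standalone illustration of that %{...} mechanism (the placeholder name is one actually used above; the value is made up).

# Standalone illustration of the placeholder substitution done by replaceString.
template = "opt.folder_out = '%{opt.folder_out}';"
replacing_dict = {'%{opt.folder_out}': '/data/subject1/niak'}  # value is illustrative
for query, replacedInto in replacing_dict.items():
    template = template.replace(query, replacedInto)
print(template)  # opt.folder_out = '/data/subject1/niak';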
Example #44
import sys
import argparse
sys.path.append('/home/sulantha/PycharmProjects/Processing_Pipeline')
from Utils.DbUtils import DbUtils
from Config import QCConfig
import os, subprocess, signal
import getpass
import hashlib
import psutil

parser = argparse.ArgumentParser()
parser.add_argument('-s','--study', help='Study name. ', choices=['adni', 'dian'])
parser.add_argument('-t','--type', help='The type of qc. ', choices=['civet', 'av45', 'beast', 'fdg', 'fmri', 'av1451', 'pib'])
parser.add_argument('-u','--user', help='Username ')
parser.add_argument('--createUser', help=argparse.SUPPRESS)
args = parser.parse_args()

DBClient = DbUtils()
currentRec = None

def kill(proc_pid):
    process = psutil.Process(proc_pid)
    for proc_ in process.children(recursive=True):
        proc_.kill()
    process.kill()

def runCIVETQC(study, username):
    while 1:
        getEntrySql = "SELECT * FROM QC WHERE QC_TYPE = 'civet' AND STUDY = '{0}' AND SKIP = 0 AND START = 0 AND END = 0 LIMIT 1".format(study)
        try:
            resT = DBClient.executeSomeResults(getEntrySql, 1)[0]
        except IndexError:
            print('No files to QC. ')
            break  # without this the loop would spin forever once the queue is empty
__author__ = 'sulantha'

maskList = '/home/sulantha/Desktop/ManualMaskMatch.csv'
outputpath = '/data/data03/MANUAL_MASK'
from Utils.DbUtils import DbUtils
import shutil

Dbclient = DbUtils()
with open(maskList, 'r') as inf:
    for line in inf:
        row = line.split(',')
        if row[0].strip() == 'None' or row[1].strip() == 'None' or row[2].strip() == 'None':
            pass
        else:
            study = row[1].split('/')[-1].split('_')[0].upper()
            rid = row[2].split('_')[3]
            t1sid = row[2].split('.')[0].split('_')[-2]
            t1iid = row[2].split('.')[0].split('_')[-1]
            uid = 'SKULLMASK_{0}_{1}_{2}_{3}'.format(study, rid, t1sid, t1iid)
            path = '{0}/{1}.mnc'.format(outputpath, uid)

            print(study, rid, uid, sep=', ')

            try:
                shutil.copyfile(row[0], path)
                Dbclient.executeNoResult(
                    "INSERT IGNORE INTO MANUAL_MASK VALUES (Null, '{0}', '{1}', '{2}', '{3}')"
                    .format(study, rid, uid, path))

                sql2 = "UPDATE ADNI_T1_Pipeline SET MANUAL_MASK = 1 WHERE PROCESSING_TID IN (SELECT RECORD_ID FROM Processing WHERE RID = {0} AND S_IDENTIFIER = {1} AND I_IDENTIFIER = {2})".format(
Example #46
class ADNI_V1_T1:
    def __init__(self):
        self.DBClient = DbUtils()
        self.QCHandler = QCHandler()

    def process(self, processingItem):
        processingItemObj = ProcessingItemObj(processingItem)

        # Branch on the pipeline-table flags to decide the next step for this scan.
        if processingItemObj.beast_skip and processingItemObj.manual_skip and not processingItemObj.civet:
            self.runCivet(processingItemObj, 'N')  # no mask: BeAST and manual masking both skipped
        elif processingItemObj.manual_mask and not processingItemObj.manual_skip and not processingItemObj.civet:
            self.runCivet(processingItemObj, 'M')  # a manual mask is available
        elif processingItemObj.beast_mask == 0 and not processingItemObj.beast_skip and processingItemObj.beast_qc == 0 and not processingItemObj.manual_mask:
            self.runBeast(processingItemObj)  # BeAST has not run yet
        elif processingItemObj.beast_skip and not processingItemObj.manual_mask and not processingItemObj.manual_skip:
            # BeAST was skipped and no manual mask exists yet: wait for one to be drawn
            PipelineLogger.log('manager', 'error', '$$$$$$$$$$$$$$$$$ Manual Mask Requested $$$$$$$$$$$$$$$$$$ - {0}'.format(processingItem))
            pass
        elif processingItemObj.beast_mask == 1 and not processingItemObj.beast_skip and processingItemObj.beast_qc == 1 and not processingItemObj.manual_mask and not processingItemObj.civet:
            self.runCivet(processingItemObj, 'B')  # BeAST mask exists and passed QC
        elif processingItemObj.beast_mask == 1 and not processingItemObj.beast_skip and processingItemObj.beast_qc == 0 and not processingItemObj.manual_mask and not processingItemObj.manual_skip:
            self.requestQC(processingItemObj, 'beast')  # BeAST mask exists but has not been QC'd
        elif processingItemObj.civet == 1 and processingItemObj.civet_qc == 0:
            self.requestQC(processingItemObj, 'civet')  # CIVET finished but has not been QC'd
        else:
            if processingItemObj.civet_qc == -1:
                PipelineLogger.log('manager', 'error', 'Civet QC failed. Skipping. - {0}'.format(processingItem))
            PipelineLogger.log('manager', 'error', 'Error handling obj for processing - {0}'.format(processingItem))
            return 0

    def getScanType(self, processingItemObj):
        r = self.DBClient.executeAllResults("SELECT SCAN_TYPE FROM Conversion WHERE STUDY = '{0}' AND RID = '{1}' "
                                        "AND SCAN_DATE = '{2}' AND S_IDENTIFIER = '{3}' "
                                        "AND I_IDENTIFIER = '{4}'".format(processingItemObj.study,
                                                                          processingItemObj.subject_rid,
                                                                          processingItemObj.scan_date,
                                                                          processingItemObj.s_identifier,
                                                                          processingItemObj.i_identifier))
        return r[0][0]

    def checkNative(self, processingItemObj):
        orig_ScanType = self.getScanType(processingItemObj)
        converted_file = '{0}/{1}_{2}{3}{4}{5}_{6}.mnc'.format(processingItemObj.converted_folder, processingItemObj.study,
                                                        processingItemObj.subject_rid, processingItemObj.scan_date.replace('-', ''),
                                                        processingItemObj.s_identifier, processingItemObj.i_identifier,
                                                        orig_ScanType)
        nativeFolder = '{0}/native'.format(processingItemObj.root_folder)
        nativeFileName = '{0}/{1}_{2}{3}{4}{5}_{6}.mnc'.format(nativeFolder, processingItemObj.study,
                                                        processingItemObj.subject_rid, processingItemObj.scan_date.replace('-', ''),
                                                        processingItemObj.s_identifier, processingItemObj.i_identifier,
                                                        processingItemObj.modality.lower())
        if not os.path.exists(nativeFileName):
            try:
                distutils.dir_util.mkpath(nativeFolder)
                shutil.copyfile(converted_file, nativeFileName)
            except Exception as e:
                PipelineLogger.log('manager', 'error', 'Error in creating folders or copying native file. \n {0}'.format(e))
                PipelineLogger.log('manager', 'error', 'Setting to restart conversion. \n {0}'.format(e))
                sql = "UPDATE Conversion SET CONVERTED = 0, SKIP = 0 WHERE S_IDENTIFIER = '{0}' AND I_IDENTIFIER = '{1}'".format(processingItemObj.s_identifier, processingItemObj.i_identifier)
                self.DBClient.executeNoResult(sql)
                return None
        return nativeFileName

    def runBeast(self, processingItemObj):
        nativeFileName = self.checkNative(processingItemObj)
        if not nativeFileName:
            return 0
        beastFolder = '{0}/beast'.format(processingItemObj.root_folder)
        logDir = '{0}/logs'.format(processingItemObj.root_folder)
        PipelineLogger.log('manager', 'info', 'BeAST starting for {0}'.format(nativeFileName))
        PipelineLogger.log('manager', 'info', 'Current working folder : {0}'.format(os.getcwd()))
        try:
            distutils.dir_util.mkpath(logDir)
        except Exception as e:
            PipelineLogger.log('manager', 'error', 'Error in creating log folder \n {0}'.format(e))
            return 0

        id = '{0}{1}{2}{3}'.format(processingItemObj.subject_rid, processingItemObj.scan_date.replace('-', ''), processingItemObj.s_identifier, processingItemObj.i_identifier)
        beastCMD = 'source /opt/minc-toolkit/minc-toolkit-config.sh; Pipelines/ADNI_T1/ADNI_V1_T1_BeAST {0} {1} {2} {3} {4} {5}'.format(id, nativeFileName, beastFolder, logDir, socket.gethostname(), 50500)
        try:
            shutil.rmtree(beastFolder)
        except:
            pass
        try:
            distutils.dir_util.mkpath(beastFolder)
        except Exception as e:
            PipelineLogger.log('manager', 'error', 'Error in creating BeAST folder. \n {0}'.format(e))
            return 0

        PipelineLogger.log('manager', 'debug', 'Command : {0}'.format(beastCMD))
        os.chdir(pc.SourcePath)
        p = subprocess.Popen(beastCMD, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, executable='/bin/bash')
        out, err = p.communicate()
        PipelineLogger.log('manager', 'debug', 'Beast Log Output : \n{0}'.format(out.decode("utf-8")))
        PipelineLogger.log('manager', 'debug', 'Beast Log Err : \n{0}'.format(err.decode("utf-8")))

        QSubJobHandler.submittedJobs[id] = QSubJob(id, '02:00:00', processingItemObj, 'beast')
        return 1

    def runCivet(self, processingItemObj, maskStatus):
        nativeFileName = self.checkNative(processingItemObj)
        if not nativeFileName:
            return 0
        copyFolder = pc.T1TempDirForCIVETProcessing
        subjectFileName_base = '{0}_{1}{2}{3}{4}_{5}'.format(processingItemObj.study,
                                                        processingItemObj.subject_rid, processingItemObj.scan_date.replace('-', ''),
                                                        processingItemObj.s_identifier, processingItemObj.i_identifier,
                                                        processingItemObj.modality.lower())
        jobId = '{0}_{1}_{2}_{3}{4}{5}{6}_CIVETRUN'.format(processingItemObj.study, processingItemObj.modality,
                                                           processingItemObj.table_id, processingItemObj.subject_rid,
                                                           processingItemObj.scan_date.replace('-', ''),
                                                            processingItemObj.s_identifier, processingItemObj.i_identifier)
        checkJobPresentSql = "SELECT * FROM externalWaitingJobs WHERE JOB_ID = '{0}'".format(jobId)
        if len(self.DBClient.executeAllResults(checkJobPresentSql)) == 0:  # == rather than 'is' for value comparison
            beastFileName = '{0}/beast/mask/{1}_skull_mask_native.mnc'.format(processingItemObj.root_folder, subjectFileName_base)
            beastMaskName_base = '{0}_{1}{2}{3}{4}_mask'.format(processingItemObj.study,
                                                            processingItemObj.subject_rid, processingItemObj.scan_date.replace('-', ''),
                                                            processingItemObj.s_identifier, processingItemObj.i_identifier)
            beastMaskName = '{0}/{1}.mnc'.format(copyFolder, beastMaskName_base)
            manualFileName = '{0}/manual/mask/{1}_skull_mask_native.mnc'.format(processingItemObj.root_folder, subjectFileName_base)
            manualMaskName_base = '{0}_{1}{2}{3}{4}_mask'.format(processingItemObj.study,
                                                            processingItemObj.subject_rid, processingItemObj.scan_date.replace('-', ''),
                                                            processingItemObj.s_identifier, processingItemObj.i_identifier)
            manualMaskName = '{0}/{1}.mnc'.format(copyFolder, manualMaskName_base)
            try:
                distutils.file_util.copy_file(nativeFileName, copyFolder)
                if maskStatus == 'B':
                    distutils.file_util.copy_file(beastFileName, beastMaskName)
                elif maskStatus == 'M':
                    distutils.file_util.copy_file(manualFileName, manualMaskName)
                elif maskStatus == 'N':
                    pass
                else:
                    PipelineLogger.log('manager', 'error', 'Unknown mask status - {0} Entry : Processing ID - {1}, Table ID - {2}'.format(maskStatus, processingItemObj.processing_rid, processingItemObj.table_id))

                addExternalJobSQL = "INSERT INTO externalWaitingJobs VALUES ('{0}', '{1}', '{2}', NULL, NULL, NULL)".format(jobId, '{0}_{1}_Pipeline'.format(processingItemObj.study, processingItemObj.modality), 'CIVET')
                self.DBClient.executeNoResult(addExternalJobSQL)
            except Exception as e:
                PipelineLogger.log('manager', 'error', 'Error copying for CIVET input. Rolling back... - Processing Table ID -> {0} Table ID -> {1}'.format( processingItemObj.processing_rid, processingItemObj.table_id))
                PipelineLogger.log('manager', 'exception', e)
                nativeFileOnCopyFolder = '{0}/{1}'.format(copyFolder, os.path.basename(nativeFileName))
                if os.path.exists(nativeFileOnCopyFolder):
                    os.remove(nativeFileOnCopyFolder)
                if os.path.exists(beastMaskName):
                    os.remove(beastMaskName)
                if os.path.exists(manualMaskName):
                    os.remove(manualMaskName)

    def requestQC(self, processingItemObj, qctype):
        qcFieldDict = dict(civet='QC', beast='BEAST_QC')
        qcFolderDict = { 'civet' : '{0}/civet'.format(processingItemObj.root_folder),
                         'beast' : '{0}/beast'.format(processingItemObj.root_folder)}
        tablename = '{0}_{1}_Pipeline'.format(processingItemObj.study, processingItemObj.modality)
        self.QCHandler.requestQC(processingItemObj.study, tablename, processingItemObj.table_id, qcFieldDict[qctype], qctype, qcFolderDict[qctype])
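
runCivet does not launch CIVET itself; it stages the files and registers the job in externalWaitingJobs, which PipelineHandler.checkExternalJobs above later matches by JOB_ID. A sketch of that handshake; the concrete ID is illustrative.

# The JOB_ID round trip (ID below is made up; the layout matches runCivet above).
job_id = 'ADNI_T1_42_002120110101S1234I5678_CIVETRUN'
study, modality, table_id, scan_id, job_type = job_id.split('_')
# runCivet inserts:
#   INSERT INTO externalWaitingJobs VALUES ('<job_id>', 'ADNI_T1_Pipeline', 'CIVET', NULL, NULL, NULL)
# checkExternalJobs later copies the CIVET output back, sets CIVET = 1 and
# FINISHED = 1 on ADNI_T1_Pipeline, requests QC, and deletes the waiting row.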
__author__ = 'sulantha'
xfmList = '/home/sulantha/Desktop/petMatchNew2.csv'
outputpath = '/data/data03/MANUAL_XFM'
from Utils.DbUtils import DbUtils
import shutil


Dbclient = DbUtils()
with open(xfmList, 'r') as inf:
    for line in inf:
        row = line.split(',')
        if row[0].strip() == 'None' or row[2].strip() == 'None' or row[4].strip() == 'None':
            pass
        else:
            study = row[0].split('/')[-1].split('_')[0].upper()
            rid = row[0].split('/')[-1].split('_')[1][2:-2]
            petsid = row[2].split('.')[0].split('_')[-2]
            petiid = row[2].split('.')[0].split('_')[-1]
            t1sid = row[4].split('.')[0].split('_')[-2]
            t1iid = row[4].split('.')[0].split('_')[-1]
            uid = 'PET_{0}_{1}_T1_{2}_{3}'.format(petsid, petiid, t1sid, t1iid)
            path = '{0}/{1}_{2}_{3}.xfm'.format(outputpath, study, rid, uid)
            if petiid.startswith('I') and petsid.startswith('S'):
                print(study, rid, uid, sep=', ')
            else:
                print('PET - {0}'.format(row[2]))

            try:
                shutil.copyfile(row[0], path)
            except Exception as e:
                print('Error copy. {0}'.format(e))
Example #48
__author__ = 'sulantha'
import glob, subprocess, re
from Utils.DbUtils import DbUtils
import os
from distutils import file_util, dir_util
import shutil

DBClient = DbUtils()

getAllTodoSQL = "SELECT XFM_NAME FROM Coregistration WHERE END = 0 AND SKIP = 0 AND START = 0 AND PET_SCANTYPE = 'AV45'"
res = DBClient.executeAllResults(getAllTodoSQL)
totalC = 0
done_c = 0
for xfm_name in res:
    xfm_id = xfm_name[0].split('_', 3)[-1]
    print(xfm_id)

    xfm_file = glob.glob('/data/data03/MANUAL_XFM/{0}.xfm'.format(xfm_name[0]))
    #checkSQL = "SELECT * FROM MANUAL_XFM WHERE XFM_UNIQUEID = '{0}'".format(xfm_id)
    #res2 = DBClient.executeAllResults(checkSQL)

    if len(xfm_file) > 0:
        done_c += 1
        print('Already done.  - {0}'.format(xfm_id))
        markDoneSQL = "UPDATE Coregistration SET START=1, END=1, USER='******' WHERE XFM_NAME LIKE '%{0}'".format(
            xfm_id)
        print(markDoneSQL)
        #DBClient.executeNoResult(markDoneSQL)
    else:
        print('Not done.  - {0}'.format(xfm_id))
    totalC += 1
class ADNI_V1_AV1451:
    def __init__(self):
        self.DBClient = DbUtils()
        self.MatchDBClient = DbUtils(database=pc.ADNI_dataMatchDBName)
        self.PETHelper = PETHelper()

    def process(self, processingItem):
        processingItemObj = ProcessingItemObj(processingItem)
        matching_t1 = ADNI_T1_Helper().getMatchingT1(processingItemObj)
        if not matching_t1:
            PipelineLogger.log('root', 'error', 'PET cannot be processed; no matching T1 found. - {0} - {1} - {2}.'.format(processingItemObj.subject_rid, processingItemObj.modality, processingItemObj.scan_date))
            return 0

        processed = ADNI_T1_Helper().checkProcessed(matching_t1)
        if not processed:
            PipelineLogger.log('root', 'error', 'PET cannot be processed due to matching T1 not being processed - {0}'.format(matching_t1))
            return 0
        else:
            PipelineLogger.log('root', 'INFO', '+++++++++ PET ready to be processed. Will check for xfm. - {0} - {1}'.format(processingItemObj.subject_rid, processingItemObj.scan_date))
            if processingItemObj.manual_xfm == '':
                manualXFM = self.PETHelper.getManualXFM(processingItemObj, matching_t1)
                processingItemObj.manual_xfm = manualXFM
            elif processingItemObj.manual_xfm == 'Req_man_reg':
                coregDone = self.PETHelper.checkIfAlreadyDone(processingItemObj, matching_t1)
                if coregDone:
                    manualXFM = coregDone
                    setPPTableSQL = "UPDATE {0}_{1}_Pipeline SET MANUAL_XFM = '{2}' WHERE RECORD_ID = {3}".format(processingItemObj.study, processingItemObj.modality, manualXFM, processingItemObj.table_id)
                    self.DBClient.executeNoResult(setPPTableSQL)
                else:
                    self.PETHelper.requestCoreg(processingItemObj, matching_t1)
                    PipelineLogger.log('root', 'INFO', 'Manual XFM was not found. A request to create one may have been added. - {0} - {1}'.format(processingItemObj.subject_rid, processingItemObj.scan_date))
                    return 0
            else:
                manualXFM = processingItemObj.manual_xfm
            if manualXFM:
                self.processPET(processingItemObj, processed)
            else:
                PipelineLogger.log('root', 'INFO', 'Manual XFM was not found. A request to create one may have been added. - {0} - {1}'.format(processingItemObj.subject_rid, processingItemObj.scan_date))
                return 0

    def getScanType(self, processingItemObj):
        r = self.DBClient.executeAllResults("SELECT SCAN_TYPE FROM Conversion WHERE STUDY = '{0}' AND RID = '{1}' "
                                        "AND SCAN_DATE = '{2}' AND S_IDENTIFIER = '{3}' "
                                        "AND I_IDENTIFIER = '{4}'".format(processingItemObj.study,
                                                                          processingItemObj.subject_rid,
                                                                          processingItemObj.scan_date,
                                                                          processingItemObj.s_identifier,
                                                                          processingItemObj.i_identifier))
        return r[0][0]

    def processPET(self, processingItemObj, matchT1Path):
        petFileName = '{0}/{1}_{2}{3}{4}{5}_{6}.mnc'.format(processingItemObj.converted_folder, processingItemObj.study,
                                                        processingItemObj.subject_rid, processingItemObj.scan_date.replace('-', ''),
                                                        processingItemObj.s_identifier, processingItemObj.i_identifier,
                                                        self.getScanType(processingItemObj))
        processedFolder = '{0}/processed'.format(processingItemObj.root_folder)
        logDir = '{0}/logs'.format(processingItemObj.root_folder)
        PipelineLogger.log('manager', 'info', 'PET processing starting for {0}'.format(petFileName))
        try:
            distutils.dir_util.mkpath(logDir)
        except Exception as e:
            PipelineLogger.log('manager', 'error', 'Error in creating log folder \n {0}'.format(e))
            return 0

        id = '{0}{1}{2}{3}'.format(processingItemObj.subject_rid, processingItemObj.scan_date.replace('-', ''), processingItemObj.s_identifier, processingItemObj.i_identifier)
        paramStrd = ast.literal_eval(processingItemObj.parameters)
        paramStrt = ' '.join(['[\"{0}\"]=\"{1}\"'.format(k, v) for k,v in paramStrd.items()])
        paramStr = '({0})'.format(paramStrt)
        petCMD = "source /opt/minc-1.9.15/minc-toolkit-config.sh; Pipelines/ADNI_AV1451/ADNI_V1_AV1451_Process {0} {1} {2} {3} {4} {5} '{6}' {7} {8}".format(id, petFileName, processedFolder, matchT1Path, processingItemObj.manual_xfm, logDir, paramStr,socket.gethostname(), 50500)
        try:
            processedFolder_del = '{0}/processed_del'.format(processingItemObj.root_folder)
            os.rename(processedFolder, processedFolder_del)
            shutil.rmtree(processedFolder_del)
        except Exception as e:
            PipelineLogger.log('manager', 'error', 'Error in deleting old processing folder. \n {0}'.format(e))
        try:
            distutils.dir_util.mkpath(processedFolder)
        except Exception as e:
            PipelineLogger.log('manager', 'error', 'Error in creating processing folder. \n {0}'.format(e))
            return 0

        ### This section is new for ADNI pre-processing - per-scanner-type blurring. Only required if
        ### the images are acquired from different scanners and need to be brought to the same PSF.
        blur_x, blur_y, blur_z = self.PETHelper.getBlurringParams(processingItemObj)
        ### End pre processing.

        PipelineLogger.log('manager', 'debug', 'Command : {0}'.format(petCMD))
        p = subprocess.Popen(petCMD, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, executable='/bin/bash')
        out, err = p.communicate()
        PipelineLogger.log('manager', 'debug', 'Process Log Output : \n{0}'.format(out))
        PipelineLogger.log('manager', 'debug', 'Process Log Err : \n{0}'.format(err))

        QSubJobHandler.submittedJobs[id] = QSubJob(id, '02:00:00', processingItemObj, 'av1451')
        return 1
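
processPET serializes the Python parameter dict into a bash associative-array literal before handing it to the shell wrapper; a worked example of that transformation (the keys and values here are illustrative, not real pipeline parameters).

import ast
parameters = "{'ref': 'cerGM', 'smooth': '6'}"  # as stored in the pipeline table (assumed)
paramStrd = ast.literal_eval(parameters)
paramStrt = ' '.join(['["{0}"]="{1}"'.format(k, v) for k, v in paramStrd.items()])
paramStr = '({0})'.format(paramStrt)
print(paramStr)  # (["ref"]="cerGM" ["smooth"]="6")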
Example #50
 def __init__(self):
     self.DBClient = DbUtils()
     self.MatchDBClient = DbUtils(database=pc.ADNI_dataMatchDBName)
     self.PETHelper = PETHelper()
__author__ = 'sulantha'
import glob, subprocess, re
from Utils.DbUtils import DbUtils
import os
from distutils import file_util, dir_util
import shutil

DBClient = DbUtils()
IID_list = ['I546612', 'I620366', 'I535767', 'I560359', 'I581738']
for iid in IID_list:
    getDataFolderSQL = "SELECT RAW_FOLDER FROM Sorting WHERE I_IDENTIFIER = '{0}'".format(iid)
    res = DBClient.executeAllResults(getDataFolderSQL)
    if len(res) == 0:
        pass
    else:
        rawFolder = res[0][0]
        dataFolder = os.path.abspath(os.path.join(rawFolder, '../'))
        shutil.rmtree(dataFolder)
        print(dataFolder)
    delsql = "DELETE FROM Sorting WHERE I_IDENTIFIER = '{0}'".format(iid)
    DBClient.executeNoResult(delsql)

    delsql = "DELETE FROM Conversion WHERE I_IDENTIFIER = '{0}'".format(iid)
    DBClient.executeNoResult(delsql)

    getProSQL = "SELECT RECORD_ID, STUDY, MODALITY FROM Processing WHERE I_IDENTIFIER = '{0}'".format(iid)
    res2 = DBClient.executeAllResults(getProSQL)
    if len(res2) == 0:
       pass
    else:
        P_ID = res2[0][0]
Example #52
__author__ = 'sulantha'
import datetime
from Utils.DbUtils import DbUtils

DBClient = DbUtils()
with open('/data/data03/sulantha/Downloads/fdg_list.csv', 'r') as file:
    next(file)
    for line in file:
        row = line.split(',')
        rid = row[0]
        date = row[1].strip()
        dateT = datetime.datetime.strptime(date, '%Y-%m-%d')
        dateS = dateT.strftime('%Y-%m-%d')
        findSQL = "SELECT * FROM Processing WHERE RID = {0} AND MODALITY = 'FDG' AND SCAN_DATE = '{1}'".format(
            rid, dateS)
        res = DBClient.executeAllResults(findSQL)
        if len(res) == 0:  # flag RID/date pairs that have no Processing record
            print('{0}-{1} {2}'.format(rid, len(res), '############'))
        processingSQL = "UPDATE Processing SET SKIP = 0 WHERE RID = {0} AND MODALITY = 'FDG' AND SCAN_DATE = '{1}'".format(
            rid, dateS)
        DBClient.executeNoResult(processingSQL)
Example #53
 def __init__(self):
     self.DBClient = DbUtils()
Example #54
 def __init__(self):
     self.tableName = 'Sorting'
     self.DBClient = DbUtils()
     self.sqlBuilder = SQLBuilder()
Example #55
class ADNI_V2_AV45:
    def __init__(self):
        self.DBClient = DbUtils()
        self.MatchDBClient = DbUtils(database=pc.ADNI_dataMatchDBName)
        self.PETHelper = PETHelper()

    def process(self, processingItem):
        processingItemObj = ProcessingItemObj(processingItem)
        matching_t1 = ADNI_T1_Helper().getMatchingT1(processingItemObj)
        if not matching_t1:
            PipelineLogger.log('root', 'error', 'PET cannot be processed; no matching T1 found. - {0} - {1} - {2}.'.format(processingItemObj.subject_rid, processingItemObj.modality, processingItemObj.scan_date))
            return 0

        processed = ADNI_T1_Helper().checkProcessed(matching_t1)
        if not processed:
            PipelineLogger.log('root', 'error', 'PET cannot be processed due to matching T1 not being processed - {0}'.format(matching_t1))
            return 0
        else:
            PipelineLogger.log('root', 'INFO', '+++++++++ PET ready to be processed. Will check for initial xfm. - {0} - {1}'.format(processingItemObj.subject_rid, processingItemObj.scan_date))
            if processingItemObj.manual_xfm == 'Req_man_reg':
                coregDone = self.PETHelper.checkIfAlreadyDone(processingItemObj, matching_t1)
                if coregDone:
                    manualXFM = coregDone
                    setPPTableSQL = "UPDATE {0}_{1}_Pipeline SET MANUAL_XFM = '{2}' WHERE RECORD_ID = {3}".format(processingItemObj.study, processingItemObj.modality, manualXFM, processingItemObj.table_id)
                    self.DBClient.executeNoResult(setPPTableSQL)
                    return self.processPET(processingItemObj, processed)
                else:
                    self.PETHelper.requestCoreg(processingItemObj, matching_t1)
                    PipelineLogger.log('root', 'info', 'Manual XFM was not found. A request to create one may have been added. - {0} - {1}'.format(processingItemObj.subject_rid, processingItemObj.scan_date))
                    return 0
            else:
                return self.processPET(processingItemObj, processed)

    def getScanType(self, processingItemObj):
        r = self.DBClient.executeAllResults("SELECT SCAN_TYPE FROM Conversion WHERE STUDY = '{0}' AND RID = '{1}' "
                                        "AND SCAN_DATE = '{2}' AND S_IDENTIFIER = '{3}' "
                                        "AND I_IDENTIFIER = '{4}'".format(processingItemObj.study,
                                                                          processingItemObj.subject_rid,
                                                                          processingItemObj.scan_date,
                                                                          processingItemObj.s_identifier,
                                                                          processingItemObj.i_identifier))
        return r[0][0]

    def processPET(self, processingItemObj, matchT1Path):
        petFileName = '{0}/{1}_{2}{3}{4}{5}_{6}.mnc'.format(processingItemObj.converted_folder, processingItemObj.study,
                                                        processingItemObj.subject_rid, processingItemObj.scan_date.replace('-', ''),
                                                        processingItemObj.s_identifier, processingItemObj.i_identifier,
                                                        self.getScanType(processingItemObj))
        processedFolder = '{0}/processed'.format(processingItemObj.root_folder)
        logDir = '{0}/logs'.format(processingItemObj.root_folder)
        PipelineLogger.log('manager', 'info', 'PET processing starting for {0}'.format(petFileName))
        try:
            distutils.dir_util.mkpath(logDir)
        except Exception as e:
            PipelineLogger.log('manager', 'error', 'Error in creating log folder \n {0}'.format(e))
            return 0

        jobId = '{0}{1}{2}{3}'.format(processingItemObj.subject_rid, processingItemObj.scan_date.replace('-', ''), processingItemObj.s_identifier, processingItemObj.i_identifier)
        paramStrd = ast.literal_eval(processingItemObj.parameters)
        paramStrt = ' '.join(['["{0}"]="{1}"'.format(k, v) for k, v in paramStrd.items()])
        paramStr = '({0})'.format(paramStrt)
        petCMD = "source /opt/minc-toolkit/minc-toolkit-config.sh; Pipelines/ADNI_AV45/ADNI_V2_AV45_Process {0} {1} {2} {3} {4} {5} '{6}' {7} {8}".format(jobId, petFileName, processedFolder, matchT1Path, 'auto' if processingItemObj.manual_xfm == '' else processingItemObj.manual_xfm, logDir, paramStr, socket.gethostname(), 50500)
        try:
            processedFolder_del = '{0}/processed_del'.format(processingItemObj.root_folder)
            os.rename(processedFolder, processedFolder_del)
            shutil.rmtree(processedFolder_del)
        except Exception as e:
            PipelineLogger.log('manager', 'error', 'Error in deleting old processing folder. \n {0}'.format(e))
        try:
            distutils.dir_util.mkpath(processedFolder)
        except Exception as e:
            PipelineLogger.log('manager', 'error', 'Error in creating processing folder. \n {0}'.format(e))
            return 0

        PipelineLogger.log('manager', 'debug', 'Command : {0}'.format(petCMD))
        p = subprocess.Popen(petCMD, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, executable='/bin/bash')
        out, err = p.communicate()
        PipelineLogger.log('manager', 'debug', 'Process Log Output : \n{0}'.format(out))
        PipelineLogger.log('manager', 'debug', 'Process Log Err : \n{0}'.format(err))

        QSubJobHandler.submittedJobs[jobId] = QSubJob(jobId, '02:00:00', processingItemObj, 'av45')
        return 1
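processingItemObj.parameters is stored as a stringified Python dict and rendered into a bash associative-array literal before being handed to the processing script. A standalone illustration of that construction, with hypothetical key/value pairs:

import ast

paramStrd = ast.literal_eval("{'blur_fwhm': '8', 'ref_region': 'cerGM'}")  # hypothetical values
paramStrt = ' '.join('["{0}"]="{1}"'.format(k, v) for k, v in paramStrd.items())
paramStr = '({0})'.format(paramStrt)
print(paramStr)  # (["blur_fwhm"]="8" ["ref_region"]="cerGM")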
__author__ = 'sulantha'
import glob
from Utils.DbUtils import DbUtils

DBClient = DbUtils()

getAllTodoSQL = "SELECT XFM_NAME FROM Coregistration WHERE END = 0 AND SKIP = 0 AND START = 0 AND PET_SCANTYPE = 'AV45'"
res = DBClient.executeAllResults(getAllTodoSQL)
totalC = 0
done_c = 0
for xfm_name in res:
    xfm_id = xfm_name[0].split('_', 3)[-1]
    print(xfm_id)

    xfm_file = glob.glob('/data/data03/MANUAL_XFM/{0}.xfm'.format(xfm_name[0]))
    #checkSQL = "SELECT * FROM MANUAL_XFM WHERE XFM_UNIQUEID = '{0}'".format(xfm_id)
    #res2 = DBClient.executeAllResults(checkSQL)

    if len(xfm_file) > 0:
        done_c += 1
        print('Already done.  - {0}'.format(xfm_id))
        markDoneSQL = "UPDATE Coregistration SET START=1, END=1, USER='******' WHERE XFM_NAME LIKE '%{0}'".format(xfm_id)
        print(markDoneSQL)
        #DBClient.executeNoResult(markDoneSQL)
    else:
        print('Not done.  - {0}'.format(xfm_id))
    totalC += 1
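The two counters are accumulated but never reported in the original listing; a hypothetical one-line summary to close the loop:

print('{0}/{1} manual XFMs already done'.format(done_c, totalC))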
__author__ = 'sulantha'
import datetime
from Utils.DbUtils import DbUtils
import glob
DBClient = DbUtils()
outLines = []
with open('/data/data03/sulantha/Downloads/av45_list.csv', 'r') as file:
    next(file)
    for line in file:
        row = line.split(',')
        rid = row[0]
        date = row[1].strip()
        dateT = datetime.datetime.strptime(date, '%m/%d/%Y')
        dateS = dateT.strftime('%Y-%m-%d')
        findSQLV1 = "SELECT * FROM Processing WHERE RID = {0} AND MODALITY = 'AV45' AND SCAN_DATE = '{1}' AND VERSION = '{2}'".format(rid, dateS, 'V1')
        resv1 = DBClient.executeAllResults(findSQLV1)
        findSQLV2 = "SELECT * FROM Processing WHERE RID = {0} AND MODALITY = 'AV45' AND SCAN_DATE = '{1}' AND VERSION = '{2}'".format(rid, dateS, 'V2')
        resv2 = DBClient.executeAllResults(findSQLV2)

        if len(resv1) == 0:
            v1Path = ''
        elif len(resv1) == 1:
            if resv1[0][12] == 1:
                v1Path = '{0}/processed/final/*tal_nlin_pbavg_ref_cerGM_wmnorm_085.mnc'.format(resv1[0][8])
                matches = glob.glob(v1Path)
                v1Path = matches[0] if matches else ''
            else:
                v1Path = ''
        else:
            v1Path = ''
            for result in resv1:
                if result[12] == 1:
class ADNI_T1_Helper:
    def __init__(self):
        self.DBClient = DbUtils()
        self.MatchDBClient = DbUtils(database=pc.ADNI_dataMatchDBName)

    def getMatchingT1(self, processingItemObj):
        modalityID = '{0}{1}{2}{3}{4}{5}{6}'.format(processingItemObj.study, processingItemObj.version,
                                                    processingItemObj.subject_rid, processingItemObj.modality,
                                                    processingItemObj.scan_date.replace('-', ''),
                                                    processingItemObj.s_identifier, processingItemObj.i_identifier)
        getFromMatchTableSQL = "SELECT * FROM MatchT1 WHERE MODALITY_ID = '{0}'".format(modalityID)
        existingMatchedRec = self.DBClient.executeAllResults(getFromMatchTableSQL)
        if len(existingMatchedRec) == 1:
            getConvSQL = "SELECT * FROM Conversion WHERE RECORD_ID = '{0}'".format(existingMatchedRec[0][3])
            return self.DBClient.executeAllResults(getConvSQL)[0]
        else:

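            # No cached match in MatchT1: look the scan up in the ADNI metadata
            # tables (MRILIST for fMRI, PET_META_LIST for PET) to get its visit code.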
            if processingItemObj.modality == 'FMRI':  # For MRI and fMRI images
                getRecordSQL = "SELECT * FROM MRILIST WHERE subject LIKE '%_%_{0}' AND seriesid = {1} AND imageuid = {2}".format(processingItemObj.subject_rid, processingItemObj.s_identifier.replace('S', ''), processingItemObj.i_identifier.replace('I', ''))
            else:  # By Default, for PET images
                getRecordSQL = "SELECT * FROM PET_META_LIST WHERE subject LIKE '%_%_{0}' AND seriesid = {1} AND imageid = {2}".format(processingItemObj.subject_rid, processingItemObj.s_identifier.replace('S', ''), processingItemObj.i_identifier.replace('I', ''))

            petrecord = self.MatchDBClient.executeAllResults(getRecordSQL)
            if not petrecord:
                PipelineLogger.log('root', 'error', 'Cannot find PET record : {0} - {1} - {2}'.format(processingItemObj.subject_rid, processingItemObj.s_identifier.replace('S', ''), processingItemObj.i_identifier.replace('I', '')))
                return None
            visit_code = pc.ADNI_visitCode_Dict[petrecord[0][2]]

            getMRIRecordsSQL = "SELECT * FROM MPRAGEMETA WHERE subjectid LIKE '%_%_{0}'".format(processingItemObj.subject_rid)

            mrirecords = self.MatchDBClient.executeAllResults(getMRIRecordsSQL)
            if not mrirecords:
                PipelineLogger.log('root', 'error', '################################  - Error !!!!! Cannot find any MRI records : {0} - Please check ADNI recs. ################################'.format(processingItemObj.subject_rid))
                return None

            # getMRISecondarySQL = "SELECT * FROM MRILIST WHERE subject LIKE '%_%_{0}'".format(processingItemObj.subject_rid)
            # mriSecondaryRecords = self.MatchDBClient.executeAllResults(getMRISecondarySQL)
            # t_mrirecords = mrirecords
            # for record in mriSecondaryRecords:
            #     distint = 1
            #     for i in t_mrirecords:
            #         if record[7] == i[7] and record[8] == i[8]:
            #             distint = 0
            #     if distint:
            #         mrirecords.append(record)

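            # Keep only the MRI records whose visit code matches the PET scan's visit.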
            matchedT1Recs = []
            for rec in mrirecords:
                if pc.ADNI_visitCode_Dict[rec[2]] == visit_code:
                    matchedT1Recs.append(rec)
            if len(matchedT1Recs) == 0:
                PipelineLogger.log('root', 'error', 'Cannot match visit codes for : {0} - {1} - {2} - Searching based on scan date. +/- 60 days from PET date'.format(processingItemObj.subject_rid, processingItemObj.modality, visit_code))
                pet_date = datetime.strptime(processingItemObj.scan_date, '%Y-%m-%d')
                sortedRecs = sorted(mrirecords, key=lambda x:abs(datetime.strptime(x[5], '%Y-%m-%d') - pet_date))
                closestDate = [k for k,g in itertools.groupby(sortedRecs, key=lambda x:abs(datetime.strptime(x[5], '%Y-%m-%d') - pet_date))][0]
                PipelineLogger.log('root', 'error', 'PET MRI Matching based on dates - match visit codes for : {0} - {1} - {2} - Distance between MRI/PET : {3} days.'.format(processingItemObj.subject_rid, processingItemObj.modality, visit_code, closestDate))
                closestMatchedRecs = [list(g) for k,g in itertools.groupby(sortedRecs, key=lambda x:abs(datetime.strptime(x[5], '%Y-%m-%d') - pet_date))][0]
                matchedT1Recs = closestMatchedRecs
            if len(matchedT1Recs) == 0:
                PipelineLogger.log('root', 'error', 'Cannot match visit codes for : {0} - {1} - {2}'.format(processingItemObj.subject_rid, processingItemObj.modality, visit_code))
                return None

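            # Confirm each candidate T1 actually exists in the local Conversion table.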
            matchedT1withScanDescriptions = []
            for rec in matchedT1Recs:
                getScanFromConversionSQL = "SELECT * FROM Conversion WHERE STUDY = '{0}' AND S_IDENTIFIER = '{1}' AND I_IDENTIFIER = '{2}' AND SKIP = 0".format(processingItemObj.study,'S{0}'.format(rec[7]), 'I{0}'.format(rec[8]))
                t1_conversion = self.DBClient.executeAllResults(getScanFromConversionSQL)
                if len(t1_conversion) > 0:
                    matchedT1withScanDescriptions.append(t1_conversion[0])
                else:
                    PipelineLogger.log('root', 'error', 'Corresponding MRI was not found in the system : {0} - {1} - {2}'.format(processingItemObj.subject_rid, 'S{0}'.format(rec[7]), 'I{0}'.format(rec[8])))
                    continue
            if len(matchedT1withScanDescriptions) < 1:
                PipelineLogger.log('root', 'error', 'Matched T1s are not in the database. : Matched T1 s - \n {0}'.format(matchedT1Recs))
                return None
            else:
                if len(matchedT1withScanDescriptions) == 1:
                    # Only one matched T1: verify it is an accepted scan type.
                    if matchedT1withScanDescriptions[0][3] in pc.ADNI_T1_match_accepted_scantypes:
                        self.addToMatchT1Table(processingItemObj, modalityID, matchedT1withScanDescriptions[0])
                        return matchedT1withScanDescriptions[0]
                    else:
                        PipelineLogger.log('root', 'error', 'Matched T1 is not an accepted scan type: \n {0}'.format(matchedT1withScanDescriptions[0]))
                        return None

                else:
                    # More than one T1 matched: select one based on scan-type priority.
                    sortedList = sorted(matchedT1withScanDescriptions, key=lambda x: (pc.ADNI_T1_match_scantype_priorityList.index(x[3]), -x[5]))
                    self.addToMatchT1Table(processingItemObj, modalityID, sortedList[0])
                    return sortedList[0]

    def checkProcessed(self, t1Record):
        subject_id = t1Record[2]
        version = t1Record[11]
        s_id = t1Record[6]
        i_id = t1Record[7]
        checkProcessedSQL = "SELECT * FROM Processing WHERE RID = '{0}' AND VERSION = '{1}' AND S_IDENTIFIER = '{2}' AND I_IDENTIFIER = '{3}'".format(subject_id, version, s_id, i_id)
        results = self.DBClient.executeAllResults(checkProcessedSQL)
        if len(results) < 1:
            PipelineLogger.log('root', 'error', 'Matched T1 is not added to the processing table. {0} - {1} - {2}'.format(subject_id, s_id, i_id))
            return False
        else:
            result = results[0]
            if result[12] == 1 and result[13] == 1:
                return result[8]
            else:
                PipelineLogger.log('root', 'error', 'Matched T1 is not processed or QC failed. {0} - {1} - {2}'.format(subject_id, s_id, i_id))
                self.startProcessOFT1(result)
                return False

    def addToMatchT1Table(self, processingItemObj, modalityID, t1Record):
        pet_date = datetime.strptime(processingItemObj.scan_date, '%Y-%m-%d')
        mri_date = datetime.combine(t1Record[4], datetime.min.time())
        date_diff = abs(mri_date - pet_date)
        t1ID = '{0}{1}{2}_x_{3}_x_{4}{5}{6}'.format(t1Record[1], t1Record[11], t1Record[2], t1Record[3], t1Record[4].strftime('%Y-%m-%d').replace('-', ''), t1Record[6], t1Record[7])
        conversionID = t1Record[0]
        sql = "INSERT IGNORE INTO MatchT1 VALUES (Null, '{0}', '{1}', '{2}', {3})".format(modalityID, t1ID, conversionID, date_diff.days)
        self.DBClient.executeNoResult(sql)

    def startProcessOFT1(self, processTableEntry):
        recordId = processTableEntry[0]
        study = processTableEntry[1]
        sql = "UPDATE {0}_T1_Pipeline SET SKIP = 0 WHERE PROCESSING_TID = {1}".format(study, recordId)
        self.DBClient.executeNoResult(sql)
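A hedged usage sketch of the helper above, wrapping the match-then-check sequence that ADNI_V2_AV45.process performs; find_processed_t1 is a hypothetical name, and processingItemObj is assumed to be a ProcessingItemObj as used earlier in this listing:

def find_processed_t1(processingItemObj):
    helper = ADNI_T1_Helper()
    t1_record = helper.getMatchingT1(processingItemObj)
    if t1_record is None:
        return None
    processed_root = helper.checkProcessed(t1_record)  # root folder or False
    return processed_root or None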