コード例 #1
0
def test_outlier_timepoints():
    """Regression test: outlier_timepoints() on the packaged test scan must
    reproduce the pickled reference list shipped with the package."""

    import os
    import pickle
    import pkg_resources as p

    from qap.temporal_qc import outlier_timepoints

    # NOTE(review): test_sub_dir is assumed to be a module-level constant
    # defined elsewhere in this test module — confirm.
    func_motion = p.resource_filename("qap", os.path.join(test_sub_dir,
                                      "rest_1",
                                      "func_motion_correct",
                                      "rest_calc_tshift_resample_"
                                      "volreg.nii.gz"))

    func_mask = p.resource_filename("qap", os.path.join(test_sub_dir,
                                    "rest_1",
                                    "functional_brain_mask",
                                    "rest_calc_tshift_resample_volreg"
                                    "_mask.nii.gz"))

    ref_out = p.resource_filename("qap", os.path.join(test_sub_dir,
                                  "rest_1",
                                  "outlier_timepoints",
                                  "outlier_timepoints_ref_out.p"))

    out_list = outlier_timepoints(func_motion, func_mask)

    # Pickle files are binary: "rb" is required on Python 3 (text mode
    # fails to decode) and avoids newline translation on Windows.
    with open(ref_out, "rb") as f:
        ref_list = pickle.load(f)

    assert out_list == ref_list
コード例 #2
0
def test_outlier_timepoints():
    """Regression test: outlier_timepoints() output for the packaged test
    scan must match the pickled reference output."""

    import os
    import pickle
    import pkg_resources as p

    from qap.temporal_qc import outlier_timepoints

    # NOTE(review): test_sub_dir is presumably defined at module level in
    # this test module — verify.
    func_motion = p.resource_filename("qap", os.path.join(test_sub_dir,
                                      "rest_1",
                                      "func_motion_correct",
                                      "rest_calc_tshift_resample_"
                                      "volreg.nii.gz"))

    func_mask = p.resource_filename("qap", os.path.join(test_sub_dir,
                                    "rest_1",
                                    "functional_brain_mask",
                                    "rest_calc_tshift_resample_volreg"
                                    "_mask.nii.gz"))

    ref_out = p.resource_filename("qap", os.path.join(test_sub_dir,
                                  "rest_1",
                                  "outlier_timepoints",
                                  "outlier_timepoints_ref_out.p"))

    out_list = outlier_timepoints(func_motion, func_mask)

    # Pickles are binary data — open in "rb" (text mode breaks pickle.load
    # on Python 3).
    with open(ref_out, "rb") as f:
        ref_list = pickle.load(f)

    assert out_list == ref_list
コード例 #3
0
def test_outlier_timepoints_no_mask():
    """Regression test: outlier_timepoints() without a mask argument must
    reproduce the pickled no-mask reference output."""

    import os
    import pickle
    import pkg_resources as p

    from qap.temporal_qc import outlier_timepoints

    # NOTE(review): test_sub_dir is assumed to come from module level —
    # confirm.
    func_reorient = p.resource_filename("qap", os.path.join(test_sub_dir,
                                        "func_reorient.nii.gz"))

    ref_out = p.resource_filename("qap", os.path.join(test_sub_dir,
                                  "outlier_timepoints_output_nomask.p"))

    out_list = outlier_timepoints(func_reorient)

    # Pickle files must be opened in binary mode ("rb"); text mode fails
    # under Python 3.
    with open(ref_out, "rb") as f:
        ref_list = pickle.load(f)

    assert out_list == ref_list
def qap_functional_temporal(
        func_timeseries, func_brain_mask, bg_func_brain_mask, fd_file,
        subject_id, session_id, scan_id, site_name=None, starter=None):
    """Compute the functional temporal QAP measures for one functional scan.

    - The ``starter`` input exists only so that several mutually independent
      QAP measure pipelines can be joined into one Nipype pipeline, letting
      the MultiProc plugin manage resources when parallelizing.

    :type func_timeseries: str
    :param func_timeseries: Filepath to the 4D functional timeseries.
    :type func_brain_mask: str
    :param func_brain_mask: Filepath to the binary mask defining the brain
                            within the functional image.
    :type bg_func_brain_mask: str
    :param bg_func_brain_mask: Filepath to the inversion of the functional
                               brain mask.
    :type fd_file: str
    :param fd_file: File containing the RMSD values (calculated previously).
    :type subject_id: str
    :param subject_id: The participant ID.
    :type session_id: str
    :param session_id: The session ID.
    :type scan_id: str
    :param scan_id: The scan ID.
    :type site_name: str
    :param site_name: (default: None) The name of the site where the scan
                      was acquired.
    :type starter: str
    :param starter: (default: None) Dummy input for use as a Nipype function
                    node.
    :rtype: dict
    :return: Dictionary keyed by "<participant> <session> <series>" mapping
             out the QAP measure values (stringified) for this scan.
    """

    import numpy as np
    from time import strftime

    import qap
    from qap.temporal_qc import outlier_timepoints, quality_timepoints, \
                                global_correlation, calculate_percent_outliers
    from qap.dvars import calc_dvars

    # Standardized DVARS within the brain mask, plus its outlier statistics.
    dvars = calc_dvars(func_timeseries, func_brain_mask)
    dvars_perc_out, dvars_iqr = calculate_percent_outliers(dvars)
    mean_dvars = dvars.mean(0)[0]

    # RMSD / mean FD (Jenkinson), precomputed into fd_file.
    rmsd = np.loadtxt(fd_file)
    rmsd_perc_out, rmsd_iqr = calculate_percent_outliers(rmsd)

    # 3dTout: fraction of outlier voxels per timepoint, inside the brain...
    frac_out = outlier_timepoints(func_timeseries, mask_file=func_brain_mask)
    # ...and the outliers of the outliers! AAHH!
    frac_out_perc, frac_out_iqr = calculate_percent_outliers(frac_out)

    # 3dTout again, restricted to outside of the brain (background mask).
    oob_frac_out = outlier_timepoints(func_timeseries,
                                      mask_file=bg_func_brain_mask)
    oob_frac_out_perc, oob_frac_out_iqr = \
        calculate_percent_outliers(oob_frac_out)

    # 3dTqual: per-timepoint quality index.
    qual = quality_timepoints(func_timeseries)
    qual_perc_out, qual_iqr = calculate_percent_outliers(qual)

    # GCOR: global correlation.
    gcor = global_correlation(func_timeseries, func_brain_mask)

    # Raw measure values, in the exact key order the report code expects.
    # (Key spellings — including "Std. DVARs IQR" — are consumed downstream
    # and must not be altered.)
    measures = {
        "Std. DVARS (Mean)": mean_dvars,
        "Std. DVARS (Std Dev)": np.std(dvars),
        "Std. DVARS (Median)": np.median(dvars),
        "Std. DVARs IQR": dvars_iqr,
        "Std. DVARS percent outliers": dvars_perc_out,
        "RMSD (Mean)": np.mean(rmsd),
        "RMSD (Std Dev)": np.std(rmsd),
        "RMSD (Median)": np.median(rmsd),
        "RMSD IQR": rmsd_iqr,
        "RMSD percent outliers": rmsd_perc_out,
        "Fraction of Outliers (Mean)": np.mean(frac_out),
        "Fraction of Outliers (Std Dev)": np.std(frac_out),
        "Fraction of Outliers (Median)": np.median(frac_out),
        "Fraction of Outliers IQR": frac_out_iqr,
        "Fraction of Outliers percent outliers": frac_out_perc,
        "Fraction of OOB Outliers (Mean)": np.mean(oob_frac_out),
        "Fraction of OOB Outliers (Std Dev)": np.std(oob_frac_out),
        "Fraction of OOB Outliers (Median)": np.median(oob_frac_out),
        "Fraction of OOB Outliers IQR": oob_frac_out_iqr,
        "Fraction of OOB Outliers percent outliers": oob_frac_out_perc,
        "Quality (Mean)": np.mean(qual),
        "Quality (Std Dev)": np.std(qual),
        "Quality (Median)": np.median(qual),
        "Quality IQR": qual_iqr,
        "Quality percent outliers": qual_perc_out,
        "GCOR": gcor,
    }

    id_string = "%s %s %s" % (subject_id, session_id, scan_id)
    entry = {
        "QAP_pipeline_id": "QAP version %s" % qap.__version__,
        "Time": strftime("%Y-%m-%d %H:%M:%S"),
        "Participant": str(subject_id),
        "Session": str(session_id),
        "Series": str(scan_id),
        # every measure value is emitted as a string
        "functional_temporal": {k: str(v) for k, v in measures.items()},
    }

    if site_name:
        entry['Site'] = str(site_name)

    return {id_string: entry}
コード例 #5
0
def qap_functional_temporal(func_timeseries,
                            func_brain_mask,
                            bg_func_brain_mask,
                            fd_file,
                            subject_id,
                            session_id,
                            scan_id,
                            site_name=None,
                            starter=None):
    """Compute the functional temporal QAP measures for one functional scan.

    - The ``starter`` input exists only so that several mutually independent
      QAP measure pipelines can be joined into one Nipype pipeline, letting
      the MultiProc plugin manage resources when parallelizing.

    :type func_timeseries: str
    :param func_timeseries: Filepath to the 4D functional timeseries.
    :type func_brain_mask: str
    :param func_brain_mask: Filepath to the binary mask defining the brain
                            within the functional image.
    :type bg_func_brain_mask: str
    :param bg_func_brain_mask: Filepath to the inversion of the functional
                               brain mask.
    :type fd_file: str
    :param fd_file: File containing the RMSD values (calculated previously).
    :type subject_id: str
    :param subject_id: The participant ID.
    :type session_id: str
    :param session_id: The session ID.
    :type scan_id: str
    :param scan_id: The scan ID.
    :type site_name: str
    :param site_name: (default: None) The name of the site where the scan
                      was acquired.
    :type starter: str
    :param starter: (default: None) Dummy input for use as a Nipype function
                    node.
    :rtype: dict
    :return: Dictionary keyed by "<participant> <session> <series>" mapping
             out the QAP measure values (stringified) for this scan.
    """

    import numpy as np
    from time import strftime

    import qap
    from qap.temporal_qc import outlier_timepoints, quality_timepoints, \
                                global_correlation, calculate_percent_outliers
    from qap.dvars import calc_dvars

    def _summary(prefix, values, iqr, perc_out):
        # Build the five standard "<prefix> ..." report entries for one
        # measure; key spellings are consumed downstream and fixed.
        return {
            "%s (Mean)" % prefix: np.mean(values),
            "%s (Std Dev)" % prefix: np.std(values),
            "%s (Median)" % prefix: np.median(values),
            "%s IQR" % prefix: iqr,
            "%s percent outliers" % prefix: perc_out,
        }

    # Standardized DVARS within the brain mask.
    dvars = calc_dvars(func_timeseries, func_brain_mask)
    dvars_outliers, dvars_IQR = calculate_percent_outliers(dvars)
    mean_dvars = dvars.mean(0)[0]

    # RMSD / mean FD (Jenkinson), precomputed into fd_file.
    fd = np.loadtxt(fd_file)
    meanfd_outliers, meanfd_IQR = calculate_percent_outliers(fd)

    # 3dTout: fraction of outlier voxels per timepoint, inside the brain —
    # then the outliers of the outliers! AAHH!
    outliers = outlier_timepoints(func_timeseries, mask_file=func_brain_mask)
    outlier_perc_out, outlier_IQR = calculate_percent_outliers(outliers)

    # 3dTout again, restricted to outside of the brain.
    oob_outliers = outlier_timepoints(func_timeseries,
                                      mask_file=bg_func_brain_mask)
    oob_outlier_perc_out, oob_outlier_IQR = \
        calculate_percent_outliers(oob_outliers)

    # 3dTqual: per-timepoint quality index.
    quality = quality_timepoints(func_timeseries)
    quality_outliers, quality_IQR = calculate_percent_outliers(quality)

    # GCOR: global correlation.
    gcor = global_correlation(func_timeseries, func_brain_mask)

    # Assemble measures in the original report order.  DVARS is special:
    # its mean comes from dvars.mean(0)[0] and its IQR key is historically
    # spelled "Std. DVARs IQR" — preserved as-is.
    measures = {
        "Std. DVARS (Mean)": mean_dvars,
        "Std. DVARS (Std Dev)": np.std(dvars),
        "Std. DVARS (Median)": np.median(dvars),
        "Std. DVARs IQR": dvars_IQR,
        "Std. DVARS percent outliers": dvars_outliers,
    }
    measures.update(_summary("RMSD", fd, meanfd_IQR, meanfd_outliers))
    measures.update(_summary("Fraction of Outliers", outliers,
                             outlier_IQR, outlier_perc_out))
    measures.update(_summary("Fraction of OOB Outliers", oob_outliers,
                             oob_outlier_IQR, oob_outlier_perc_out))
    measures.update(_summary("Quality", quality,
                             quality_IQR, quality_outliers))
    measures["GCOR"] = gcor

    id_string = "%s %s %s" % (subject_id, session_id, scan_id)
    entry = {
        "QAP_pipeline_id": "QAP version %s" % qap.__version__,
        "Time": strftime("%Y-%m-%d %H:%M:%S"),
        "Participant": str(subject_id),
        "Session": str(session_id),
        "Series": str(scan_id),
        # every measure value is emitted as a string
        "functional_temporal": {k: str(v) for k, v in measures.items()},
    }

    if site_name:
        entry['Site'] = str(site_name)

    return {id_string: entry}
コード例 #6
0
def run_worker(inputs_queue, outputs_queue, index):
    """Worker loop: pull (subj_id, fmri_file, rp_file, root_output) tuples
    from ``inputs_queue``, compute QA scores for each subject, write them to
    ``<root_output>/<subj_id>/outputs/qa_scores.json``, and report a
    success/failure message on ``outputs_queue``.

    Terminates when it receives the FLAG_ALL_DONE sentinel, replying with
    FLAG_WORKER_FINISHED_PROCESSING.

    NOTE(review): os, shutil, json, subprocess, traceback, nibabel, numpy,
    the QA helper functions (efc, fber, fwhm, outlier_timepoints, ghost_all,
    quality_timepoints, time_serie_mq, spike_detector) and the FLAG_*
    sentinels are assumed to be module-level names — confirm.
    """

    # get something from the pile
    while True:
        inputs = inputs_queue.get()
        # stop condition
        if inputs == FLAG_ALL_DONE:
            outputs_queue.put(FLAG_WORKER_FINISHED_PROCESSING)
            break

        subj_id = inputs[0]
        fmri_file = inputs[1]
        rp_file = inputs[2]
        root_output = inputs[3]

        # define working directory (recreated fresh for every subject)
        working_directory = os.path.join(root_output, subj_id, "outputs")
        if os.path.isdir(working_directory):
            shutil.rmtree(working_directory)
        os.makedirs(working_directory)

        try:

            # get data array
            fmri_file_data = nibabel.load(fmri_file).get_data()

            # step 1: get movement snap and parameters
            snap_mvt, displacement_file = time_serie_mq(fmri_file,
                                                        rp_file,
                                                        "SPM",
                                                        working_directory,
                                                        time_axis=-1,
                                                        slice_axis=-2,
                                                        mvt_thr=1.5,
                                                        rot_thr=0.5)

            # step 2: get efc score (entropy focus criterion)
            r_efc = efc(fmri_file_data)

            # step 3: get masks from afni
            mask_file = os.path.join(working_directory, "mask.nii")
            cmd = ["3dAutomask", "-prefix", mask_file, fmri_file]
            subprocess.check_call(cmd)
            mask_data = nibabel.load(mask_file).get_data()

            # step 4: get fber score (foreground to background energy ratio)
            r_fber = fber(fmri_file_data, mask_data)

            # step 5: get smoothness of voxels score
            r_fwhm = fwhm(fmri_file, mask_file)

            # step 6: detect outlier timepoints in each volume
            outliers = outlier_timepoints(fmri_file, mask_file)
            mean_outliers = numpy.mean(outliers)

            # step 7: ghost scores
            gsrs = ghost_all(fmri_file_data, mask_data)

            # step 8: quality timepoints
            qt = quality_timepoints(fmri_file, automask=True)
            mean_qt = numpy.mean(qt)

            # step 9: spike detection
            snap_spikes, spikes_file = spike_detector(fmri_file,
                                                      working_directory)

            with open(spikes_file) as _file:
                spikes_dict = json.load(_file)

            # final step: save scores in dict
            scores = {
                "efc": "{0}".format(r_efc),
                "fber": "{0}".format(r_fber),
                "fwhm": "{0}".format(r_fwhm),
                "outliers": "{0}".format(outliers),
                "mean_outliers": "{0}".format(mean_outliers),
                "x_gsr": "{0}".format(gsrs[0]),
                "y_gsr": "{0}".format(gsrs[1]),
                "quality": "{0}".format(qt),
                "mean_quality": "{0}".format(mean_qt)
            }

            scores.update(spikes_dict)

            scores_file = os.path.join(working_directory, "qa_scores.json")
            with open(scores_file, "w") as _file:
                json.dump(scores, _file, indent=4)

            outputs_queue.put("{0} - Success".format(subj_id))
        except Exception:
            # a bare "except:" would also swallow KeyboardInterrupt and
            # SystemExit, making the worker impossible to stop — only
            # ordinary errors should be reported as a per-subject failure
            outputs_queue.put("{0} - FAIL:".format(subj_id))
            traceback.print_exc()
0
def run_worker(inputs_queue, outputs_queue, index):
    """Worker loop: consume (subj_id, fmri_file, rp_file, root_output)
    tuples from ``inputs_queue``, compute QA scores for each subject into
    ``<root_output>/<subj_id>/outputs/qa_scores.json``, and post a status
    message per subject on ``outputs_queue``.

    Stops on the FLAG_ALL_DONE sentinel, acknowledging with
    FLAG_WORKER_FINISHED_PROCESSING.

    NOTE(review): os, shutil, json, subprocess, traceback, nibabel, numpy,
    the QA helpers (efc, fber, fwhm, outlier_timepoints, ghost_all,
    quality_timepoints, time_serie_mq, spike_detector) and the FLAG_*
    sentinels are assumed to be module-level names — confirm.
    """

    # get something from the pile
    while True:
        inputs = inputs_queue.get()
        # stop condition
        if inputs == FLAG_ALL_DONE:
            outputs_queue.put(FLAG_WORKER_FINISHED_PROCESSING)
            break

        subj_id = inputs[0]
        fmri_file = inputs[1]
        rp_file = inputs[2]
        root_output = inputs[3]

        # define working directory (recreated fresh for every subject)
        working_directory = os.path.join(root_output,
                                         subj_id,
                                         "outputs")
        if os.path.isdir(working_directory):
            shutil.rmtree(working_directory)
        os.makedirs(working_directory)

        try:

            # get data array
            fmri_file_data = nibabel.load(fmri_file).get_data()

            # step 1: get movement snap and parameters
            snap_mvt, displacement_file = time_serie_mq(fmri_file,
                                                        rp_file,
                                                        "SPM",
                                                        working_directory,
                                                        time_axis=-1,
                                                        slice_axis=-2,
                                                        mvt_thr=1.5,
                                                        rot_thr=0.5)

            # step 2: get efc score (entropy focus criterion)
            r_efc = efc(fmri_file_data)

            # step 3: get masks from afni
            mask_file = os.path.join(working_directory, "mask.nii")
            cmd = ["3dAutomask", "-prefix", mask_file, fmri_file]
            subprocess.check_call(cmd)
            mask_data = nibabel.load(mask_file).get_data()

            # step 4: get fber score (foreground to background energy ratio)
            r_fber = fber(fmri_file_data, mask_data)

            # step 5: get smoothness of voxels score
            r_fwhm = fwhm(fmri_file, mask_file)

            # step 6: detect outlier timepoints in each volume
            outliers = outlier_timepoints(fmri_file, mask_file)
            mean_outliers = numpy.mean(outliers)

            # step 7: ghost scores
            gsrs = ghost_all(fmri_file_data, mask_data)

            # step 8: quality timepoints
            qt = quality_timepoints(fmri_file, automask=True)
            mean_qt = numpy.mean(qt)

            # step 9: spike detection
            snap_spikes, spikes_file = spike_detector(
                fmri_file, working_directory)

            with open(spikes_file) as _file:
                spikes_dict = json.load(_file)

            # final step: save scores in dict
            scores = {"efc": "{0}".format(r_efc),
                      "fber": "{0}".format(r_fber),
                      "fwhm": "{0}".format(r_fwhm),
                      "outliers": "{0}".format(outliers),
                      "mean_outliers": "{0}".format(mean_outliers),
                      "x_gsr": "{0}".format(gsrs[0]),
                      "y_gsr": "{0}".format(gsrs[1]),
                      "quality": "{0}".format(qt),
                      "mean_quality": "{0}".format(mean_qt)}

            scores.update(spikes_dict)

            scores_file = os.path.join(working_directory, "qa_scores.json")
            with open(scores_file, "w") as _file:
                json.dump(scores, _file, indent=4)

            outputs_queue.put("{0} - Success".format(subj_id))
        except Exception:
            # bare "except:" would also catch KeyboardInterrupt/SystemExit,
            # turning Ctrl+C into a silent per-subject failure and keeping
            # the loop alive — narrow to ordinary errors only
            outputs_queue.put("{0} - FAIL:".format(subj_id))
            traceback.print_exc()