Example No. 1
def convert_dicoms(subjs, dicom_dir_template, outputdir, queue=None, heuristic_func=None, extension=None):
    """Submit conversion jobs to SGE cluster
    """
    if heuristic_func is None:
        heuristic_func = infotodict
    for sid in subjs:
        sdir = dicom_dir_template % sid
        tdir = os.path.join(outputdir, sid)
        infofile = os.path.join(tdir, "%s.auto.txt" % sid)
        editfile = os.path.join(tdir, "%s.edit.txt" % sid)
        if os.path.exists(editfile):
            info = load_json(editfile)
        else:
            infofile = os.path.join(tdir, "%s.auto.txt" % sid)
            info = heuristic_func(sdir, os.path.join(tdir, "dicominfo.txt"))
            save_json(infofile, info)
        cfgfile = os.path.join(tdir, "%s.auto.cfg" % sid)
        if write_cfg(cfgfile, info, sid, tdir, extension):
            convertcmd = ["unpacksdcmdir", "-src", sdir, "-targ", tdir, "-generic", "-cfg", cfgfile, "-skip-moco"]
            convertcmd = " ".join(convertcmd)
            if queue:
                outcmd = 'ezsub.py -n sg-%s -q %s -c "%s"' % (sid, queue, convertcmd)
            else:
                outcmd = convertcmd
            os.system(outcmd)
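A minimal invocation sketch for the function above; the subject IDs, template, output directory, and queue name below are hypothetical.

# Hypothetical values, purely for illustration.
convert_dicoms(
    subjs=["sub001", "sub002"],
    dicom_dir_template="/data/dicoms/%s",   # filled in with each subject ID
    outputdir="/data/unpacked",
    queue="long.q",                         # submit each job through ezsub.py
)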
Example No. 2
def create_mindcontrol_entries(mindcontrol_base_dir, output_dir, subject, stats):
    import os
    from nipype.utils.filemanip import save_json
    
    metric_split = {"brainmask": ["ICV", "CortexVol", "TotalGrayVol"],
                    "wm": [ "Right-WM-hypointensities","Left-WM-hypointensities"],
                    "aparcaseg":[],
                    "ribbon":[]}
    volumes = ["brainmask.mgz", "wm.mgz", "aparc+aseg.mgz", "ribbon.mgz", "T1.mgz" ]
    volumes_list = [os.path.join(output_dir, subject, volume) for volume in volumes]

    all_entries = []
    
    for idx, entry_type in enumerate(["brainmask", "wm", "aparcaseg"]):
        entry = {"entry_type": entry_type, 
                 "subject_id": subject, 
                 "name": subject}
        base_img = os.path.relpath(volumes_list[-1], mindcontrol_base_dir)
        overlay_img = os.path.relpath(volumes_list[idx], mindcontrol_base_dir)
        entry["check_masks"] = [base_img.replace(".mgz",".nii.gz"), overlay_img.replace(".mgz",".nii.gz")]
        entry["metrics"] = {}
        for metric_name in metric_split[entry_type]:
            entry["metrics"][metric_name] = stats.pop(metric_name)
        if not len(metric_split[entry_type]):
            entry["metrics"] = stats
        all_entries.append(entry)
    
    output_json = os.path.abspath("mindcontrol_entries.json")
    save_json(output_json, all_entries)
    return output_json
Example No. 3
    def make_lga_json(self, centroids=None):
        if centroids is None:
            centroids = self.run_centroids()

        outdir = os.path.split(os.path.split(self.filename_list[0])[0])[0]
        save_json(os.path.join(outdir, "centroid_lga.json"), centroids)
        print("The json file is generated in the directory: ", outdir)
Example No. 4
def save_metadata(so, tr):
    import os
    from nipype.utils.filemanip import save_json
    metadata_dict = {"so" : so, "tr" : tr}
    metadata_file = os.path.join(os.getcwd(), 'metadata.json')
    save_json(metadata_file, metadata_dict)
    return metadata_file
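A sketch of how a helper like this is typically wired into a workflow by wrapping it in a nipype Function node; the node name, slice-order label, and TR value below are made up.

# Sketch only; the input values are hypothetical.
from nipype.pipeline.engine import Node
from nipype.interfaces.utility import Function

metadata_node = Node(
    Function(input_names=["so", "tr"],
             output_names=["metadata_file"],
             function=save_metadata),
    name="save_metadata")
metadata_node.inputs.so = "ascending"  # hypothetical slice-order label
metadata_node.inputs.tr = 2.0          # hypothetical repetition time in seconds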
Example No. 5
	def aggregate_outputs(self,runtime = None,  needed_outputs=None):

		outputs = self._outputs()

		outfile = os.path.join(os.getcwd(),'stat_result.json')

		if runtime is None:
			try:
				stats = load_json(outfile)['stat']
			except IOError:
				return self.run().outputs
		else:
			stats = []
			for line in runtime.stdout.split('\n'):
				if line:
					values = line.split()
					if len(values) > 1:
						stats.append([float(val) for val in values])
					else:
						stats.extend([float(val) for val in values])

			if len(stats) == 1:
				stats = stats[0]
			of = os.path.join(os.getcwd(),'TS.1D')
			f = open(of,'w')

			for st in stats:
				f.write(str(st) + '\n')
			f.close()
			save_json(outfile,dict(stat=of))
		outputs.stats =of
		
		return outputs
Example No. 6
    def aggregate_outputs(self, runtime=None, needed_outputs=None):

        outputs = self._outputs()

        outfile = os.path.join(os.getcwd(), 'stat_result.json')

        if runtime is None:
            try:
                stats = load_json(outfile)['stat']
            except IOError:
                return self.run().outputs
        else:
            stats = []
            for line in runtime.stdout.split('\n'):
                if line:
                    values = line.split()
                    if len(values) > 1:
                        stats.append([float(val) for val in values])
                    else:
                        stats.extend([float(val) for val in values])

            if len(stats) == 1:
                stats = stats[0]
            of = os.path.join(os.getcwd(), 'TS.1D')
            f = open(of, 'w')

            for st in stats:
                f.write(str(st) + '\n')
            f.close()
            save_json(outfile, dict(stat=of))
        outputs.stats = of

        return outputs
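The stdout-parsing branch above turns each whitespace-separated line into floats, nesting a sublist only when a line holds more than one value. A standalone sketch of that rule, with made-up stdout text:

# Made-up stdout, just to illustrate the parsing rule used above.
stdout = "1.0 2.0\n3.5\n"
stats = []
for line in stdout.split('\n'):
    if line:
        values = line.split()
        if len(values) > 1:
            stats.append([float(val) for val in values])   # multi-column row
        else:
            stats.extend([float(val) for val in values])   # single value
print(stats)  # [[1.0, 2.0], 3.5]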
Example No. 7
    def aggregate_outputs(self, runtime=None, needed_outputs=None):

        outputs = self._outputs()

        outfile = os.path.join(os.getcwd(), 'stat_result.json')

        if runtime is None:
            try:
                min_val = load_json(outfile)['stat']
            except IOError:
                return self.run().outputs
        else:
            min_val = []
            for line in runtime.stdout.split('\n'):
                if line:
                    values = line.split()
                    if len(values) > 1:
                        min_val.append([float(val) for val in values])
                    else:
                        min_val.extend([float(val) for val in values])

            if len(min_val) == 1:
                min_val = min_val[0]
            save_json(outfile, dict(stat=min_val))
        outputs.min_val = min_val

        return outputs
Example No. 8
	def aggregate_outputs(self,runtime = None,  needed_outputs=None):

		outputs = self._outputs()

		outfile = os.path.join(os.getcwd(),'stat_result.json')

		if runtime is None:
			try:
				min_val = load_json(outfile)['stat']
			except IOError:
				return self.run().outputs
		else:
			min_val = []
			for line in runtime.stdout.split('\n'):
				if line:
					values = line.split()
					if len(values) > 1:
						min_val.append([float(val) for val in values])
					else:
						min_val.extend([float(val) for val in values])

			if len(min_val) == 1:
				min_val = min_val[0]
			save_json(outfile,dict(stat=min_val))
		outputs.min_val = min_val
		
		return outputs
Example No. 9
def create_report_json(dwi_corrected_file,
                       eddy_rms,
                       eddy_report,
                       color_fa_file,
                       anat_mask_file,
                       outlier_indices,
                       eddy_qc_file,
                       outpath=op.abspath('./report.json')):

    report = {}
    report['dwi_corrected'] = createSprite4D(dwi_corrected_file)

    b0, colorFA, mask = createB0_ColorFA_Mask_Sprites(dwi_corrected_file,
                                                      color_fa_file,
                                                      anat_mask_file)
    report['b0'] = b0
    report['colorFA'] = colorFA
    report['anat_mask'] = mask
    report['outlier_volumes'] = outlier_indices.tolist()

    with open(eddy_report, 'r') as f:
        report['eddy_report'] = f.readlines()

    report['eddy_params'] = np.genfromtxt(eddy_rms).tolist()
    eddy_qc = load_json(eddy_qc_file)
    report['eddy_quad'] = eddy_qc
    save_json(outpath, report)
    return outpath
Example No. 10
def get_data(globStr):
    from nipype.utils.filemanip import load_json, save_json
    from glob import glob
    import os
    mse_folders = sorted(glob("/data/henry7/PBR/subjects/{}".format(globStr)))

    #status_file = sorted(glob("/data/henry7/PBR/subjects/mse{}/nii/status.json".format(globStr))) #static is in root
    output_data = []
    for i, foo in enumerate(mse_folders):
        #status = load_json(foo)
        mseid = foo.split('/')[-1]
        print(mseid)
        nii_folders = sorted(glob("/data/henry7/PBR/subjects/{}/nii/status.json".format(mseid)))
        dcm_folders = sorted(glob("/working/henry_temp/PBR_dicoms/{}".format(mseid)))
        #msid = status["t1_files"][0].split('/')[-1].split('-')[0]
        if len(nii_folders) == 0:
            nii = False
        else:
            nii = True
            nii_status = load_json(os.path.join("/data/henry7/PBR/subjects/", mseid, "nii/status.json"))
            dti_count = len(nii_status["dti_files"]) # nested list, why?
            flair_count = len(nii_status["flair_files"])
            gad_count = len(nii_status["gad_files"])
            mt_count = len(nii_status["mt_files"])
            noddi_count = len(nii_status["noddi_files"])

            if "psir_files" in nii_status:
                psir_count = len(nii_status["psir_files"])
            else:
                psir_count = 0
            rsfmri_files = len(nii_status["rsfmri_files"])
            t1_count = len(nii_status["t1_files"])
            t2_count = len(nii_status["t2_files"])

        if len(dcm_folders) == 0:
            dcm = False
        else:
            dcm = True
        output_data.append({"foo": foo,
                            "mse": mseid,
                            #"msid": msid,
                            "nii_folder": nii,
                            "dicom_folder": dcm
                            #"part": None
                            })
        if nii is True:
            output_data[-1]["dti_files"] = dti_count
            output_data[-1]["flair_files"] = flair_count
            output_data[-1]["gad_files"] = gad_count
            output_data[-1]["mt_files"] = mt_count
            output_data[-1]["noddi_files"] = noddi_count
            output_data[-1]["psir_files"] = psir_count
            output_data[-1]["rsfmri_files"] = rsfmri_files
            output_data[-1]["t1_files"] = t1_count
            output_data[-1]["t2_files"] = t2_count
            # output_data[-1]["test_mse"] = mseid

    save_json(os.path.join(os.path.realpath('.'), "status.json"), output_data)
    return output_data
Example No. 11
 def to_json(self):
     D = {}
     for key, ent in self.entries.items():
         if isinstance(ent,Entry):
             D[key]= self.types[key](ent.get())
         else:
             D[key]=self.types[key](self.vars[key].get())
     save_json(self._name,D)    
Example No. 12
def test_json():
    # Simple roundtrip test of json files, just a sanity check.
    adict = dict(a='one', c='three', b='two')
    fd, name = mkstemp(suffix='.json')
    save_json(name, adict)  # save_json closes the file
    new_dict = load_json(name)
    os.unlink(name)
    yield assert_equal, sorted(adict.items()), sorted(new_dict.items())
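The yield-based form above is nose-style; a plain-assert sketch of the same roundtrip for a pytest-style runner:

import os
from tempfile import mkstemp
from nipype.utils.filemanip import save_json, load_json

def test_json_roundtrip():
    adict = dict(a='one', c='three', b='two')
    fd, name = mkstemp(suffix='.json')
    os.close(fd)  # the descriptor from mkstemp is not reused below
    save_json(name, adict)
    new_dict = load_json(name)
    os.unlink(name)
    assert sorted(adict.items()) == sorted(new_dict.items())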
Example No. 13
def test_json():
    # Simple roundtrip test of json files, just a sanity check.
    adict = dict(a='one', c='three', b='two')
    fd, name = mkstemp(suffix='.json')
    save_json(name, adict) # save_json closes the file
    new_dict = load_json(name)
    os.unlink(name)
    yield assert_equal, sorted(adict.items()), sorted(new_dict.items())
Example No. 14
def convert_dicoms(sid, dicom_dir_template, outputdir, queue=None, heuristic_func=None,
                   extension = None,embed=False,no_moco=False):

    import os
    from nipype.utils.filemanip import load_json,save_json
    from glob import glob
    sdir = dicom_dir_template%sid
    tdir = os.path.join(outputdir, sid)
    infofile =  os.path.join(tdir,'%s.auto.txt' % sid)
    editfile =  os.path.join(tdir,'%s.edit.txt' % sid)
    if os.path.exists(editfile) and heuristic_func:
        info = load_json(editfile)
    elif not heuristic_func:
        pass
    else:
        infofile =  os.path.join(tdir,'%s.auto.txt' % sid)
        info = heuristic_func(sdir, os.path.join(tdir,'dicominfo.txt'))
        save_json(infofile, info)

    if heuristic_func:
        for key in info:
            if not os.path.exists(os.path.join(tdir,key)):
                os.mkdir(os.path.join(tdir,key))
            for idx, ext in enumerate(info[key]):
                convertcmd = ['dcmstack', sdir,'--dest-dir', os.path.join(tdir,key),
                              '--file-ext', '*-%d-*'%ext, '--force-read', '-v', '--output-name', key+'%03d'%(idx+1)]
                if embed:
                    convertcmd.append('--embed-meta')
                convertcmd = ' '.join(convertcmd)
                print(convertcmd)
                os.system(convertcmd)
    else:
        if not no_moco:
            convertcmd = ['dcmstack', sdir, '--dest-dir', os.path.join(outputdir,sid),
                          '--force-read','-v']
            if embed:
                convertcmd.append('--embed-meta')
            convertcmd = ' '.join(convertcmd)
            print(convertcmd)
            os.system(convertcmd)
        else:
            import numpy as np
            from bips.workflows.workflow19 import isMoco
            foo = np.genfromtxt(os.path.join(tdir,'dicominfo.txt'),dtype=str)
            for f in foo:
                if not isMoco(glob(os.path.join(sdir,f[1]))[0]):
                    convertcmd = ['dcmstack', sdir, '--dest-dir', os.path.join(outputdir,sid),
                          '--force-read','-v','--file-ext','*-%s-*'%f[2]]
                    if embed:
                        convertcmd.append('--embed-meta')
                    convertcmd = ' '.join(convertcmd)
                    print(convertcmd)
                    os.system(convertcmd)
                else:
                    print "skipping moco run %s"%f[1]
    return 1
Example No. 15
def save_config(config,path=os.path.abspath('config.json')):
    data = config.get()
    d = {}
    for key, item in data.items():
        if isinstance(item,Data):
            d[key] = item.get_fields()
        else:
            d[key] = item
    save_json(filename=path,data=d)
    return path
Example No. 16
def csv_to_obj(obj_file, csv_file):
    from nipype.utils.filemanip import load_json, save_json, fname_presuffix
    import os
    import pandas as pd

    out_obj = fname_presuffix(csv_file, newpath = os.path.abspath("."), suffix="_xfm", use_ext=False)+".json"
    foo = load_json(obj_file)
    df = pd.read_csv(csv_file)
    df["y"] = -1*df.y
    df["x"] = -1*df.x
    #print(df.shape)#.values[:,:,:,0].tolist()
    foo["vertices"] = df.values[:,:2].tolist()
    save_json(out_obj, foo)
    return out_obj
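A hypothetical call, assuming a mesh JSON with a "vertices" key and a CSV with x and y columns; both file names are made up.

out_json = csv_to_obj("surface_template.json", "electrode_coords.csv")
print(out_json)  # .../electrode_coords_xfm.json written in the current directory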
Example No. 17
    def aggregate_outputs(self, runtime=None, needed_outputs=None):
        outputs = self._outputs()

        if runtime is None:
            try:
                out_info = load_json(self.inputs.out_file)[
                    self.inputs.json_attr][self.inputs.json_var]
            except IOError:
                return self.run().outputs
        else:
            out_info = []
            for line in runtime.stdout.split('\n'):
                if line:
                    values = line.split()

                    if self.inputs.json_type == 'float':
                        if len(values) > 1:
                            #out_info.append([float(val) for val in values])
                            out_info.append([val for val in values])
                        else:
                            #out_info.extend([float(val) for val in values])
                            out_info.extend([val for val in values])

                    elif self.inputs.json_type == 'integer':
                        if len(values) > 1:
                            #out_info.append([int(val) for val in values])
                            out_info.append([val for val in values])
                        else:
                            #out_info.extend([int(val) for val in values])
                            out_info.extend([val for val in values])

                    else:
                        if len(values) > 1:
                            out_info.append([val for val in values])
                        else:
                            out_info.extend([val for val in values])

            if len(out_info) == 1:
                out_info = out_info[0]
            if os.path.exists(self.inputs.out_file):
                update_minchd_json(self.inputs.out_file, out_info,
                                   self.inputs.json_var, self.inputs.json_attr)
            else:
                save_json(
                    self.inputs.out_file,
                    dict(((self.inputs.json_var,
                           dict(((self.inputs.json_attr, out_info), ))), )))

        outputs.out_file = out_info
        return outputs
Example No. 18
 def _save_to_file(self):
     """Save `self.Configuration_File` to the file `self.filedir`+`self.filename`."""
     path = os.path.join(self.filedir, self.filename)
     #f = open(path, 'w')
     #f.write(self.value + '\n')
     #f.close()
     data = self._config.get()
     d = {}
     for key, item in data.items():
         if isinstance(item,Data):
             d[key] = item.get_fields()
         else:
             d[key] = item
     save_json(filename=path,data=d)
     self.saved = True
Example No. 19
def create_mindcontrol_entries(output_dir, subject, stats):
    import os
    from nipype.utils.filemanip import save_json

    cortical_wm = "CerebralWhiteMatterVol"  # for later FS version
    if not stats.get(cortical_wm):
        cortical_wm = "CorticalWhiteMatterVol"
        if not stats.get(cortical_wm):
            cortical_wm = "CorticalWhiteMatter"

    metric_split = {
        "brainmask": ["eTIV", "CortexVol", "TotalGrayVol"],
        "wm": [
            cortical_wm, "WM-hypointensities", "Right-WM-hypointensities",
            "Left-WM-hypointensities"
        ],
        "aparcaseg": []
    }

    volumes = {
        'aparcaseg': ['T1.nii.gz', 'aparc+aseg.nii.gz'],
        'brainmask': ['T1.nii.gz', 'brainmask.nii.gz'],
        'wm': ['T1.nii.gz', 'ribbon.nii.gz', 'wm.nii.gz']
    }

    all_entries = []

    for idx, entry_type in enumerate(["brainmask", "wm", "aparcaseg"]):
        entry = {
            "entry_type": entry_type,
            "subject_id": subject,
            "name": subject
        }
        volumes_list = [
            os.path.join(subject, 'mri', volume)
            for volume in volumes[entry_type]
        ]
        entry["check_masks"] = volumes_list
        entry["metrics"] = {}
        for metric_name in metric_split[entry_type]:
            entry["metrics"][metric_name] = stats.pop(metric_name)
        if not len(metric_split[entry_type]):
            entry["metrics"] = stats
        all_entries.append(entry)

    output_json = os.path.abspath("mindcontrol_entries.json")
    save_json(output_json, all_entries)
    return output_json
Example No. 20
 def _save_hashfile(self, hashfile, hashed_inputs):
     try:
         save_json(hashfile, hashed_inputs)
     except (IOError, TypeError):
         err_type = sys.exc_info()[0]
         if err_type is TypeError:
             # XXX - SG current workaround is to just
             # create the hashed file and not put anything
             # in it
             fd = open(hashfile,'wt')
             fd.writelines(str(hashed_inputs))
             fd.close()
             logger.warn('Unable to write a particular type to the json '\
                             'file')
         else:
             logger.critical('Unable to open the file in write mode: %s'% \
                                 hashfile)
Example No. 21
def run_workflow(msid):
    print("jim_substraction msID [-o <output directory>]")
    config = get_config()
    # msid = sys.argv[1]
    print("msID is: ", msid, "\n")
    """
    # This is not implemented so far
    #TODO
    if sys.argv.__len__() == 4:
        out_dir = sys.argv[3]
        print("Output directory is: ", out_dir)
    """

    status = load_json(
        os.path.join(config["output_directory"], msid, 't1Ants_reg_long',
                     'status.json'))
    fixed_list = status["fixed_image"]
    # warped_list = status["warped_brain"]
    warped_list = status["affined_brain"]
    mseIDs = status["mseIDs"]

    if len(fixed_list) + 1 != len(mseIDs) or len(warped_list) + 1 != len(
            mseIDs):
        raise NotImplementedError(
            "This script assumes the lists are one-dimensional; please modify it")

    for i, fixed in enumerate(fixed_list):
        print(fixed, warped_list[i])
        wf = create_jim_workflow(config, fixed, warped_list[i])

        wf.config = {
            "execution": {
                "crashdump_dir":
                os.path.join(
                    config["crash_directory"],
                    os.path.split(fixed)[1][0:-7] + '-' +
                    os.path.split(warped_list[i])[1][0:-7], 'jim_substraction')
            }
        }
        wf.run()

    outputs = create_status(config, msid, mseIDs)
    save_json(
        os.path.join(config["james_output_dir"], msid, 'substraction',
                     'status.json'), outputs)
    return None
Example No. 22
 def aggregate_outputs(self, runtime=None):
     outputs = self._outputs()
     outfile = os.path.join(os.getcwd(), 'stat_result.json')
     if runtime is None:
         out_stat = load_json(outfile)['stat']
     else:
         out_stat = []
         for line in runtime.stdout.split('\n'):
             if line:
                 values = line.split()
                 if len(values)>1:
                     out_stat.append([float(val) for val in values])
                 else:
                     out_stat.extend([float(val) for val in values])
         if len(out_stat)==1:
             out_stat = out_stat[0]
         save_json(outfile, dict(stat=out_stat))
     outputs.out_stat = out_stat
     return outputs
Example No. 23
def safe_upload_to_deposition(data_file, title, description, creators):
    manifest = get_data_manifest_deposition()

    m = hashlib.md5()
    data = np.load(data_file)

    for value in data:
        m.update(str(value).encode("utf-8"))

    new_manifest_hash = m.hexdigest()
    new_manifest_data = {}

    #check filenames in manifest
    manifest_files = get_deposition_files(manifest["id"])
    manifest_filenames = [mf["filename"] for mf in manifest_files]

    assert new_manifest_hash + ".json" not in manifest_filenames, "This data already exists!"

    # upload the file to a new deposition

    new_dep = create_empty_deposition()
    metadata = create_deposition_metadata(
        title, "dataset",
        description + " md5-data-hash: %s" % new_manifest_hash, creators)
    update_deposition_metadata(new_dep["id"], metadata)
    print("Going to upload large file ...")
    upload_large_file_to_deposition(new_dep["links"]["bucket"], data_file)

    # record that deposition to the manifest data

    new_manifest_data["metadata"] = metadata
    new_manifest_data["md5_hash"] = new_manifest_hash
    new_manifest_data["deposition"] = new_dep
    manifest_save_dir = tempfile.mkdtemp()
    manifest_filename = op.join(manifest_save_dir, new_manifest_hash + ".json")
    save_json(manifest_filename, new_manifest_data)
    print("Going to update the data manifest ...")
    # save that to the manifest deposition
    upload_file_to_deposition(manifest["id"], manifest_filename)
    print("Done with all uploads")
    return new_dep
Example No. 24
def parse_dcm_dir(dcmdir, outfile=os.path.abspath('dicominfo.json')):
    import dicom
    from glob import glob
    from nipype.utils.filemanip import save_json

    # grab all dicoms in the dir
    files = glob(os.path.join(dcmdir, '*.dcm'))

    # initialize a dict that will be a summary in json format.

    info = {}

    # info's keys are 'SeriesNumber_ProtocolName' and PatientName
    # items are dicts with keys "dicoms": list of dicoms
    # "TE" and "TR" floats
    # if for some reason there is a mismatch, raise error for now
    for d in files:
        sortdcm(d, info)

    save_json(outfile, info)
    return outfile
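A minimal usage sketch; the directory path is hypothetical, and parse_dcm_dir relies on a sortdcm helper defined elsewhere in the same module.

from nipype.utils.filemanip import load_json

summary_file = parse_dcm_dir("/data/dicoms/sub001")  # writes dicominfo.json
summary = load_json(summary_file)
print(sorted(summary.keys()))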
Example No. 25
def parse_dcm_dir(dcmdir,outfile=os.path.abspath('dicominfo.json')):
    import dicom
    from glob import glob
    from nipype.utils.filemanip import save_json

    # grab all dicoms in the dir
    files = glob(os.path.join(dcmdir,'*.dcm'))

    # initialize a dict that will be a summary in json format.
    
    info = {}

    # info's keys are 'SeriesNumber_ProtocolName' and PatientName
    # items are dicts with keys "dicoms": list of dicoms
    # "TE" and "TR" floats
    # if for some reason there is a mismatch, raise error for now
    for d in files: 
        sortdcm(d,info)

    save_json(outfile,info)
    return outfile    
Example No. 26
def convert_dicoms(sid, dicom_dir_template, outputdir, queue=None, heuristic_func=None,
                   extension = None,embed=False):

    import os
    from nipype.utils.filemanip import load_json,save_json

    sdir = dicom_dir_template%sid
    tdir = os.path.join(outputdir, sid)
    infofile =  os.path.join(tdir,'%s.auto.txt' % sid)
    editfile =  os.path.join(tdir,'%s.edit.txt' % sid)
    if os.path.exists(editfile) and heuristic_func:
        info = load_json(editfile)
    elif not heuristic_func:
        pass
    else:
        infofile =  os.path.join(tdir,'%s.auto.txt' % sid)
        info = heuristic_func(sdir, os.path.join(tdir,'dicominfo.txt'))
        save_json(infofile, info)

    if heuristic_func:
        for key in info:
            if not os.path.exists(os.path.join(tdir,key)):
                os.mkdir(os.path.join(tdir,key))
            for idx, ext in enumerate(info[key]):
                convertcmd = ['dcmstack', sdir,'--dest-dir', os.path.join(tdir,key),
                              '--file-ext', '*-%d-*'%ext, '--force-read', '-v', '--output-name', key+'%03d'%(idx+1)]
                if embed:
                    convertcmd.append('--embed-meta')
                convertcmd = ' '.join(convertcmd)
                print(convertcmd)
                os.system(convertcmd)
    else:
        convertcmd = ['dcmstack', sdir, '--dest-dir', os.path.join(outputdir,sid),
                      '--force-read','-v']
        if embed:
            convertcmd.append('--embed-meta')
        convertcmd = ' '.join(convertcmd)
        print(convertcmd)
        os.system(convertcmd)
    return 1
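Where shell quoting is a concern, the same dcmstack call can be made without joining the argument list; a sketch using subprocess instead of os.system, with hypothetical paths.

import os
import subprocess

sdir, outputdir, sid, embed = "/data/dicoms/sub001", "/data/nifti", "sub001", True
convertcmd = ['dcmstack', sdir, '--dest-dir', os.path.join(outputdir, sid),
              '--force-read', '-v']
if embed:
    convertcmd.append('--embed-meta')
subprocess.run(convertcmd, check=True)  # passes args directly, no shell parsing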
Example No. 27
 def aggregate_outputs(self, runtime=None, needed_outputs=None):
     outputs = self._outputs()
     # local caching for backward compatibility
     outfile = os.path.join(os.getcwd(), 'stat_result.json')
     if runtime is None:
         try:
             out_stat = load_json(outfile)['stat']
         except IOError:
             return self.run().outputs
     else:
         out_stat = []
         for line in runtime.stdout.split('\n'):
             if line:
                 values = line.split()
                 if len(values) > 1:
                     out_stat.append([float(val) for val in values])
                 else:
                     out_stat.extend([float(val) for val in values])
         if len(out_stat) == 1:
             out_stat = out_stat[0]
         save_json(outfile, dict(stat=out_stat))
     outputs.out_stat = out_stat
     return outputs
Example No. 29
def run_workflow(bids_dir):
    subjects_dir = os.path.join(bids_dir, "derivatives", "freesurfer")
    mindcontrol_base_dir = os.path.join(bids_dir, "derivatives", "mindcontrol_freesurfer")
    mindcontrol_outdir = mindcontrol_base_dir
    workflow_working_dir = os.path.join(mindcontrol_base_dir, "scratch"+"_"+str(uuid.uuid4()))

    subject_paths = glob(op.join(subjects_dir, "*"))

    subjects = []
    for path in subject_paths:
        subject = path.split('/')[-1]
        # check if mri dir exists, and don't add fsaverage
        if op.exists(op.join(path, 'mri')) and subject != 'fsaverage':
            subjects.append(subject)


    input_node = Node(IdentityInterface(fields=['subject_id',"subjects_dir",
                                            "mindcontrol_base_dir", "output_dir"]), name='inputnode')
    input_node.iterables=("subject_id", subjects)
    input_node.inputs.subjects_dir = subjects_dir
    input_node.inputs.mindcontrol_base_dir = mindcontrol_base_dir #this is where start_static_server is running
    input_node.inputs.output_dir = mindcontrol_outdir #this is in the freesurfer/ directory under the base_dir

    dg_node=Node(Function(input_names=["subjects_dir", "subject", "volumes"],
                          output_names=["volume_paths"], 
                          function=data_grabber), 
                 name="datagrab")
    #dg_node.inputs.subjects_dir = subjects_dir
    dg_node.inputs.volumes = volumes


    mriconvert_node = MapNode(MRIConvert(out_type="niigz"), 
                              iterfield=["in_file"], 
                              name='convert')

    get_stats_node = Node(Function(input_names=["subjects_dir", "subject"],
                                   output_names = ["output_dict"],
                                   function=parse_stats), name="get_freesurfer_stats")

    write_mindcontrol_entries = Node(Function(input_names = ["mindcontrol_base_dir",
                                                             "output_dir",
                                                             "subject",
                                                             "stats"],
                                              output_names=["output_json"],
                                              function=create_mindcontrol_entries), 
                                     name="get_mindcontrol_entries")

    datasink_node = Node(DataSink(),
                         name='datasink')
    subst = [('out_file',''),('_subject_id_',''),('_out','')]  + [("_convert%d" % index, "") for index in range(len(volumes))] 
    datasink_node.inputs.substitutions = subst

    wf = Workflow(name="MindPrepFS")
    wf.base_dir = workflow_working_dir
    wf.connect(input_node,"subject_id", dg_node,"subject")
    wf.connect(input_node,"subjects_dir", dg_node, "subjects_dir")
    wf.connect(input_node, "subject_id", get_stats_node, "subject")
    wf.connect(input_node, "subjects_dir", get_stats_node, "subjects_dir")
    wf.connect(input_node, "subject_id", write_mindcontrol_entries, "subject")
    wf.connect(input_node, "mindcontrol_base_dir", write_mindcontrol_entries, "mindcontrol_base_dir")
    wf.connect(input_node, "output_dir", write_mindcontrol_entries, "output_dir")
    wf.connect(get_stats_node, "output_dict", write_mindcontrol_entries, "stats")
    wf.connect(input_node, "output_dir", datasink_node, "base_directory")
    wf.connect(dg_node,"volume_paths", mriconvert_node, "in_file")
    wf.connect(mriconvert_node,'out_file',datasink_node,'out_file')
    wf.connect(write_mindcontrol_entries, "output_json", datasink_node, "out_file.@json")
    wf.run()

    shutil.rmtree(workflow_working_dir)
    from nipype.utils.filemanip import load_json, save_json

    files = glob(os.path.join(mindcontrol_base_dir, "*", "mindcontrol_entries.json"))
    output = []
    for f in files:
        output += load_json(f)
    save_json(os.path.join(mindcontrol_base_dir, "all_entries.json"), output)
Example No. 30
def convert_dicoms(sid, dicom_dir_template, outputdir, queue=None, heuristic_func=None,
                   extension = None,embed=False,no_moco=False):

    import os
    from nipype.utils.filemanip import load_json,save_json
    from glob import glob
    sdir = dicom_dir_template%sid
    tdir = os.path.join(outputdir, sid)
    infofile =  os.path.join(tdir,'%s.auto.txt' % sid)
    editfile =  os.path.join(tdir,'%s.edit.txt' % sid)
    if os.path.exists(editfile) and heuristic_func:
        info = load_json(editfile)
    elif not heuristic_func:
        pass
    else:
        infofile =  os.path.join(tdir,'%s.auto.txt' % sid)
        info = heuristic_func(sdir, os.path.join(tdir,'dicominfo.txt'))
        save_json(infofile, info)

    if heuristic_func:
        import dicom
        import dcmstack
        for key in info:
            if not os.path.exists(os.path.join(tdir,key)):
                os.mkdir(os.path.join(tdir,key))
            for idx, ext in enumerate(info[key]):
                src = glob(os.path.join(sdir,'*-%d-*'%ext))
                print(key)
                dcm = dcmstack.DicomStack()
                added_success = True
                for f in src:
                    try:
                        dcm.add_dcm(dicom.read_file(f,force=True))
                        
                    except Exception:
                        added_success = False
                        print "error adding %s to stack"%f
                if added_success:
                    a = dcm.to_nifti(embed_meta = embed)
                    a.to_filename(os.path.join(tdir,key,key+'%03d'%(idx+1)))
                #convertcmd = ['dcmstack', sdir,'--dest-dir', os.path.join(tdir,key),
                #              '--file-ext', '*-%d-*'%ext, '--force-read', '-v', '--output-name', key+'%03d'%(idx+1)]
                #if embed:
                #    convertcmd.append('--embed-meta')
                #convertcmd = ' '.join(convertcmd)
                #print convertcmd
                #os.system(convertcmd)
    else:
        if not no_moco:
            convertcmd = ['dcmstack', sdir, '--dest-dir', os.path.join(outputdir,sid),
                          '--force-read','-v']
            if embed:
                convertcmd.append('--embed-meta')
            convertcmd = ' '.join(convertcmd)
            print(convertcmd)
            os.system(convertcmd)
        else:
            import numpy as np
            def isMoco(dcmfile):
                """Determine if a dicom file is a mocoseries
                """
                import subprocess
                print(dcmfile)
                cmd = ['mri_probedicom', '--i', dcmfile, '--t', '8', '103e']
                proc  = subprocess.Popen(cmd,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
                stdout, stderr = proc.communicate()
                return stdout.strip().startswith('MoCoSeries')

            foo = np.genfromtxt(os.path.join(tdir,'dicominfo.txt'),dtype=str)
            for f in foo:
                if not isMoco(glob(os.path.join(sdir,f[1]))[0]):
                    convertcmd = ['dcmstack', sdir, '--dest-dir', os.path.join(outputdir,sid),
                          '--force-read','-v','--file-ext','*-%s-*'%f[2]]
                    if embed:
                        convertcmd.append('--embed-meta')
                    convertcmd = ' '.join(convertcmd)
                    print(convertcmd)
                    os.system(convertcmd)
                else:
                    print "skipping moco run %s"%f[1]
    return 1
Example No. 31
        if len(status['t1_files']) == 0:
            t1_check['missing'][row['msid']].append(row['mse'])
        elif len(status['t1_files']) > 1:
            t1_check['more_than_one'][row['msid']].append(row['mse'])

    for ms in msid_sorted:
        if t1_check['missing'].get(ms) == []:
            t1_check['missing'].pop(ms, None)
        if t1_check['more_than_one'].get(ms) == []:
            t1_check['more_than_one'].pop(ms, None)
        if t1_check['no_alignment'].get(ms) == []:
            t1_check['no_alignment'].pop(ms, None)

    outdir = '/data/henry7/james/antje_cohort'
    save_json(os.path.join(outdir, 'T1_status.json'), t1_check)

    msid_for_t2flair = list(set(msid_sorted) - set(no_alignment))
    print(msid_for_t2flair)
    t2flair_check = {}
    t2flair_check['missing'] = {}
    t2flair_check['more_than_one'] = {}

    for ms in msid_for_t2flair:
        t2flair_check['missing'][ms] = []
        t2flair_check['more_than_one'][ms] = []

    for index, row in df.iterrows():
        print(index, row['msid'], row['mse'])
        status_dir = os.path.join(config["output_directory"], row['mse'],
                                  'alignment', 'status.json')
Example No. 32
def plot_connectogram(conn_matrix, conn_model, atlas_name, dir_path, ID,
                      NETWORK, label_names):
    import json
    import numpy as np
    import networkx as nx
    from scipy.cluster.hierarchy import linkage, fcluster
    from nipype.utils.filemanip import save_json
    from pynets.thresholding import normalize
    from pathlib import Path
    from random import sample
    from string import ascii_uppercase, ascii_lowercase
    link_comm = True

    conn_matrix = normalize(conn_matrix)
    G = nx.from_numpy_matrix(conn_matrix)

    def doClust(X, clust_levels):
        ##get the linkage diagram
        Z = linkage(
            X,
            'ward',
        )
        ##choose # cluster levels
        cluster_levels = range(1, int(clust_levels))
        ##init array to store labels for each level
        clust_levels_tmp = int(clust_levels) - 1
        label_arr = np.zeros((int(clust_levels_tmp), int(X.shape[0])))
        ##iterate thru levels
        for c in cluster_levels:
            fl = fcluster(Z, c, criterion='maxclust')
            #print(fl)
            label_arr[c - 1, :] = fl
        return label_arr, clust_levels_tmp

    if NETWORK is not None:
        clust_levels = 3
        [label_arr, clust_levels_tmp] = doClust(conn_matrix, clust_levels)
    else:
        if link_comm:
            from pynets.netstats import link_communities
            #G_lin = nx.line_graph(G)
            ##Plot link communities
            node_comm_aff_mat = link_communities(conn_matrix,
                                                 type_clustering='single')
            clust_levels = len(node_comm_aff_mat)
            clust_levels_tmp = int(clust_levels) - 1
            mask_mat = np.squeeze(
                np.array([node_comm_aff_mat == 0]).astype('int'))
            label_arr = node_comm_aff_mat * np.expand_dims(
                np.arange(1, clust_levels + 1), axis=1) + mask_mat
        #else:
        ##Plot node communities
        #from pynets.netstats import community_louvain
        #[ci, q] = community_louvain(conn_matrix, gamma=0.75)
        #clust_levels = len(np.unique(ci))
        #clust_levels_tmp = int(clust_levels) - 1

    def get_node_label(node_idx, labels, clust_levels_tmp):
        def get_letters(n, random=False, uppercase=False):
            """Return n letters of the alphabet."""
            letters = (ascii_uppercase if uppercase else ascii_lowercase)
            return json.dumps(
                (sample(letters, n) if random else list(letters[:n])))

        abet = get_letters(clust_levels_tmp)
        node_labels = labels[:, node_idx]
        return ".".join([
            "{}{}".format(abet[i], int(l)) for i, l in enumerate(node_labels)
        ]) + ".{}".format(label_names[node_idx])

    output = []
    for node_idx, connections in enumerate(G.adjacency_list()):
        weight_vec = []
        for i in connections:
            wei = G.get_edge_data(node_idx, int(i))['weight']
            #wei = G_lin.get_edge_data(node_idx,int(i))['weight']
            weight_vec.append(wei)
        entry = {}
        nodes_label = get_node_label(node_idx, label_arr, clust_levels_tmp)
        entry["name"] = nodes_label
        entry["size"] = len(connections)
        entry["imports"] = [
            get_node_label(int(d) - 1, label_arr, clust_levels_tmp)
            for d in connections
        ]
        entry["weights"] = weight_vec
        output.append(entry)

    if NETWORK is not None:
        json_file_name = str(
            ID
        ) + '_' + NETWORK + '_connectogram_' + conn_model + '_network.json'
        connectogram_plot = dir_path + '/' + json_file_name
        connectogram_js_sub = dir_path + '/' + str(
            ID) + '_' + NETWORK + '_connectogram_' + conn_model + '_network.js'
        connectogram_js_name = str(
            ID) + '_' + NETWORK + '_connectogram_' + conn_model + '_network.js'
    else:
        json_file_name = str(ID) + '_connectogram_' + conn_model + '.json'
        connectogram_plot = dir_path + '/' + json_file_name
        connectogram_js_sub = dir_path + '/' + str(
            ID) + '_connectogram_' + conn_model + '.js'
        connectogram_js_name = str(ID) + '_connectogram_' + conn_model + '.js'
    save_json(connectogram_plot, output)

    ##Copy index.html and json to dir_path
    #conn_js_path = '/Users/PSYC-dap3463/Applications/PyNets/pynets/connectogram.js'
    #index_html_path = '/Users/PSYC-dap3463/Applications/PyNets/pynets/index.html'
    conn_js_path = Path(__file__).parent / "connectogram.js"
    index_html_path = Path(__file__).parent / "index.html"
    replacements_html = {'connectogram.js': str(connectogram_js_name)}
    with open(index_html_path) as infile, open(str(dir_path + '/index.html'),
                                               'w') as outfile:
        for line in infile:
            for src, target in replacements_html.items():
                line = line.replace(src, target)
            outfile.write(line)
    replacements_js = {'template.json': str(json_file_name)}
    with open(conn_js_path) as infile, open(connectogram_js_sub,
                                            'w') as outfile:
        for line in infile:
            for src, target in replacements_js.items():
                line = line.replace(src, target)
            outfile.write(line)
Example No. 33
def plot_connectogram(conn_matrix, conn_model, atlas_select, dir_path, ID,
                      network, label_names):
    import json
    from pathlib import Path
    from networkx.readwrite import json_graph
    from pynets.thresholding import normalize
    from pynets.netstats import most_important
    from scipy.cluster.hierarchy import linkage, fcluster
    from nipype.utils.filemanip import save_json

    # Advanced Settings
    comm = 'nodes'
    pruned = False
    #color_scheme = 'interpolateCool'
    #color_scheme = 'interpolateGnBu'
    #color_scheme = 'interpolateOrRd'
    #color_scheme = 'interpolatePuRd'
    #color_scheme = 'interpolateYlOrRd'
    #color_scheme = 'interpolateReds'
    #color_scheme = 'interpolateGreens'
    color_scheme = 'interpolateBlues'
    # Advanced Settings

    conn_matrix = normalize(conn_matrix)
    G = nx.from_numpy_matrix(conn_matrix)
    if pruned is True:
        [G, pruned_nodes] = most_important(G)
        conn_matrix = nx.to_numpy_array(G)

        pruned_nodes.sort(reverse=True)
        for j in pruned_nodes:
            del label_names[label_names.index(label_names[j])]

    def doClust(X, clust_levels):
        # get the linkage diagram
        Z = linkage(X, 'ward')
        # choose # cluster levels
        cluster_levels = range(1, int(clust_levels))
        # init array to store labels for each level
        clust_levels_tmp = int(clust_levels) - 1
        label_arr = np.zeros((int(clust_levels_tmp), int(X.shape[0])))
        # iterate thru levels
        for c in cluster_levels:
            fl = fcluster(Z, c, criterion='maxclust')
            #print(fl)
            label_arr[c - 1, :] = fl
        return label_arr, clust_levels_tmp

    if comm == 'nodes' and len(conn_matrix) > 40:
        from pynets.netstats import modularity_louvain_und_sign

        gamma = nx.density(nx.from_numpy_array(conn_matrix))
        try:
            [node_comm_aff_mat,
             q] = modularity_louvain_und_sign(conn_matrix, gamma=float(gamma))
            print("%s%s%s%s%s" %
                  ('Found ', str(len(np.unique(node_comm_aff_mat))),
                   ' communities with γ=', str(gamma), '...'))
        except Exception:
            print(
                'WARNING: Louvain community detection failed. Proceeding with single community affiliation vector...'
            )
            node_comm_aff_mat = np.ones(conn_matrix.shape[0]).astype('int')
        clust_levels = len(node_comm_aff_mat)
        clust_levels_tmp = int(clust_levels) - 1
        mask_mat = np.squeeze(np.array([node_comm_aff_mat == 0]).astype('int'))
        label_arr = node_comm_aff_mat * np.expand_dims(
            np.arange(1, clust_levels + 1), axis=1) + mask_mat
    elif comm == 'links' and len(conn_matrix) > 40:
        from pynets.netstats import link_communities
        # Plot link communities
        link_comm_aff_mat = link_communities(conn_matrix,
                                             type_clustering='single')
        print("%s%s%s" %
              ('Found ', str(len(link_comm_aff_mat)), ' communities...'))
        clust_levels = len(link_comm_aff_mat)
        clust_levels_tmp = int(clust_levels) - 1
        mask_mat = np.squeeze(np.array([link_comm_aff_mat == 0]).astype('int'))
        label_arr = link_comm_aff_mat * np.expand_dims(
            np.arange(1, clust_levels + 1), axis=1) + mask_mat
    elif len(conn_matrix) > 20:
        print(
            'Graph too small for reliable plotting of communities. Plotting by fcluster instead...'
        )
        if len(conn_matrix) >= 250:
            clust_levels = 7
        elif len(conn_matrix) >= 200:
            clust_levels = 6
        elif len(conn_matrix) >= 150:
            clust_levels = 5
        elif len(conn_matrix) >= 100:
            clust_levels = 4
        elif len(conn_matrix) >= 50:
            clust_levels = 3
        else:
            clust_levels = 2
        [label_arr, clust_levels_tmp] = doClust(conn_matrix, clust_levels)

    def get_node_label(node_idx, labels, clust_levels_tmp):
        from collections import OrderedDict

        def write_roman(num):
            roman = OrderedDict()
            roman[1000] = "M"
            roman[900] = "CM"
            roman[500] = "D"
            roman[400] = "CD"
            roman[100] = "C"
            roman[90] = "XC"
            roman[50] = "L"
            roman[40] = "XL"
            roman[10] = "X"
            roman[9] = "IX"
            roman[5] = "V"
            roman[4] = "IV"
            roman[1] = "I"

            def roman_num(num):
                for r in roman.keys():
                    x, y = divmod(num, r)
                    yield roman[r] * x
                    num -= (r * x)
                    if num > 0:
                        roman_num(num)
                    else:
                        break

            return "".join([a for a in roman_num(num)])

        rn_list = []
        node_idx = node_idx - 1
        node_labels = labels[:, node_idx]
        for k in [int(l) for i, l in enumerate(node_labels)]:
            rn_list.append(json.dumps(write_roman(k)))
        abet = rn_list
        node_lab_alph = ".".join([
            "{}{}".format(abet[i], int(l)) for i, l in enumerate(node_labels)
        ]) + ".{}".format(label_names[node_idx])
        return node_lab_alph

    output = []

    adj_dict = {}
    for i in list(G.adjacency()):
        source = list(i)[0]
        target = list(list(i)[1])
        adj_dict[source] = target

    for node_idx, connections in adj_dict.items():
        weight_vec = []
        for i in connections:
            wei = G.get_edge_data(node_idx, int(i))['weight']
            weight_vec.append(wei)
        entry = {}
        nodes_label = get_node_label(node_idx, label_arr, clust_levels_tmp)
        entry["name"] = nodes_label
        entry["size"] = len(connections)
        entry["imports"] = [
            get_node_label(int(d) - 1, label_arr, clust_levels_tmp)
            for d in connections
        ]
        entry["weights"] = weight_vec
        output.append(entry)

    if network:
        json_file_name = "%s%s%s%s%s%s" % (str(ID), '_', network,
                                           '_connectogram_', conn_model,
                                           '_network.json')
        json_fdg_file_name = "%s%s%s%s%s%s" % (str(ID), '_', network, '_fdg_',
                                               conn_model, '_network.json')
        connectogram_plot = "%s%s%s" % (dir_path, '/', json_file_name)
        fdg_js_sub = "%s%s%s%s%s%s%s%s" % (dir_path, '/', str(ID), '_',
                                           network, '_fdg_', conn_model,
                                           '_network.js')
        fdg_js_sub_name = "%s%s%s%s%s%s" % (str(ID), '_', network, '_fdg_',
                                            conn_model, '_network.js')
        connectogram_js_sub = "%s%s%s%s%s%s%s%s" % (dir_path, '/', str(
            ID), '_', network, '_connectogram_', conn_model, '_network.js')
        connectogram_js_name = "%s%s%s%s%s%s" % (
            str(ID), '_', network, '_connectogram_', conn_model, '_network.js')
    else:
        json_file_name = "%s%s%s%s" % (str(ID), '_connectogram_', conn_model,
                                       '.json')
        json_fdg_file_name = "%s%s%s%s" % (str(ID), '_fdg_', conn_model,
                                           '.json')
        connectogram_plot = "%s%s%s" % (dir_path, '/', json_file_name)
        connectogram_js_sub = "%s%s%s%s%s%s" % (
            dir_path, '/', str(ID), '_connectogram_', conn_model, '.js')
        fdg_js_sub = "%s%s%s%s%s%s" % (dir_path, '/', str(ID), '_fdg_',
                                       conn_model, '.js')
        fdg_js_sub_name = "%s%s%s%s" % (str(ID), '_fdg_', conn_model, '.js')
        connectogram_js_name = "%s%s%s%s" % (str(ID), '_connectogram_',
                                             conn_model, '.js')
    save_json(connectogram_plot, output)

    # Force-directed graphing
    G = nx.from_numpy_matrix(np.round(conn_matrix.astype('float64'), 6))
    data = json_graph.node_link_data(G)
    data.pop('directed', None)
    data.pop('graph', None)
    data.pop('multigraph', None)
    for k in range(len(data['links'])):
        data['links'][k]['value'] = data['links'][k].pop('weight')
    for k in range(len(data['nodes'])):
        data['nodes'][k]['id'] = str(data['nodes'][k]['id'])
    for k in range(len(data['links'])):
        data['links'][k]['source'] = str(data['links'][k]['source'])
        data['links'][k]['target'] = str(data['links'][k]['target'])

    # Add community structure
    for k in range(len(data['nodes'])):
        data['nodes'][k]['group'] = str(label_arr[0][k])

    # Add node labels
    for k in range(len(data['nodes'])):
        data['nodes'][k]['name'] = str(label_names[k])

    out_file = "%s%s%s" % (dir_path, '/', str(json_fdg_file_name))
    save_json(out_file, data)

    # Copy index.html and json to dir_path
    #conn_js_path = '/Users/PSYC-dap3463/Applications/PyNets/pynets/connectogram.js'
    #index_html_path = '/Users/PSYC-dap3463/Applications/PyNets/pynets/index.html'
    conn_js_path = str(Path(__file__).parent / "connectogram.js")
    index_html_path = str(Path(__file__).parent / "index.html")
    fdg_replacements_js = {"FD_graph.json": str(json_fdg_file_name)}
    replacements_html = {
        'connectogram.js': str(connectogram_js_name),
        'fdg.js': str(fdg_js_sub_name)
    }
    fdg_js_path = str(Path(__file__).parent / "fdg.js")
    with open(index_html_path) as infile, open(str(dir_path + '/index.html'),
                                               'w') as outfile:
        for line in infile:
            for src, target in replacements_html.items():
                line = line.replace(src, target)
            outfile.write(line)

    replacements_js = {
        'template.json': str(json_file_name),
        'interpolateCool': str(color_scheme)
    }
    with open(conn_js_path) as infile, open(connectogram_js_sub,
                                            'w') as outfile:
        for line in infile:
            for src, target in replacements_js.items():
                line = line.replace(src, target)
            outfile.write(line)

    with open(fdg_js_path) as infile, open(fdg_js_sub, 'w') as outfile:
        for line in infile:
            for src, target in fdg_replacements_js.items():
                line = line.replace(src, target)
            outfile.write(line)

    return
Example No. 34
def convert_dicoms(sid,
                   dicom_dir_template,
                   outputdir,
                   queue=None,
                   heuristic_func=None,
                   extension=None,
                   embed=False,
                   no_moco=False):

    import os
    from nipype.utils.filemanip import load_json, save_json

    sdir = dicom_dir_template % sid
    tdir = os.path.join(outputdir, sid)
    infofile = os.path.join(tdir, '%s.auto.txt' % sid)
    editfile = os.path.join(tdir, '%s.edit.txt' % sid)
    if os.path.exists(editfile) and heuristic_func:
        info = load_json(editfile)
    elif not heuristic_func:
        pass
    else:
        infofile = os.path.join(tdir, '%s.auto.txt' % sid)
        info = heuristic_func(sdir, os.path.join(tdir, 'dicominfo.txt'))
        save_json(infofile, info)

    if heuristic_func:
        for key in info:
            if not os.path.exists(os.path.join(tdir, key)):
                os.mkdir(os.path.join(tdir, key))
            for idx, ext in enumerate(info[key]):
                convertcmd = [
                    'dcmstack', sdir, '--dest-dir',
                    os.path.join(tdir, key), '--file-ext',
                    '*-%d-*' % ext, '--force-read', '-v', '--output-name',
                    key + '%03d' % (idx + 1)
                ]
                if embed:
                    convertcmd.append('--embed-meta')
                convertcmd = ' '.join(convertcmd)
                print(convertcmd)
                os.system(convertcmd)
    else:
        if not no_moco:
            convertcmd = [
                'dcmstack', sdir, '--dest-dir',
                os.path.join(outputdir, sid), '--force-read', '-v'
            ]
            if embed:
                convertcmd.append('--embed-meta')
            convertcmd = ' '.join(convertcmd)
            print(convertcmd)
            os.system(convertcmd)
        else:
            import numpy as np
            from bips.workflows.workflow19 import isMoco
            foo = np.genfromtxt(os.path.join(tdir, 'dicominfo.txt'), dtype=str)
            for f in foo:
                if not isMoco(os.path.join(sdir, f[1])):
                    convertcmd = [
                        'dcmstack', sdir, '--dest-dir',
                        os.path.join(outputdir, sid), '--force-read', '-v',
                        '--file-ext',
                        '*-%s-*' % f[2]
                    ]
                    if embed:
                        convertcmd.append('--embed-meta')
                    convertcmd = ' '.join(convertcmd)
                    print(convertcmd)
                    os.system(convertcmd)
    return 1
Example No. 35
                    tmp.set(**f)
                    foo.append(tmp)
                self.set(**{key:foo})

    def _check_fired(self):
        dg = self.create_dataflow()
        dg.run()

if __name__ == "__main__":    
    a = Data(['func','struct'])
    a.fields = []
    subs = DataBase()
    subs.name = 'subjects'
    subs.values = ['sub01','sub02','sub03']
    subs.iterable = True
    a.fields.append(subs)
    a.template_args = {"func":[["subjects"]], "struct":[["subjects"]]}
    a.base_directory = os.path.abspath('.')
    #a.configure_traits()
    dg = a.create_dataflow()
    d = a.get_fields()
    from nipype.utils.filemanip import save_json
    from .bips.workflows.base import load_json
    save_json("test.json",d)
    foo = load_json("test.json")
    b = Data()
    b.set_fields(foo)
    b.configure_traits()
    

Example No. 36
class JSONSink(IOBase):
    """ ReportSink module to write outputs to a pdf and save to json file

This interface allows arbitrary creation of input attributes. The names of 
these attributes define the Report structure to create for display of images,
tables, and filenames.

This interface **cannot** be used in a MapNode as the inputs are
defined only when the connect statement is executed.

If an input ends with a .png or .jpg, the image will be displayed in the report.
If an input is a list enclosed in more than 2 brackets,
a table will be displayed:
        ex: [[ [['Month','Day'],[7,10],[12,25]] ]] --> a 3x2 table with
        'Month' and 'Day' as column headers, 7, 10 in the first row
        and 12, 25 in the second.
Anything else is displayed as text.

Examples
--------

>>> rs = JSONSink()
>>> rs.inputs.base_directory = 'results_dir'
>>> rs.inputs.subject = 'Subject 5'
>>> rs.inputs.table = [[ [['Month','Day'],[7,10],[12,25]] ]]
>>> rs.inputs.image = 'structural.png'
>>> rs.run() # doctest: +SKIP

"""
    input_spec = JSONSinkInputSpec

    def __init__(self, orderfields=None, infields=None, **kwargs):
        """
Parameters
----------
infields : list of str
Indicates the input fields to be dynamically created
"""

        super(JSONSink, self).__init__(**kwargs)
        undefined_traits = {}
        # used for mandatory inputs check
        self._infields = infields
        if infields:
            for key in infields:
                self.inputs.add_trait(key, traits.Any)
                self.inputs._outputs[key] = Undefined
                undefined_traits[key] = Undefined

        self._orderfields = orderfields

        self.inputs.trait_set(trait_change_notify=False, **undefined_traits)

    def _substitute(self, pathstr):
        pathstr_ = pathstr
        if isdefined(self.inputs.substitutions):
            for key, val in self.inputs.substitutions:
                oldpathstr = pathstr
                pathstr = pathstr.replace(key, val)
                if pathstr != oldpathstr:
                    iflogger.debug('sub.str: %s -> %s using %r -> %r' %
                                   (oldpathstr, pathstr, key, val))
        if isdefined(self.inputs.regexp_substitutions):
            for key, val in self.inputs.regexp_substitutions:
                oldpathstr = pathstr
                pathstr, _ = re.subn(key, val, pathstr)
                if pathstr != oldpathstr:
                    iflogger.debug('sub.regexp: %s -> %s using %r -> %r' %
                                   (oldpathstr, pathstr, key, val))
        if pathstr_ != pathstr:
            iflogger.info('sub: %s -> %s' % (pathstr_, pathstr))
        return pathstr

    def _get_dst(self, src):
        path, fname = os.path.split(src)
        if self.inputs.parameterization:
            dst = path
            if isdefined(self.inputs.strip_dir):
                dst = dst.replace(self.inputs.strip_dir, '')

            if not isdefined(self.inputs.container):
                folders = [
                    folder for folder in dst.split(os.path.sep)
                    if folder.startswith('_')
                ]
            else:
                folders = [
                    folder for folder in dst.split(os.path.sep)
                    if (folder.startswith('_')
                        and not self.inputs.container in folder)
                ]

            dst = os.path.sep.join(folders).replace('_', '')

        else:
            if fname:
                dst = fname
            else:
                dst = path.split(os.path.sep)[-1]
        try:
            if dst[0] == os.path.sep:
                dst = dst[1:]
        except IndexError:
            pass
        return dst

    def _list_outputs(self):
        """Execute this module.
"""
        outdir = self.inputs.base_directory
        if not isdefined(outdir):
            outdir = os.path.abspath('.')

        if isdefined(self.inputs.container):
            print "container defined", self.inputs.container
            outdir = os.path.join(outdir, self.inputs.container)
            print outdir

        cwd = os.getcwd()
        dst = self._get_dst(os.path.join(cwd, self.inputs.json_name + '.json'))
        print "dst = ", dst
        outdir = os.path.join(outdir, dst)
        print "new outdir = ", outdir
        outdir = self._substitute(outdir)
        print "substituted outdir = ", outdir

        if not os.path.exists(outdir):
            try:
                os.makedirs(outdir)
            except OSError as inst:
                if 'File exists' in str(inst):
                    pass
                else:
                    raise

        # Begin Report
        #rep = report(os.path.abspath(os.path.join(outdir,self.inputs.report_name+'.pdf')),self.inputs.report_name)

        # Loop through all inputs
        #for key, files in self.inputs._outputs.items():

        if self._orderfields:
            order = self._orderfields
        else:
            order = self.inputs._outputs_order

        for key in order:
            files = self.inputs._outputs[key]
            if not isdefined(files):
                continue
            iflogger.debug("key: %s files: %s" % (key, str(files)))
            files = filename_to_list(files)
            tempoutdir = outdir

        # save json
        outfile = os.path.join(outdir, self.inputs.json_name + '.json')
        #try:
        save_json(outfile, self.inputs._outputs)
        print "json file ", outfile
        return None
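
The _substitute method above rewrites an output path by applying plain string substitutions followed by regular-expression substitutions. A standalone sketch of that path-rewriting idea, outside the nipype interface machinery (the substitution pairs are hypothetical):

import re

def substitute(pathstr, substitutions=(), regexp_substitutions=()):
    for key, val in substitutions:                 # plain string replacements
        pathstr = pathstr.replace(key, val)
    for pattern, repl in regexp_substitutions:     # regular-expression replacements
        pathstr, _ = re.subn(pattern, repl, pathstr)
    return pathstr

print(substitute("results/_subject_id_sub01/stats.json",
                 substitutions=[("_subject_id_", "")],
                 regexp_substitutions=[(r"stats", "summary")]))
# -> results/sub01/summary.json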
Exemplo n.º 37
0
    def _detect_outliers_core(self, imgfile, motionfile, runidx, cwd=None):
        """
        Core routine for detecting outliers
        """
        if not cwd:
            cwd = os.getcwd()
        # read in motion parameters
        mc_in = np.loadtxt(motionfile)
        mc = deepcopy(mc_in)
        if self.inputs.parameter_source == 'SPM':
            pass
        elif self.inputs.parameter_source == 'FSL':
            mc = mc[:, [3, 4, 5, 0, 1, 2]]
        elif self.inputs.parameter_source == 'Siemens':
            Exception("Siemens PACE format not implemented yet")
        else:
            Exception("Unknown source for movement parameters")

        if self.inputs.use_norm:
            # calculate the norm of the motion parameters
            normval = self._calc_norm(mc, self.inputs.use_differences[0])
            tidx = find_indices(normval > self.inputs.norm_threshold)
            ridx = find_indices(normval < 0)
        else:
            if self.inputs.use_differences[0]:
                mc = np.concatenate((np.zeros((1, 6)), np.diff(mc_in, n=1, axis=0)), axis=0)
            traval = mc[:, 0:3]  # translation parameters (mm)
            rotval = mc[:, 3:6]  # rotation parameters (rad)
            tidx = find_indices(np.sum(abs(traval) > self.inputs.translation_threshold, 1) > 0)
            ridx = find_indices(np.sum(abs(rotval) > self.inputs.rotation_threshold, 1) > 0)

        # read in functional image
        if isinstance(imgfile, str):
            nim = load(imgfile)
        elif isinstance(imgfile, list):
            if len(imgfile) == 1:
                nim = load(imgfile[0])
            else:
                images = [load(f) for f in imgfile]
                nim = funcs.concat_images(images)

        # compute global intensity signal
        (x, y, z, timepoints) = nim.get_shape()

        data = nim.get_data()
        g = np.zeros((timepoints, 1))
        masktype = self.inputs.mask_type
        if  masktype == 'spm_global':  # spm_global like calculation
            intersect_mask = self.inputs.intersect_mask
            if intersect_mask:
                mask = np.ones((x, y, z), dtype=bool)
                for t0 in range(timepoints):
                    vol = data[:, :, :, t0]
                    mask = mask * (vol > (self._nanmean(vol) / 8))
                for t0 in range(timepoints):
                    vol = data[:, :, :, t0]
                    g[t0] = self._nanmean(vol[mask])
                if len(find_indices(mask)) < (np.prod((x, y, z)) / 10):
                    intersect_mask = False
                    g = np.zeros((timepoints, 1))
            if not intersect_mask:
                for t0 in range(timepoints):
                    vol = data[:, :, :, t0]
                    mask = vol > (self._nanmean(vol) / 8)
                    g[t0] = self._nanmean(vol[mask])
        elif masktype == 'file':  # uses a mask image to determine intensity
            mask = load(self.inputs.mask_file).get_data()
            mask = mask > 0.5
            for t0 in range(timepoints):
                vol = data[:, :, :, t0]
                g[t0] = self._nanmean(vol[mask])
        elif masktype == 'thresh':  # uses a fixed signal threshold
            for t0 in range(timepoints):
                vol = data[:, :, :, t0]
                mask = vol > self.inputs.mask_threshold
                g[t0] = self._nanmean(vol[mask])
        else:
            mask = np.ones((x, y, z))
            g = self._nanmean(data[mask > 0, :], 1)

        # compute normalized intensity values
        gz = signal.detrend(g, axis=0)       # detrend the signal
        if self.inputs.use_differences[1]:
            gz = np.concatenate((np.zeros((1, 1)), np.diff(gz, n=1, axis=0)), axis=0)
        gz = (gz - np.mean(gz)) / np.std(gz)    # normalize the detrended signal
        iidx = find_indices(abs(gz) > self.inputs.zintensity_threshold)

        outliers = np.unique(np.union1d(iidx, np.union1d(tidx, ridx)))
        artifactfile, intensityfile, statsfile, normfile, plotfile = self._get_output_filenames(imgfile, cwd)

        # write output to outputfile
        np.savetxt(artifactfile, outliers, fmt='%d', delimiter=' ')
        np.savetxt(intensityfile, g, fmt='%.2f', delimiter=' ')
        if self.inputs.use_norm:
            np.savetxt(normfile, normval, fmt='%.4f', delimiter=' ')

        if isdefined(self.inputs.save_plot) and self.inputs.save_plot:
            import matplotlib.pyplot as plt
            fig = plt.figure()
            if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
                plt.subplot(211)
            else:
                plt.subplot(311)
            self._plot_outliers_with_wave(gz, iidx, 'Intensity')
            if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
                plt.subplot(212)
                self._plot_outliers_with_wave(normval, np.union1d(tidx, ridx), 'Norm (mm)')
            else:
                diff = ''
                if self.inputs.use_differences[0]:
                    diff = 'diff'
                plt.subplot(312)
                self._plot_outliers_with_wave(traval, tidx, 'Translation (mm)' + diff)
                plt.subplot(313)
                self._plot_outliers_with_wave(rotval, ridx, 'Rotation (rad)' + diff)
            plt.savefig(plotfile)
            plt.close(fig)

        motion_outliers = np.union1d(tidx, ridx)
        stats = [{'motion_file': motionfile,
                  'functional_file': imgfile},
                 {'common_outliers': len(np.intersect1d(iidx, motion_outliers)),
                  'intensity_outliers': len(np.setdiff1d(iidx, motion_outliers)),
                  'motion_outliers': len(np.setdiff1d(motion_outliers, iidx)),
                  },
                 {'motion': [{'using differences': self.inputs.use_differences[0]},
                              {'mean': np.mean(mc_in, axis=0).tolist(),
                               'min': np.min(mc_in, axis=0).tolist(),
                               'max': np.max(mc_in, axis=0).tolist(),
                               'std': np.std(mc_in, axis=0).tolist()},
                              ]},
                 {'intensity': [{'using differences': self.inputs.use_differences[1]},
                                 {'mean': np.mean(gz, axis=0).tolist(),
                                  'min': np.min(gz, axis=0).tolist(),
                                  'max': np.max(gz, axis=0).tolist(),
                                  'std': np.std(gz, axis=0).tolist()},
                                 ]},
                 ]
        if self.inputs.use_norm:
            stats.insert(3, {'motion_norm': {'mean': np.mean(normval, axis=0).tolist(),
                                             'min': np.min(normval, axis=0).tolist(),
                                             'max': np.max(normval, axis=0).tolist(),
                                             'std': np.std(normval, axis=0).tolist(),
                                    }})
        save_json(statsfile, stats)
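
The intensity branch of the routine above detrends the global signal, z-scores it, and flags time points whose absolute z-value exceeds zintensity_threshold. A minimal sketch of just that step on a synthetic signal (the threshold value here is hypothetical):

import numpy as np
from scipy import signal

rng = np.random.default_rng(0)
g = rng.normal(500.0, 2.0, size=200)      # synthetic global intensity per volume
g[50] += 20.0                             # inject an intensity spike

gz = signal.detrend(g, axis=0)            # detrend the signal
gz = (gz - gz.mean()) / gz.std()          # normalize the detrended signal
zintensity_threshold = 3.0                # hypothetical threshold
outliers = np.nonzero(np.abs(gz) > zintensity_threshold)[0]
print(outliers)                           # should include the spiked index 50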
Exemplo n.º 38
0
def cal_FN(f, lst_edit):
    kappa_array = np.linspace(0.05, 1.0, 20)
    if len(lst_edit) > 1:
        raise ValueError("More than one lst_edits files, please check")
    elif len(lst_edit) == 0:
        raise ValueError(
            "No lst_edits file is found, please check your folder")

    print("The lst_edit is in the directory: ", lst_edit)
    lga_data = [load_data(f_kappa) for f_kappa in f]
    print("There are {} elements in lga data".format(len(lga_data)))
    # print("Lga 0 is: ", lga_data[0])
    print("Check if it's 1 for mse3727: ", lga_data[0][52, 102, 47],
          lga_data[0][52, 103, 47])

    Lesion = GetCenterLesion(lst_edit)
    lesions = Lesion.run_centroids()
    ref_lesion = 'reference'
    if len(lesions[ref_lesion]['missing']) != 0:
        raise ValueError(
            "There is some centroid missed from the algorithm. "
            "Please make sure all the center points are found in the lesion.")
    else:
        print("Awesome, all the centroids are present in the lesion.")

    ref_lesion_to_kappas = {}

    for k, num in enumerate(kappa_array):
        num_str = str(num)
        kappa_name = '_kappa_' + num_str
        lesion_items = sorted(lesions[ref_lesion]['present'].items())
        print("Lesion items are: ", lesion_items)
        FN = 0
        TP = 0
        lesions[ref_lesion]['FalseNegatives'] = []
        lesions[ref_lesion]['TruePositives_to_lga'] = []
        for i, coord in lesion_items:
            print(i, coord)
            if lga_data[k][coord[0], coord[1], coord[2]] == 0:
                # if lga_data[coord['xyz'][0], coord['xyz'][1], coord['xyz'][2]] == 0:
                lesions[ref_lesion]['FalseNegatives'].append(i)
                FN += 1
                print("Oops, FN plus 1! FN is: ", FN)
            else:
                lesions[ref_lesion]['TruePositives_to_lga'].append(i)
                TP += 1
                print("Yay! TP plus 1! TP is: ", TP)

        print("FN, TP and kappa are: ", FN, TP, kappa_name)
        lesions[ref_lesion]['NumOfFN'] = FN
        lesions[ref_lesion]['NumOfTP_to_lga'] = TP
        print("After adding FN and TP, the lesion dictionary is: ", lesions)
        lesions_temp = deepcopy(lesions)
        ref_lesion_to_kappas[kappa_name] = lesions_temp
        print("After making a bigger dict to ref_lesion_to_kappas: "******"After all kappas, the ref_lesion_to_kappas looks: ",
          ref_lesion_to_kappas)
    outdir = os.path.split(os.path.split(f[0])[0])[0]
    save_json(os.path.join(outdir, "centroid_lst_edit.json"),
              ref_lesion_to_kappas)
    print("The json file is generated in the directory: ", outdir)
    return [FN, TP]
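
The FN/TP count in cal_FN reduces to checking whether each reference centroid lands on a non-zero voxel of the binary LGA map at a given kappa. A minimal sketch of that check with synthetic data (coordinates and mask are hypothetical):

import numpy as np

lga_mask = np.zeros((10, 10, 10), dtype=int)
lga_mask[5, 5, 5] = 1                                        # synthetic lesion voxel

centroids = {"lesion_1": (5, 5, 5), "lesion_2": (2, 2, 2)}   # hypothetical centroids
TP = sum(1 for coord in centroids.values() if lga_mask[coord] != 0)
FN = len(centroids) - TP
print("FN:", FN, "TP:", TP)                                  # -> FN: 1 TP: 1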
Exemplo n.º 39
0
def plot_connectogram(
    conn_matrix,
    conn_model,
    dir_path,
    ID,
    subnet,
    labels,
    comm="nodes",
    color_scheme="interpolateBlues",
    prune=False,
):
    """
    Plot a connectogram for a given connectivity matrix.

    Parameters
    ----------
    conn_matrix : array
        NxN matrix.
    conn_model : str
       Connectivity estimation model (e.g. corr for correlation, cov for
       covariance, sps for precision covariance, partcorr for
       partial correlation). sps type is used by default.
    dir_path : str
        Path to directory containing subject derivative data for given run.
    ID : str
        A subject id or other unique identifier.
    subnet : str
        Resting-state network based on Yeo-7 and Yeo-17 naming
        (e.g. 'Default') used to filter nodes in the study of brain subgraphs.
    labels : list
        List of string labels corresponding to ROI nodes.
    comm : str, optional default: 'nodes'
        Communitity setting, either 'nodes' or 'links'
    color_scheme : str, optional, default: 'interpolateBlues'
        Color scheme in json.
    prune : bool
        Indicates whether to prune final graph of disconnected nodes/isolates.

    """
    import json
    from pathlib import Path
    from networkx.readwrite import json_graph
    from pynets.core.thresholding import normalize
    from pynets.statistics.individual.algorithms import most_important, \
        link_communities, community_resolution_selection

    # from scipy.cluster.hierarchy import linkage, fcluster
    from nipype.utils.filemanip import save_json

    conn_matrix = normalize(conn_matrix)
    G = nx.from_numpy_matrix(np.abs(conn_matrix))
    if prune is True:
        [G, pruned_nodes] = most_important(G)
        conn_matrix = nx.to_numpy_array(G)

        pruned_nodes.sort(reverse=True)
        for j in pruned_nodes:
            del labels[labels.index(labels[j])]

    if comm == "nodes" and len(conn_matrix) > 40:
        G = nx.from_numpy_matrix(np.abs(conn_matrix))
        _, node_comm_aff_mat, resolution, num_comms = \
            community_resolution_selection(G)
        clust_levels = len(node_comm_aff_mat)
        clust_levels_tmp = int(clust_levels) - 1
        mask_mat = np.squeeze(np.array([node_comm_aff_mat == 0]).astype("int"))
        label_arr = (node_comm_aff_mat *
                     np.expand_dims(np.arange(1, clust_levels + 1), axis=1) +
                     mask_mat)
    elif comm == "links" and len(conn_matrix) > 40:
        # Plot link communities
        link_comm_aff_mat = link_communities(conn_matrix,
                                             type_clustering="single")[0]
        print(f"{'Found '}{str(len(link_comm_aff_mat))}{' communities...'}")
        clust_levels = len(link_comm_aff_mat)
        clust_levels_tmp = int(clust_levels) - 1
        mask_mat = np.squeeze(np.array([link_comm_aff_mat == 0]).astype("int"))
        label_arr = (link_comm_aff_mat *
                     np.expand_dims(np.arange(1, clust_levels + 1), axis=1) +
                     mask_mat)
    else:
        return

    def _get_node_label(node_idx, labels, clust_levels_tmp):
        """
        Tag a label to a given node based on its community/cluster assignment
        """
        from collections import OrderedDict

        def _write_roman(num):
            """
            Create community/cluster assignments using a Roman-Numeral
            generator.
            """
            roman = OrderedDict()
            roman[1000] = "M"
            roman[900] = "CM"
            roman[500] = "D"
            roman[400] = "CD"
            roman[100] = "C"
            roman[90] = "XC"
            roman[50] = "L"
            roman[40] = "XL"
            roman[10] = "X"
            roman[9] = "IX"
            roman[5] = "V"
            roman[4] = "IV"
            roman[1] = "I"

            def roman_num(num):
                """

                :param num:
                """
                for r in roman.keys():
                    x, y = divmod(num, r)
                    yield roman[r] * x
                    num -= r * x
                    if num > 0:
                        roman_num(num)
                    else:
                        break

            return "".join([a for a in roman_num(num)])

        rn_list = []
        node_idx = node_idx - 1
        node_labels = labels[:, node_idx]
        for k in [int(l) for i, l in enumerate(node_labels)]:
            rn_list.append(json.dumps(_write_roman(k)))
        abet = rn_list
        node_lab_alph = ".".join([
            "{}{}".format(abet[i], int(l)) for i, l in enumerate(node_labels)
        ]) + ".{}".format(labels[node_idx])
        return node_lab_alph

    output = []

    adj_dict = {}
    for i in list(G.adjacency()):
        source = list(i)[0]
        target = list(list(i)[1])
        adj_dict[source] = target

    for node_idx, connections in adj_dict.items():
        weight_vec = []
        for i in connections:
            wei = G.get_edge_data(node_idx, int(i))["weight"]
            weight_vec.append(wei)
        entry = {}
        nodes_label = _get_node_label(node_idx, label_arr, clust_levels_tmp)
        entry["name"] = nodes_label
        entry["size"] = len(connections)
        entry["imports"] = [
            _get_node_label(int(d) - 1, label_arr, clust_levels_tmp)
            for d in connections
        ]
        entry["weights"] = weight_vec
        output.append(entry)

    if subnet:
        json_file_name = (
            f"{str(ID)}{'_'}{subnet}{'_connectogram_'}{conn_model}"
            f"{'_network.json'}")
        json_fdg_file_name = (
            f"{str(ID)}{'_'}{subnet}{'_fdg_'}{conn_model}{'_network.json'}")
        connectogram_plot = f"{dir_path}{'/'}{json_file_name}"
        fdg_js_sub = f"{dir_path}{'/'}{str(ID)}{'_'}{subnet}{'_fdg_'}" \
                     f"{conn_model}{'_network.js'}"
        fdg_js_sub_name = f"{str(ID)}{'_'}{subnet}{'_fdg_'}{conn_model}" \
                          f"{'_network.js'}"
        connectogram_js_sub = (
            f"{dir_path}/{str(ID)}_{subnet}_connectogram_{conn_model}"
            f"_network.js")
        connectogram_js_name = (
            f"{str(ID)}{'_'}{subnet}{'_connectogram_'}{conn_model}"
            f"{'_network.js'}")
    else:
        json_file_name = f"{str(ID)}{'_connectogram_'}{conn_model}{'.json'}"
        json_fdg_file_name = f"{str(ID)}{'_fdg_'}{conn_model}{'.json'}"
        connectogram_plot = f"{dir_path}{'/'}{json_file_name}"
        connectogram_js_sub = (
            f"{dir_path}{'/'}{str(ID)}{'_connectogram_'}{conn_model}{'.js'}")
        fdg_js_sub = f"{dir_path}{'/'}{str(ID)}{'_fdg_'}{conn_model}{'.js'}"
        fdg_js_sub_name = f"{str(ID)}{'_fdg_'}{conn_model}{'.js'}"
        connectogram_js_name = f"{str(ID)}{'_connectogram_'}{conn_model}" \
                               f"{'.js'}"
    save_json(connectogram_plot, output)

    # Force-directed graphing
    G = nx.from_numpy_matrix(np.round(
        np.abs(conn_matrix).astype("float64"), 6))
    data = json_graph.node_link_data(G)
    data.pop("directed", None)
    data.pop("graph", None)
    data.pop("multigraph", None)
    for k in range(len(data["links"])):
        data["links"][k]["value"] = data["links"][k].pop("weight")
    for k in range(len(data["nodes"])):
        data["nodes"][k]["id"] = str(data["nodes"][k]["id"])
    for k in range(len(data["links"])):
        data["links"][k]["source"] = str(data["links"][k]["source"])
        data["links"][k]["target"] = str(data["links"][k]["target"])

    # Add community structure
    for k in range(len(data["nodes"])):
        data["nodes"][k]["group"] = str(label_arr[0][k])

    # Add node labels
    for k in range(len(data["nodes"])):
        data["nodes"][k]["name"] = str(labels[k])

    out_file = f"{dir_path}{'/'}{str(json_fdg_file_name)}"
    save_json(out_file, data)

    # Copy index.html and json to dir_path
    conn_js_path = str(Path(__file__).parent / "connectogram.js")
    index_html_path = str(Path(__file__).parent / "index.html")
    fdg_replacements_js = {"FD_graph.json": str(json_fdg_file_name)}
    replacements_html = {
        "connectogram.js": str(connectogram_js_name),
        "fdg.js": str(fdg_js_sub_name),
    }
    fdg_js_path = str(Path(__file__).parent / "fdg.js")
    with open(index_html_path) as infile, open(str(dir_path + "/index.html"),
                                               "w") as outfile:
        for line in infile:
            for src, target in replacements_html.items():
                line = line.replace(src, target)
            outfile.write(line)

    replacements_js = {
        "template.json": str(json_file_name),
        "interpolateCool": str(color_scheme),
    }
    with open(conn_js_path) as infile, open(connectogram_js_sub, "w") as \
        outfile:
        for line in infile:
            for src, target in replacements_js.items():
                line = line.replace(src, target)
            outfile.write(line)

    with open(fdg_js_path) as infile, open(fdg_js_sub, "w") as outfile:
        for line in infile:
            for src, target in fdg_replacements_js.items():
                line = line.replace(src, target)
            outfile.write(line)

    return
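
The force-directed-graph step above converts the connectivity graph to node-link JSON and writes it with save_json. A minimal sketch of that conversion on a tiny matrix (the matrix values and output file name are hypothetical), assuming networkx and nipype are installed:

import numpy as np
import networkx as nx
from networkx.readwrite import json_graph
from nipype.utils.filemanip import save_json

conn_matrix = np.array([[0.0, 0.8, 0.0],
                        [0.8, 0.0, 0.3],
                        [0.0, 0.3, 0.0]])
G = nx.from_numpy_array(np.abs(conn_matrix))     # weighted undirected graph
data = json_graph.node_link_data(G)              # node-link dict for d3-style plotting
for k in range(len(data["links"])):
    data["links"][k]["value"] = data["links"][k].pop("weight")
save_json("fdg_sketch.json", data)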
Exemplo n.º 40
0
def plot_connectogram(conn_matrix, conn_model, atlas, dir_path, ID, network, labels):
    """
    Plot a connectogram for a given connectivity matrix.

    Parameters
    ----------
    conn_matrix : array
        NxN matrix.
    conn_model : str
       Connectivity estimation model (e.g. corr for correlation, cov for covariance, sps for precision covariance,
       partcorr for partial correlation). sps type is used by default.
    atlas : str
        Name of atlas parcellation used.
    dir_path : str
        Path to directory containing subject derivative data for given run.
    ID : str
        A subject id or other unique identifier.
    network : str
        Resting-state network based on Yeo-7 and Yeo-17 naming (e.g. 'Default') used to filter nodes in the study of
        brain subgraphs.
    labels : list
        List of string labels corresponding to ROI nodes.
    """
    import json
    from pathlib import Path
    from networkx.readwrite import json_graph
    from pynets.core.thresholding import normalize
    from pynets.stats.netstats import most_important
    # from scipy.cluster.hierarchy import linkage, fcluster
    from nipype.utils.filemanip import save_json

    # Advanced Settings
    comm = 'nodes'
    pruned = False
    #color_scheme = 'interpolateCool'
    #color_scheme = 'interpolateGnBu'
    #color_scheme = 'interpolateOrRd'
    #color_scheme = 'interpolatePuRd'
    #color_scheme = 'interpolateYlOrRd'
    #color_scheme = 'interpolateReds'
    #color_scheme = 'interpolateGreens'
    color_scheme = 'interpolateBlues'
    # Advanced Settings

    conn_matrix = normalize(conn_matrix)
    G = nx.from_numpy_matrix(np.abs(conn_matrix))
    if pruned is True:
        [G, pruned_nodes] = most_important(G)
        conn_matrix = nx.to_numpy_array(G)

        pruned_nodes.sort(reverse=True)
        for j in pruned_nodes:
            del labels[labels.index(labels[j])]

    # def _doClust(X, clust_levels):
    #     """
    #     Create Ward cluster linkages.
    #     """
    #     # get the linkage diagram
    #     Z = linkage(X, 'ward')
    #     # choose # cluster levels
    #     cluster_levels = range(1, int(clust_levels))
    #     # init array to store labels for each level
    #     clust_levels_tmp = int(clust_levels) - 1
    #     label_arr = np.zeros((int(clust_levels_tmp), int(X.shape[0])))
    #     # iterate thru levels
    #     for c in cluster_levels:
    #         fl = fcluster(Z, c, criterion='maxclust')
    #         #print(fl)
    #         label_arr[c-1, :] = fl
    #     return label_arr, clust_levels_tmp

    if comm == 'nodes' and len(conn_matrix) > 40:
        from pynets.stats.netstats import community_resolution_selection
        G = nx.from_numpy_matrix(np.abs(conn_matrix))
        _, node_comm_aff_mat, resolution, num_comms = community_resolution_selection(G)
        clust_levels = len(node_comm_aff_mat)
        clust_levels_tmp = int(clust_levels) - 1
        mask_mat = np.squeeze(np.array([node_comm_aff_mat == 0]).astype('int'))
        label_arr = node_comm_aff_mat * np.expand_dims(np.arange(1, clust_levels+1), axis=1) + mask_mat
    elif comm == 'links' and len(conn_matrix) > 40:
        from pynets.stats.netstats import link_communities
        # Plot link communities
        link_comm_aff_mat = link_communities(conn_matrix, type_clustering='single')
        print("%s%s%s" % ('Found ', str(len(link_comm_aff_mat)), ' communities...'))
        clust_levels = len(link_comm_aff_mat)
        clust_levels_tmp = int(clust_levels) - 1
        mask_mat = np.squeeze(np.array([link_comm_aff_mat == 0]).astype('int'))
        label_arr = link_comm_aff_mat * np.expand_dims(np.arange(1, clust_levels+1), axis=1) + mask_mat
    else:
        return
    # elif len(conn_matrix) > 20:
    #     print('Graph too small for reliable plotting of communities. Plotting by fcluster instead...')
    #     if len(conn_matrix) >= 250:
    #         clust_levels = 7
    #     elif len(conn_matrix) >= 200:
    #         clust_levels = 6
    #     elif len(conn_matrix) >= 150:
    #         clust_levels = 5
    #     elif len(conn_matrix) >= 100:
    #         clust_levels = 4
    #     elif len(conn_matrix) >= 50:
    #         clust_levels = 3
    #     else:
    #         clust_levels = 2
    #     [label_arr, clust_levels_tmp] = _doClust(conn_matrix, clust_levels)

    def _get_node_label(node_idx, labels, clust_levels_tmp):
        """
        Tag a label to a given node based on its community/cluster assignment
        """
        from collections import OrderedDict

        def _write_roman(num):
            """
            Create community/cluster assignments using a Roman-Numeral generator.
            """
            roman = OrderedDict()
            roman[1000] = "M"
            roman[900] = "CM"
            roman[500] = "D"
            roman[400] = "CD"
            roman[100] = "C"
            roman[90] = "XC"
            roman[50] = "L"
            roman[40] = "XL"
            roman[10] = "X"
            roman[9] = "IX"
            roman[5] = "V"
            roman[4] = "IV"
            roman[1] = "I"

            def roman_num(num):
                """

                :param num:
                """
                for r in roman.keys():
                    x, y = divmod(num, r)
                    yield roman[r] * x
                    num -= (r * x)
                    if num > 0:
                        roman_num(num)
                    else:
                        break
            return "".join([a for a in roman_num(num)])
        rn_list = []
        node_idx = node_idx - 1
        node_labels = labels[:, node_idx]
        for k in [int(l) for i, l in enumerate(node_labels)]:
            rn_list.append(json.dumps(_write_roman(k)))
        abet = rn_list
        node_lab_alph = ".".join(["{}{}".format(abet[i], int(l)) for i, l in enumerate(node_labels)]) + ".{}".format(
            labels[node_idx])
        return node_lab_alph

    output = []

    adj_dict = {}
    for i in list(G.adjacency()):
        source = list(i)[0]
        target = list(list(i)[1])
        adj_dict[source] = target

    for node_idx, connections in adj_dict.items():
        weight_vec = []
        for i in connections:
            wei = G.get_edge_data(node_idx,int(i))['weight']
            weight_vec.append(wei)
        entry = {}
        nodes_label = _get_node_label(node_idx, label_arr, clust_levels_tmp)
        entry["name"] = nodes_label
        entry["size"] = len(connections)
        entry["imports"] = [_get_node_label(int(d)-1, label_arr, clust_levels_tmp) for d in connections]
        entry["weights"] = weight_vec
        output.append(entry)

    if network:
        json_file_name = "%s%s%s%s%s%s" % (str(ID), '_', network, '_connectogram_', conn_model, '_network.json')
        json_fdg_file_name = "%s%s%s%s%s%s" % (str(ID), '_', network, '_fdg_', conn_model, '_network.json')
        connectogram_plot = "%s%s%s" % (dir_path, '/', json_file_name)
        fdg_js_sub = "%s%s%s%s%s%s%s%s" % (dir_path, '/', str(ID), '_', network, '_fdg_', conn_model, '_network.js')
        fdg_js_sub_name = "%s%s%s%s%s%s" % (str(ID), '_', network, '_fdg_', conn_model, '_network.js')
        connectogram_js_sub = "%s%s%s%s%s%s%s%s" % (dir_path, '/', str(ID), '_', network, '_connectogram_', conn_model,
                                                    '_network.js')
        connectogram_js_name = "%s%s%s%s%s%s" % (str(ID), '_', network, '_connectogram_', conn_model, '_network.js')
    else:
        json_file_name = "%s%s%s%s" % (str(ID), '_connectogram_', conn_model, '.json')
        json_fdg_file_name = "%s%s%s%s" % (str(ID), '_fdg_', conn_model, '.json')
        connectogram_plot = "%s%s%s" % (dir_path, '/', json_file_name)
        connectogram_js_sub = "%s%s%s%s%s%s" % (dir_path, '/', str(ID), '_connectogram_', conn_model, '.js')
        fdg_js_sub = "%s%s%s%s%s%s" % (dir_path, '/', str(ID), '_fdg_', conn_model, '.js')
        fdg_js_sub_name = "%s%s%s%s" % (str(ID), '_fdg_', conn_model, '.js')
        connectogram_js_name = "%s%s%s%s" % (str(ID), '_connectogram_', conn_model, '.js')
    save_json(connectogram_plot, output)

    # Force-directed graphing
    G = nx.from_numpy_matrix(np.round(np.abs(conn_matrix).astype('float64'), 6))
    data = json_graph.node_link_data(G)
    data.pop('directed', None)
    data.pop('graph', None)
    data.pop('multigraph', None)
    for k in range(len(data['links'])):
        data['links'][k]['value'] = data['links'][k].pop('weight')
    for k in range(len(data['nodes'])):
        data['nodes'][k]['id'] = str(data['nodes'][k]['id'])
    for k in range(len(data['links'])):
        data['links'][k]['source'] = str(data['links'][k]['source'])
        data['links'][k]['target'] = str(data['links'][k]['target'])

    # Add community structure
    for k in range(len(data['nodes'])):
        data['nodes'][k]['group'] = str(label_arr[0][k])

    # Add node labels
    for k in range(len(data['nodes'])):
        data['nodes'][k]['name'] = str(labels[k])

    out_file = "%s%s%s" % (dir_path, '/', str(json_fdg_file_name))
    save_json(out_file, data)

    # Copy index.html and json to dir_path
    conn_js_path = str(Path(__file__).parent/"connectogram.js")
    index_html_path = str(Path(__file__).parent/"index.html")
    fdg_replacements_js = {"FD_graph.json": str(json_fdg_file_name)}
    replacements_html = {'connectogram.js': str(connectogram_js_name), 'fdg.js': str(fdg_js_sub_name)}
    fdg_js_path = str(Path(__file__).parent/"fdg.js")
    with open(index_html_path) as infile, open(str(dir_path + '/index.html'), 'w') as outfile:
        for line in infile:
            for src, target in replacements_html.items():
                line = line.replace(src, target)
            outfile.write(line)

    replacements_js = {'template.json': str(json_file_name), 'interpolateCool': str(color_scheme)}
    with open(conn_js_path) as infile, open(connectogram_js_sub, 'w') as outfile:
        for line in infile:
            for src, target in replacements_js.items():
                line = line.replace(src, target)
            outfile.write(line)

    with open(fdg_js_path) as infile, open(fdg_js_sub, 'w') as outfile:
        for line in infile:
            for src, target in fdg_replacements_js.items():
                line = line.replace(src, target)
            outfile.write(line)

    return
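
The commented-out _doClust helper above sketches a fallback community assignment based on Ward linkage and fcluster. A runnable sketch of that idea (the input matrix and number of cluster levels are hypothetical):

import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster

rng = np.random.default_rng(1)
X = rng.random((12, 12))
X = (X + X.T) / 2.0                              # symmetric toy "connectivity" matrix

clust_levels = 3
Z = linkage(X, 'ward')                           # Ward cluster linkages
label_arr = np.zeros((clust_levels - 1, X.shape[0]), dtype=int)
for c in range(1, clust_levels):                 # one row of labels per cluster level
    label_arr[c - 1, :] = fcluster(Z, c, criterion='maxclust')
print(label_arr)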
Exemplo n.º 41
0
def correct_lesions(in_csv, lesion_file, ratio_file, ants_seg, dist_radius=5):

    # Initialize report, load csv data and lesion seg data
    report = dict(FP=[],
                  FN=[],
                  base_csv=in_csv,
                  ratio_file=ratio_file,
                  dist_radius=dist_radius)
    df = pd.read_csv(in_csv)
    img = nib.load(lesion_file)
    data, aff = img.get_data(), img.affine
    report["orig_lesion_volume"] = fslstats(lesion_file)
    report["orig_num_lesions"] = num_lesions(data)
    # detect false positives and return if none are detected
    # probably a coordinate system error, or clicks are bad
    report, data = detect_FP(df, data, aff, report)
    num_success = len([r for r in report["FP"] if r["caught"]])
    # coordinate system error ??
    if len(report["FP"]) and not num_success:
        print(
            "Coordinate system error? or can't find any FP -- is this labelled correctly?",
            len(report["FP"]), num_success)
        return None, None, None

    # Write the lesion file w/ the false positives removed.
    out_path = os.path.join(
        os.path.split(os.path.split(in_csv)[0])[0], "lst_edits")
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    out_file = os.path.join(out_path, "no_FP_" + lesion_file.split("/")[-1])
    nib.Nifti1Image(data, aff).to_filename(out_file)

    # Load the ratio image
    ratio_img = nib.load(ratio_file)
    ratio, raffine = ratio_img.get_data(), ratio_img.affine

    # Get the tissue segmentation in the same space as the ratio file
    # Mask to exclude CSF
    def chopper(in_file, ratio_file):
        import tempfile
        from subprocess import check_call
        out_file = os.path.join(
            os.path.split(in_csv)[0],
            "antsSeg.nii.gz")  #tempfile.mktemp(suffix=".nii.gz")
        if not os.path.exists(out_file):
            cmd = "mri_convert -i {} -o {} --like {}".format(
                in_file, out_file, ratio_file)
            check_call(cmd.split(" "))
        return out_file

    ants_chopped = chopper(ants_seg, ratio_file)  # resample the segmentation into the ratio (alignment) space
    ants_img = nib.load(ants_chopped)
    ants_data = ants_img.get_data()
    wm_mask = ants_data >= 1  # label >= 1 keeps all labelled tissue, so CSF is not excluded here
    ratio[wm_mask == 0] = 0

    # Prepare false negatives and find them
    fn = df[df.annotation == "FN"][["x", "y", "z"]].values
    to_indices = np.round(
        np.asarray(
            list(utils.move_streamlines(fn.tolist(),
                                        np.linalg.inv(aff))))).astype(int)
    entries, fn_image = find_FN(ratio, to_indices, fn, dist_radius)
    print(ratio.shape, data.shape)
    report["FN"] += entries
    data = data + fn_image  # add the recovered false negatives to the lesion data

    # Score the false negatives, reject if less than 40% are found
    report_file = fname_presuffix(
        in_csv,
        prefix="report_dr{}_".format(dist_radius),
        newpath=out_path,
        use_ext=False,
        suffix="_{}.json".format(
            os.path.split(ratio_file)[-1]))  #long name for prov.
    stats = report_stats(report)
    num_success = len([r for r in report["FN"] if r["caught"]])
    total = len(report["FN"])
    if float(total) > 0:
        print("success score", num_success / float(total))
        if num_success / float(total) < 0.4:
            print("notgood enough")
            if os.path.exists(report_file):
                os.remove(report_file)  #this is an old report file. remove it
            return None, None, None

    # Write the final image, save the report
    out_file = os.path.join(
        out_path, "no_FP_filled_FN_dr{}_".format(dist_radius) +
        ratio_file.split("/")[-1])
    nib.Nifti1Image(data, aff).to_filename(out_file)

    report["final_lesion_vol"] = fslstats(out_file)
    report["final_lesion_count"] = num_lesions(data)
    save_json(report_file, report)
    print("\n\n\n", "OUTPUT:", out_file, "\n\n\n")
    return out_file, report, stats
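
The false-negative clicks above are mapped from scanner (mm) coordinates to voxel indices by applying the inverse of the image affine. A minimal sketch of that transform using nibabel's apply_affine rather than the dipy move_streamlines helper used in the original (the affine and coordinate values are hypothetical):

import numpy as np
from nibabel.affines import apply_affine

aff = np.array([[1.0, 0.0, 0.0, -90.0],
                [0.0, 1.0, 0.0, -126.0],
                [0.0, 0.0, 1.0, -72.0],
                [0.0, 0.0, 0.0, 1.0]])           # hypothetical image affine

mm_coords = np.array([[-30.0, -60.0, 20.0]])     # hypothetical FN click in mm
vox = np.round(apply_affine(np.linalg.inv(aff), mm_coords)).astype(int)
print(vox)                                       # -> [[60 66 92]]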
Exemplo n.º 42
0
    stats["images"] = [join(model_save_path, "with_hint.png"),
                      join(model_save_path, "without_hint.png"),
                      join(model_save_path, "without_brain.png")]
    stats["n_epoch"] = n_epochs
    stats["n_aug"] = n_aug

    return stats



# In[ ]:

from subprocess import check_call, Popen, PIPE

if __name__ == "__main__":

    stats_all = []
    for i in range(10):
        stats = run_everything("test_ak_%04d" % i, 100, 5)
        stats_all.append(stats)
        save_json("model_stats.json", stats_all)
        cmds = ['bash', "gitcmd.sh", "%04d" % i]
        proc = Popen(cmds, stdout = PIPE)
        proc.wait()
        print(proc.stdout.readlines())
        print("completed iteration", i,"\n\n")



# In[ ]:
Exemplo n.º 43
0
    def _detect_outliers_core(self, imgfile, motionfile, runidx, cwd=None):
        """
        Core routine for detecting outliers
        """
        if not cwd:
            cwd = os.getcwd()
        # read in motion parameters
        mc_in = np.loadtxt(motionfile)
        mc = deepcopy(mc_in)
        if self.inputs.parameter_source == 'SPM':
            pass
        elif self.inputs.parameter_source == 'FSL':
            mc = mc[:, [3, 4, 5, 0, 1, 2]]
        elif self.inputs.parameter_source == 'Siemens':
            Exception("Siemens PACE format not implemented yet")
        else:
            Exception("Unknown source for movement parameters")

        if self.inputs.use_norm:
            # calculate the norm of the motion parameters
            normval = self._calc_norm(mc, self.inputs.use_differences[0])
            tidx = find_indices(normval > self.inputs.norm_threshold)
            ridx = find_indices(normval < 0)
        else:
            if self.inputs.use_differences[0]:
                mc = np.concatenate((np.zeros((1, 6)), np.diff(mc_in, n=1, axis=0)), axis=0)
            traval = mc[:, 0:3]  # translation parameters (mm)
            rotval = mc[:, 3:6]  # rotation parameters (rad)
            tidx = find_indices(np.sum(abs(traval) > self.inputs.translation_threshold, 1) > 0)
            ridx = find_indices(np.sum(abs(rotval) > self.inputs.rotation_threshold, 1) > 0)

        # read in functional image
        if isinstance(imgfile, str):
            nim = load(imgfile)
        elif isinstance(imgfile, list):
            if len(imgfile) == 1:
                nim = load(imgfile[0])
            else:
                images = [load(f) for f in imgfile]
                nim = funcs.concat_images(images)

        # compute global intensity signal
        (x, y, z, timepoints) = nim.get_shape()

        data = nim.get_data()
        g = np.zeros((timepoints, 1))
        masktype = self.inputs.mask_type
        if  masktype == 'spm_global':  # spm_global like calculation
            intersect_mask = self.inputs.intersect_mask
            if intersect_mask:
                mask = np.ones((x, y, z), dtype=bool)
                for t0 in range(timepoints):
                    vol = data[:, :, :, t0]
                    mask = mask * (vol > (self._nanmean(vol) / 8))
                for t0 in range(timepoints):
                    vol = data[:, :, :, t0]
                    g[t0] = self._nanmean(vol[mask])
                if len(find_indices(mask)) < (np.prod((x, y, z)) / 10):
                    intersect_mask = False
                    g = np.zeros((timepoints, 1))
            if not intersect_mask:
                for t0 in range(timepoints):
                    vol = data[:, :, :, t0]
                    mask = vol > (self._nanmean(vol) / 8)
                    g[t0] = self._nanmean(vol[mask])
        elif masktype == 'file':  # uses a mask image to determine intensity
            mask = load(self.inputs.mask_file).get_data()
            mask = mask > 0.5
            for t0 in range(timepoints):
                vol = data[:, :, :, t0]
                g[t0] = self._nanmean(vol[mask])
        elif masktype == 'thresh':  # uses a fixed signal threshold
            for t0 in range(timepoints):
                vol = data[:, :, :, t0]
                mask = vol > self.inputs.mask_threshold
                g[t0] = self._nanmean(vol[mask])
        else:
            mask = np.ones((x, y, z))
            g = self._nanmean(data[mask > 0, :], 1)

        # compute normalized intensity values
        gz = signal.detrend(g, axis=0)       # detrend the signal
        if self.inputs.use_differences[1]:
            gz = np.concatenate((np.zeros((1, 1)), np.diff(gz, n=1, axis=0)), axis=0)
        gz = (gz - np.mean(gz)) / np.std(gz)    # normalize the detrended signal
        iidx = find_indices(abs(gz) > self.inputs.zintensity_threshold)

        outliers = np.unique(np.union1d(iidx, np.union1d(tidx, ridx)))
        artifactfile, intensityfile, statsfile, normfile, plotfile = self._get_output_filenames(imgfile, cwd)

        # write output to outputfile
        np.savetxt(artifactfile, outliers, fmt='%d', delimiter=' ')
        np.savetxt(intensityfile, g, fmt='%.2f', delimiter=' ')
        if self.inputs.use_norm:
            np.savetxt(normfile, normval, fmt='%.4f', delimiter=' ')

        if isdefined(self.inputs.save_plot) and self.inputs.save_plot:
            fig = plt.figure()
            if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
                plt.subplot(211)
            else:
                plt.subplot(311)
            self._plot_outliers_with_wave(gz, iidx, 'Intensity')
            if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
                plt.subplot(212)
                self._plot_outliers_with_wave(normval, np.union1d(tidx, ridx), 'Norm (mm)')
            else:
                diff = ''
                if self.inputs.use_differences[0]:
                    diff = 'diff'
                plt.subplot(312)
                self._plot_outliers_with_wave(traval, tidx, 'Translation (mm)' + diff)
                plt.subplot(313)
                self._plot_outliers_with_wave(rotval, ridx, 'Rotation (rad)' + diff)
            plt.savefig(plotfile)
            plt.close(fig)

        motion_outliers = np.union1d(tidx, ridx)
        stats = [{'motion_file': motionfile,
                  'functional_file': imgfile},
                 {'common_outliers': len(np.intersect1d(iidx, motion_outliers)),
                  'intensity_outliers': len(np.setdiff1d(iidx, motion_outliers)),
                  'motion_outliers': len(np.setdiff1d(motion_outliers, iidx)),
                  },
                 {'motion': [{'using differences': self.inputs.use_differences[0]},
                              {'mean': np.mean(mc_in, axis=0).tolist(),
                               'min': np.min(mc_in, axis=0).tolist(),
                               'max': np.max(mc_in, axis=0).tolist(),
                               'std': np.std(mc_in, axis=0).tolist()},
                              ]},
                 {'intensity': [{'using differences': self.inputs.use_differences[1]},
                                 {'mean': np.mean(gz, axis=0).tolist(),
                                  'min': np.min(gz, axis=0).tolist(),
                                  'max': np.max(gz, axis=0).tolist(),
                                  'std': np.std(gz, axis=0).tolist()},
                                 ]},
                 ]
        if self.inputs.use_norm:
            stats.insert(3, {'motion_norm': {'mean': np.mean(normval, axis=0).tolist(),
                                             'min': np.min(normval, axis=0).tolist(),
                                             'max': np.max(normval, axis=0).tolist(),
                                             'std': np.std(normval, axis=0).tolist(),
                                    }})
        save_json(statsfile, stats)