Example #1
    def aggregate_outputs(self, runtime=None, needed_outputs=None):

        outputs = self._outputs()

        outfile = os.path.join(os.getcwd(), 'stat_result.json')

        if runtime is None:
            try:
                min_val = load_json(outfile)['stat']
            except IOError:
                return self.run().outputs
        else:
            min_val = []
            for line in runtime.stdout.split('\n'):
                if line:
                    values = line.split()
                    if len(values) > 1:
                        min_val.append([float(val) for val in values])
                    else:
                        min_val.extend([float(val) for val in values])

            if len(min_val) == 1:
                min_val = min_val[0]
            save_json(outfile, dict(stat=min_val))
        outputs.min_val = min_val

        return outputs
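This stdout-parsing pattern repeats across most of the aggregate_outputs examples on this page. A self-contained sketch of just that logic, with an invented sample string to show the shapes it produces:

def parse_stat_lines(stdout):
    """Collect whitespace-separated floats from command output.

    Multi-value lines become sublists, single values are added flat,
    and a lone result is unwrapped from its enclosing list.
    """
    values = []
    for line in stdout.split('\n'):
        if not line:
            continue
        fields = line.split()
        if len(fields) > 1:
            values.append([float(v) for v in fields])
        else:
            values.extend([float(v) for v in fields])
    if len(values) == 1:
        values = values[0]
    return values


print(parse_stat_lines("1.5 2.5\n3.0\n"))  # [[1.5, 2.5], 3.0]
print(parse_stat_lines("42\n"))            # 42.0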
Example #2
    def aggregate_outputs(self, runtime=None, needed_outputs=None):

        outputs = self._outputs()

        outfile = os.path.join(os.getcwd(), 'stat_result.json')

        if runtime is None:
            try:
                min_val = load_json(outfile)['stat']
            except IOError:
                return self.run().outputs
        else:
            min_val = []
            for line in runtime.stdout.split('\n'):
                if line:
                    values = line.split()
                    if len(values) > 1:
                        min_val.append([float(val) for val in values])
                    else:
                        min_val.extend([float(val) for val in values])

            if len(min_val) == 1:
                min_val = min_val[0]
            save_json(outfile, dict(stat=min_val))
        outputs.min_val = min_val

        return outputs
Example #3
def create_report_json(dwi_corrected_file,
                       eddy_rms,
                       eddy_report,
                       color_fa_file,
                       anat_mask_file,
                       outlier_indices,
                       eddy_qc_file,
                       outpath=op.abspath('./report.json')):

    report = {}
    report['dwi_corrected'] = createSprite4D(dwi_corrected_file)

    b0, colorFA, mask = createB0_ColorFA_Mask_Sprites(dwi_corrected_file,
                                                      color_fa_file,
                                                      anat_mask_file)
    report['b0'] = b0
    report['colorFA'] = colorFA
    report['anat_mask'] = mask
    report['outlier_volumes'] = outlier_indices.tolist()

    with open(eddy_report, 'r') as f:
        report['eddy_report'] = f.readlines()

    report['eddy_params'] = np.genfromtxt(eddy_rms).tolist()
    eddy_qc = load_json(eddy_qc_file)
    report['eddy_quad'] = eddy_qc
    save_json(outpath, report)
    return outpath
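A hypothetical call, with every file name invented for illustration (the helpers createSprite4D and createB0_ColorFA_Mask_Sprites, plus np, load_json and save_json, are assumed to be in scope):

import numpy as np

report_path = create_report_json(
    dwi_corrected_file="dwi_corrected.nii.gz",   # made-up paths
    eddy_rms="eddy.eddy_movement_rms",
    eddy_report="eddy_report.txt",
    color_fa_file="color_fa.nii.gz",
    anat_mask_file="anat_mask.nii.gz",
    outlier_indices=np.array([4, 17]),           # volumes flagged as outliers
    eddy_qc_file="qc.json",
)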
Example #4
    def aggregate_outputs(self, runtime=None, needed_outputs=None):

        outputs = self._outputs()

        outfile = os.path.join(os.getcwd(), 'stat_result.json')

        if runtime is None:
            try:
                # 'stat' holds the path of the TS.1D file written on a
                # previous run
                of = load_json(outfile)['stat']
            except IOError:
                return self.run().outputs
        else:
            stats = []
            for line in runtime.stdout.split('\n'):
                if line:
                    values = line.split()
                    if len(values) > 1:
                        stats.append([float(val) for val in values])
                    else:
                        stats.extend([float(val) for val in values])

            if len(stats) == 1:
                stats = stats[0]
            of = os.path.join(os.getcwd(), 'TS.1D')
            with open(of, 'w') as f:
                for st in stats:
                    f.write(str(st) + '\n')
            save_json(outfile, dict(stat=of))
        outputs.stats = of

        return outputs
Example #5
    def aggregate_outputs(self, runtime=None, needed_outputs=None):

        outputs = self._outputs()

        outfile = os.path.join(os.getcwd(), 'stat_result.json')

        if runtime is None:
            try:
                # 'stat' holds the path of the TS.1D file from a previous run
                of = load_json(outfile)['stat']
            except IOError:
                return self.run().outputs
        else:
            stats = []
            for line in runtime.stdout.split('\n'):
                if line:
                    values = line.split()
                    if len(values) > 1:
                        stats.append([float(val) for val in values])
                    else:
                        stats.extend([float(val) for val in values])

            if len(stats) == 1:
                stats = stats[0]
            of = os.path.join(os.getcwd(), 'TS.1D')
            f = open(of, 'w')

            for st in stats:
                f.write(str(st) + '\n')
            f.close()
            save_json(outfile, dict(stat=of))
        outputs.stats = of

        return outputs
Example #6
def convert_dicoms(subjs, dicom_dir_template, outputdir, queue=None, heuristic_func=None, extension=None):
    """Submit conversion jobs to SGE cluster
    """
    if heuristic_func is None:
        heuristic_func = infotodict
    for sid in subjs:
        sdir = dicom_dir_template % sid
        tdir = os.path.join(outputdir, sid)
        infofile = os.path.join(tdir, "%s.auto.txt" % sid)
        editfile = os.path.join(tdir, "%s.edit.txt" % sid)
        if os.path.exists(editfile):
            info = load_json(editfile)
        else:
            infofile = os.path.join(tdir, "%s.auto.txt" % sid)
            info = heuristic_func(sdir, os.path.join(tdir, "dicominfo.txt"))
            save_json(infofile, info)
        cfgfile = os.path.join(tdir, "%s.auto.cfg" % sid)
        if write_cfg(cfgfile, info, sid, tdir, extension):
            convertcmd = ["unpacksdcmdir", "-src", sdir, "-targ", tdir, "-generic", "-cfg", cfgfile, "-skip-moco"]
            convertcmd = " ".join(convertcmd)
            if queue:
                outcmd = 'ezsub.py -n sg-%s -q %s -c "%s"' % (sid, queue, convertcmd)
            else:
                outcmd = convertcmd
            os.system(outcmd)
Example #7
def cal_lDC(mse_list, flag=None):
    # imports needed if this snippet is used standalone
    from copy import deepcopy
    from glob import glob

    import numpy as np
    from nipype.utils.filemanip import load_json

    centroids_lst_edit = {}
    centroids_lga = {}
    data_dict = {}
    data_list = [['mse', 'kappa', 'TP_to_ref', 'TP_to_lga', 'FP', 'FN', 'LesionDC']]

    for mse in mse_list:
        dir_lst_edit = ''.join(glob('/data/henry7/PBR/subjects/{0}/lst/lga/ms*/centroid_lst_edit.json'.format(mse)))
        dir_lga = ''.join(glob('/data/henry7/PBR/subjects/{0}/lst/lga/ms*/centroid_lga.json'.format(mse)))
        centroids_lst_edit[mse] = load_json(dir_lst_edit)
        centroids_lga[mse] = load_json(dir_lga)
        # print(centroids_lga[mse])

        kappa_array = np.linspace(0.05, 1.0, 20)
        TP_lga = []
        FP = []
        TP_lst_edit = []
        FN = []
        kappa_name_list = []
        DSC_list = []
        data = [['mse', 'kappa', 'TP_to_ref', 'TP_to_lga', 'FP', 'FN', 'LesionDC']]

        for i, num in enumerate(kappa_array):
            num_str = str(num)
            kappa_name = '_kappa_' + num_str

            TP_lga.append(centroids_lga[mse][kappa_name]['NumOfTP_to_ref'])
            FP.append(centroids_lga[mse][kappa_name]['NumOfFP'])
            TP_lst_edit.append(centroids_lst_edit[mse][kappa_name]['reference']['NumOfTP_to_lga'])
            FN.append(centroids_lst_edit[mse][kappa_name]['reference']['NumOfFN'])
            kappa_name_list.append(kappa_name)

            DSC = (TP_lga[i] + TP_lst_edit[i]) / (TP_lst_edit[i] + TP_lga[i] + FP[i] + FN[i])
            DSC_list.append(DSC)

            data.append([mse, num_str, TP_lga[i], TP_lst_edit[i], FP[i], FN[i], DSC])
            data_list.append([mse, num_str, TP_lga[i], TP_lst_edit[i], FP[i], FN[i], DSC])
            # print(DSC_list)
        print(data)
        data_dict[mse] = deepcopy(data)

    if flag == 'dict':
        return data_dict
    elif flag == 'list':
        return data_list
    else:
        raise ValueError("flag must be either 'dict' or 'list'")
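The Dice-style score computed inside the loop, factored into a standalone helper for clarity (a sketch; the inputs are the lesion-wise TP/FP/FN counts read from the centroid JSON files):

def lesion_dice(tp_ref, tp_lga, fp, fn):
    """Lesion-wise Dice coefficient, as in cal_lDC's inner loop."""
    return (tp_ref + tp_lga) / (tp_ref + tp_lga + fp + fn)


assert abs(lesion_dice(8, 7, 3, 2) - 0.75) < 1e-9  # (8 + 7) / 20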
Example #8
def get_data(globStr):
    from nipype.utils.filemanip import load_json, save_json
    from glob import glob
    import os
    mse_folders = sorted(glob("/data/henry7/PBR/subjects/{}".format(globStr)))

    #status_file = sorted(glob("/data/henry7/PBR/subjects/mse{}/nii/status.json".format(globStr))) #static is in root
    output_data = []
    for i, foo in enumerate(mse_folders):
        #status = load_json(foo)
        mseid = foo.split('/')[-1]
        print(mseid)
        nii_folders = sorted(glob("/data/henry7/PBR/subjects/{}/nii/status.json".format(mseid)))
        dcm_folders = sorted(glob("/working/henry_temp/PBR_dicoms/{}".format(mseid)))
        #msid = status["t1_files"][0].split('/')[-1].split('-')[0]
        if len(nii_folders) == 0:
            nii = False
        else:
            nii = True
            nii_status = load_json(os.path.join("/data/henry7/PBR/subjects/", mseid, "nii/status.json"))
            dti_count = len(nii_status["dti_files"]) # nested list, why?
            flair_count = len(nii_status["flair_files"])
            gad_count = len(nii_status["gad_files"])
            mt_count = len(nii_status["mt_files"])
            noddi_count = len(nii_status["noddi_files"])

            if "psir_files" in nii_status:
                psir_count = len(nii_status["psir_files"])
            else:
                psir_count = 0
            rsfmri_files = len(nii_status["rsfmri_files"])
            t1_count = len(nii_status["t1_files"])
            t2_count = len(nii_status["t2_files"])

        if len(dcm_folders) == 0:
            dcm = False
        else:
            dcm = True
        output_data.append({"foo": foo,
                            "mse": mseid,
                            #"msid": msid,
                            "nii_folder": nii,
                            "dicom_folder": dcm
                            #"part": None
                            })
        if nii is True:
            output_data[-1]["dti_files"] = dti_count
            output_data[-1]["flair_files"] = flair_count
            output_data[-1]["gad_files"] = gad_count
            output_data[-1]["mt_files"] = mt_count
            output_data[-1]["noddi_files"] = noddi_count
            output_data[-1]["psir_files"] = psir_count
            output_data[-1]["rsfmri_files"] = rsfmri_files
            output_data[-1]["t1_files"] = t1_count
            output_data[-1]["t2_files"] = t2_count
            # output_data[-1]["test_mse"] = mseid

    save_json(os.path.join(os.path.realpath('.'), "status.json"), output_data)
    return output_data
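A hypothetical invocation; the argument is a glob pattern expanded under /data/henry7/PBR/subjects:

data = get_data("mse*")  # scans matching exam folders, writes ./status.json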
Example #9
def test_json():
    # Simple roundtrip test of json files, just a sanity check.
    adict = dict(a='one', c='three', b='two')
    fd, name = mkstemp(suffix='.json')
    save_json(name, adict)  # save_json closes the file
    new_dict = load_json(name)
    os.unlink(name)
    yield assert_equal, sorted(adict.items()), sorted(new_dict.items())
Example #10
def test_json():
    # Simple roundtrip test of json files, just a sanity check.
    adict = dict(a='one', c='three', b='two')
    fd, name = mkstemp(suffix='.json')
    save_json(name, adict) # save_json closes the file
    new_dict = load_json(name)
    os.unlink(name)
    yield assert_equal, sorted(adict.items()), sorted(new_dict.items())
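The two tests above use nose-style yield assertions, which modern pytest no longer collects. An equivalent roundtrip check under pytest might look like this sketch (not nipype's actual test suite):

import os
from tempfile import mkstemp

from nipype.utils.filemanip import load_json, save_json


def test_json_roundtrip():
    adict = dict(a='one', c='three', b='two')
    fd, name = mkstemp(suffix='.json')
    os.close(fd)  # release our handle; save_json reopens the path itself
    save_json(name, adict)
    assert load_json(name) == adict
    os.unlink(name)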
Example #11
def convert_dicoms(sid, dicom_dir_template, outputdir, queue=None, heuristic_func=None,
                   extension=None, embed=False, no_moco=False):

    import os
    from nipype.utils.filemanip import load_json,save_json
    from glob import glob
    sdir = dicom_dir_template%sid
    tdir = os.path.join(outputdir, sid)
    infofile =  os.path.join(tdir,'%s.auto.txt' % sid)
    editfile =  os.path.join(tdir,'%s.edit.txt' % sid)
    if os.path.exists(editfile) and heuristic_func:
        info = load_json(editfile)
    elif not heuristic_func:
        pass
    else:
        infofile =  os.path.join(tdir,'%s.auto.txt' % sid)
        info = heuristic_func(sdir, os.path.join(tdir,'dicominfo.txt'))
        save_json(infofile, info)

    if heuristic_func:
        for key in info:
            if not os.path.exists(os.path.join(tdir,key)):
                os.mkdir(os.path.join(tdir,key))
            for idx, ext in enumerate(info[key]):
                convertcmd = ['dcmstack', sdir,'--dest-dir', os.path.join(tdir,key),
                              '--file-ext', '*-%d-*'%ext, '--force-read', '-v', '--output-name', key+'%03d'%(idx+1)]
                if embed:
                    convertcmd.append('--embed-meta')
                convertcmd = ' '.join(convertcmd)
                print(convertcmd)
                os.system(convertcmd)
    else:
        if not no_moco:
            convertcmd = ['dcmstack', sdir, '--dest-dir', os.path.join(outputdir,sid),
                          '--force-read','-v']
            if embed:
                convertcmd.append('--embed-meta')
            convertcmd = ' '.join(convertcmd)
            print(convertcmd)
            os.system(convertcmd)
        else:
            import numpy as np
            from bips.workflows.workflow19 import isMoco
            foo = np.genfromtxt(os.path.join(tdir,'dicominfo.txt'),dtype=str)
            for f in foo:
                if not isMoco(glob(os.path.join(sdir,f[1]))[0]):
                    convertcmd = ['dcmstack', sdir, '--dest-dir', os.path.join(outputdir,sid),
                          '--force-read','-v','--file-ext','*-%s-*'%f[2]]
                    if embed:
                        convertcmd.append('--embed-meta')
                    convertcmd = ' '.join(convertcmd)
                    print(convertcmd)
                    os.system(convertcmd)
                else:
                    print "skipping moco run %s"%f[1]
    return 1
Example #12
def csv_to_obj(obj_file, csv_file):
    from nipype.utils.filemanip import load_json, save_json, fname_presuffix
    import os
    import pandas as pd

    out_obj = fname_presuffix(csv_file, newpath = os.path.abspath("."), suffix="_xfm", use_ext=False)+".json"
    foo = load_json(obj_file)
    df = pd.read_csv(csv_file)
    df["y"] = -1*df.y
    df["x"] = -1*df.x
    #print(df.shape)#.values[:,:,:,0].tolist()
    foo["vertices"] = df.values[:,:2].tolist()
    save_json(out_obj, foo)
    return out_obj
Example #13
    def aggregate_outputs(self, runtime=None, needed_outputs=None):
        outputs = self._outputs()

        if runtime is None:
            try:
                out_info = load_json(self.inputs.out_file)[
                    self.inputs.json_attr][self.inputs.json_var]
            except IOError:
                return self.run().outputs
        else:
            out_info = []
            for line in runtime.stdout.split('\n'):
                if line:
                    values = line.split()

                    if self.inputs.json_type == 'float':
                        if len(values) > 1:
                            #out_info.append([float(val) for val in values])
                            out_info.append([val for val in values])
                        else:
                            #out_info.extend([float(val) for val in values])
                            out_info.extend([val for val in values])

                    elif self.inputs.json_type == 'integer':
                        if len(values) > 1:
                            #out_info.append([int(val) for val in values])
                            out_info.append([val for val in values])
                        else:
                            #out_info.extend([int(val) for val in values])
                            out_info.extend([val for val in values])

                    else:
                        if len(values) > 1:
                            out_info.append([val for val in values])
                        else:
                            out_info.extend([val for val in values])

            if len(out_info) == 1:
                out_info = out_info[0]
            if os.path.exists(self.inputs.out_file):
                update_minchd_json(self.inputs.out_file, out_info,
                                   self.inputs.json_var, self.inputs.json_attr)
            else:
                save_json(
                    self.inputs.out_file,
                    dict(((self.inputs.json_var,
                           dict(((self.inputs.json_attr, out_info), ))), )))

        outputs.out_file = out_info
        return outputs
Example #14
def get_msid(mseid):
    from nipype.utils.filemanip import load_json
    status = load_json(
        os.path.join(cc["output_directory"], mseid, 'alignment',
                     'status.json'))
    t1_files = status["t1_files"]
    if len(t1_files) == 0:
        raise ValueError("No T1 file is found.")
    elif len(t1_files) == 1:
        t1_file = ''.join(t1_files)
    else:
        t1_file = t1_files[0]

    msid = t1_file.split("/")[-1].split("-")[0]
    return msid
Example #15
def obj_to_csv(obj_file):
    from nipype.utils.filemanip import load_json, save_json, fname_presuffix
    import os
    import pandas as pd

    out_coords = fname_presuffix(obj_file, newpath = os.path.abspath("."), suffix="_coords", use_ext=False)+".csv"
    print("out coords", out_coords)
    foo = load_json(obj_file)
    data = foo["vertices"]
    df = pd.DataFrame(data,columns=["x","y","z"])
    df["t"] = 1
    df["y"] = df["y"]*-1
    df["x"] = df["x"]*-1
    df.to_csv(out_coords, index=None, header = None)
    return out_coords
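obj_to_csv is the inverse of csv_to_obj from Example #12: it dumps JSON vertices to CSV while negating x and y, and csv_to_obj negates them back when writing the edited coordinates into a new JSON. A hypothetical roundtrip (file names invented):

coords_csv = obj_to_csv("surface.json")           # -> ./surface_coords.csv
# ... edit the CSV externally ...
new_obj = csv_to_obj("surface.json", coords_csv)  # -> ./surface_coords_xfm.json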
Example #16
    def from_json(self):
        J = load_json(self._name)
        for key, ent in self.entries.items():
            if isinstance(ent, Entry):
                ent.delete(0, END)
                try:
                    ent.insert(0, J[key])
                except KeyError:
                    continue
            else:
                try:
                    if J[key]:
                        ent.select()
                except KeyError:
                    continue
Example #17
def upload_function():
    if request.method == 'POST':
        upload_putpath = 'uploaded_files/'
        if not os.path.exists(upload_putpath):
            os.makedirs(upload_putpath)
        json_path = os.path.join(upload_putpath, 'myuploads.json')

        if os.path.exists(json_path):
            myuploads = load_json(json_path)
            my_tasks = list(myuploads.keys())
        else:
            myuploads = {}
            my_tasks = []

        f_image = request.files['image_file']
        f_mask = request.files['mask_file']
        print("hello!!!")
        #fname_image, ext_image = os.path.splitext(secure_filename(f_image.filename))
        #fname_mask, ext_mask = os.path.splitext(secure_filename(f_mask.filename))
        fname_image = os.path.basename(secure_filename(f_image.filename))
        fname_mask = os.path.basename(secure_filename(f_mask.filename))

        image_savepath = upload_putpath + 'image_' + fname_image
        mask_savepath = upload_putpath + 'mask_' + fname_mask

        f_image.save(image_savepath)
        f_mask.save(mask_savepath)

        slice_direction = request.form['slice_direction']
        task_type = request.form['task_type']
        min_Nvox = request.form['min_Nvox']
        print(slice_direction, task_type, min_Nvox)

        if task_type in my_tasks:
            myuploads[task_type].append(fname_image)
        else:
            myuploads[task_type] = [fname_image]

        save_json_pretty(os.path.join(upload_putpath, 'myuploads.json'),
                         myuploads)
        create_tiles(image_savepath, mask_savepath, slice_direction,
                     'tile_files/', int(min_Nvox), 1, False, None)

        if len(fname_image) > 0 and len(fname_mask) > 0:
            return 'file uploaded successfully'
        else:
            return "UHOH: please upload a valid file"
Example #18
def upload_function():
    if request.method == 'POST':
        upload_putpath = 'uploaded_files/'
        if not os.path.exists(upload_putpath):
            os.makedirs(upload_putpath)
        json_path = os.path.join(upload_putpath, 'myuploads.json')

        if os.path.exists(json_path):
            myuploads = load_json(json_path)
        else:
            myuploads = {}

        f_image = request.files['image_file']
        f_mask = request.files['mask_file']
        slice_direction = request.form['slice_direction']
        task_type = request.form['task_type']
        min_Nvox = request.form['min_Nvox']
        ptid = request.form['patient_id']

        fname_image = os.path.basename(secure_filename(f_image.filename))
        fname_mask = os.path.basename(secure_filename(f_mask.filename))

        #Make the json entry
        myuploads['_'.join([task_type,ptid])] = {'patient_id':ptid,'mask_filename':secure_filename(f_mask.filename), \
        'image_filename':secure_filename(f_image.filename), \
        'Nvox_threshold':min_Nvox, 'task_type':task_type, \
        'slice_direction':slice_direction}

        #Save images in upload directory
        image_savepath = upload_putpath + ptid + '_image.nii.gz'
        mask_savepath = upload_putpath + ptid + '_mask.nii.gz'
        f_image.save(image_savepath)
        f_mask.save(mask_savepath)

        save_json_pretty(os.path.join(upload_putpath, 'myuploads.json'),
                         myuploads)

        #create tiles from the nifti image and save in tile directory
        create_tiles(image_savepath, mask_savepath, slice_direction,
                     os.path.join('tile_files', ptid, slice_direction),
                     int(min_Nvox), 1, False, None)

        if len(fname_image) > 0 and len(fname_mask) > 0:
            return 'file uploaded'
        else:
            return "UHOH: please upload a valid file"
Example #19
def run_workflow(msid):
    print("jim_substraction msID [-o <output directory>]")
    config = get_config()
    # msid = sys.argv[1]
    print("msID is: ", msid, "\n")
    """
    # This is not implemented so far
    #TODO
    if sys.argv.__len__() == 4:
        out_dir = sys.argv[3]
        print("Output directory is: ", out_dir)
    """

    status = load_json(
        os.path.join(config["output_directory"], msid, 't1Ants_reg_long',
                     'status.json'))
    fixed_list = status["fixed_image"]
    # warped_list = status["warped_brain"]
    warped_list = status["affined_brain"]
    mseIDs = status["mseIDs"]

    if len(fixed_list) + 1 != len(mseIDs) or len(warped_list) + 1 != len(
            mseIDs):
        raise NotImplementedError(
            "The script assuming the list is one dimension, please modify it")

    for i, fixed in enumerate(fixed_list):
        print(fixed, warped_list[i])
        wf = create_jim_workflow(config, fixed, warped_list[i])

        wf.config = {
            "execution": {
                "crashdump_dir":
                os.path.join(
                    config["crash_directory"],
                    os.path.split(fixed)[1][0:-7] + '-' +
                    os.path.split(warped_list[i])[1][0:-7], 'jim_substraction')
            }
        }
        wf.run()

    outputs = create_status(config, msid, mseIDs)
    save_json(
        os.path.join(config["james_output_dir"], msid, 'substraction',
                     'status.json'), outputs)
    return None
Example #20
def prepare_eddy_textfiles_fct(bval_file, acq_str, json_file=""):
    import os
    import numpy as np
    from nipype.utils.filemanip import load_json

    acq_file = os.path.abspath("acq.txt")
    index_file = os.path.abspath("index.txt")

    if "{TotalReadoutTime}" in acq_str:
        bids_json = load_json(json_file)
        acq_str = acq_str.format(TotalReadoutTime=bids_json["TotalReadoutTime"])

    with open(acq_file, "w") as fi:
        fi.write(acq_str)

    n_dirs = np.loadtxt(bval_file).shape[0]
    i = np.ones(n_dirs).astype(int)
    np.savetxt(index_file, i, fmt="%d", newline=' ')
    return acq_file, index_file
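A hypothetical call: acq_str is a template for one line of eddy's --acqp file, and the {TotalReadoutTime} placeholder is filled from the BIDS sidecar (all file names invented):

acq_file, index_file = prepare_eddy_textfiles_fct(
    bval_file="dwi.bval",
    acq_str="0 -1 0 {TotalReadoutTime}",
    json_file="dwi.json",
)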
Example #21
 def aggregate_outputs(self, runtime=None):
     outputs = self._outputs()
     outfile = os.path.join(os.getcwd(), 'stat_result.json')
     if runtime is None:
         out_stat = load_json(outfile)['stat']
     else:
         out_stat = []
         for line in runtime.stdout.split('\n'):
             if line:
                 values = line.split()
                 if len(values)>1:
                     out_stat.append([float(val) for val in values])
                 else:
                     out_stat.extend([float(val) for val in values])
         if len(out_stat)==1:
             out_stat = out_stat[0]
         save_json(outfile, dict(stat=out_stat))
     outputs.out_stat = out_stat
     return outputs
Example #22
def convert_dicoms(sid, dicom_dir_template, outputdir, queue=None, heuristic_func=None,
                   extension=None, embed=False):

    import os
    from nipype.utils.filemanip import load_json,save_json

    sdir = dicom_dir_template%sid
    tdir = os.path.join(outputdir, sid)
    infofile =  os.path.join(tdir,'%s.auto.txt' % sid)
    editfile =  os.path.join(tdir,'%s.edit.txt' % sid)
    if os.path.exists(editfile) and heuristic_func:
        info = load_json(editfile)
    elif not heuristic_func:
        pass
    else:
        infofile =  os.path.join(tdir,'%s.auto.txt' % sid)
        info = heuristic_func(sdir, os.path.join(tdir,'dicominfo.txt'))
        save_json(infofile, info)

    if heuristic_func:
        for key in info:
            if not os.path.exists(os.path.join(tdir,key)):
                os.mkdir(os.path.join(tdir,key))
            for idx, ext in enumerate(info[key]):
                convertcmd = ['dcmstack', sdir,'--dest-dir', os.path.join(tdir,key),
                              '--file-ext', '*-%d-*'%ext, '--force-read', '-v', '--output-name', key+'%03d'%(idx+1)]
                if embed:
                    convertcmd.append('--embed-meta')
                convertcmd = ' '.join(convertcmd)
                print(convertcmd)
                os.system(convertcmd)
    else:
        convertcmd = ['dcmstack', sdir, '--dest-dir', os.path.join(outputdir,sid),
                      '--force-read','-v']
        if embed:
            convertcmd.append('--embed-meta')
        convertcmd = ' '.join(convertcmd)
        print(convertcmd)
        os.system(convertcmd)
    return 1
Example #23
def art_output(art_file, intensity_file, stats_file):
    import numpy as np
    from nipype.utils.filemanip import load_json
    import os
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    try:
        out = np.asarray(np.genfromtxt(art_file))
    except Exception:
        out = np.asarray([])
    table = [["file", art_file], ["num outliers",
                                  str(out.shape)], ["timepoints",
                                                    str(out)]]
    stats = load_json(stats_file)
    for s in stats:
        for key, item in s.items():
            if isinstance(item, dict):
                table.append(['+' + key, ''])
                for sub_key, sub_item in item.items():
                    table.append(['  ' + sub_key, str(sub_item)])
            elif isinstance(item, list):
                table.append(['+' + key, ''])
                for s_item in item:
                    for sub_key, sub_item in s_item.items():
                        table.append(['  ' + sub_key, str(sub_item)])
            else:
                table.append([key, str(item)])
    print(table)
    intensity = np.genfromtxt(intensity_file)
    intensity_plot = os.path.abspath('global_intensity.png')
    plt.figure(1, figsize=(8, 3))
    plt.xlabel('Volume')
    plt.ylabel("Global Intensity")
    plt.plot(intensity)
    plt.savefig(intensity_plot, bbox_inches='tight')
    plt.close()
    return table, out.tolist(), intensity_plot
Example #24
 def aggregate_outputs(self, runtime=None, needed_outputs=None):
     outputs = self._outputs()
     # local caching for backward compatibility
     outfile = os.path.join(os.getcwd(), 'stat_result.json')
     if runtime is None:
         try:
             out_stat = load_json(outfile)['stat']
         except IOError:
             return self.run().outputs
     else:
         out_stat = []
         for line in runtime.stdout.split('\n'):
             if line:
                 values = line.split()
                 if len(values) > 1:
                     out_stat.append([float(val) for val in values])
                 else:
                     out_stat.extend([float(val) for val in values])
         if len(out_stat) == 1:
             out_stat = out_stat[0]
         save_json(outfile, dict(stat=out_stat))
     outputs.out_stat = out_stat
     return outputs
Example #26
def create_image(image, mask, output_file, size=1):
    mask_data = load_json(mask)
    image_data = plt.imread(image)

    mask_arr = np.zeros((image_data.shape[0], image_data.shape[1]))

    for ikey, vald in mask_data.items():
        for jkey, val in vald.items():
            # JSON object keys are strings, so cast them before indexing
            mask_arr[int(jkey), int(ikey)] = val

    mask_arr[mask_arr == 0] = np.nan

    fig = plt.figure(frameon=False)
    fig.set_size_inches(
        float(image_data.shape[1]) / image_data.shape[0] * size, 1 * size)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)

    ax.imshow(image_data)
    ax.imshow(mask_arr, cmap=plt.cm.autumn_r, alpha=0.5)

    plt.savefig(output_file)
    return output_file
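A hypothetical usage; the mask JSON is assumed to map column index to {row index: value}, matching how the loop above indexes mask_arr (both paths invented):

overlay = create_image("slice.png", "mask.json", "overlay.png", size=2)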
Example #27
def art_output(art_file, intensity_file, stats_file):
    import numpy as np
    from nipype.utils.filemanip import load_json
    import os
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    try:
        out = np.asarray(np.genfromtxt(art_file))
    except Exception:
        out = np.asarray([])
    table = [["file", art_file], ["num outliers", str(out.shape)],
             ["timepoints", str(out)]]
    stats = load_json(stats_file)
    for s in stats:
        for key, item in s.items():
            if isinstance(item, dict):
                table.append(['+' + key, ''])
                for sub_key, sub_item in item.items():
                    table.append(['  ' + sub_key, str(sub_item)])
            elif isinstance(item, list):
                table.append(['+' + key, ''])
                for s_item in item:
                    for sub_key, sub_item in s_item.items():
                        table.append(['  ' + sub_key, str(sub_item)])
            else:
                table.append([key, str(item)])
    print(table)
    intensity = np.genfromtxt(intensity_file)
    intensity_plot = os.path.abspath('global_intensity.png')
    plt.figure(1, figsize=(8, 3))
    plt.xlabel('Volume')
    plt.ylabel("Global Intensity")
    plt.plot(intensity)
    plt.savefig(intensity_plot, bbox_inches='tight')
    plt.close()
    return table, out.tolist(), intensity_plot
Example #28
    pipl.inputs.inputspec.postb0 = postop_b0
    pipl.inputs.inputspec.postmask = postop_mask
    pipl.inputs.inputspec.postxfm = postop_xfm

    pipl.run()


from nipype.utils.filemanip import load_json

debug = True

diff_basepath = '/data/henry8/jordan/prepost/patients/edu_patients'
ptjson = '/home/kjordan/python_code/myscripts/prepost_pipeline/edu_patients.json'

pbr_basepath = '/data/henry8/jordan/prepost/diffpype'
mylist = load_json(ptjson)

step2_basepath = '/data/henry8/jordan/prepost/diffpype_step2'

workingdir = '/scratch/henry_temp/kesshi/wd_prepost_pipe/step2'
identity_xfm = '/home/kjordan/python_code/myscripts/prepost_pipeline/identity.aff'


i=0
UHOH=[]

if debug:
    mylist = mylist[0:1]


for pt in mylist:
Example #29
    assert(args.base_exam is not None)
    t1_files = get_t1(args.base_exam)
    if len(t1_files) > 1 and args.base_t1 is None:
        raise Exception("need to specify base t1 since we found >1 t1 file")

    if args.base_t1:
        main_t1 = [t for t in t1_files if args.base_t1 in t][0]
    else:
        main_t1 = t1_files[0]

    print("located a base t1", main_t1)
    msid = get_msid(main_t1)
    print("msid is", msid)
    msid_status = join(bd, msid, "align", "status.json")
    assert(exists(msid_status))
    msid_status = load_json(msid_status)
    base_mse = msid_status["mse_order"][0]

    if args.target_exam is None:
        args.target_exam = base_mse

    target_t1 = get_t1(args.target_exam)[0]
    target_msid = get_msid(target_t1)
    assert(target_msid == msid)

    #find baseline mse from align_long workflow
    if base_mse == args.target_exam:
        print("mapping to baseline")
        affines = msid_status["affines"]
        aff = [a for a in affines if get_name(main_t1) in a]
        assert(len(aff)==1)
Example #30
def convert_dicoms(sid, dicom_dir_template, outputdir, queue=None, heuristic_func=None,
                   extension=None, embed=False, no_moco=False):

    import os
    from nipype.utils.filemanip import load_json,save_json
    from glob import glob
    sdir = dicom_dir_template%sid
    tdir = os.path.join(outputdir, sid)
    infofile =  os.path.join(tdir,'%s.auto.txt' % sid)
    editfile =  os.path.join(tdir,'%s.edit.txt' % sid)
    if os.path.exists(editfile) and heuristic_func:
        info = load_json(editfile)
    elif not heuristic_func:
        pass
    else:
        infofile =  os.path.join(tdir,'%s.auto.txt' % sid)
        info = heuristic_func(sdir, os.path.join(tdir,'dicominfo.txt'))
        save_json(infofile, info)

    if heuristic_func:
        import dicom
        import dcmstack
        for key in info:
            if not os.path.exists(os.path.join(tdir,key)):
                os.mkdir(os.path.join(tdir,key))
            for idx, ext in enumerate(info[key]):
                src = glob(os.path.join(sdir,'*-%d-*'%ext))
                print(key)
                dcm = dcmstack.DicomStack()
                added_success = True
                for f in src:
                    try:
                        dcm.add_dcm(dicom.read_file(f, force=True))
                    except Exception:
                        added_success = False
                        print("error adding %s to stack" % f)
                if added_success:
                    a = dcm.to_nifti(embed_meta = embed)
                    a.to_filename(os.path.join(tdir,key,key+'%03d'%(idx+1)))
                #convertcmd = ['dcmstack', sdir,'--dest-dir', os.path.join(tdir,key),
                #              '--file-ext', '*-%d-*'%ext, '--force-read', '-v', '--output-name', key+'%03d'%(idx+1)]
                #if embed:
                #    convertcmd.append('--embed-meta')
                #convertcmd = ' '.join(convertcmd)
                #print convertcmd
                #os.system(convertcmd)
    else:
        if not no_moco:
            convertcmd = ['dcmstack', sdir, '--dest-dir', os.path.join(outputdir,sid),
                          '--force-read','-v']
            if embed:
                convertcmd.append('--embed-meta')
            convertcmd = ' '.join(convertcmd)
            print(convertcmd)
            os.system(convertcmd)
        else:
            import numpy as np
            def isMoco(dcmfile):
                """Determine if a dicom file is a mocoseries
                """
                import subprocess
                print(dcmfile)
                cmd = ['mri_probedicom', '--i', dcmfile, '--t', '8', '103e']
                proc  = subprocess.Popen(cmd,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
                stdout, stderr = proc.communicate()
                # stdout is bytes under Python 3, so decode before matching
                return stdout.decode().strip().startswith('MoCoSeries')

            foo = np.genfromtxt(os.path.join(tdir,'dicominfo.txt'),dtype=str)
            for f in foo:
                if not isMoco(glob(os.path.join(sdir,f[1]))[0]):
                    convertcmd = ['dcmstack', sdir, '--dest-dir', os.path.join(outputdir,sid),
                          '--force-read','-v','--file-ext','*-%s-*'%f[2]]
                    if embed:
                        convertcmd.append('--embed-meta')
                    convertcmd = ' '.join(convertcmd)
                    print(convertcmd)
                    os.system(convertcmd)
                else:
                    print "skipping moco run %s"%f[1]
    return 1
#!/usr/bin/env python
import numpy as np
import pbr
import os
from nipype.utils.filemanip import load_json
from pbr.workflows.nifti_conversion.utils import description_renamer
heuristic = load_json(os.path.join(os.path.split(pbr.__file__)[0], "heuristic.json"))["filetype_mapper"]
import pandas as pd
from subprocess import Popen, PIPE
import argparse
from os.path import join

get_numerical_msid = lambda x: str(int(x[-4:]))

def get_diff(x):
    get_year = lambda x: int(x[:4])
    x["year"] = x.date.map(get_year)
    return x.year.max() - x.year.min()
    
def get_cohort(counts, min_number_tp, min_time_difference):
    cohort = counts.loc[counts.times[counts.times>=min_time_difference].index][counts.mse>=min_number_tp]
    return cohort

def get_cohort_counts(counts, min_number_tp, min_time_difference):
    cohort = counts.loc[counts.times[counts.times>=min_time_difference].index][counts.mse>=min_number_tp]
    return {"exams": cohort.sum().mse, "subjects": cohort.count().mse, 
            "min_num_tp": min_number_tp, "min_time_diff": min_time_difference}

def get_exams(df_final, msids):
    out_idx = np.in1d(df_final.msid.values, msids)
    if mc_folder != "mindcontrol":
        raise Exception("This is not a mindcontrol folder")
    if roi_folder != "rois":
        raise Exception("this is not a mindcontrol folder")

    entry_finder = {"subject_id": subid, "entry_type": wf_name, "name": seq_name}
    print("entry finder is", entry_finder)
    return entry_finder


edit_type_dict = {"dura": run_dura_edit}

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--in_file', dest="in_file")
    parser.add_argument("-z", "--edit_type", dest="edit_type")
    parser.add_argument('-e', '--env', dest="env")
    config = load_json(os.path.join(os.path.split(__file__)[0], "config.json"))

    args = parser.parse_args()

    if args.env in ["development", "production"]:
        env = args.env
        meteor_port = config[env]["meteor_port"]
        if args.edit_type in edit_type_dict.keys():
            entry_finder = get_info_from_path(args.in_file)
            edit_type_dict[args.edit_type](entry_finder, args.in_file, meteor_port)
        else:
            raise Exception("edit type must be in", " ".join(edit_type_dict.keys()))

import argparse
import pbr
import os
from glob import glob
from pbr.base import _get_output
from subprocess import Popen, PIPE
from getpass import getpass
import json
from nipype.utils.filemanip import load_json
heuristic = load_json(os.path.join(os.path.split(pbr.__file__)[0], "heuristic.json"))["filetype_mapper"]
from pbr.workflows.nifti_conversion.utils import description_renamer
import pandas as pd
import shutil

password = getpass("mspacman password: "******"mse")[-1]
    cmd = ["ms_dcm_exam_info", "-t", num]
    proc = Popen(cmd, stdout=PIPE)
    lines = [description_renamer(" ".join(l.decode("utf-8").split()[1:-1])) for l in proc.stdout.readlines()[8:]]
    sequence_name = ""
    nii_type = sequence
    if nii_type:
        try:
            sequence_name = filter_files(lines, nii_type, heuristic)[0]
            print(sequence_name, "This is the {0}".format(sequence))
        except:
            pass
        return sequence_name
def upload_function():
   if request.method == 'POST':
      upload_putpath = 'uploaded_files/'
      if not os.path.exists(upload_putpath):
          os.makedirs(upload_putpath)
      json_path = 'static/myuploads.json'

      if os.path.exists(json_path):
          myuploads = load_json(json_path)
      else:
          myuploads = []

      f_image = request.files['image_file']
      f_mask = request.files['mask_file']
      slice_direction = request.form['slice_direction']
      task_type = request.form['task_type']
      min_Nvox = request.form['min_Nvox']
      ptid = request.form['patient_id']

      fname_image = os.path.basename(secure_filename(f_image.filename))
      fname_mask = os.path.basename(secure_filename(f_mask.filename))

      #Make the json entry
      myuploads.append({'patient_id':ptid,'mask_filename':secure_filename(f_mask.filename), \
      'image_filename':secure_filename(f_image.filename), \
      'Nvox_threshold':min_Nvox, 'task_type':task_type, \
      'slice_direction':slice_direction})

      #Save images in upload directory
      image_savepath = upload_putpath+ptid+'_image.nii.gz'
      mask_savepath = upload_putpath+ptid+'_mask.nii.gz'
      f_image.save(image_savepath)
      f_mask.save(mask_savepath)

      save_json_pretty(json_path, myuploads)

      #create tiles from the nifti image and save in tile directory
      create_tiles(image_savepath, mask_savepath, slice_direction,
                   os.path.join('tile_files', ptid, slice_direction),
                   int(min_Nvox), 1, False, None)

      tilepath = os.path.join('tile_files', ptid, slice_direction)
      tilelist = os.listdir(tilepath)
      tilelist = [x for x in tilelist if '.png' in x]
      tilepathlist = [os.path.join(tilepath, j) for j in tilelist]

      sliceNlist = [int(b.split('_')[-1].split('.')[0]) for b in tilelist]
      sliceNlist.sort()
      sorted_tlist = [('%s_%s.png' % (slice_direction, str(n))) for n in sliceNlist]
      sorted_tplist = [os.path.join(tilepath, j) for j in sorted_tlist]


      print(tilepathlist)

      app.add_url_rule('/'+tilepath, endpoint='css', view_func=app.send_static_file)


      mylist = []
      for item in sorted_tplist:
          with open(item, 'rb') as image_file:
              encoded_string = base64.b64encode(image_file.read()).decode('utf-8')
          mylist.append(encoded_string)


      if len(fname_image) > 0 and len(fname_mask) >0:
          return render_template("image.html", **{"example": mylist})
      else:
          return "UHOH: please upload a valid file"
Example #35
def group_things(list_of_jsons):
    """Fields to save in output csv
- Subject id
- Num Outliers
- Mincost
- All tsnr values (0 for missing values)
"""
    import numpy as np
    from nipype.utils.filemanip import load_json
    from bips.workflows.gablab.wips.fmri.preprocessing.group_preproc_QA import extract_snr, extract_art
    #from bips.workflows.group_preproc_QA import extract_art

    snr_names = []
    snr_dict = {}

    for tmp in list_of_jsons:
        a = load_json(tmp)
        names = [b[0] for b in a['SNR_table'][0][1:]]
        snr_names += names
        snr_names = np.unique(snr_names).tolist()

    for name in snr_names:
        snr_dict[name] = []

    mincost = []
    common_outliers = []
    total_outliers = []
    intensity_outliers = []
    motion_outliers = []
    subject_id = []

    all_fields = [
        'subject_id', 'total_outliers', 'mincost', 'motion_outliers',
        'intensity_outliers', 'common_outliers'
    ] + snr_names
    dtype = [('subject_id', '|S20')] + [(str(n), 'f4') for n in all_fields[1:]]
    arr = np.zeros(len(list_of_jsons), dtype=dtype)

    for fi in list_of_jsons:
        f = load_json(fi)
        subject_id.append(f['subject_id'])
        mot, inten, com, out = extract_art(f['art'])
        motion_outliers.append(mot)
        intensity_outliers.append(inten)
        common_outliers.append(com)
        total_outliers.append(out)
        mincost.append(f['mincost'][0])
        for n in snr_names:
            t = extract_snr(f['SNR_table'], n)
            snr_dict[n].append(t)

    arr['subject_id'] = subject_id
    arr['total_outliers'] = total_outliers
    arr['mincost'] = mincost
    arr['motion_outliers'] = motion_outliers
    arr['intensity_outliers'] = intensity_outliers
    arr['common_outliers'] = common_outliers

    for key, item in snr_dict.items():
        arr[key] = item

    import os
    from matplotlib.mlab import rec2csv
    outfile = os.path.abspath('grouped_metrics.csv')
    rec2csv(arr, outfile)
    return outfile
Example #36
    t1_check['more_than_one'] = {}
    t1_check['no_alignment'] = {}
    no_alignment = []

    for ms in msid_sorted:
        t1_check['missing'][ms] = []
        t1_check['more_than_one'][ms] = []
        t1_check['no_alignment'][ms] = []

    for index, row in df.iterrows():
        print(index, row['msid'], row['mse'])

        status_dir = os.path.join(config["output_directory"], row['mse'],
                                  'alignment', 'status.json')
        try:
            status = load_json(status_dir)
        except IOError:
            print("No alignment status.json file, please check that directory")
            t1_check['no_alignment'][row['msid']].append(row['mse'])
            no_alignment.append(row['msid'])
            continue  # 'status' is undefined here, so skip this exam

        if len(status['t1_files']) == 0:
            t1_check['missing'][row['msid']].append(row['mse'])
        elif len(status['t1_files']) > 1:
            t1_check['more_than_one'][row['msid']].append(row['mse'])

    for ms in msid_sorted:
        if t1_check['missing'].get(ms) == []:
            t1_check['missing'].pop(ms, None)
        if t1_check['more_than_one'].get(ms) == []:
            t1_check['more_than_one'].pop(ms, None)
Example #37
def get_t1(mse):
    status = join(bd, mse, "nii", "status.json")
    assert(exists(status))
    info = load_json(status)
    return info["t1_files"]
Example #38
def get_mseid(msid, mse_reversed, lesion_mse):
    from subprocess import call
    lesion_idx = mse_reversed.index(lesion_mse)
    print("lesion_idx is:", lesion_idx)
    mse_list1 = mse_reversed[lesion_idx:]
    print("mse_list1 is:", mse_list1)
    mse_list2_bc = mse_reversed[:lesion_idx]
    mse_list2_bc.reverse()
    mse_list2 = mse_list2_bc
    print("mse_list2 is:", mse_list2)
    mse_tp1 = ''
    mse_tp2 = ''
    # Exclude the lesion_mse

    check_tlc = glob(
        os.path.join(cc["output_directory"], lesion_mse, 'tlc', 'status.json'))
    if len(check_tlc) == 0:
        print("No T1 lesions found in tlc folder, running pbr first...")
        cmd = ['pbr', lesion_mse, '-w', 'tlc', '-R']
        call(cmd)

    print("searching for lesion_reg folder for registered lesions.")
    for mse_idx, mse in enumerate(mse_list1):
        if mse == lesion_mse:
            check_t1_lesion = check_tlc
        else:
            t1_status = load_json(
                os.path.join(cc["output_directory"], mse, 'alignment',
                             'status.json'))
            t1_files = t1_status["t1_files"]
            assert len(t1_files) >= 1
            t1_file = max(t1_files)
            t1_name = split_filename(t1_file)[1]
            check_t1_lesion = glob(
                os.path.join(cc["output_directory"], mse, 'mindcontrol',
                             t1_name, 'transform', 'lst_edit',
                             'no_FP_filled_FN_dr2*'))
        if mse != lesion_mse:
            if len(check_t1_lesion) == 0:
                mse_tp1 = mse_list1[mse_idx - 1]
                mse_tp2 = mse_list1[mse_idx]
                break
    if mse_tp1 == '' and mse_tp2 == '' and len(mse_list2) != 0:
        for mse_idx, mse in enumerate(mse_list2):
            t1_status = load_json(
                os.path.join(cc["output_directory"], mse, 'alignment',
                             'status.json'))
            t1_files = t1_status["t1_files"]
            assert len(t1_files) >= 1
            t1_file = max(t1_files)
            t1_name = split_filename(t1_file)[1]
            check_t1_lesion = glob(
                os.path.join(cc["output_directory"], mse, 'mindcontrol',
                             t1_name, 'transform', 'lst_edit',
                             'no_FP_filled_FN_dr2*'))
            if mse != lesion_mse:
                if len(check_t1_lesion) == 0:
                    mse_tp1 = mse_list2[mse_idx - 1]
                    mse_tp2 = mse_list2[mse_idx]
                    break
    if mse_tp1 == '' and mse_tp2 == '':
        print("Congratulations! You finished this subject: ", msid)
        # To add this info to a csv file
    return mse_tp1, mse_tp2
Example #39
__author__ = 'akeshavan'
from jinja2 import Environment, FileSystemLoader
from nipype.utils.filemanip import load_json
import os

files_to_generate = [{"filename": "module_tables.js.tmpl", "location":"../api/"},
                     {"filename": "module_templates.js.tmpl", "location":"../ui/"},
                     {"filename": "module_templates.html.tmpl", "location": "../ui/"}]

env = Environment(loader=FileSystemLoader('./'))
info = load_json("generator.json")

for f in files_to_generate:
    template = env.get_template(f["filename"])
    outfile = os.path.join(f["location"], f["filename"].replace(".tmpl",""))
    print("writing", outfile)
    with open(outfile, "w") as q:
        q.write(template.render(**info))
#print(template.render(**info))

"""
template = env.get_template("module_templates.js.tmpl")
#print(template.render(**info))
template = env.get_template("module_templates.html.tmpl")
print(template.render(**info))
"""
Example #40
def run_workflow(bids_dir):
    subjects_dir = os.path.join(bids_dir, "derivatives", "freesurfer")
    mindcontrol_base_dir = os.path.join(bids_dir, "derivatives", "mindcontrol_freesurfer")
    mindcontrol_outdir = mindcontrol_base_dir
    workflow_working_dir = os.path.join(mindcontrol_base_dir, "scratch"+"_"+str(uuid.uuid4()))

    subject_paths = glob(op.join(subjects_dir, "*"))

    subjects = []
    for path in subject_paths:
        subject = path.split('/')[-1]
        # check if mri dir exists, and don't add fsaverage
        if op.exists(op.join(path, 'mri')) and subject != 'fsaverage':
            subjects.append(subject)


    input_node = Node(IdentityInterface(fields=['subject_id',"subjects_dir",
                                            "mindcontrol_base_dir", "output_dir"]), name='inputnode')
    input_node.iterables=("subject_id", subjects)
    input_node.inputs.subjects_dir = subjects_dir
    input_node.inputs.mindcontrol_base_dir = mindcontrol_base_dir #this is where start_static_server is running
    input_node.inputs.output_dir = mindcontrol_outdir #this is in the freesurfer/ directory under the base_dir

    dg_node=Node(Function(input_names=["subjects_dir", "subject", "volumes"],
                          output_names=["volume_paths"], 
                          function=data_grabber), 
                 name="datagrab")
    #dg_node.inputs.subjects_dir = subjects_dir
    dg_node.inputs.volumes = volumes


    mriconvert_node = MapNode(MRIConvert(out_type="niigz"), 
                              iterfield=["in_file"], 
                              name='convert')

    get_stats_node = Node(Function(input_names=["subjects_dir", "subject"],
                                   output_names = ["output_dict"],
                                   function=parse_stats), name="get_freesurfer_stats")

    write_mindcontrol_entries = Node(Function(input_names = ["mindcontrol_base_dir",
                                                             "output_dir",
                                                             "subject",
                                                             "stats"],
                                              output_names=["output_json"],
                                              function=create_mindcontrol_entries), 
                                     name="get_mindcontrol_entries")

    datasink_node = Node(DataSink(),
                         name='datasink')
    subst = [('out_file',''),('_subject_id_',''),('_out','')]  + [("_convert%d" % index, "") for index in range(len(volumes))] 
    datasink_node.inputs.substitutions = subst

    wf = Workflow(name="MindPrepFS")
    wf.base_dir = workflow_working_dir
    wf.connect(input_node,"subject_id", dg_node,"subject")
    wf.connect(input_node,"subjects_dir", dg_node, "subjects_dir")
    wf.connect(input_node, "subject_id", get_stats_node, "subject")
    wf.connect(input_node, "subjects_dir", get_stats_node, "subjects_dir")
    wf.connect(input_node, "subject_id", write_mindcontrol_entries, "subject")
    wf.connect(input_node, "mindcontrol_base_dir", write_mindcontrol_entries, "mindcontrol_base_dir")
    wf.connect(input_node, "output_dir", write_mindcontrol_entries, "output_dir")
    wf.connect(get_stats_node, "output_dict", write_mindcontrol_entries, "stats")
    wf.connect(input_node, "output_dir", datasink_node, "base_directory")
    wf.connect(dg_node,"volume_paths", mriconvert_node, "in_file")
    wf.connect(mriconvert_node,'out_file',datasink_node,'out_file')
    wf.connect(write_mindcontrol_entries, "output_json", datasink_node, "out_file.@json")
    wf.run()

    shutil.rmtree(workflow_working_dir)
    from nipype.utils.filemanip import load_json, save_json

    files = glob(os.path.join(mindcontrol_base_dir, "*", "mindcontrol_entries.json"))
    output = []
    for f in files:
        output += load_json(f)
    save_json(os.path.join(mindcontrol_base_dir, "all_entries.json"), output)
Example #41
import argparse


def get_collection(port=3001):
    from pymongo import MongoClient
    client = MongoClient("localhost", port)
    db = client.meteor
    collection = db.subjects
    return collection, client


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--env', dest="env")
    parser.add_argument("-s", nargs="+", dest="subjects")
    config = load_json(os.path.join(os.path.split(__file__)[0], "config.json"))
    parser.add_argument("-i", "--include", dest="include", nargs="+")
    parser.add_argument("-x", "--exclude", dest="exclude", nargs="+")
    args = parser.parse_args()
    #print(args)
    if args.exclude is None:
        args.exclude = []
    if args.include is None:
        args.include = []
    if args.subjects is None:
        args.subjects = []

    if args.env in ["development", "production"]:
        env = args.env
        if len(args.subjects) > 0:
            if args.subjects[0].endswith(".txt"):
Example #42
def group_things(list_of_jsons):
    """Fields to save in output csv
- Subject id
- Num Outliers
- Mincost
- All tsnr values (0 for missing values)
"""
    import numpy as np
    from nipype.utils.filemanip import load_json
    from bips.workflows.gablab.wips.fmri.preprocessing.group_preproc_QA import extract_snr, extract_art
    #from bips.workflows.group_preproc_QA import extract_art

    snr_names = []
    snr_dict = {}

    for tmp in list_of_jsons:
        a = load_json(tmp)
        names = [ b[0] for b in a['SNR_table'][0][1:]]
        snr_names += names
        snr_names = np.unique(snr_names).tolist()
 
    for name in snr_names:
        snr_dict[name] = []

    mincost = []
    common_outliers = []
    total_outliers = []
    intensity_outliers = []
    motion_outliers = []
    subject_id = []

    all_fields = ['subject_id','total_outliers','mincost','motion_outliers','intensity_outliers','common_outliers']+snr_names
    dtype=[('subject_id','|S20')]+[(str(n),'f4') for n in all_fields[1:]]
    arr = np.zeros(len(list_of_jsons), dtype=dtype)
    
    for fi in list_of_jsons:
        f = load_json(fi)
        subject_id.append(f['subject_id'])
        mot,inten,com,out = extract_art(f['art'])
        motion_outliers.append(mot)
        intensity_outliers.append(inten)
        common_outliers.append(com)
        total_outliers.append(out)
        mincost.append(f['mincost'][0])
        for n in snr_names:
            t = extract_snr(f['SNR_table'],n)
            snr_dict[n].append(t)

    arr['subject_id'] = subject_id
    arr['total_outliers'] = total_outliers
    arr['mincost'] = mincost
    arr['motion_outliers'] = motion_outliers
    arr['intensity_outliers'] = intensity_outliers
    arr['common_outliers'] = common_outliers

    for key, item in snr_dict.items():
        arr[key] = item
       
    import os
    from matplotlib.mlab import rec2csv
    outfile = os.path.abspath('grouped_metrics.csv')
    rec2csv(arr,outfile)
    return outfile
Example #43
def convert_dicoms(sid,
                   dicom_dir_template,
                   outputdir,
                   queue=None,
                   heuristic_func=None,
                   extension=None,
                   embed=False,
                   no_moco=False):

    import os
    from nipype.utils.filemanip import load_json, save_json

    sdir = dicom_dir_template % sid
    tdir = os.path.join(outputdir, sid)
    infofile = os.path.join(tdir, '%s.auto.txt' % sid)
    editfile = os.path.join(tdir, '%s.edit.txt' % sid)
    if os.path.exists(editfile) and heuristic_func:
        info = load_json(editfile)
    elif not heuristic_func:
        pass
    else:
        infofile = os.path.join(tdir, '%s.auto.txt' % sid)
        info = heuristic_func(sdir, os.path.join(tdir, 'dicominfo.txt'))
        save_json(infofile, info)

    if heuristic_func:
        for key in info:
            if not os.path.exists(os.path.join(tdir, key)):
                os.mkdir(os.path.join(tdir, key))
            for idx, ext in enumerate(info[key]):
                convertcmd = [
                    'dcmstack', sdir, '--dest-dir',
                    os.path.join(tdir, key), '--file-ext',
                    '*-%d-*' % ext, '--force-read', '-v', '--output-name',
                    key + '%03d' % (idx + 1)
                ]
                if embed:
                    convertcmd.append('--embed-meta')
                convertcmd = ' '.join(convertcmd)
                print(convertcmd)
                os.system(convertcmd)
    else:
        if not no_moco:
            convertcmd = [
                'dcmstack', sdir, '--dest-dir',
                os.path.join(outputdir, sid), '--force-read', '-v'
            ]
            if embed:
                convertcmd.append('--embed-meta')
            convertcmd = ' '.join(convertcmd)
            print(convertcmd)
            os.system(convertcmd)
        else:
            import numpy as np
            from bips.workflows.workflow19 import isMoco
            foo = np.genfromtxt(os.path.join(tdir, 'dicominfo.txt'), dtype=str)
            for f in foo:
                if not isMoco(os.path.join(sdir, f[1])):
                    convertcmd = [
                        'dcmstack', sdir, '--dest-dir',
                        os.path.join(outputdir, sid), '--force-read', '-v',
                        '--file-ext',
                        '*-%s-*' % f[2]
                    ]
                    if embed:
                        convertcmd.append('--embed-meta')
                    convertcmd = ' '.join(convertcmd)
                    print(convertcmd)
                    os.system(convertcmd)
    return 1
Example #44
    z_res = float(res_lines[2][1])
    #print(x_res, y_res, z_res)

    z_new = z  # The slice don't change

    x_new = (x - x_pix_fov / 2) * x_res + 0.5
    y_new = (y_pix_fov / 2 - y) * y_res - 0.5
    # for [70, 152, 75] now x = -18, y = -32
    # But it's actually around x = -17.5, y = -33.5 or -32.5

    return [x_new, y_new, z_new]


if __name__ == '__main__':
    working_dir = '/data/henry7/james/Jim6'
    centroids = load_json(os.path.join(
        working_dir, 'mse5466_centroids.json'))['reference']['present']
    new_coord = []
    with open(os.path.join(working_dir, 'mse5466_write.roi'), 'w') as f:
        for key, value in centroids.items():
            print(key, value)
            new_xyz = convert_to_coordinate(value)
            new_coord.append(new_xyz)
            f.writelines([
                "Begin Marker ROI\n", "  Slice={}\n".format(new_xyz[2]),
                "  Begin Shape\n",
                "    X={0}; Y={1}\n".format(new_xyz[0], new_xyz[1]),
                "  End Shape\n", "End Marker ROI\n"
            ])

    print(new_coord)
Example #45
import os
from glob import glob
from create_apm_warp2t1 import nonlinear_reg_diff2t2_workflow
from subprocess import call
import nipype.pipeline.engine as pe
from nipype.utils.filemanip import load_json

subfile = '/home/kjordan/python_code/mydata_4myscripts/darpa/ptlist_darpa.txt'
datadir = '/data/henry7/PBR/subjects'
workingdir = '/scratch/henry_temp/kesshi/CHANGLAB/new_working_dir4'

ptlist = load_json(subfile)
metaworkflow = pe.Workflow("mwf", base_dir=workingdir)

for pt in ptlist:
    ptdir = os.path.join(datadir, pt)
    ptdiffdir = glob(ptdir+'/dti/ec*')[0]
    ptworkingdir = os.path.join(workingdir, pt)
    print(ptdiffdir)
    sub_bvec = glob(ptdiffdir+'/*_rotated.bvec')[0]
    print(sub_bvec)
    sub_ec_diff = glob(ptdiffdir+'/*_corrected.nii.gz')[0]
    print(sub_ec_diff)
    subT1 = glob(ptdir+'/nii/*MPRAGE.nii.gz')[0]
    print(subT1)
    subfa = glob(ptdiffdir+'/*_corrected_fa.nii.gz')[0]
    subt1mask = glob(ptdir+'/masks/ec*/brain_mask.nii.gz')[0]
    subdiffmask = glob(ptdiffdir+'/brain_mask_warped_thresh.nii.gz')[0]
    bettedfa = os.path.join(ptdiffdir, 'fa_masked.nii.gz')
    bettedt1 = glob(ptdir+'/masks/ec*/t1_masked.nii.gz')[0]
    call(['fslmaths', subfa, '-mul', subdiffmask, bettedfa])