def test_conversion():
    # Load test data
    ref = DATA_DIR + "uncinate"
    anat_filename = DATA_DIR + "anat.nii.gz"

    # Test every possible conversion.
    for k1, ref_format in FORMATS.items():
        for k2, out_format in FORMATS.items():
            print("Testing {0}2{1}".format(k1, k2))
            f, out = tempfile.mkstemp('_{0}2{1}'.format(k1, k2))
            os.remove(out)

            ref_filename = ref + "." + k1
            out_filename = out + "." + k2

            # Convert the reference file to the output format.
            input = ref_format(ref_filename, anat_filename)
            output = out_format.create(out_filename, input.hdr, anat_filename)
            tractconverter.convert(input, output)

            # Convert the result back to the reference format.
            f, bak = tempfile.mkstemp('_{0}2{1}'.format(k2, k1))
            bak_filename = bak + "." + k1
            output = out_format(out_filename, anat_filename)
            backup = ref_format.create(bak_filename, input.hdr, anat_filename)
            tractconverter.convert(output, backup)

            # Compare the converted file with the reference.
            input = ref_format(ref_filename, anat_filename)
            output = out_format(out_filename, anat_filename)
            compare_data(input, output)
            os.remove(out_filename)

            # Compare the round-tripped file with the reference.
            input = ref_format(ref_filename, anat_filename)
            backup = ref_format(bak_filename, anat_filename)
            compare_data(input, backup)
            os.remove(bak_filename)
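# compare_data() is not shown in these snippets. A minimal sketch of what it
# could look like, assuming (as tractconverter format objects allow) that both
# arguments can be iterated to yield one (N, 3) array of points per streamline:
import numpy as np

def compare_data(input, output):
    for s1, s2 in zip(input, output):
        assert np.allclose(s1, s2, atol=1e-4)  # Allow for small precision loss during conversion.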
def main():
    parser = buildArgsParser()
    args = parser.parse_args()

    in_filename = args.input
    out_filename = args.output
    anat_filename = args.anat
    isForcing = args.isForce
    isVerbose = args.isVerbose

    if isVerbose:
        logging.basicConfig(level=logging.DEBUG)

    if not os.path.isfile(in_filename):
        parser.error('"{0}" must be an existing file!'.format(in_filename))

    if not tractconverter.is_supported(in_filename):
        parser.error('Input file must be one of {0}!'.format(",".join(FORMATS.keys())))

    if not tractconverter.is_supported(out_filename):
        parser.error('Output file must be one of {0}!'.format(",".join(FORMATS.keys())))

    if os.path.isfile(out_filename):
        if isForcing:
            if out_filename == in_filename:
                parser.error('Cannot use the same name for input and output files. Conversion would fail.')
            else:
                logging.info('Overwriting "{0}".'.format(out_filename))
        else:
            parser.error('"{0}" already exists! Use -f to overwrite it.'.format(out_filename))

    inFormat = tractconverter.detect_format(in_filename)
    outFormat = tractconverter.detect_format(out_filename)

    #if inFormat == outFormat:
    #    parser.error('Input and output must be from different types!'.format(",".join(FORMATS.keys())))

    if anat_filename is not None:
        if not any(map(anat_filename.endswith, EXT_ANAT.split('|'))):
            if isForcing:
                logging.info('Reading "{0}" as a {1} file.'.format(anat_filename.split("/")[-1], EXT_ANAT))
            else:
                parser.error('Anatomy file must be one of {0}!'.format(EXT_ANAT))

        if not os.path.isfile(anat_filename):
            parser.error('"{0}" must be an existing file!'.format(anat_filename))

    # Convert input to output.
    input = inFormat(in_filename, anat_filename)
    output = outFormat.create(out_filename, input.hdr, anat_filename)
    tractconverter.convert(input, output)
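# buildArgsParser() is not shown above. A plausible sketch, assuming argparse
# and inferring the destination names (input, output, anat, isForce, isVerbose)
# from the attributes main() reads off the parsed arguments:
import argparse

def buildArgsParser():
    parser = argparse.ArgumentParser(description="Convert tractograms between supported formats.")
    parser.add_argument("input", help="Input tractogram file.")
    parser.add_argument("output", help="Output tractogram file.")
    parser.add_argument("-a", dest="anat", help="Anatomy file used as spatial reference (optional).")
    parser.add_argument("-f", dest="isForce", action="store_true", help="Overwrite the output file if it exists.")
    parser.add_argument("-v", dest="isVerbose", action="store_true", help="Enable verbose logging.")
    return parser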
def test_convert_ascii_file():
    rng = np.random.RandomState(42)

    # Create 10 fake streamlines.
    streamlines = []
    nb_points_per_streamline = rng.randint(5, 20, 10)
    for nb_points in nb_points_per_streamline:
        streamlines.append(rng.rand(nb_points, 3) * 10)

    # Create a VTK ASCII file.
    f, out = tempfile.mkstemp()
    os.remove(out)
    with open(out + '_ascii.vtk', 'wb') as f:
        f.write("# vtk DataFile Version 3.0\n")
        f.write("Automatically generated ASCII vtk file\n")
        f.write("ASCII\n")
        f.write("DATASET POLYDATA\n")
        f.write("\n")

        # Write one point per line, streamline by streamline.
        total_nb_points = sum(map(len, streamlines))
        f.write("POINTS {0} {1}\n".format(total_nb_points, "float"))
        for s in streamlines:
            for p in s:
                f.write("{0} {1} {2}\n".format(*p))

        f.write("\n")
        # Each LINES entry is a point count followed by the point indices.
        nb_lines = len(streamlines)
        size = total_nb_points + nb_lines
        f.write("LINES {0} {1}\n".format(nb_lines, size))

        cpt_points = 0
        for s in streamlines:
            nb_points = len(s)
            line_points = " ".join(map(str, range(cpt_points, cpt_points + nb_points)))
            f.write("{0} {1}\n".format(nb_points, line_points))
            cpt_points += nb_points

    # Test conversion from ASCII to binary.
    format = tractconverter.FORMATS['vtk']
    ref_filename = out + '_ascii.vtk'
    out_filename = out + '_binary.vtk'

    input = format(ref_filename, None)
    output = format.create(out_filename, input.hdr, None)
    tractconverter.convert(input, output)

    input = format(ref_filename, None)
    output = format(out_filename, None)
    compare_data(input, output)

    input = format(ref_filename, None)
    compare_data(input, streamlines)

    output = format(out_filename, None)
    compare_data(output, streamlines)

    os.remove(ref_filename)
    os.remove(out_filename)
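# Once converted, the binary VTK file can be read back and iterated like any
# tractconverter format object; a short sketch (the path is illustrative):
import tractconverter

vtk = tractconverter.FORMATS['vtk']('streamlines_binary.vtk', None)
for streamline in vtk:
    print(streamline.shape)  # Each streamline is an (N, 3) array of point coordinates.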
def label_streamlines(streamlines, labels, labels_Value, affine, hdr, f_name, data_path):
    # Keep only the streamlines passing through the ROI defined by the label value.
    cc_slice = labels == labels_Value
    cc_streamlines = utils.target(streamlines, cc_slice, affine=affine)
    cc_streamlines = list(cc_streamlines)

    other_streamlines = utils.target(streamlines, cc_slice, affine=affine, include=False)
    other_streamlines = list(other_streamlines)
    assert len(other_streamlines) + len(cc_streamlines) == len(streamlines)
    print("num of roi streamlines is %d" % len(cc_streamlines))

    # Make display objects
    color = line_colors(cc_streamlines)
    cc_streamlines_actor = fvtk.line(cc_streamlines, line_colors(cc_streamlines))
    cc_ROI_actor = fvtk.contour(cc_slice, levels=[1], colors=[(1., 1., 0.)], opacities=[1.])

    # Add display objects to canvas
    r = fvtk.ren()
    fvtk.add(r, cc_streamlines_actor)
    fvtk.add(r, cc_ROI_actor)

    # Save figures
    fvtk.record(r, n_frames=1, out_path=f_name + '_roi.png', size=(800, 800))
    fvtk.camera(r, [-1, 0, 0], [0, 0, 0], viewup=[0, 0, 1])
    fvtk.record(r, n_frames=1, out_path=f_name + '_roi.png', size=(800, 800))

    # Save the ROI streamlines as a TRK file (in voxel space).
    csd_streamlines_trk = ((sl, None, None) for sl in cc_streamlines)
    csd_sl_fname = f_name + '_roi_streamline.trk'
    nib.trackvis.write(csd_sl_fname, csd_streamlines_trk, hdr, points_space='voxel')
    #nib.save(nib.Nifti1Image(FA, img.get_affine()), 'FA_map2.nii.gz')
    print('Saving "_roi_streamline.trk" successful.')

    # Convert the TRK file to VTK.
    import tractconverter as tc
    input_format = tc.detect_format(csd_sl_fname)
    input = input_format(csd_sl_fname)
    output = tc.FORMATS['vtk'].create(csd_sl_fname + ".vtk", input.hdr)
    tc.convert(input, output)

    return cc_streamlines
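# A hedged usage sketch for label_streamlines(). The file name and the label
# value are illustrative assumptions (dipy's streamline_tools example uses
# label 2 for the corpus callosum in its reduced aparc labels); streamlines
# and hdr are assumed to come from an earlier tracking step:
import nibabel as nib

label_img = nib.load('aparc-reduced.nii.gz')  # hypothetical label volume
labels = label_img.get_data()
affine = label_img.get_affine()
cc_streamlines = label_streamlines(streamlines, labels, 2, affine, hdr,
                                   'subject01', data_path='.')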
def walkAndConvert(p_input, p_conversions, p_output=None, p_anatFile=None,
                   p_isRecursive=False, p_overwrite=False):
    for root, dirs, allFiles in os.walk(p_input):
        logging.info('Processing "{0}"...'.format(root))
        root = root + "/"
        nbFiles = 0
        for k, v in p_conversions.items():
            #files = [f for f in allFiles if FORMATS[k]._check(root + f)]
            for i, f in enumerate(allFiles):
                logging.info('{0}/{1} files'.format(i, len(allFiles)))
                if not FORMATS[k]._check(root + f):
                    logging.info('Skip')
                    continue

                nbFiles += 1
                inFile = root + f
                # Swap the 3-character extension for the target one.
                if p_output is not None:
                    outFile = p_output + '/' + f[:-3] + v
                else:
                    outFile = inFile[:-3] + v

                if path.exists(outFile) and not p_overwrite:
                    logging.info(f + " : already done, skipping.")
                    continue

                input = FORMATS[k](inFile, p_anatFile)
                output = FORMATS[v].create(outFile, input.hdr, p_anatFile)
                tractconverter.convert(input, output)
                logging.info(inFile)

        logging.info('{0} skipped (non-track files)'.format(len(allFiles) - nbFiles))

        if not p_isRecursive:
            break

    logging.info("Conversion finished!")
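# A hedged usage sketch: convert every TRK file under a directory tree to TCK,
# writing each result next to its source (the paths are illustrative):
import logging

logging.basicConfig(level=logging.INFO)
walkAndConvert('/data/tractograms', {'trk': 'tck'},
               p_anatFile='/data/anat.nii.gz',
               p_isRecursive=True, p_overwrite=False)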
def convert_trk_fibers_to_tck(dwi, trk_tractograms, tck_tractogram, tempdir=None):
    """ Convert a list of TRK tractograms to a TCK tractogram (MRtrix format).

    The input tractograms are assumed to be in LAS convention.

    Parameters
    ----------
    dwi: str
        Path to the dwi (or nodif_brain) specifying the diffusion space.
    trk_tractograms: list of str
        Paths to the input TRK tractograms.
    tck_tractogram: str
        Path to the output TCK tractogram.
    tempdir: str, default None
        A temporary directory to store the intermediate tractogram.
    """
    # Local import
    import tractconverter

    # Check the existence of the input file
    if not os.path.isfile(dwi):
        raise ValueError("File does not exist: {0}.".format(dwi))

    # Merge the input tractograms
    tmp_trk_tractogram = merge_fibers(trk_tractograms, tempdir=tempdir)

    # Convert TRK to TCK using tractconverter
    trk_fibers = tractconverter.TRK(tmp_trk_tractogram)
    tck_fibers = tractconverter.TCK.create(tck_tractogram, hdr=trk_fibers.hdr, anatFile=dwi)
    tractconverter.convert(trk_fibers, tck_fibers)

    # Clean the temporary directory
    shutil.rmtree(os.path.dirname(tmp_trk_tractogram))

    return tck_tractogram
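# A hedged usage sketch for convert_trk_fibers_to_tck(); the paths are
# illustrative and merge_fibers() must be importable from the calling module:
tck_path = convert_trk_fibers_to_tck(
    dwi="/data/subject01/nodif_brain.nii.gz",
    trk_tractograms=["/data/subject01/bundle1.trk",
                     "/data/subject01/bundle2.trk"],
    tck_tractogram="/data/subject01/bundles.tck")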
#cst_vtk_fname = '/home/bao/tiensy/Tractography_Mapping/data/trackvis_tractography/ROI_seg_tvis/ROI_seg_tvis_native/' + subj + '_corticospinal_R_tvis.vtk'
cst_ext_vtk_fname = '/home/bao/tiensy/Tractography_Mapping/data/trackvis_tractography/ROI_seg_tvis/ROI_seg_tvis_native/' + subj + '_cst_R_tvis_ext.vtk'

in_format = ".trk"
out_format = ".vtk"

#input_file = cst_trk_fname
#output_file = cst_vtk_fname
input_file = cst_ext_trk_fname
output_file = cst_ext_vtk_fname

input_format = tractconverter.detect_format(input_file)
in_put = input_format(input_file, input_anatomy_ref)
out_put = tractconverter.FORMATS['vtk'].create(output_file, in_put.hdr, input_anatomy_ref)
tractconverter.convert(in_put, out_put)
print("Done " + output_file)

'''
input_path = '/home/bao/tiensy/Lauren_registration/data_compare_mapping/tractography/'
output_path = '/home/bao/tiensy/Lauren_registration/data_compare_mapping/tractography/'
for id_obj in np.arange(len(sub)):
    input_anatomy_ref = "/home/bao/tiensy/Lauren_registration/data_compare_mapping/anatomy/" + str(sub[id_obj]) + "_data_brain.nii.gz"
    input_file = input_path + str(sub[id_obj]) + "_tracks_dti_tvis" + in_format
    output_file = output_path + str(sub[id_obj]) + "_tracks_dti_tvis" + out_format