def DoIt_single(filenames, options):
  fn_measure = basename(commonprefix(filenames))
  fn_measure = myutils.strip_from_end(fn_measure, '.h5')
  fn_measure = myutils.strip_from_end(fn_measure, '-type')
  f_measure = h5files.open('adaption_common.h5', 'a', search=False)
  files = [h5files.open(fn, 'r') for fn in filenames]
  groups_without_adaption = [f['/adaption/recomputed'] for f in files]
  groups_with_adaption = [f['/adaption/vessels_after_adaption'] for f in files]

  import analyzeGeneral
  dataman = myutils.DataManager(20, [
      analyzeGeneral.DataBasicVessel(),
      analyzeGeneral.DataVesselSamples(),
      analyzeGeneral.DataVesselGlobal()])

  with mpl_utils.PdfWriter(fn_measure + 'flow_hist.pdf') as pdfpages:
    if 1:
      plotFlowHistogram(dataman, f_measure, filenames, options, pdfpages)
  with mpl_utils.PdfWriter(fn_measure + 'radius_hist.pdf') as pdfpages:
    if 1:
      plotRadiusHistogram(dataman, f_measure, filenames, options, pdfpages)
  with mpl_utils.PdfWriter(fn_measure + 'cap_flow_hist.pdf') as pdfpages:
    if 1:
      plotCapillaryFlowHistogram(dataman, f_measure, filenames, options, pdfpages)
  with mpl_utils.PdfWriter(fn_measure + 'cap_radius_hist.pdf') as pdfpages:
    if 1:
      plotCapillaryRadiusHistogram(dataman, f_measure, filenames, options, pdfpages)
def DoIt(filenames, options):
  fn_measure = basename(commonprefix(filenames))
  fn_measure = myutils.strip_from_end(fn_measure, '.h5')
  fn_measure = myutils.strip_from_end(fn_measure, '-type')
  f_measure = h5files.open('adaption_common.h5', 'a', search=False)
  files = [h5files.open(fn, 'r') for fn in filenames]
  groups_without_adaption = [f['/adaption/recomputed'] for f in files]
  groups_with_adaption = [f['/adaption/vessels_after_adaption'] for f in files]

  with mpl_utils.PdfWriter(fn_measure + 'caps.pdf') as pdfpages:
    import analyzeGeneral
    dataman = myutils.DataManager(20, [
        analyzeGeneral.DataBasicVessel(),
        analyzeGeneral.DataVesselSamples(),
        analyzeGeneral.DataVesselGlobal()])
    # vesselgroups_without = groups_without_adaption
    # vesselgroups_with = groups_with_adaption
    # geometric_data_before = getGeometricData(groups_without_adaption)
    # perfusion_data_before = getTotalPerfusion(groups_without_adaption)*60
    # geometric_data_after = getGeometricData(groups_with_adaption)
    # perfusion_data_after = getTotalPerfusion(groups_with_adaption)*60
    if 1:
      plotFlowHistogram(dataman, f_measure, filenames, options, pdfpages)
    if 1:
      plotRadiusHistogram(dataman, f_measure, filenames, options, pdfpages)
    if 1:
      plotCapillaryFlowHistogram(dataman, f_measure, filenames, options, pdfpages)
    if 1:
      plotCapillaryRadiusHistogram(dataman, f_measure, filenames, options, pdfpages)
def DoIt(inputFileNames, pattern, options):
  for this_enlargement_factor in options.enlarge_factor:
    inFiles = [h5files.open(fn, 'r') for fn in inputFileNames]
    inGroups = list(itertools.chain.from_iterable(
        myutils.walkh5(f, pattern, return_h5objects=True) for f in inFiles))
    if len(inGroups) <= 0:
      print 'no matching groups in hdf file(s)'
      sys.exit(0)
    for in_file in inputFileNames:
      bloodflowparams = krebsutils.pyDictFromParamGroup(
          h5files.open(in_file, 'r')['parameters/calcflow'])
      #enlarge_vessels(float(this_enlargement_factor), in_file, bloodflowparams)
      qsub.submit(
          qsub.func(enlarge_vessels, float(this_enlargement_factor), in_file, bloodflowparams),
          name='job_modify_enlarge_' + str(this_enlargement_factor) + '_',
          num_cpus=6,
          days=5,  # about one day per thread, the idea being that number of threads is proportional to systems size and runtime is
          mem='%iMB' % (1000),
          change_cwd=True)
def plotFlowHistogram(dataman, f_measure, filenames, options, pdfpages):
  filenames = adaption.get_files_with_successful_adaption(filenames)
  files = [h5files.open(fn, 'r') for fn in filenames]
  destination_group = f_measure.require_group('allFlow')

  if options.two:
    typelist = 'typeD- typeE- typeG- typeH-'
  else:
    typelist = 'typeA- typeB- typeC- typeF- typeI-'
  if options.all_types:
    typelist = 'typeA- typeB- typeC- typeD- typeE- typeF- typeG- typeH- typeI-'
  if options.single:
    typelist = 'typeF- '

  fig = plt.figure()
  #fig.suptitle('all Flows', fontsize=12)
  #fig.set_title('all Flows')
  ax1 = fig.add_subplot(211)
  ax2 = fig.add_subplot(212)
  ax1.set_title('M- network')
  ax1.set_xlabel(r'flow/$\frac{\mu m^3}{s}$')
  ax1.set_ylabel(r'$\rho$')
  ax1.text(-0.1, 1.15, 'A', transform=ax1.transAxes, fontsize=16,
           fontweight='bold', va='top', ha='right')
  ax2.set_title('Adapted- network')
  ax2.set_xlabel(r'flow/$\frac{\mu m^3}{s}$')
  ax2.set_ylabel(r'$\rho$')
  ax2.text(-0.1, 1.15, 'B', transform=ax2.transAxes, fontsize=16,
           fontweight='bold', va='top', ha='right')

  for (i, t) in enumerate(typelist.split()):
    print('Flow histogram for type: %s' % t)
    filteredFiles = filter(lambda fn: t in fn, filenames)
    files = [h5files.open(fn, 'r+') for fn in filteredFiles]
    if len(files) == 0:  # that means no file of dedicated type is left after filter
      continue
      #h_nA, h_yA, x_edges_nA, x_edges_yA = generate_capillary_hist(dataman, files, destination_group, t[:-1])
      #data = generate_murray_data_plot3(dataman, files, destination_group, t[:-1])
    else:
      h_nA, h_yA, xedges_nA, xedges_yA = generate_flow_hist(
          dataman, files, destination_group, t[:-1] + '_allFlow')
      xedges_nA = np.asarray(xedges_nA)
      xedges_yA = np.asarray(xedges_yA)
      h1 = np.asarray(h_nA)
      h2 = np.asarray(h_yA)
      ax1.plot((xedges_nA[1:] + xedges_nA[:-1]) / 2, h1,
               label=type_to_label_dict[t], marker=vesselTypeMarkers[i], color=colors[i])
      ax2.plot((xedges_yA[1:] + xedges_yA[:-1]) / 2, h2,
               label=type_to_label_dict[t], marker=vesselTypeMarkers[i], color=colors[i])

  fontP = FontProperties()
  fontP.set_size('x-small')
  ax1.legend(prop=fontP)
  if double_log:
    ax2.loglog()
    ax1.loglog()
  else:
    ax2.semilogx()
    ax1.semilogx()
  plt.tight_layout()
  pdfpages.savefig(fig)
def render(self):
  """ run povray. This should be called on the computing node. """
  with h5files.open(self.fn, 'a') as f:
    if 'po2vessels' in f[self.group_name]:
      from krebs.povrayRenderOxygenDetailed import renderScene
      renderScene(f[self.group_name], self.imageFilename, self.params)
    elif 'tumor' in f[self.group_name]:
      from krebs.povrayRenderTumor import renderScene
      self.params.timepoint = f[self.group_name].attrs.get('time')
      renderScene(f[self.group_name]['vessels'],
                  f[self.group_name]['tumor'],
                  self.imageFilename,
                  self.params)
    # else:
    #   from krebs.povrayRenderVessels import renderScene
    #   renderScene(f[self.group_name],
    #               self.imageFilename,
    #               **self.params)
    #renderScene(drug_grp, imagefn, parameters)
    elif 'conc' in f[self.group_name]:
      from krebs.povrayRenderIff import renderScene
      renderScene(f[self.group_name], self.imageFilename, self.params)
    else:
      from krebs.povrayRenderVessels import render_different_data_types
      render_different_data_types(f[self.group_name], self.params)
def run(goodArguments):
  print('starting with arguments: %s' % goodArguments)
  no_files = len(goodArguments.oxygenFiles)
  hypoxicVolumes = []
  tumorVolumes = []
  threshold = 15
  test = myutils.MeanValueArray.empty()
  for aFile in goodArguments.oxygenFiles:
    with h5files.open(aFile.name) as f:
      try:
        if not 'po2' in f:
          raise AssertionError('no proper oxygen file: %s!' % f)
      except Exception, e:
        print e.message
        sys.exit(-1)
      paths = myutils.walkh5(f, 'po2/out*')
      print('found paths: %s' % paths)
      hypoxicVolumes_per_time = []
      tumorVolumes_per_time = []
      timepoints = []
      for path in paths:
        hypoxicFraction, hypoxicTissueVolume = estimate_ratio_hypoxic(f[path], threshold)
        hypoxicVolumes_per_time.append(hypoxicTissueVolume)
        t = f[path]['SOURCE'].attrs['time']
        r = f[path]['SOURCE/tumor'].attrs['TUMOR_RADIUS']
        volume = 4 / 3. * 3.1414 * r * r * r / 1e9
        tumorVolumes_per_time.append(volume)
        timepoints.append(t)
      hypoxicVolumes.append(hypoxicVolumes_per_time)
      tumorVolumes.append(tumorVolumes_per_time)
def __init__(self, dataman, filenames, pattern):
  files = [h5files.open(fn, 'r+') for fn in filenames]
  items = []
  has_tumor = True
  for f in files:
    paths = myutils.walkh5(f['.'], pattern)
    for path in paths:
      g = f[path]
      if g.attrs.get('CLASS', None) == 'GRAPH':
        gvessels = g
        gtumor = None
        try:
          source = h5files.openLink(g, 'SOURCE')
          gtumor = source.parent['tumor']
          g = source.parent
        except Exception, e:
          raise RuntimeError('tried to get tumor data but failed:' + str(e))
      else:
        gvessels, gtumor = g['vessels'], (g['tumor'] if 'tumor' in g else None)
      e = EnsembleItem(path=path, gvessels=gvessels, gtumor=gtumor, group=g)
      e.time = g.attrs['time']
      has_tumor = has_tumor and gtumor is not None
      e.vessel_system_length = dataman.obtain_data('vessel_system_length', gvessels)
      items.append(e)
def DoReadIn(filenames, pattern, fn_measure, pdf, quantity):
  # read in lots of stuff
  files = [h5files.open(fn, 'r') for fn in filenames]
  output_f = h5py.File(fn_measure)
  if quantity == 'flow':
    iflog = True
  else:
    iflog = False
  if quantity == 'shearforce':
    iflog = True

  for t in 'typeA typeB typeC typeD typeE typeF typeG typeH typeI'.split():
    # edges
    fig1 = plt.figure()
    plt.plot()
    ax1 = plt.axes()
    if 'all_flows_of_type_dist' in locals():
      del all_flows_of_type_dist
    for afile in files:
      print(t)
      if t in afile.filename:
        print("where in")
        if iflog:
          flows_in_a_file = np.asarray(afile[pattern + '/edges/' + quantity])
          flows_in_a_file = flows_in_a_file[np.nonzero(flows_in_a_file)]
          flows_in_a_file = np.log10(flows_in_a_file)
          if quantity == 'flow':
            flows_in_a_file = np.asarray(afile[pattern + '/edges/' + quantity])
            flows_in_a_file = flows_in_a_file[np.nonzero(flows_in_a_file)]
            fac = 60 / 100000.
            flows_in_a_file = np.multiply(fac, flows_in_a_file)
            flows_in_a_file = np.log10(flows_in_a_file)
        else:
          flows_in_a_file = np.asarray(afile[pattern + '/edges/' + quantity])
        flow_dist, bin_edges = np.histogram(flows_in_a_file, bins=50, density=True)
        if 'all_flows_of_type_dist' in locals():
          all_flows_of_type_dist = np.vstack((all_flows_of_type_dist, flow_dist))
        else:
          all_flows_of_type_dist = flow_dist
    if 'all_flows_of_type_dist' in locals():
      fig1.suptitle(t)
      ax1.grid()
      if quantity == 'flow':
        ax1.set_xlabel(r'\log_{10}(flow)/nl/min')
      elif quantity == 'radius':
        ax1.set_xlabel(r'radius/\mu m')
      ax1.set_ylabel('probability')
      width = 1 * (bin_edges[1] - bin_edges[0])
      centers = (bin_edges[:-1] + bin_edges[1:]) / 2
      if len(all_flows_of_type_dist.shape) == 1:
        ax1.plot(centers, all_flows_of_type_dist, '*')
      else:
        ys = np.mean(all_flows_of_type_dist, 0)
        yerr = np.std(all_flows_of_type_dist, 0)
        ax1.errorbar(centers, ys, yerr=yerr)
      pdf.savefig()
def getDomainSizeFromVesselFile(fn):
  with h5files.open(fn, 'r') as f:
    ld = krebsutils.read_lattice_data_from_hdf(
        krebsutils.find_lattice_group_(f['vessels']))
    size = np.amax(ld.GetWorldSize())  # longest axis times the lattice spacing
  return size
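# Usage sketch (not from the original sources; the file name is hypothetical):
# the returned value is the longest edge of the vessel lattice's world box,
# e.g. for choosing a camera distance or plot range.
#
#   domain_size = getDomainSizeFromVesselFile('vessels-default-sample00.h5')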
def create_mean_plot_velocities():
  my_res_file = h5files.open('analyzeVesselTumor.h5', 'r')
  count_hists = 0
  the_keys = my_res_file['/global'].keys()
  print(the_keys[0])
  no_bins = 55
  the_density_in = np.zeros(no_bins - 1)
  the_density_out = np.zeros(no_bins - 1)
  for key in my_res_file['/global'].keys():
    print('working with %s' % key)
    count_hists = count_hists + 1
    count_in = my_res_file['/global/%s/flows_within_tumor/flows_inside' % key]
    count_out = my_res_file['/global/%s/flows_within_tumor/flows_outside' % key]
    radii_in = np.asarray(my_res_file['/global/%s/radii_within_tumor/radii_inside' % key])
    radii_out = np.asarray(my_res_file['/global/%s/radii_within_tumor/radii_outside' % key])
    count_in = np.asarray(count_in) / (np.pi * np.square(radii_in))
    count_out = np.asarray(count_out) / (np.pi * np.square(radii_out))
    density_in, bin_in_array = np.histogram(count_in, np.logspace(1, 4, no_bins), density=True)
    density_out, bin_out_array = np.histogram(count_out, np.logspace(1, 4, no_bins), density=True)
    the_density_in = the_density_in + density_in
    the_density_out = the_density_out + density_out
  print('we have %i histograms' % count_hists)
  the_density_in = np.divide(the_density_in, count_hists)
  the_density_out = np.divide(the_density_out, count_hists)

  mu1 = np.sum(the_density_in * np.diff(bin_in_array) * (0.5 * (bin_in_array[1:] + bin_in_array[:-1])))
  quad1 = np.sum(the_density_in * np.diff(bin_in_array) * np.square(0.5 * (bin_in_array[1:] + bin_in_array[:-1])))
  std1 = np.sqrt(quad1 - mu1**2)
  mu2 = np.sum(the_density_out * np.diff(bin_out_array) * (0.5 * (bin_out_array[1:] + bin_out_array[:-1])))
  quad2 = np.sum(the_density_out * np.diff(bin_out_array) * np.square(0.5 * (bin_out_array[1:] + bin_out_array[:-1])))
  std2 = np.sqrt(quad2 - mu2**2)

  plt.title('Flow distribution')
  ax = plt.subplot(211)
  ax.bar(bin_out_array[:-1], the_density_out, np.diff(bin_out_array))
  ax.set_xscale('log')
  ax.text(10**3, 0.002, r'$< v_{out} > = %f \pm %f$' % (mu2, std2))
  plt.legend(['Outside'])
  plt.ylabel('Probability')
  ax = plt.subplot(212)
  ax.bar(bin_in_array[:-1], the_density_in, np.diff(bin_in_array))
  ax.set_xscale('log')
  ax.text(10**2, 0.006, r'$< v_{in} > = %f \pm %f$' % (mu1, std1))
  plt.xlabel(r'Volume flow through vessel segments/ $\mu m$')
  plt.ylabel('Probability')
  plt.legend(['Inside'])
  plt.savefig('Flow_distribution.png')
  plt.show()
def find_enlargement_factors(filenames):
  factors = []
  files = [h5files.open(fn, 'r+') for fn in filenames]
  for f in files:
    if 'enlargeFactor' in f.attrs.keys():
      if not f.attrs.get('enlargeFactor') in factors:
        factors.append(f.attrs.get('enlargeFactor'))
  return factors
def from_vessel_file(filenames, grp_pattern):
  dirs = set()
  dataman = myutils.DataManager(20, [
      krebs.plotIff.DataTissue(),
      krebs.plotIff.DataGlobalIff(),
      krebs.plotIff.DataRadialIff(),
      krebs.analyzeGeneral.DataDistanceFromCenter(),
      krebs.analyzeGeneral.DataVesselSamples(),
      krebs.analyzeGeneral.DataBasicVessel(),
      o2analysis.DataDetailedPO2()])
  f_measure = h5files.open('chache.h5', 'a', search=False)

  def cachelocation(g):
    path = posixpath.join(
        'FileCS_' + myutils.checksum(basename(g.file.filename)),
        g.name.strip(posixpath.sep))
    return (f_measure, path)

  # run with grp_pattern: iff/vessels
  for fn in filenames:
    with h5py.File(fn, 'r+') as f:
      d = myutils.walkh5(f, grp_pattern)
      assert len(d), 'you f****d up, pattern "%s" not found in "%s"!' % (grp_pattern, fn)
      dirs = set.union(dirs, d)
      for group_path in dirs:
        if 'vessel' in grp_pattern and not 'o2' in grp_pattern:
          vesselgroup = f[group_path]
          ldvessels = ku.read_lattice_data_from_hdf(vesselgroup['lattice'])
          fieldld = ku.SetupFieldLattice(ldvessels.worldBox, 3, 10, 0.)
          phi_vessels = krebs.analyzeGeneral.CalcPhiVessels(
              dataman, vesselgroup, fieldld, scaling=1., samples_per_cell=5)
          print('bla')
          import nibabel as nib
          new_image = nib.Nifti1Image(phi_vessels, affine=np.eye(4))
          common_filename = os.path.splitext(os.path.basename(fn))[0]
          new_image.to_filename(common_filename + '_vessels' + '.nii')
        if 'o2' in grp_pattern:
          po2group = f[group_path]
          #sample_length = 500.
          #data = dataman.obtain_data('detailedPO2_global', 'po2_tissue', po2group, sample_length, cachelocation(po2group))
          data = np.asarray(po2group['po2field'])
          print('bla')
          import nibabel as nib
          new_image = nib.Nifti1Image(data, affine=np.eye(4))
          common_filename = os.path.splitext(os.path.basename(fn))[0]
          new_image.to_filename(common_filename + '_po2' + '.nii')
def __init__(self, dataman, filenames, pattern):
  files = [h5files.open(fn, 'r+') for fn in filenames]
  items = []
  has_tumor = True
  for f in files:
    paths = myutils.walkh5(f['.'], pattern)
    for path in paths:
      po2group_w_a = f[path + '/vessels_after_adaption']
      gvessels_w_a, gtumor = detailedo2.OpenVesselAndTumorGroups(po2group_w_a)
      po2group_no_a = f[path + '/recomputed']
      gvessels_no_a, gtumor = detailedo2.OpenVesselAndTumorGroups(po2group_no_a)
      e = EnsembleItem(path=path,
                       po2group_w_a=po2group_w_a, gvessels_w_a=gvessels_w_a,
                       po2group_no_a=po2group_no_a, gvessels_no_a=gvessels_no_a,
                       gtumor=gtumor)
      # if 'SOURCE' in po2group:
      #   source = h5files.openLink(po2group, 'SOURCE')
      #   if 'time' in source.attrs.keys():
      #     t = source.attrs['time']
      #     e.time = t
      has_tumor = has_tumor and gtumor is not None
      e.vessel_system_length = dataman.obtain_data('vessel_system_length', gvessels_no_a)
      e.initialVesselType = GetVesselTypeLabel(po2group_no_a)
      items.append(e)

  if has_tumor:
    d = collections.defaultdict(list)  # path -> list of EnsembleItem
    for e in items:
      d[e.path].append(e)
    tumor_snapshot_times = dict((k, np.average(map(lambda e: e.time, v))) for k, v in d.items())
    tumor_snapshot_order = sorted(tumor_snapshot_times.keys(),
                                  key=(lambda path: tumor_snapshot_times[path]))
    tumor_snapshots = [(d[path], path, tumor_snapshot_times[path]) for path in tumor_snapshot_order]

  self.files = files
  self.items = items
  if has_tumor:
    self.tumor_snapshots = tumor_snapshots  # list of tuple(items, path, time)
  self.has_tumor = has_tumor
  self.o2ConfigName = set(item.po2group_no_a.attrs.get('O2_CONFIG_NAME', None) for item in items)
  if len(self.o2ConfigName) != 1:
    raise RuntimeError(
        "Detected different O2_CONFIG_NAMES %s. You don't want to mix configurations, do you?" % self.o2ConfigName)
  self.o2ConfigName = self.o2ConfigName.pop()
def enlarge_vessels(factor, gv_filename, bloodflowparams):
  '''gvdst = group where the data is placed in; does not create a 'vessels'
  folder in it but writes nodes, edges directly; gv = source vessel group
  '''
  inFile = h5files.open(gv_filename, 'r')
  gv = inFile['vessels']
  fac_as_percent = int(np.ceil(float(factor) * 100 - 100))
  dest_file = h5files.open(
      myutils.strip_from_end(gv_filename, '.h5') + '_growth_by_%02i.h5' % fac_as_percent, 'a')
  dest_file.attrs.create('enlargeFactor', data=fac_as_percent)
  gvdst = dest_file.create_group('vessels')
  gvdst.attrs['CLASS'] = 'GRAPH'
  myutils.buildLink(gvdst, 'SOURCE', gv)
  # first we need to copy some of the vessel data
  gvedst = gvdst.create_group('edges')
  gvndst = gvdst.create_group('nodes')
  gvedst.attrs['COUNT'] = gv['edges'].attrs['COUNT']
  gvndst.attrs['COUNT'] = gv['nodes'].attrs['COUNT']
  gv.copy('lattice', gvdst)
  for name in ['lattice_pos', 'roots', 'nodeflags', 'gf',
               'bc_conductivity_value', 'bc_node_index', 'bc_type', 'bc_value']:
    gv['nodes'].copy(name, gvndst)
  for name in ['radius', 'node_a_index', 'node_b_index', 'flags']:
    if name == 'radius':
      radii = gv['edges/' + name]
      radii = factor * np.asarray(radii)
      gvedst.create_dataset(name, data=radii)
    else:
      gv['edges'].copy(name, gvedst)
  # then we recompute blood flow because the algorithm has changed and we may or may not want hematocrit
  (pressure, flow, shearforce, hematocrit, flags) = krebsutils.calc_vessel_hydrodynamics(
      gvdst, return_flags=True, bloodflowparams=bloodflowparams)
  # then we save the new data to complete the network copy
  gvedst.create_dataset('flow', data=flow, compression=9)
  gvedst.create_dataset('shearforce', data=shearforce, compression=9)
  gvedst.create_dataset('hematocrit', data=hematocrit, compression=9)
  gvndst.create_dataset('pressure', data=pressure, compression=9)
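# Usage sketch (not part of the original module; the file name is hypothetical):
# enlarge all vessel radii by 5% and recompute blood flow. bloodflowparams is
# built from the stored 'parameters/calcflow' group, mirroring the DoIt()
# driver above.
#
#   bloodflowparams = krebsutils.pyDictFromParamGroup(
#       h5files.open('vessels-default-sample00.h5', 'r')['parameters/calcflow'])
#   enlarge_vessels(1.05, 'vessels-default-sample00.h5', bloodflowparams)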
def doit(filenames):
  dataman = myutils.DataManager(50, map(lambda x: x(), detailedo2Analysis.O2DataHandlers) + [
      analyzeGeneral.DataTumorTissueSingle(),
      analyzeGeneral.DataDistanceFromCenter(),
      analyzeGeneral.DataBasicVessel(),
      analyzeGeneral.DataVesselSamples(),
      analyzeGeneral.DataVesselRadial(),
      analyzeGeneral.DataVesselGlobal(),
      analyzeBloodFlow.DataTumorBloodFlow()])
  ensemble = EnsembleFiles(dataman, filenames, 'po2/adaption/')
  out_prefix, out_suffix = myutils.splitcommonpresuffix(map(lambda s: basename(s), filenames))
  output_base_filename = splitext(out_prefix + out_suffix)[0]
  if ensemble.o2ConfigName:
    fn_measure = 'detailedo2_%s_common.h5' % ensemble.o2ConfigName
  else:
    fn_measure = 'detailedo2_common.h5'
  f_measure = h5files.open(fn_measure, 'a')

  def cachelocation(g):
    path = posixpath.join(
        'FileCS_' + myutils.checksum(basename(g.file.filename)),
        g.name.strip(posixpath.sep))
    return (f_measure, path)

  measurementinfo = MeasurementInfo(sample_length=30.,
                                    cachelocation_callback=cachelocation,
                                    distancemap_spec='radial')

  with mpl_utils.PageWriter(output_base_filename + '.pdf', fileformats=['pdf']) as pdfwriter:
    if 0:
      compare_tissue_saturation(dataman, ensemble, pdfwriter)
    if 0:
      #try:
      #  histogramGroupFinal = f_measure['combinedHistogramsFinal']
      #  histogramGroupInitial = f_measure['combinedHistogramsInitial']
      #except KeyError:
      #  histogramGroupFinal = f_measure.recreate_group('combinedHistogramsFinal')
      histogramGroupInitial = f_measure.recreate_group('combinedHistogramsInitial')
      #ComputeHistogramsOfPo2Items(dataman, ensemble.items, measurementinfo, histogramGroupFinal)
      ComputeHistogramsOfPo2Items(dataman, ensemble.items, measurementinfo, histogramGroupInitial)
      #PlotHistograms(pdfwriter, histogramGroupFinal, 'tum', 'Tumor')
      PlotHistograms(pdfwriter, histogramGroupInitial, 'all', 'Initial')
def create_mean_plot_radii():
  my_res_file = h5files.open('analyzeVesselTumor.h5', 'r')
  count_hists = 0
  the_keys = my_res_file['/global'].keys()
  print(the_keys[0])
  no_bins = 55
  the_density_in = np.zeros(no_bins)
  the_density_out = np.zeros(no_bins)
  for key in my_res_file['/global'].keys():
    print('working with %s' % key)
    count_hists = count_hists + 1
    count_in = my_res_file['/global/%s/radii_within_tumor/radii_inside' % key]
    count_out = my_res_file['/global/%s/radii_within_tumor/radii_outside' % key]
    density_in, bin_in_array = np.histogram(count_in, no_bins, density=True)
    density_out, bin_out_array = np.histogram(count_out, no_bins, density=True)
    the_density_in = the_density_in + density_in
    the_density_out = the_density_out + density_out
  print('we have %i histograms' % count_hists)
  the_density_in = np.divide(the_density_in, count_hists)
  the_density_out = np.divide(the_density_out, count_hists)

  mu1 = np.sum(the_density_in * np.diff(bin_in_array) * (0.5 * (bin_in_array[1:] + bin_in_array[:-1])))
  quad1 = np.sum(the_density_in * np.diff(bin_in_array) * np.square(0.5 * (bin_in_array[1:] + bin_in_array[:-1])))
  std1 = np.sqrt(quad1 - mu1**2)
  mu2 = np.sum(the_density_out * np.diff(bin_out_array) * (0.5 * (bin_out_array[1:] + bin_out_array[:-1])))
  quad2 = np.sum(the_density_out * np.diff(bin_out_array) * np.square(0.5 * (bin_out_array[1:] + bin_out_array[:-1])))
  std2 = np.sqrt(quad2 - mu2**2)

  plt.title('Radii distribution')
  ax = plt.subplot(211)
  ax.bar(bin_out_array[:-1], the_density_out, np.diff(bin_out_array))
  ax.text(30, 0.2, r'$< r_{out} > = %f \pm %f$' % (mu2, std2))
  plt.legend(['Outside'])
  plt.ylabel('Probability')
  ax = plt.subplot(212)
  plt.bar(bin_in_array[:-1], the_density_in, np.diff(bin_in_array))
  ax.text(20, 0.3, r'$< r_{in} > = %f \pm %f$' % (mu1, std1))
  plt.xlabel(r'radius of vessel segments/ $\mu m$')
  plt.ylabel('Probability')
  plt.legend(['Inside'])
  plt.savefig('Radii_distribution.png')
  plt.show()
def worker_on_client(fn, pattern, o2params):
  print 'detailedo2 on %s / %s' % (fn, pattern)
  # so the plotting and measurement scripts can find the original tumor files using the stored basename alone
  h5files.search_paths = [dirname(fn)]
  # no, this is not working!!!!, hand num_threads over to the cpp side
  #num_threads = o2params.pop('num_threads')
  #krebsutils.set_num_threads(num_threads)
  o2_refs = detailedo2.doit(fn, pattern, (o2params, o2params['name']))
  if 0:  # this is for data analysis on the clusters
    for ref in o2_refs:
      po2group = h5files.open(ref.fn)[ref.path]
      detailedo2Analysis.WriteSamplesToDisk(po2group)
  h5files.closeall()  # just to be sure
def MakeVesselFilenamePart(fn):
  with h5files.open(fn, mode='a') as f:
    if 'parameters' in f:
      if 'MESSAGE' in f['parameters'].attrs:
        msg = f['parameters'].attrs['MESSAGE']
        ensemble_index = f['parameters'].attrs['ENSEMBLE_INDEX']
        if msg.startswith('vessels-'):
          msg = msg[len('vessels-'):]
    if 'msg' not in locals():
      msg = "hm"
      ensemble_index = 1
      f['parameters'].attrs['MESSAGE'] = msg
      f['parameters'].attrs['ENSEMBLE_INDEX'] = ensemble_index
  name = '%s-sample%02i' % (msg, ensemble_index)
  return name
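# Usage sketch (hypothetical file name): derives a short label such as
# 'default-sample01' from the MESSAGE and ENSEMBLE_INDEX attributes, falling
# back to 'hm-sample01' when no MESSAGE is stored.
#
#   label = MakeVesselFilenamePart('vessels-default-sample01.h5')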
def create_nice_file_containing_all_the_data():
  filenames = sys.argv[1:]
  files = [h5files.open(fn, 'r') for fn in filenames]
  fmeasure = h5files.open('analyzeVesselTumor.h5', 'a')
  dataman = myutils.DataManager(10, [DataVesselLength(), DataVesselTumor()])
  allgroups = defaultdict(list)
  for f in files:
    keys = filter(lambda k: k.startswith('out'), f.keys())
    for k in keys:
      allgroups[k].append(f[k])
  allgroups = [(k, allgroups[k]) for k in sorted(allgroups.keys())]
  groupslist = [allgroups[-1]]
  outfn = 'Length-%s.pdf' % splitext(basename(filenames[0]))[0]
  pprint(groupslist)
  print '-> %s' % (outfn)
  for key, groups in groupslist:
    for group in groups:
      cachelocation = (fmeasure, '%s_file_%s_FILEID%s' % (
          group.name, group.file.filename, myutils.checksum(group.file.filename)))
      data = dataman.obtain_data('length_dist', 'length_dist',
                                 group['vessels'], group['tumor'], cachelocation)
      data = dataman.obtain_data('within_fake_tumor', 'radii_within_tumor',
                                 group['vessels'], group['tumor'], cachelocation)
      data = dataman.obtain_data('within_fake_tumor', 'pressures_within_tumor',
                                 group['vessels'], group['tumor'], cachelocation)
      data = dataman.obtain_data('within_fake_tumor', 'flows_within_tumor',
                                 group['vessels'], group['tumor'], cachelocation)
  print('you just created a big nice file')
def doit_optimize(vesselFileName, adaptParams, BfParams):
  returns = adaption_cpp.doAdaptionOptimization(vesselFileName, adaptParams, BfParams)
  print("should be optimized values:")
  print(returns)
  print('from file: %s' % vesselFileName)
  f_results = h5files.open("optimize_results.h5", 'a')
  a_uuid = str(uuid.uuid4())
  g = f_results.create_group(vesselFileName + '_' + a_uuid)
  g.create_dataset('x_opt', data=returns)
  myutils.hdf_write_dict_hierarchy(g, 'adaptParams', adaptParams)
  myutils.hdf_write_dict_hierarchy(g, 'BfParams', BfParams)
  g.file.flush()
  #f_results.close()
  return returns
def run_out_in_single_file(goodArguments):
  print('starting with arguments: %s' % goodArguments)
  no_files = len(goodArguments.oxygenFiles)
  annoxicVolumes = []
  hypoxicVolumes = []
  normoxicVolumes = []
  tumorVolumes = []
  #threshold = 15
  threshold1 = 0.1
  threshold2 = 2.5
  test = myutils.MeanValueArray.empty()
  annoxicVolumes_per_time = []
  hypoxicVolumes_per_time = []
  normoxicVolumes_per_time = []
  tumorVolumes_per_time = []
  timepoints = []
  for aFile in goodArguments.oxygenFiles:
    with h5files.open(aFile.name) as f:
      try:
        if not 'po2' in f:
          raise AssertionError('no proper oxygen file: %s!' % f)
      except Exception, e:
        print e.message
        sys.exit(-1)
      paths = myutils.walkh5(f, 'po2/out*')
      print('found paths: %s' % paths)
      for path in paths:
        #hypoxicFraction, hypoxicTissueVolume = estimate_ratio_hypoxic(f[path], threshold)
        t = f[path]['SOURCE'].attrs['time']
        r = f[path]['SOURCE/tumor'].attrs['TUMOR_RADIUS']
        volume = 4 / 3. * 3.1414 * r * r * r / 1e9
        #volume = (2*r)*(2*r)*(2*r)/1e9
        tumorVolumes_per_time.append(volume)
        t = t / (3600.0 * 24)  # days
        timepoints.append(t)
        annoxicTissueVolume, hypoxicTissueVolume, normoxicTissueVolume = estimate_annoxic_hypoxic_normoxic(
            f[path], r, threshold1, threshold2)
        annoxicVolumes_per_time.append(annoxicTissueVolume)
        hypoxicVolumes_per_time.append(hypoxicTissueVolume)
        normoxicVolumes_per_time.append(normoxicTissueVolume)
  annoxicVolumes.append(annoxicVolumes_per_time)
  hypoxicVolumes.append(hypoxicVolumes_per_time)
  normoxicVolumes.append(normoxicVolumes_per_time)
  tumorVolumes.append(tumorVolumes_per_time)
def create_mean_plot_length():
  my_res_file = h5files.open('analyzeVesselTumor.h5', 'r')
  count_hists = 0
  the_keys = my_res_file['/global'].keys()
  print(the_keys[0])
  the_density_in = np.zeros_like(my_res_file['/global/%s/length_dist/inside_density' % the_keys[0]])
  the_density_out = np.zeros_like(my_res_file['/global/%s/length_dist/outside_density' % the_keys[0]])
  a_bin_in = my_res_file['/global/%s/length_dist/inside_bins' % the_keys[0]]
  a_bin_out = my_res_file['/global/%s/length_dist/outside_bins' % the_keys[0]]
  for key in my_res_file['/global'].keys():
    print('working with %s' % key)
    count_hists = count_hists + 1
    the_density_in = the_density_in + my_res_file['/global/%s/length_dist/inside_density' % key]
    the_density_out = the_density_out + my_res_file['/global/%s/length_dist/outside_density' % key]
  print('we have %i histograms' % count_hists)
  the_density_in = np.divide(the_density_in, count_hists)
  the_density_out = np.divide(the_density_out, count_hists)

  mu1 = np.sum(the_density_in * np.diff(a_bin_in) * (0.5 * (a_bin_in[1:] + a_bin_in[:-1])))
  quad1 = np.sum(the_density_in * np.diff(a_bin_in) * np.square(0.5 * (a_bin_in[1:] + a_bin_in[:-1])))
  std1 = np.sqrt(quad1 - mu1**2)
  mu2 = np.sum(the_density_out * np.diff(a_bin_out) * (0.5 * (a_bin_out[1:] + a_bin_out[:-1])))
  quad2 = np.sum(the_density_out * np.diff(a_bin_out) * np.square(0.5 * (a_bin_out[1:] + a_bin_out[:-1])))
  std2 = np.sqrt(quad2 - mu2**2)

  plt.title('Length distribution')
  ax = plt.subplot(211)
  ax.bar(a_bin_out[:-1], the_density_out, np.diff(a_bin_out))
  ax.text(300, 0.035, r'$< l_{out} > = %f \pm %f$' % (mu2, std2))
  plt.legend(['Outside'])
  plt.ylabel('Probability')
  ax = plt.subplot(212)
  plt.bar(a_bin_in[:-1], the_density_in, np.diff(a_bin_in))
  ax.text(300, 0.008, r'$< l_{in} > = %f \pm %f$' % (mu1, std1))
  plt.xlabel(r'length of vessel segments/ $\mu m$')
  plt.ylabel('Probability')
  plt.legend(['Inside'])
  plt.savefig('Length_distribution.png')
def MeasurementFile(f, h5files, prefix='measure_'):
  if not isinstance(f, (str, unicode)):
    fn = f.filename
  else:
    fn = f
  fnpath = dirname(fn)
  fnbase = basename(fn).rsplit('.h5')[0]
  fnmeasure = join(fnpath, prefix + fnbase + '.h5')
  existed = os.path.isfile(fnmeasure)
  fm = h5files.open(fnmeasure, 'a')
  if not existed:
    fm.attrs['TYPE'] = 'MEASUREMENT'
    fm.attrs['SOURCE'] = str(fn)
    fm['source'] = h5py.ExternalLink(fn, '/')
  return fm
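# Usage sketch (illustrative only; 'mydata.h5' is a made-up file name): returns
# the companion file 'measure_mydata.h5' next to the source file, creating it
# with TYPE/SOURCE attributes and an external 'source' link on first use.
#
#   fm = MeasurementFile('mydata.h5', h5files)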
def plotSingleRun(fn):
  out = splitext(basename(fn))[0]
  f = h5files.open(fn, 'r')
  rc = matplotlib.rc
  rc('figure', figsize=(7, 7), dpi=100)
  rc('font', size=8.)
  rc('axes', titlesize=10., labelsize=10.)
  rc('pdf', compression=6, fonttype=42)
  rc('figure', **{'subplot.left': 0.02,
                  'subplot.right': 1. - 0.05,
                  'subplot.bottom': 0.01,
                  'subplot.top': 1. - 0.05,
                  'subplot.wspace': 0.1,
                  'subplot.hspace': 0.1})
  #rc('savefig', facecolor='none', edgecolor='none', dpi=100)
  rc('savefig', dpi=100)
  rc('font', **{'family': 'sans-serif'})  #,'sans-serif':['Helvetica']})
  rc('path', simplify_threshold=0.01)
  #rc('text', usetex=True, **{ 'latex.unicode' : True })

  with mpl_utils.PdfWriter(out + ".pdf") as pdfpages:
    dataman = myutils.DataManager(100, [
        DataTumorTissueSingle(),
        DataTumorMeasureCurvature(),
        DataTumor3dRendering(),
        DataTissueRadial(),
        DataTissueRadialAveraged()])
    resultfile = ResultFile(f, dataman)
    is3d = resultfile.obtain_data('ld').shape[2] > 1
    if is3d:
      plot3dRendering(resultfile, pdfpages, showVessels=True, showTumor=False)
      plot3dRendering(resultfile, pdfpages)
      plot3dRendering(resultfile, pdfpages, showVessels=True)
    plotSnapshots(resultfile, pdfpages)
def worker_on_client(vessel_fn, tumor_parameters, o2_parameter_set_name, num_threads):
  krebsutils.set_num_threads(num_threads)
  tumor_fn = tumor_parameters['fn_out']
  tend = tumor_parameters['tend']
  pattern1 = 'out0000'
  pattern2 = 'out%04i' % int(round(tend / tumor_parameters['out_intervall']))
  pattern = '|'.join([pattern1, pattern2])
  print tumor_fn, pattern
  #os.system("%s -s '%s'" % (krebsjobs.submitTumOnlyVessels.exe, dicttoinfo(tumor_parameters)))
  os.system("%s -s '%s'" % (krebs.tumors.run_faketum, dicttoinfo(tumor_parameters)))

  o2_refs = detailedo2.doit(
      tumor_fn, pattern,
      (getattr(krebs.detailedo2Analysis.parameterSetsO2, o2_parameter_set_name), o2_parameter_set_name))
  for ref in o2_refs:
    po2group = h5files.open(ref.fn)[ref.path]
    krebs.analyzeTissueOxygenDetailed.WriteSamplesToDisk(po2group)
  krebs.povrayRenderOxygenDetailed.doit(o2_refs[1].fn, o2_refs[1].path)
  h5files.closeall()
def createParamFile(files, label):
  writepath = '/localdisk/thierry/tmp/%s_parms.py' % label
  #mode = 'a' if os.path.exists(writepath) else 'w'
  mode = 'w'
  if os.path.exists(writepath):
    os.remove(writepath)
  with open(writepath, mode) as f:
    f.write('# -*- coding: utf-8 -*-\n')
    for t in typelist:
      for pso_file in files:
        if t in pso_file:
          f_opt_data = h5files.open(pso_file, 'r', search=False)
          xopt = f_opt_data.attrs.get('xopt')
          f.write('pso_param_%s_%s_vary1=deepcopy(adaption_master)\n' % (label, t))
          f.write('pso_param_%s_%s_vary1[\'adaption\'].update(\n' % (label, t))
          f.write(' k_m = %f,\n' % xopt[0])
          f.write(' k_c = %f,\n' % xopt[1])
          f.write(' k_s = %f,\n' % xopt[2])
          f.write(' cond_length = %f,\n' % xopt[3])
          f.write(' Q_refdot = %f,\n' % xopt[4])
          f.write(')\n')
  print("Created file: %s" % writepath)
def ProduceData(fitParameters, filename):
  from krebs.analyzeGeneral import DataBasicVessel, DataVesselSamples, DataVesselGlobal
  from krebs.detailedo2Analysis import DataDetailedPO2
  import krebs.detailedo2Analysis.singleVesselCases as singleVesselCases

  paramspo2Override = dict(
      massTransferCoefficientModelNumber=1,
      conductivity_coeff1=fitParameters[0],
      conductivity_coeff2=fitParameters[1],
      conductivity_coeff3=fitParameters[2],
  )

  f = h5files.open(filename, 'a')
  krebsutils.set_num_threads(2)
  dataman = myutils.DataManager(20, [DataDetailedPO2(), DataBasicVessel(),
                                     DataVesselSamples(), DataVesselGlobal()])
  for k, params in fitCases:
    params = deepcopy(params)
    params.paramspo2.update(paramspo2Override)
    singleVesselCases.GenerateSingleCapillaryWPo2(dataman, f, k, 16, params)
  return f
                                  distancemap_spec)
    plot_data(ax, ranges, mvd.avg * 1e6, mvd.std * 1e6, 'rg'[j], 't = %i h' % time)
    lim = ax.get_ylim()
    ax.vlines(ranges[1:-1], lim[0], lim[1])
    #ax.set(ylim = lim, xlim = (ax.get_xlim()[0], 1000))
    ax.set(xlabel=r'distance from rim [$\mu m$]', ylabel=r'mvd [1/mm$^2$]')
  ax.legend()
  pdfpages.savefig(fig)


if __name__ == '__main__':
  filenames = sys.argv[1:]
  files = [h5files.open(fn, 'r') for fn in filenames]
  fmeasure = h5files.open('analyzeBloodVolumeMeasurements.h5', 'a')
  dataman = myutils.DataManager(10, [
      DataTumorTissueSingle(),
      DataVesselRadial(),
      DataVesselSamples(),
      DataBasicVessel(),
      DataDistanceFromCenter()])
  #allgroups = sorted(filter(lambda k: k.startswith('out'), files[0].keys()))
  allgroups = defaultdict(list)
  for f in files:
    keys = filter(lambda k: k.startswith('out'), f.keys())
    for k in keys:
      allgroups[k].append(f[k])
parser.add_option("--write-flow",
                  dest="write_flow",
                  help="writes the recomputed flow; note: hematocrit is completely ignored!",
                  default=False,
                  action="store_true")
options, args = parser.parse_args()
if options.write_flow:
  options.force_flow_recompute = True

dataman = myutils.DataManager(100, [DataBasicVessel()])
filenames, pattern = args[:-1], args[-1]
files = [h5files.open(fn, 'a' if options.add_resistor_bc else 'r+') for fn in filenames]
groups = list(itertools.chain.from_iterable(
    myutils.walkh5(f, pattern, return_h5objects=True) for f in files))
for vesselgroup in groups:
  if options.force_flow_recompute and not options.add_resistor_bc:
    (pressure, flow, shearforce) = krebsutils.calc_vessel_hydrodynamics(vesselgroup)
    vessels = dataman.obtain_data('vessel_graph', vesselgroup, ['flags', 'radius'])
    vessels.edges['flow'] = flow
    vessels.nodes['pressure'] = pressure
  else:
def DoIt(filenames, pattern, with_o2):
  fn_measure = basename(commonprefix(filenames))
  fn_measure = myutils.strip_from_end(fn_measure, '.h5')
  fn_measure = myutils.strip_from_end(fn_measure, '-type')

  def cachelocation(g):
    path = posixpath.join(
        'FileCS_' + myutils.checksum(basename(g.file.filename)),
        g.name.strip(posixpath.sep))
    return (f_measure, path)

  if with_o2:
    fn_measure = myutils.strip_from_end(fn_measure, '_detailedpo2')

  files = [h5files.open(fn, 'a') for fn in filenames]
  f_measure = h5files.open('plotVessels_chache.h5', 'a', search=False)
  groups = list(itertools.chain.from_iterable(
      myutils.walkh5(f, pattern, return_h5objects=True) for f in files))
  if len(groups) <= 0:
    print 'no matching groups in hdf file(s)'
    sys.exit(0)

  if with_o2:
    name = posixpath.commonprefix(map(lambda g: g.name, groups))
    name = myutils.strip_from_start(name, '/po2/vessels').replace('/', '-')
    fn_measure += name

  with mpl_utils.PdfWriter(fn_measure + '.pdf') as pdfpages:
    rc = matplotlib.rc
    rc('font', size=8.)
    rc('axes', titlesize=10., labelsize=8.)

    if with_o2:
      import detailedo2Analysis as o2analysis
      import detailedo2Analysis.plotsForPaper
      import detailedo2
      dataman = myutils.DataManager(20, [
          o2analysis.DataDetailedPO2(),
          analyzeGeneral.DataTumorTissueSingle(),
          analyzeGeneral.DataDistanceFromCenter(),
          analyzeGeneral.DataBasicVessel(),
          analyzeGeneral.DataVesselSamples(),
          analyzeBloodFlow.DataTumorBloodFlow(),
          analyzeGeneral.DataVesselRadial(),
          analyzeGeneral.DataVesselGlobal()])
      vesselgroups = list(detailedo2.OpenVesselAndTumorGroups(g)[0] for g in groups)
      #original_vesselgroups = list(h5files.openLink(g, 'SOURCE') for g in vesselgroups)
      if 1:
        PrintGlobalDataWithOxygen(pdfpages, groups, vesselgroups, f_measure, dataman)
        '''FormatParameters builds the page of network creation parameters;
           that does not work if we have an o2 file'''
        #text = FormatParameters(original_vesselgroups[0].file)
        text = [' ']
        text += detailedo2Analysis.plotsForPaper.FormatParameters(groups[0])
        fig, _ = mpl_utils.MakeTextPage(text, figsize=(mpl_utils.a4size[0], mpl_utils.a4size[0]))
        pdfpages.savefig(fig, postfix='_vesselsparams')
      if 1:
        res = getMultiScatter(300. * len(filenames), vesselgroups)
        plotMultiScatterBeauty(res, pdfpages)
    else:
      dataman = myutils.DataManager(20, [
          analyzeGeneral.DataTumorTissueSingle(),
          analyzeGeneral.DataVesselRadial(),
          analyzeGeneral.DataDistanceFromCenter(),
          analyzeBloodFlow.DataTumorBloodFlow(),
          analyzeGeneral.DataBasicVessel(),
          analyzeGeneral.DataVesselSamples(),
          analyzeGeneral.DataVesselGlobal()])
      #dataman = myutils.DataManager(20, [analyzeGeneral.DataBasicVessel(), analyzeGeneral.DataVesselSamples(), analyzeGeneral.DataVesselGlobal()])
      vesselgroups = groups

      if 0:
        res = getMultiScatter(300. * len(filenames), vesselgroups)
        plotMultiScatterBeauty(res, pdfpages)
      if 0:
        PlotRadiusHistogram2(dataman, vesselgroups, pdfpages)
      if 0 and all(map(lambda g: 'data' in g.parent, vesselgroups)):
        data = VesselData()
        for g in vesselgroups:
          data.add(g.parent['data'])
        plot_topological_stats_avg(data, pdfpages)
      if 0:  # reproduce swine
        plot_geometric_stuff_on_RC(dataman, f_measure, filenames, options, pdfpages)
      if 1:
        PrintGlobalData(pdfpages, vesselgroups, f_measure, dataman)