Example #1
def doit(filenames):
  dataman = myutils.DataManager(50, map(lambda x: x(), detailedo2Analysis.O2DataHandlers) + [
      analyzeGeneral.DataTumorTissueSingle(),
      analyzeGeneral.DataDistanceFromCenter(),
      analyzeGeneral.DataBasicVessel(),
      analyzeGeneral.DataVesselSamples(),
      analyzeGeneral.DataVesselRadial(),
      analyzeGeneral.DataVesselGlobal(),
      analyzeBloodFlow.DataTumorBloodFlow()])
  ensemble = EnsembleFiles(dataman, filenames, 'po2/adaption/')
  out_prefix, out_suffix = myutils.splitcommonpresuffix(map(lambda s: basename(s), filenames))
  output_base_filename = splitext(out_prefix+out_suffix)[0]
  if ensemble.o2ConfigName:
    fn_measure = 'detailedo2_%s_common.h5' % ensemble.o2ConfigName
  else:
    fn_measure = 'detailedo2_common.h5'
    
  f_measure = h5files.open(fn_measure, 'a')
  def cachelocation(g):
    path = posixpath.join('FileCS_'+myutils.checksum(basename(g.file.filename)), g.name.strip(posixpath.sep))
    return (f_measure, path)
  measurementinfo = MeasurementInfo(sample_length = 30.,
                                    cachelocation_callback = cachelocation,
                                    distancemap_spec = 'radial')
                                    
  with mpl_utils.PageWriter(output_base_filename+'.pdf', fileformats = ['pdf']) as pdfwriter: 
    if 0:  # disabled block: tissue saturation comparison
      compare_tissue_saturation(dataman, ensemble, pdfwriter)
    
    if 0:  # disabled block: PO2 histograms of the initial state (the 'Final' variants are commented out below)
      #try:
      #  histogramGroupFinal   = f_measure['combinedHistogramsFinal']
      #  histogramGroupInitial = f_measure['combinedHistogramsInitial']
      #except KeyError:
      
      #histogramGroupFinal   = f_measure.recreate_group('combinedHistogramsFinal')
      histogramGroupInitial = f_measure.recreate_group('combinedHistogramsInitial')        
      #ComputeHistogramsOfPo2Items(dataman, ensemble.items, measurementinfo, histogramGroupFinal)
      ComputeHistogramsOfPo2Items(dataman, ensemble.items, measurementinfo, histogramGroupInitial)
      #PlotHistograms(pdfwriter, histogramGroupFinal, 'tum', 'Tumor')
      PlotHistograms(pdfwriter, histogramGroupInitial, 'all', 'Initial')
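
A minimal sketch of how this function might be driven from the command line; the script name and result-file names below are placeholders, and the module-level imports (myutils, h5files, analyzeGeneral, etc.) are assumed to be in place as in the snippet above:

if __name__ == '__main__':
    import sys
    # e.g.  python plot_detailedo2.py run1-po2.h5 run2-po2.h5
    # Each argument is an HDF5 result file with the 'po2/adaption/' groups the analysis expects.
    doit(sys.argv[1:])
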
Example #2
    # Collect matching 'before'/'after' vessel groups and the associated tumor group from each file.
    for filename in filenames:
        f = h5files.open(filename)
        print 'opened -- ', filename, '/',
        paths_before = myutils.walkh5(f['.'], pattern_before)
        paths_after = myutils.walkh5(f['.'], pattern_after)
        print paths_before, paths_after
        for path_before, path_after in zip(paths_before, paths_after):
            vesselgroup_before = f[path_before]
            vesselgroup_after = f[path_after]
            tumorgroup = analyzeGeneral.try_find_tumor_group_from_vesselgroup(
                vesselgroup_after)
            assert tumorgroup
            thegroups.append(
                [vesselgroup_before, vesselgroup_after, tumorgroup])

    prefix, suffix = myutils.splitcommonpresuffix(
        map(lambda s: basename(s), filenames))
    outputbasename, _ = splitext(prefix + suffix)

    fn_measure = join(dirname(outputbasename), 'common-mvd-grad-map-cache.h5')
    f_measure = h5files.open(fn_measure, 'a')

    def cachelocation(dataname, vesselgroup, tumorgroup, version):
        path = myutils.splitPath(
            posixpath.join(
                splitext(basename(vesselgroup.file.filename))[0],
                vesselgroup.name.strip(posixpath.sep))) + (dataname, )
        return (f_measure, path)

    def cachelocationEnsemble(dataname, groups):
        groupchecksum = myutils.checksum(*map(
            lambda g: str(g.file.filename + '/' + g.name), sum(groups, [])))
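
The cachelocation helpers above return a (cache file, path) pair identifying where derived data for a given group should be stored. A rough illustration of the caching pattern such a pair enables, using the string-valued variant from Example #1 and a hypothetical compute_radial_profile function (neither the call site nor that function appears in the snippets):

f_cache, cachepath = cachelocation(vesselgroup)
if cachepath in f_cache:
    profile = f_cache[cachepath][...]                 # reuse previously computed data
else:
    profile = compute_radial_profile(vesselgroup)     # hypothetical expensive analysis
    f_cache.create_dataset(cachepath, data=profile)   # cache it for the next run
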
Example #3
def doit(filenames, pattern):
    dataman = myutils.DataManager(20, [
        DataTumorTissueSingle(),
        DataDistanceFromCenter(),
        DataBasicVessel(),
        DataVesselSamples(),
        DataVesselRadial(),
        DataVesselGlobal(),
        DataTumorBloodFlow()
    ])

    ensemble = EnsembleFiles(dataman, filenames, pattern)
    if ensemble.has_tumor:
        print 'paths: ', map(lambda (_t0, path, _t1): path,
                             ensemble.tumor_snapshots)
    else:
        print 'paths: ', set(map(lambda e: e.path, ensemble.items))

    prefix, suffix = myutils.splitcommonpresuffix(
        map(lambda s: basename(s), filenames))
    outputbasename, _ = splitext(prefix + suffix)
    fn_measure = outputbasename + '-radial-cache.h5'
    f_measure = h5files.open(fn_measure, 'a')

    def cachelocation(g):
        path = posixpath.join(
            'FileCS_' + myutils.checksum(basename(g.file.filename)),
            g.name.strip(posixpath.sep))
        return (f_measure, path)

    #name = ensemble.items[0].path.split('/')[-1]
    measurementinfo = dict(sample_length=30.,
                           cachelocation_callback=cachelocation,
                           distancemap_spec='radial')

    print 'getting radial curves'
    stuff = []
    stuff2 = []
    # Gather radial curves and an approximate tumor radius for every tumor snapshot.
    for items, path, time in ensemble.tumor_snapshots:
        bins_spec, curves = CollectAllRadialData(dataman, items,
                                                 measurementinfo)
        tumorradius = GetAverageApproximateTumorRadius(dataman, ensemble,
                                                       items)
        stuff.append((time, tumorradius, curves))
        stuff2.append((time, tumorradius, curves, path))

    #output_filename+= '_'+name
    with mpl_utils.PdfWriter(outputbasename + '-radial.pdf') as pdfwriter:
        PlotRadialCurves(pdfwriter, bins_spec, stuff, measurementinfo,
                         ensemble.world_size)

    # Write the averaged radial curves, their masks, and the bin edges to a companion HDF5 file.
    with h5py.File(outputbasename + '-radial.h5', 'w') as f:
        f.attrs['COMMONPREFIX'] = os.path.commonprefix(
            map(lambda s: basename(s), filenames))
        f.create_dataset('files', data=filenames)
        # a set is not array-like for numpy, so convert it to a sorted list first
        f.create_dataset('groups',
                         data=np.asarray(sorted(set(
                             map(lambda item: item.path, ensemble.items))),
                                         dtype=np.str))
        for i, (time, tumorradius, curves, path) in enumerate(stuff2):
            g = f.create_group(path.strip('/').replace('/', '-'))
            g.attrs['TUMORRADIUS'] = tumorradius
            g.attrs['TIME'] = time
            for name, curve in curves.iteritems():
                curve = myutils.MeanValueArray.fromSummation(
                    map(lambda x: x.avg, curve))
                g.create_dataset(name + '/avg', data=np.asarray(curve.avg))
                g.create_dataset(name + '/std', data=np.asarray(curve.std))
                g.create_dataset(name + '/mask', data=~curve.avg.mask)
                g.create_dataset(name + '/std_mean',
                                 data=np.asarray(curve.std_mean))
            g.create_dataset('bins', data=bins_spec.arange())
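
Because the function also writes its results to '<outputbasename>-radial.h5', the curves can be read back later without re-running the analysis. A small reader sketch based on the layout created above; the file name and the curve name 'mvd' are placeholders, since the actual curve names depend on what CollectAllRadialData returns:

import h5py
import numpy as np

with h5py.File('out-radial.h5', 'r') as f:             # placeholder file name
    for groupname, g in f.items():
        if not isinstance(g, h5py.Group):
            continue                                    # skip the 'files' and 'groups' datasets
        bins = np.asarray(g['bins'])                    # bin edges from bins_spec.arange()
        avg = np.asarray(g['mvd/avg'])                  # placeholder curve name
        std = np.asarray(g['mvd/std'])
        print groupname, g.attrs['TIME'], g.attrs['TUMORRADIUS']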