コード例 #1
0
    def obtain_data(self, dataman, dataname, *args):
        """Compute (or load from cache) a histogram of cell distances from the origin.

        For dataname == 'dev_from_sphere', args are
        (this_out_grp_name, no_of_bins, rangeMin, rangeMax).
        Returns (hist_data, hist_edges) via myutils.hdf_data_caching.
        """
        if dataname == 'dev_from_sphere':
            print(args)
            this_out_grp_name = args[0]
            no_of_bins = args[1]
            rangeMin = args[2]
            rangeMax = args[3]

            def read(hdf_cache_grp, data_name):
                # Load previously cached histogram datasets.
                print('read data at: %s' % hdf_cache_grp.name)
                print('data_name: %s ' % data_name)
                hist_data = np.asarray(hdf_cache_grp[data_name + '/hist_data'])
                hist_edges = np.asarray(hdf_cache_grp[data_name +
                                                      '/hist_edges'])
                return (hist_data, hist_edges)

            def write(hdf_cache_grp, data_name):
                # Recompute the histogram from the simulation output file.
                with h5py.File(goodArguments.vbl_simulation_output_filename,
                               'r') as h5_f:
                    # isonAS marks the cell subset of interest; first column only.
                    isonAS = np.asarray(h5_f[this_out_grp_name +
                                             '/vbl/isonAS'],
                                        dtype=bool)
                    isonAS = isonAS[:, 0]
                    pos = np.asarray(h5_f[this_out_grp_name +
                                          '/cells/cell_center_pos'])
                    # Euclidean distance of every cell center from the origin.
                    dist_to_center = np.sqrt(np.sum(np.power(pos, 2), 1))
                    hist, bin_edges = np.histogram(dist_to_center[isonAS],
                                                   bins=no_of_bins,
                                                   range=[rangeMin, rangeMax])

                this_out_grp = hdf_cache_grp.create_group(data_name)
                this_out_grp.create_dataset('hist_data', data=hist)
                this_out_grp.create_dataset('hist_edges', data=bin_edges)
                print('created data at: %s' % this_out_grp.name)

            possible_hdf_group_name = '%s/dev_from_sphere_%s/' % (
                this_out_grp_name, no_of_bins)
            if not possible_hdf_group_name in f_cache:
                f_cache.create_group(possible_hdf_group_name)

            # BUGFIX(review): the original contained a second, unreachable
            # copy of this caching code after this return (it also referenced
            # an undefined name 'endity'); the dead code has been removed.
            return myutils.hdf_data_caching(read, write, f_cache,
                                            possible_hdf_group_name)
コード例 #2
0
def get_flow(datamanager, destination_group, f):
    """Return cached flow arrays for the vessel groups without / with adaption."""
    datanames = ['nA_flow', 'yA_flow']

    def extract_flows(vessel_grp):
        # Only the 'flow' edge property is needed here.
        _, flow_values = krebsutils.read_vessels_from_hdf(vessel_grp, ['flow'])
        return flow_values

    def read(gmeasure, groupname):
        grp = gmeasure[groupname]
        return [grp[name][()] for name in datanames]

    def write(gmeasure, groupname):
        grp = gmeasure.create_group(groupname)
        source_groups = (f['adaption/recomputed'],
                         f['adaption/vessels_after_adaption'])
        for vessel_grp, name in zip(source_groups, datanames):
            grp.create_dataset(name, data=extract_flows(vessel_grp))

    return myutils.hdf_data_caching(read, write, destination_group,
                                    (f.filename), (1, ))
コード例 #3
0
def ComputeRootFlowInfo(dataman, vesselgroup, cachelocation):
    """Cache summary statistics (total flow, counts, mean radii) of root vessels."""
    def write(gmeasure, groupname):
        vessels = dataman.obtain_data('vessel_graph', vesselgroup,
                                      ['flags', 'flow', 'radius'])
        perEdge = zip(vessels['flow'], vessels['radius'])
        arterialData, venousData = GetRootVesselData(vessels, perEdge)
        aFlow, aRadius = zip(*arterialData)
        vFlow, vRadius = zip(*venousData)
        summary = {
            'totalFlow': np.sum(aFlow),
            'arterialCount': len(arterialData),
            'venousCount': len(venousData),
            'avgArterialRadius': np.average(aRadius),
            'avgVenousRadius': np.average(vRadius),
        }
        dst = gmeasure.create_group(groupname)
        # Stored as HDF attributes rather than datasets (small scalars).
        for key, value in summary.iteritems():
            dst.attrs[key] = value

    def read(gmeasure, groupname):
        return dict(gmeasure[groupname].attrs.items())

    return myutils.hdf_data_caching(
        read, write, cachelocation[0],
        ('global', cachelocation[1], 'rootNodeFlowData'), (None, None, 1))
コード例 #4
0
def ComputeIsoTumorSpherePerfusion(dataman, vesselgroup, tumorGroup,
                                   cachelocation):
    """Cache tumor-sphere perfusion: isosurface blood inflow per unit volume."""
    def write(gmeasure, groupname):
        distancemap, ld, volume = ComputeTumorDistanceMapAndVolumeForPerfusion_(
            dataman, tumorGroup)
        vessels = dataman.obtain_data('vessel_graph', vesselgroup,
                                      ['position', 'flags'])
        # Sample the tumor distance map at each vessel node position.
        distSamples = krebsutils.sample_field(vessels['position'],
                                              distancemap,
                                              ld,
                                              linear_interpolation=True)
        flowInfo = ComputeIsosurfaceBloodFlow(dataman, vesselgroup,
                                              distSamples, 0)
        perfusion = flowInfo['flow_in'] / volume
        ds = gmeasure.create_dataset(groupname, data=perfusion)
        ds.attrs['tumorGroup'] = str(tumorGroup)
        ds.attrs['volume'] = volume
        ds.attrs['unit'] = '1 / s'

    def read(gmeasure, groupname):
        return gmeasure[groupname]

    return myutils.hdf_data_caching(
        read, write, cachelocation[0],
        ('global', cachelocation[1], 'isoTumorSphereRBF'), (None, None, 1))
コード例 #5
0
def generate_rBV_of_group(datamanager, destination_group, f):
  """Cache and return the regional blood volume (rBV) of one file's vessel group.

  Uses 'adaption/vessels_after_adaption' when present, else 'vessels'.
  Returns [rbv] (one value per name in datanames) via myutils.hdf_data_caching.
  """
  datanames = 'rbv'.split()
  # structure in HDF file:
  #            gmeasure/groupname/
  #                               rbv  <- dataset

  def read(gmeasure, groupname):
    gmeasure = gmeasure[groupname]
    return [gmeasure[name][()] for name in datanames ]

  def write(gmeasure, groupname):
    if 'adaption' in f.keys():
      vessel_grp = f['adaption/vessels_after_adaption']
    else:
      vessel_grp = f['vessels']
    gmeasure = gmeasure.create_group(groupname)
    # Hoisted out of the loop: the geometric data does not depend on the
    # dataset name, so compute it only once.
    geometric_data = getGeometricData([vessel_grp])
    rbv, a, v, c = geometric_data[:4]
    for name in datanames:
      gmeasure.create_dataset(name, data = rbv)

  ret = myutils.hdf_data_caching(read, write, destination_group, (f.filename), (1, ))
  # gmeasure/groupname is e.g. "/measurements/adaption". (1,) is the version
  # number; None would mean no version check. If the requested version is
  # larger than the stored one, data is recomputed instead of loaded.
  return ret
コード例 #6
0
ファイル: addhematocrit.py プロジェクト: gitUmaru/tumorcode
    def obtain_data(self, dataman, dataname, *args):
        """Dispatch data requests; args[0] is the HDF file, the rest are request args."""
        f, args = args[0], args[1:]

        if dataname == 'flow_w_hematocrit':
            # args[0] is the in-file path of the vessel group to process.
            vesselgroup = f[args[0]]

            def read(gmeasure, name):
                # Load the cached hydrodynamic quantities as numpy arrays.
                grp = gmeasure[name]
                d = dict(
                    hema=grp['edges/h_hema'],
                    flow=grp['edges/h_flow'],
                    force=grp['edges/h_force'],
                    press=grp['nodes/h_press'],
                )
                for k, v in d.iteritems():
                    d[k] = np.asarray(v)
                return d

            def write(gmeasure, name):
                # Recompute hydrodynamics with hematocrit enabled (second arg True).
                press, flow, force, hema = krebsutils.calc_vessel_hydrodynamics(
                    vesselgroup, True)
                grp = gmeasure.create_group(name)
                egrp = grp.create_group('edges')
                ngrp = grp.create_group('nodes')
                # Pressure lives on nodes; flow/hematocrit/force on edges.
                ngrp.create_dataset('h_press', data=press, compression=9)
                egrp.create_dataset('h_flow', data=flow, compression=9)
                egrp.create_dataset('h_hema', data=hema, compression=9)
                egrp.create_dataset('h_force', data=force, compression=9)

            # NOTE(review): posixpath.pathsep is ':' (the PATH list separator),
            # not '/' (posixpath.sep); splitting an HDF group path on ':'
            # yields a single component. Presumably harmless here, but confirm
            # whether posixpath.sep was intended.
            fm = myutils.MeasurementFile(f, h5files, prefix='hematocrit_')
            ret = myutils.hdf_data_caching(read, write, fm,
                                           args[0].split(posixpath.pathsep),
                                           (1, ))
            return ret
コード例 #7
0
def generate_murray_data_of_group(datamanager, vesselgroup, destination_group):
  """Cache Murray's-law branch data (daughter/mother radii and scale factors)."""
  datanames = ('daughter1 daughter2 mother '
               'daughter1_scale daughter2_scale mother_scale').split()
  # structure in HDF file:
  #            gmeasure/groupname/
  #                               daughter1, daughter2, mother     <- datasets
  #                               *_scale counterparts             <- datasets

  def read(gmeasure, groupname):
    grp = gmeasure[groupname]
    return [grp[name][()] for name in datanames]

  def write(gmeasure, groupname):
    branch_radii = ku.get_Murray2_p(vesselgroup)
    scale_factors = ku.get_Murray_scale(vesselgroup)
    values = [branch_radii[0], branch_radii[1], branch_radii[2],
              scale_factors[0], scale_factors[1], scale_factors[2]]
    grp = gmeasure.create_group(groupname)
    for name, data in zip(datanames, values):
      grp.create_dataset(name, data=data)

  # (1,) is the version of the cached group; bump it to force recomputation.
  return myutils.hdf_data_caching(read, write, destination_group,
                                  (vesselgroup.file.filename), (1,))
コード例 #8
0
def generate_adaption_data_of_group_rBF(datamanager, destination_group, f):
  """Cache regional blood flow (rBF, converted to per-minute) for one file.

  Returns [with_adaption_rBF, without_adaption_rBF] via myutils.hdf_data_caching.
  """
  datanames = 'with_adaption_rBF without_adaption_rBF'.split()
  # structure in HDF file:
  #            gmeasure/groupname/
  #                               with_adaption_rBF     <- dataset
  #                               without_adaption_rBF  <- dataset

  def read(gmeasure, groupname):
    gmeasure = gmeasure[groupname]
    return [gmeasure[name][()] for name in datanames ]

  def write(gmeasure, groupname):
    group_without_adaption = f['adaption/recomputed']
    group_with_adaption = f['adaption/vessels_after_adaption']
    gmeasure = gmeasure.create_group(groupname)
    # BUGFIX(review): the groups were previously zipped as [without, with]
    # against datanames [with..., without...], so each dataset stored the
    # other group's perfusion. Pair them correctly here.
    for (group, name) in zip([group_with_adaption, group_without_adaption],
                             datanames):
      # getTotalPerfusion is per second; *60 converts to per minute.
      perfusion_data = getTotalPerfusion([group])*60
      gmeasure.create_dataset(name, data=perfusion_data)

  ret = myutils.hdf_data_caching(read, write, destination_group, (f.filename), (1, ))
  # (1,) is a version number; if it exceeds the stored version the data is
  # recomputed instead of loaded.
  return ret
コード例 #9
0
    def obtain_data(self, dataman, dataname, *args):
        """Render (or load cached) a povray 3D scene; returns it as an image array.

        For dataname == '3drendering', args are (f, group, showVessels, showTumor).
        """
        if dataname == '3drendering':
            import cStringIO
            import povrayRenderTumor
            import PIL.Image as Image

            f, group, showVessels, showTumor = args[0], args[1], args[2], args[
                3]

            ld = dataman.obtain_data('ld', f)
            # Treat the domain as cubic when its z extent is comparable to x.
            is_cubic = ld.shape[2] * 2 > ld.shape[0]

            def read(gmeasure, groupname):
                # The PNG bytes are stored as a byte dataset; decode via PIL.
                ds = np.asarray(gmeasure[groupname])
                memfile = cStringIO.StringIO(ds)
                img = Image.open(memfile)
                arr = np.array(img)
                return arr

            def write(gmeasure, groupname):
                # Render to a uniquely named temp file, then store its bytes.
                tempfn = '%s-%x.png' % (splitext(
                    f.filename)[0], random.getrandbits(32))
                vesselgroup = f[group]['vessels'] if showVessels else None
                tumorgroup = analyzeGeneral.tumor_group(
                    f, group) if showTumor else None
                povrayRenderTumor.renderScene(
                    vesselgroup,
                    tumorgroup,
                    tempfn,
                    res=(800, 800),
                    aa=1,
                    cam='corner' if is_cubic else 'topdown',
                    colored_slice=True,
                    out_alpha=False,
                    num_threads=3,
                    temp_file_dir='/tmp',
                    camera_distance=2. if is_cubic else 1.45)
                # BUGFIX(review): read the PNG in binary mode and close the
                # handle (open(tempfn).read() leaked it and used text mode);
                # np.frombuffer replaces the deprecated np.fromstring.
                # TODO(review): consider removing tempfn after reading.
                with open(tempfn, 'rb') as pngfile:
                    bytestream = np.frombuffer(pngfile.read(), dtype=np.byte)
                gmeasure.create_dataset(groupname, (len(bytestream), ),
                                        data=bytestream)

            dataname = '3drendering' + ('_w_vessels' if showVessels else
                                        '') + ('' if showTumor else '_notum')
            fm = myutils.MeasurementFile(f, h5files)
            ret = myutils.hdf_data_caching(read, write, fm, (group, dataname),
                                           (0, 11))
            return ret
コード例 #10
0
def generate_capillary_hist(dataman, inputfiles, destination_group,
                            destination_name):
    """Cache normalized histograms of capillary flow, without / with adaption."""
    def collect_flows(files):
        # Concatenate the capillary flows of every input file.
        flows_nA = []
        flows_yA = []
        for f in files:
            nA_flow, yA_flow = get_capillary_flow(dataman, destination_group,
                                                  f)
            flows_nA = np.hstack((nA_flow, flows_nA))
            flows_yA = np.hstack((yA_flow, flows_yA))
        return flows_nA, flows_yA

    def write(gmeasure, groupname):
        grp = gmeasure.create_group(groupname)
        # Logarithmic bins spanning 10 .. 10^5.5.
        bins_nA = np.logspace(1, 5.5, 100)
        bins_yA = np.logspace(1, 5.5, 100)
        nA_flows, yA_flows = collect_flows(inputfiles)
        print(nA_flows.shape)
        h1, bins_nA = np.histogram(nA_flows, bins=bins_nA, density=True)
        h2, bins_yA = np.histogram(yA_flows, bins=bins_yA, density=True)
        grp.create_dataset('h_nA', data=h1)
        grp.create_dataset('h_yA', data=h2)
        grp.create_dataset('x_edges_nA', data=bins_nA)
        grp.create_dataset('x_edges_yA', data=bins_yA)

    def read(gmeasure, groupname):
        grp = gmeasure[groupname]
        return (grp['h_nA'], grp['h_yA'],
                grp['x_edges_nA'], grp['x_edges_yA'])

    # Cache version 2: must be higher than the other murray stuff since this
    # opens the cache file for the second time.
    return myutils.hdf_data_caching(read, write, destination_group,
                                    (destination_name, ), (2, ))
コード例 #11
0
def generate_adaption_data_average_rBF(datamanager, inputfiles, destination_group, destination_name):
  """Cache mean and std of rBF (with and without adaption) over all input files."""
  def process(vesselgroups):
    samples = []
    for f in inputfiles:
      rBF_with, rBF_without = generate_adaption_data_of_group_rBF(datamanager, destination_group, f)
      samples.append([rBF_with, rBF_without])
    # Column 0 = with adaption, column 1 = without.
    avg_w, avg_wo = np.average(samples, axis=0)
    std_w, std_wo = np.std(samples, axis=0)
    return avg_w, std_w, avg_wo, std_wo

  def write(gmeasure, groupname):
    grp = gmeasure.create_group(groupname)
    avg_with, std_with, avg_without, std_without = process(inputfiles)
    grp.create_dataset('with_adaption_rBF_avg', data=avg_with)
    grp.create_dataset('with_adaption_rBF_std', data=std_with)
    grp.create_dataset('without_adaption_rBF_avg', data=avg_without)
    grp.create_dataset('without_adaption_rBF_std', data=std_without)

  def read(gmeasure, groupname):
    grp = gmeasure[groupname]
    return (grp['with_adaption_rBF_avg'],
            grp['with_adaption_rBF_std'],
            grp['without_adaption_rBF_avg'],
            grp['without_adaption_rBF_std'])

  return myutils.hdf_data_caching(read, write, destination_group, (destination_name,), (1,))
コード例 #12
0
def generate_murray_beta_histogram(datamanager, betas, destination_group, destination_name):
  """Cache a histogram of Murray beta values over 50 edges in [0, 2.5]."""
  def write(gmeasure, groupname):
    grp = gmeasure.create_group(groupname)
    bin_edges = np.linspace(0.0, 2.5, 50)
    counts, bin_edges = np.histogram(betas, bins=bin_edges)
    grp.create_dataset('h_betas', data=counts)
    grp.create_dataset('x_edges_betas', data=bin_edges)

  def read(gmeasure, groupname):
    grp = gmeasure[groupname]
    return (grp['h_betas'], grp['x_edges_betas'])

  # Cache version number needs to be consistent with the other murray stuff
  # since this opens the cache file for the second time.
  return myutils.hdf_data_caching(read, write, destination_group, (destination_name,), (1,))
コード例 #13
0
def ComputeIsoTumorSphereRescaledPerfusion(dataman, vesselGroup,
                                           tumorVesselGroup, tumorGroup,
                                           cachelocation, cachelocationTumor):
    """Cache the tumor-sphere perfusion rescaled by the system/tumor scale factor."""
    def write(gmeasure, groupname):
        tumorPerfusion = ComputeIsoTumorSpherePerfusion(
            dataman, tumorVesselGroup, tumorGroup, cachelocationTumor)
        scalingFactor = ComputeIsoTumorSpherePerfusionScaleFactor(
            dataman, vesselGroup, tumorVesselGroup, tumorGroup, cachelocation,
            cachelocationTumor)
        # Both operands are HDF datasets; [...] reads their contents.
        rescaled = scalingFactor[...] * tumorPerfusion[...]
        ds = gmeasure.create_dataset(groupname, data=rescaled)
        ds.attrs['unit'] = tumorPerfusion.attrs['unit']

    def read(gmeasure, groupname):
        return gmeasure[groupname]

    return myutils.hdf_data_caching(read, write, cachelocation[0],
                                    ('global', cachelocation[1], 'scaled_RBF'),
                                    (None, None, 1))
コード例 #14
0
def ComputeSystemPerfusion(dataman, vesselgroup, cachelocation):
    """Cache whole-system perfusion: total arterial root inflow per lattice volume."""
    def write(gmeasure, groupname):
        vessels = dataman.obtain_data('vessel_graph', vesselgroup,
                                      ['flags', 'flow'])
        rootArterialFlow, _ = GetRootVesselData(vessels, vessels['flow'])
        totalInflow = np.sum(rootArterialFlow)
        ldvessels = krebsutils.read_lattice_data_from_hdf(
            vesselgroup['lattice'])
        # cumprod(...)[2] = product of the three world-size extents.
        totalVolume = np.cumprod(ldvessels.GetWorldSize())[2]
        ds = gmeasure.create_dataset(groupname, data=totalInflow / totalVolume)
        ds.attrs['unit'] = '1 / s'

    def read(gmeasure, groupname):
        return gmeasure[groupname]

    return myutils.hdf_data_caching(read, write, cachelocation[0],
                                    ('global', cachelocation[1], 'systemRBF'),
                                    (None, None, 1))
コード例 #15
0
def get_capillary_radius(datamanager, destination_group, f):
  """Cache capillary radii for the vessel groups without / with adaption."""
  datanames = ['nA_capillary_radius', 'yA_capillary_radius']

  def capillary_radii(grp):
    # Classify the vessels, then keep only the capillary radii.
    veins, arteries, capillaries = getVesselTypes(grp)
    edges, radii = krebsutils.read_vessels_from_hdf(grp, ['radius'])
    return radii[capillaries]

  def read(gmeasure, groupname):
    grp = gmeasure[groupname]
    return [grp[name][()] for name in datanames]

  def write(gmeasure, groupname):
    grp = gmeasure.create_group(groupname)
    source_groups = (f['adaption/recomputed'],
                     f['adaption/vessels_after_adaption'])
    for group, name in zip(source_groups, datanames):
      grp.create_dataset(name, data=capillary_radii(group))

  return myutils.hdf_data_caching(read, write, destination_group, (f.filename), (1, ))
コード例 #16
0
def ComputeIsoTumorSpherePerfusionScaleFactor(dataman, vesselGroup,
                                              tumorVesselGroup, tumorGroup,
                                              cachelocation,
                                              cachelocationTumor):
    """Cache the dimensionless ratio system perfusion / iso-tumor-sphere perfusion."""
    def write(gmeasure, groupname):
        systemPerfusion = ComputeSystemPerfusion(dataman, vesselGroup,
                                                 cachelocation)
        isoTumorPerfusion = ComputeIsoTumorSpherePerfusion(
            dataman, vesselGroup, tumorGroup, cachelocation)
        # [...] reads the datasets; the ratio has no unit.
        factor = systemPerfusion[...] / isoTumorPerfusion[...]
        ds = gmeasure.create_dataset(groupname, data=factor)
        ds.attrs['unit'] = '1'

    def read(gmeasure, groupname):
        return gmeasure[groupname]

    return myutils.hdf_data_caching(
        read, write, cachelocation[0],
        ('global', cachelocation[1], 'perfusion_scaling_factor'),
        (None, None, 1))
コード例 #17
0
    def obtain_data(self, dataman, dataname, *args):
      """Dispatch vessel-graph data requests (graph, property, system length)."""
      if dataname == 'vessel_graph':
        vesselgroup, properties = args
        graph = krebsutils.read_vessels_from_hdf(vesselgroup, properties, return_graph=True)
        # Attach each requested property to its graph association
        # (edges or nodes) as reported by get_property.
        for prop in properties:
          values, association = self.get_property(dataman, vesselgroup, 'auto', prop)
          getattr(graph, association)[prop] = values
        return graph

      elif dataname == 'vessel_graph_property':
        return self.get_property(dataman, *args)

      elif dataname == 'vessel_system_length':
        group, = args
        def read(gmeasure, groupname):
          return np.asscalar(gmeasure[groupname][...])
        def write(gmeasure, groupname):
          # Total network length = sum over all edge lengths.
          total = np.sum(dataman.obtain_data('vessel_graph_property', group, 'edges', 'length')[0])
          gmeasure.create_dataset(groupname, data=total)
        return myutils.hdf_data_caching(read, write, group, ('vessel_system_length',), (1,))
コード例 #18
0
    def obtain_data(self, dataman, dataname, *args):
        """Compute (or load cached) a 2D histogram for one endity/timepoint.

        For dataname == 'hist_2d_data', args are
        (endity, this_out_grp_name, no_of_bins).
        Returns (h, xedges, yedges) via myutils.hdf_data_caching.
        """
        if dataname == 'hist_2d_data':
            print(args)
            endity = args[0]
            this_out_grp_name = args[1]
            no_of_bins = args[2]

            def read(hdf_cache_grp, data_name):
                # Load the cached histogram and its bin edges.
                print('read data at: %s' % hdf_cache_grp.name)
                print('data_name: %s ' % data_name)
                h = np.asarray(hdf_cache_grp[data_name + '/' + 'h'])
                xedges = np.asarray(hdf_cache_grp[data_name + '/' + 'xedges'])
                yedges = np.asarray(hdf_cache_grp[data_name + '/' + 'yedges'])
                return (h, xedges, yedges)

            def write(hdf_cache_grp, data_name):
                # Recompute the 2D histogram for the requested endity.
                this_out_grp = hdf_cache_grp.create_group(data_name)
                (h, xedges, yedges) = create_2d_histo(endity)
                this_out_grp.create_dataset('h', data=h)
                this_out_grp.create_dataset('xedges', data=xedges)
                this_out_grp.create_dataset('yedges', data=yedges)
                print('created data at: %s' % this_out_grp.name)

            possible_hdf_group_name = '%s/hist_2d_data_bins_%s/' % (
                this_out_grp_name, no_of_bins)
            possible_hdf_group_name = possible_hdf_group_name + '/' + endity
            if not possible_hdf_group_name in f_cache:
                f_cache.create_group(possible_hdf_group_name)
            # BUGFIX(review): this return was previously outside the
            # 'hist_2d_data' branch, so any other dataname raised NameError
            # on the undefined locals; now other datanames return None.
            return myutils.hdf_data_caching(read, write, f_cache,
                                            possible_hdf_group_name)
コード例 #19
0
def generate_adaption_data_average_rBV(datamanager, inputfiles, destination_group, destination_name):
  """Cache mean and standard deviation of rBV over all input files."""
  def process(inputfiles):
    samples = []
    for f in inputfiles:
      samples.append(generate_rBV_of_group(datamanager, destination_group, f))
    return np.average(samples), np.std(samples)

  def write(gmeasure, groupname):
    grp = gmeasure.create_group(groupname)
    avg_rBV, std_rBV = process(inputfiles)
    grp.create_dataset('rBV_avg', data=avg_rBV)
    grp.create_dataset('rBV_std', data=std_rBV)

  def read(gmeasure, groupname):
    grp = gmeasure[groupname]
    return (grp['rBV_avg'], grp['rBV_std'])

  return myutils.hdf_data_caching(read, write, destination_group, (destination_name,), (1,))
コード例 #20
0
ファイル: plotVessels.py プロジェクト: Amenhotep19/tumorcode
def generateRadiusHistogram(dataman,
                            vesselgroups,
                            destination_group,
                            destination_name,
                            filterflags=None):
    """Cache a normalized radius histogram over all circulated vessels.

    Returns (via the cache) the datasets 'h' and 'bin_edges' -- but see the
    NOTE in write() about their apparently swapped contents.
    """
    def process(vesselgroups):
        # Logarithmic radius bins from 10^-1 to 10^1.1.
        bins = np.logspace(-1., 1.1, 50, base=10.)
        result = []
        for g in vesselgroups:
            r = dataman.obtain_data('basic_vessel_samples', 'radius', g, 30.)
            w = dataman.obtain_data('basic_vessel_samples', 'weight', g, 30.)
            f = dataman.obtain_data('basic_vessel_samples', 'flags', g, 30.)
            # Keep only circulated vessels, optionally filtered further.
            i = myutils.bbitwise_and(f, krebsutils.CIRCULATED)
            if filterflags is not None:
                i &= myutils.bbitwise_and(f, filterflags)
            h = myutils.MeanValueArray.fromHistogram1d(bins, r[i], w[i])
            result.append(h)
        result = myutils.MeanValueArray.fromSummation(result)
        y = result.sum
        # Normalize to a probability density over the bin widths.
        y /= np.sum(y)
        y /= (bins[1:] - bins[:-1])
        return bins[:-1], y

    def write(gmeasure, groupname):
        gmeasure = gmeasure.create_group(groupname)
        # NOTE(review): process() returns (bin_edges, density), so 'h' here
        # receives the bin edges and 'bin_edges' the densities -- the names
        # look swapped. Left unchanged because cached files and callers may
        # depend on the current layout; confirm before fixing.
        h, bin_edges = process(vesselgroups)
        gmeasure.create_dataset('h', data=h)
        gmeasure.create_dataset('bin_edges', data=bin_edges)

    def read(gmeasure, groupname):
        gmeasure = gmeasure[groupname]
        return gmeasure['h'], gmeasure['bin_edges']

    ret = myutils.hdf_data_caching(read, write, destination_group,
                                   (destination_name, ), (1, ))
    return ret
コード例 #21
0
ファイル: __init__.py プロジェクト: Amenhotep19/tumorcode
    def obtain_data(self, dataman, dataname, *args):
        """Dispatch detailed-PO2 effective-permeability (Peff) data requests.

        Supported datanames:
          'detailedPO2_peff_samples'    -> per-sample Peff values (cached)
          'detailedPO2_peff_radial'     -> radial Peff distribution (cached)
          'detailedPO2_peffSrho_radial' -> radial Peff * vessel surface
                                           density distribution (cached)
        Any other dataname trips the assert at the bottom.
        """
        if dataname == 'detailedPO2_peff_samples':
            po2group, sample_length, every, cachelocation = args
            gvessels, gtumor = detailedo2.OpenVesselAndTumorGroups(po2group)

            def read(gmeasure, name):
                ds = gmeasure[name]
                # 'every' subsamples the stored values; None returns them all.
                return ds[...] if every is None else ds[::every]

            def write(gmeasure, name):
                samplelocation = MakeSampleLocation(po2group)
                parameters = dataman.obtain_data('detailedPO2Parameters',
                                                 po2group)
                rad = dataman.obtain_data('basic_vessel_samples', 'radius',
                                          gvessels, sample_length)
                hema = dataman.obtain_data('basic_vessel_samples',
                                           'hematocrit', gvessels,
                                           sample_length)
                po2 = dataman.obtain_data('detailedPO2_samples', 'po2',
                                          po2group, sample_length, 1,
                                          samplelocation)
                mtc = detailedo2.computeMassTransferCoefficient(
                    rad, parameters)
                # Fall back to the older parameter name 'alpha_p' if present.
                blood_solubility = parameters.get(
                    'solubility_plasma', parameters.get('alpha_p', None))
                c_o2_total = detailedo2.PO2ToConcentration(
                    po2, hema, parameters)
                c_o2_plasma = detailedo2.PO2ToConcentration(
                    po2, np.zeros_like(hema), parameters
                )  # to get the o2 conc. in plasma i just set the amount of RBCs to zero
                c_o2_plasma *= (
                    1.0 - hema
                )  # and reduce the concentration according to the volume fraction of plasma
                beta_factor = c_o2_plasma / c_o2_total
                peff = (1.0 / blood_solubility) * mtc * beta_factor

                #        print 'po2', np.average(po2)
                #        print 'mtc', np.average(mtc)
                #        print 'hema', np.average(hema)
                #        print 'blood_solubility', blood_solubility
                #        print 'c_o2_total', np.average(c_o2_total)
                #        print 'c_o2_plasma', np.average(c_o2_plasma)
                #        print 'peff', np.average(peff)
                #        print 'beta_factor', np.average(beta_factor)

                gmeasure.create_dataset(name, data=peff, compression=9)

            # Version is tied to the po2 group's UUID so stale caches are
            # recomputed when the underlying data changes.
            version_id = myutils.checksum(sample_length, 1, getuuid_(po2group))
            ret = myutils.hdf_data_caching(
                read, write, cachelocation[0],
                (cachelocation[1], 'samples_and_fluxes', 'Peff'),
                (None, None, version_id))
            return ret

        elif dataname == 'detailedPO2_peff_radial':
            po2group, sample_length, bins_spec, distance_distribution_name, cachelocation = args

            # we assume that there is a tumor. without this measurement makes little sense

            def read(gmeasure, name):
                return myutils.MeanValueArray.read(gmeasure[name])

            def write(gmeasure, name):
                samplelocation = MakeSampleLocation(po2group)
                gvessels, gtumor = detailedo2.OpenVesselAndTumorGroups(
                    po2group)
                # Per-sample Peff values, averaged per vessel over radial bins.
                smpl = dataman.obtain_data('detailedPO2_peff_samples',
                                           po2group, sample_length, None,
                                           samplelocation)

                data, = analyzeGeneral.GenerateRadialDistributions(
                    dataman, gvessels, gtumor, sample_length, bins_spec,
                    distance_distribution_name, None,
                    [(smpl, analyzeGeneral.radialAvgPerVessels)])
                data.write(gmeasure, name)

            version = myutils.checksum(2, getuuid_(po2group))
            ret = analyzeGeneral.HdfCacheRadialDistribution(
                (read, write), 'Peff', bins_spec, distance_distribution_name,
                cachelocation, version)
            return ret

        # IDEA: store config stuff like sample length and distance distribution as member of the data handler for less call args
        # or store it in a common config instance.
        # Then allow to obtain a temporary handle on data which knows its GUID for a quick accurate check if data has changed.
        # The handle probably would have to obtain the GUID from disk but this is it.
        elif dataname == 'detailedPO2_peffSrho_radial':
            po2group, sample_length, bins_spec, distance_distribution_name, cachelocation = args

            def read(gmeasure, name):
                return myutils.MeanValueArray.read(gmeasure[name])

            def write(gmeasure, name):
                samplelocation = MakeSampleLocation(po2group)
                gvessels, gtumor = detailedo2.OpenVesselAndTumorGroups(
                    po2group)
                peff = dataman.obtain_data('detailedPO2_peff_samples',
                                           po2group, sample_length, None,
                                           samplelocation)
                rad = dataman.obtain_data('basic_vessel_samples', 'radius',
                                          gvessels, sample_length)
                # Multiply by the vessel circumference to get Peff * S
                # (surface per length), averaged per volume below.
                peff = peff * math.pi * 2. * rad

                data, = analyzeGeneral.GenerateRadialDistributions(
                    dataman, gvessels, gtumor, sample_length, bins_spec,
                    distance_distribution_name, None,
                    [(peff, analyzeGeneral.radialAvgPerVolume)])
                data.write(gmeasure, name)

            version = myutils.checksum(2, getuuid_(
                po2group))  # this should actually depend on the samples
            ret = analyzeGeneral.HdfCacheRadialDistribution(
                (read, write), 'PeffSrho', bins_spec,
                distance_distribution_name, cachelocation, version)
            return ret

        # Unknown dataname: fail loudly rather than silently returning None.
        assert False
コード例 #22
0
def computePO2(parameters):
  """Run the detailed PO2 computation for a single input group.

  Derives output file/group paths from *parameters* (mutating the dict in
  place) and delegates the actual O2 computation to
  ``detailedo2current.computePO2``.

  Parameters:
    parameters: dict with at least 'input_file_name', 'input_group_path'
      and 'output_file_name'.  The keys 'vessel_group_path',
      'output_group_path', 'output_file_name', 'tumor_file_name' and
      'tumor_group_path' are (re)written here.

  Returns:
    The rewritten output file name (one file per input group).
  """
  print("Computing o2 for file: %s" % parameters['input_file_name'])
  print("at group: %s" % parameters['input_group_path'])
  parameters['vessel_group_path'] = "recomputed_flow"
  parameters['output_group_path'] = "po2/" + parameters['input_group_path']
  # one output file per input group: <basename>-<group>.h5
  output_buffer_name = basename(parameters['output_file_name']).rsplit('.h5')[0]
  parameters['output_file_name'] = "%s-%s.h5" % (output_buffer_name, parameters['input_group_path'])
  print("storing in file: %s at %s" % (parameters['output_file_name'], parameters['output_group_path']))
  tumorgroup = None
  parameters['tumor_file_name'] = 'none'
  parameters['tumor_group_path'] = 'none'

  caching = False
  if caching:
    # ==== recompute flow, cached inside the output file ====
    def read1(gmeasure, name):
      gmeasure = gmeasure[name]
      return myutils.H5FileReference(gmeasure.file.filename, gmeasure.name)

    def write1(gmeasure, name):
      gdst = gmeasure.create_group(name)
      # open read-only: we only copy the vessel network out of the input file
      with h5py.File(parameters['input_file_name'], 'r') as f:
        input_vessel_group = f[parameters['input_group_path']]
        copyVesselnetworkAndComputeFlow(gdst, input_vessel_group, parameters.get("calcflow"))

    # ==== po2 computation, cached inside the output file ====
    def read2(gmeasure, name):
      gmeasure = gmeasure[name]
      return myutils.H5FileReference(gmeasure.file.filename, gmeasure.name)

    def write2(gmeasure, name):
      f_out.create_group("po2")
      detailedo2current.computePO2(parameters, parameters.get('calcflow'))

    # BUGFIX: f_out was previously referenced by the first hdf_data_caching
    # call *before* the `with h5py.File(...) as f_out:` statement assigned it,
    # which raised a NameError whenever caching was enabled.  Open the output
    # file first so both cached steps (and the write2 closure) can use it.
    with h5py.File(parameters['output_file_name'], 'a') as f_out:
      new_flow_data_ref = myutils.hdf_data_caching(read1, write1, f_out, ('recomputed_flow'), (0, 1))
      o2data_ref = myutils.hdf_data_caching(read2, write2, f_out, ('po2'), (0, 1))
  else:
    print("no caching!")
    # at this point all h5 files must be closed on the python side;
    # detailedo2current manages its own file handles
    detailedo2current.computePO2(parameters, parameters.get('calcflow'))
  return parameters['output_file_name']
コード例 #23
0
    def obtain_data(self, dataman, dataname, *args):
        """Cache/compute po2 samples along lines through the vessel network.

        Results are cached in the module-level HDF5 file ``f_cache``.  The
        four supported datanames come in two analogous pairs:

        * 'po2_at_certain_time_point_v' / '..._vo': po2 sampled along a line
          parallel ('_v': vein_parallel_p1..p2) resp. orthogonal
          ('_vo': vein_ortho_p1..p2) to the vein, for a single output group
          (args[0] is the group name, e.g. 'out0350'); returns a tuple
          (average_po2, average_po2_error, distances) of numpy arrays.
        * 'po2_at_all_times_v' / '..._vo': dict mapping each output group
          name in [min_group_id, max_group_id] to that tuple.
        """
        # NOTE(review): grp_pattern is assumed to look like 'outNNNN' so the
        # characters [3:7] are the highest group number -- confirm upstream.
        max_group_id = (int)(goodArguments.grp_pattern[3:7])
        min_group_id = 350
        if dataname == 'po2_at_certain_time_point_v':
            print(args)
            this_out_grp_name = args[0]

            def read(hdf_cache_grp, data_name):
                # return the three cached arrays for this timepoint
                print('data_name: %s' % data_name)
                print('hdf_cache_grp in read')
                print(hdf_cache_grp.keys())
                return (np.asarray(hdf_cache_grp[data_name + '/' +
                                                 'average_po2']),
                        np.asarray(hdf_cache_grp[data_name + '/' +
                                                 'average_po2_error']),
                        np.asarray(hdf_cache_grp[data_name + '/' +
                                                 'distances']))

            def write(hdf_cache_grp, data_name):
                # sample o2 along the line parallel to the vein and store it
                (average_value, errors,
                 distances) = sample_line_general('o2', this_out_grp_name,
                                                  vein_parallel_p1,
                                                  vein_parallel_p2)
                group_of_single_timepoint = hdf_cache_grp.create_group(
                    this_out_grp_name)
                print('hdf_cache_grp')
                print(hdf_cache_grp)
                print(data_name)
                print('before create')
                group_of_single_timepoint.create_dataset('average_po2',
                                                         data=average_value)
                group_of_single_timepoint.create_dataset('average_po2_error',
                                                         data=errors)
                group_of_single_timepoint.create_dataset('distances',
                                                         data=distances)

            return myutils.hdf_data_caching(read, write,
                                            f_cache['/po2_at_all_times_v/'],
                                            this_out_grp_name)
        if dataname == 'po2_at_all_times_v':

            def read(hdf_cache_grp_name, data_name):
                # collect the per-timepoint tuples (cached above) into a dict
                return_dict = dict()
                hdf_data_grp = hdf_cache_grp_name[data_name]
                for out_group_id in np.arange(min_group_id, max_group_id + 1):
                    out_group = 'out%04i' % out_group_id
                    print('reading group: %s' % out_group)
                    (average_value, errors, distances) = dataman.obtain_data(
                        'po2_at_certain_time_point_v', out_group)
                    return_dict[out_group] = (average_value, errors, distances)
                return return_dict

            def write(hdf_cache_grp_name, data_name):
                hdf_data_grp = hdf_cache_grp_name.create_group(data_name)
                ''' here comes the calculation '''
                # trigger computation + caching for every output group;
                # read() above gathers the results afterwards
                for out_group_id in np.arange(min_group_id, max_group_id + 1):
                    out_group_name = 'out%04i' % out_group_id
                    print('calculating group: %s' % out_group_name)
                    dataman.obtain_data('po2_at_certain_time_point_v',
                                        out_group_name)

            return myutils.hdf_data_caching(read, write, f_cache,
                                            'po2_at_all_times_v')
        if dataname == 'po2_at_certain_time_point_vo':
            print(args)
            this_out_grp_name = args[0]

            def read(hdf_cache_grp, data_name):
                # return the three cached arrays for this timepoint
                print('data_name: %s' % data_name)
                print('hdf_cache_grp in read')
                print(hdf_cache_grp.keys())
                return (np.asarray(hdf_cache_grp[data_name + '/' +
                                                 'average_po2']),
                        np.asarray(hdf_cache_grp[data_name + '/' +
                                                 'average_po2_error']),
                        np.asarray(hdf_cache_grp[data_name + '/' +
                                                 'distances']))

            def write(hdf_cache_grp, data_name):
                # same as the '_v' branch, but sampling orthogonal to the vein
                (average_value, errors,
                 distances) = sample_line_general('o2', this_out_grp_name,
                                                  vein_ortho_p1, vein_ortho_p2)
                group_of_single_timepoint = hdf_cache_grp.create_group(
                    this_out_grp_name)
                print('hdf_cache_grp')
                print(hdf_cache_grp)
                print(data_name)
                print('before create')
                group_of_single_timepoint.create_dataset('average_po2',
                                                         data=average_value)
                group_of_single_timepoint.create_dataset('average_po2_error',
                                                         data=errors)
                group_of_single_timepoint.create_dataset('distances',
                                                         data=distances)

            return myutils.hdf_data_caching(read, write,
                                            f_cache['/po2_at_all_times_vo/'],
                                            this_out_grp_name)
        if dataname == 'po2_at_all_times_vo':

            def read(hdf_cache_grp_name, data_name):
                # collect the per-timepoint tuples (cached above) into a dict
                return_dict = dict()
                hdf_data_grp = hdf_cache_grp_name[data_name]
                for out_group_id in np.arange(min_group_id, max_group_id + 1):
                    out_group = 'out%04i' % out_group_id
                    print('reading group: %s' % out_group)
                    (average_value, errors, distances) = dataman.obtain_data(
                        'po2_at_certain_time_point_vo', out_group)
                    return_dict[out_group] = (average_value, errors, distances)
                return return_dict

            def write(hdf_cache_grp_name, data_name):
                hdf_data_grp = hdf_cache_grp_name.create_group(data_name)
                ''' here comes the calculation '''
                # trigger computation + caching for every output group
                for out_group_id in np.arange(min_group_id, max_group_id + 1):
                    out_group_name = 'out%04i' % out_group_id
                    print('calculating group: %s' % out_group_name)
                    dataman.obtain_data('po2_at_certain_time_point_vo',
                                        out_group_name)

            return myutils.hdf_data_caching(read, write, f_cache,
                                            'po2_at_all_times_vo')
コード例 #24
0
    def obtain_data(self, dataman, dataname, *args):
        """Blood-flow measurements for a vessel network, optionally with tumor.

        Supported datanames:
          'blood_flow'      -> dict of scalar flow quantities (cached in HDF5);
                               args = (vesselgroup, tumorgroup, cachelocation)
          'blood_flow_rbf'  -> 'blood_flow' extended by regional blood flow
                               per volume (rBF_total / rBF_tumor) and volumes
          'cum_rbf_radial', 'avg_surf_vessel_rad_radial',
          'sphere_vessel_density'
                            -> radial curves (values, bins) computed on
                               isosurfaces, cached via
                               HdfCacheRadialDistribution
        """
        if dataname == 'blood_flow':
            vesselgroup, tumorgroup, cachelocation = args
            has_tumor = tumorgroup is not None

            def read(gmeasure, groupname):
                # cached result: one scalar dataset per dict key
                return dict((k, float(v[...]))
                            for (k, v) in gmeasure[groupname].iteritems())

            def write(gmeasure, groupname):
                #vessels = krebsutils.read_vesselgraph(vesselgroup, ['flow', 'pressure', 'position', 'flags'])
                vessels = dataman.obtain_data('vessel_graph', vesselgroup,
                                              ['position', 'flags', 'flow'])
                pos = vessels['position']

                if has_tumor:
                    # flow through the theta_tumor = 0.5 isosurface, sampled
                    # at the vessel positions, plus the total network flow
                    ldtumor = dataman.obtain_data('ld', tumorgroup.file)
                    dist = dataman.obtain_data('fieldvariable',
                                               tumorgroup.file, 'theta_tumor',
                                               tumorgroup.name)
                    dist = krebsutils.sample_field(pos,
                                                   dist,
                                                   ldtumor,
                                                   linear_interpolation=True)
                    res = ComputeIsosurfaceBloodFlow(dataman, vesselgroup,
                                                     dist, 0.5)
                    res.update(
                        DataTumorBloodFlow.ComputeTotalBloodFlow_(vessels))
                else:
                    res = DataTumorBloodFlow.ComputeTotalBloodFlow_(vessels)

                g = gmeasure.create_group(groupname)
                for k, v in res.iteritems():
                    g.create_dataset(k, data=v)

            #fm = myutils.MeasurementFile(f, h5files)
            ret = myutils.hdf_data_caching(
                read, write, cachelocation[0],
                ('global', cachelocation[1], 'tissue', 'blood_flow'),
                (1, 1, 1, 3))
            return ret

        if dataname == 'blood_flow_rbf':
            vesselgroup, tumorgroup, cachelocation = args

            has_tumor = tumorgroup is not None
            data = dataman.obtain_data('blood_flow', vesselgroup, tumorgroup,
                                       cachelocation).copy()
            ldvessels = krebsutils.read_lattice_data_from_hdf(
                vesselgroup['lattice'])
            total_flow = data['total_flow_in']
            # cumprod(...)[2] == product of the three world dimensions
            total_volume = np.cumprod(ldvessels.GetWorldSize())[2]
            total_flow_p_volume = total_flow / total_volume
            data['rBF_total'] = total_flow_p_volume
            data['total_volume'] = total_volume
            if has_tumor:
                # tumor volume estimated by integrating theta_tumor over cells
                ldtumor = dataman.obtain_data('ld', tumorgroup.file)
                theta_tumor = dataman.obtain_data('fieldvariable',
                                                  tumorgroup.file,
                                                  'theta_tumor',
                                                  tumorgroup.name)
                tumor_volume = np.sum(theta_tumor) * (ldtumor.scale**3)
                tumor_flow = data['flow_in']
                tumor_flow_p_volume = tumor_flow / tumor_volume
                data['rBF_tumor'] = tumor_flow_p_volume
                data['tumor_volume'] = tumor_volume
                #print 'estimated tumor volume:', tumor_volume
                #print 'tumor flow:', tumor_flow
                #print 'rBF:', tumor_flow_p_volume*60.
            data = dict(map(DataTumorBloodFlow.FixUnit_, data.items()))
            return data

        if dataname in ('cum_rbf_radial', 'avg_surf_vessel_rad_radial',
                        'sphere_vessel_density'):
            vesselgroup, tumorgroup, bins_spec, distance_distribution_name, ld, cachelocation = args
            # per-dataname worker + cache version number
            WorkerFunction = {
                'cum_rbf_radial': ComputeIsosurfaceRegionalBloodFlow,
                'avg_surf_vessel_rad_radial': ComputeIsosurfaceAvgRadius,
                'sphere_vessel_density': ComputeSphereVesselDensity,
            }[dataname]
            version = {
                'cum_rbf_radial': 4,
                'avg_surf_vessel_rad_radial': 3,
                'sphere_vessel_density': 1,
            }[dataname]

            def read(gmeasure, groupname):
                gmeasure = gmeasure[groupname]
                return gmeasure['values'], gmeasure['bins']

            def write(gmeasure, groupname):
                values, bins = ComputeIsosurfaceRadialCurve(
                    dataman, vesselgroup, tumorgroup, bins_spec,
                    distance_distribution_name, ld, cachelocation,
                    WorkerFunction)
                gmeasure = gmeasure.create_group(groupname)
                gmeasure.create_dataset('values', data=values)
                gmeasure.create_dataset('bins', data=bins)

            return krebs.analyzeGeneral.HdfCacheRadialDistribution(
                (read, write), dataname, bins_spec, distance_distribution_name,
                cachelocation, version)
コード例 #25
0
  def obtain_data(self, dataman, dataname, *args):
    """Provide lattice, time and (derived) field data from a tumor file.

    Supported datanames:
      'ld'            -> lattice data read from f['field_ld']; args = (f,)
      'time'          -> group.attrs['time'] in hours
      'fieldvariable' -> a stored or derived field; args are
                         (f, fieldname, groupname[, 'imslice'])
      'fieldvariable_radial'      -> radial distribution of a field
      'approximate_tumor_radius'  -> cached scalar tumor radius
    """
    if dataname == 'ld':
      f, = args
      ld = krebsutils.read_lattice_data_from_hdf(f['field_ld'])
      return ld

    ####
    if dataname == 'time':
      if len (args) == 1:
        group, = args
      else:
        group = args[0][args[1]] # f, group = args
      return group.attrs['time'] # in hours

    #####
    if dataname == 'fieldvariable':
      f, fieldname, group = args[:3]
      g = f[group] # group name to actual group
      tumor_path = tumor_path_(f, group)

      ld = dataman.obtain_data('ld', f)

      def get(name):
        # recursive helper: fetch another field variable of the same group
        return self.obtain_data(dataman, 'fieldvariable', f, name, group)

      if fieldname == 'phi_cells':
        data = f[tumor_path]['conc']
      elif fieldname == 'dist_tumor_':
        data = f[tumor_path]['ls']
      elif fieldname == 'theta_tumor':
        gtumor = f[tumor_path]
        if gtumor.attrs['TYPE'] == 'faketumor':
          data = gtumor['tc_density']
        else:
          data = gtumor['ptc']
      elif fieldname == 'phi_necro':
        data = f[tumor_path]['necro']
      # BUGFIX: these two tests used to read `fieldname in ('oxy')`; the
      # parentheses around a single string do NOT make a tuple, so `in`
      # performed a *substring* match (e.g. 'ox' would have matched 'oxy').
      # One-element tuples restore the intended exact membership test.
      elif fieldname in ('oxy',):
        try:
          data = g['oxy']
        except KeyError:
          # older files store the oxygen field under a different name
          data = g['fieldOxy']
      elif fieldname in ('gf',):
        data = g['fieldGf']
      elif fieldname in ('press', 'sources', 'vel'):
        data = f[tumor_path][fieldname]
      elif fieldname == 'phi_viabletumor':
        theta_tumor = get('theta_tumor')
        phi_cells = get('phi_cells')
        phi_necro = get('phi_necro')
        data = theta_tumor * (phi_cells - phi_necro)
      elif fieldname == 'phi_tumor':
        theta_tumor = get('theta_tumor')
        phi_cells = get('phi_cells')
        data = theta_tumor * phi_cells
      elif fieldname == 'dist_necro':
        phi_necro = get('phi_necro')
        phi_cells = get('phi_cells')
        # contour level: half the average cell density
        tumor_contour_level = 0.5*np.average(phi_cells)
        data = calc_distmap(phi_necro, ld, tumor_contour_level)
      elif fieldname == 'dist_viabletumor':
        def read(gmeasure, dsname):
          return np.asarray(gmeasure[dsname])
        def write(gmeasure, dsname):
          dist_tumor = get('dist_tumor')
          dist_necro = get('dist_necro')
          data = np.maximum(dist_tumor, -dist_necro)
          gmeasure.create_dataset(dsname, data = data, compression = 9)
        fm = myutils.MeasurementFile(f, h5files)
        data = myutils.hdf_data_caching(read, write, fm, (tumor_path, 'dist_viabletumor'), (0,1))
      elif fieldname == 'phi_vessels':
        def read(gmeasure, name):
          return np.asarray(gmeasure[name])
        def write(gmeasure, name):
          phi_vessels = CalcPhiVessels(dataman, f[group]['vessels'], ld, scaling = 1.)
          gmeasure.create_dataset(name, data = phi_vessels, compression = 9)
        fm = myutils.MeasurementFile(f, h5files)
        data = myutils.hdf_data_caching(read, write, fm, (group, 'phi_vessels'), (0,1))
      elif fieldname == 'dist_tumor':
        def read(gmeasure, name):
          return np.asarray(gmeasure[name])
        def write(gmeasure, name):
          # distance map recomputed from the (negated) stored levelset
          ls = get('dist_tumor_')
          ls = -ls
          dist = calc_distmap(np.asarray(ls < 0., dtype=np.float32), ld, 0.5)
          gmeasure.create_dataset(name, data = dist, compression = 9)
        fm = myutils.MeasurementFile(f, h5files)
        data = myutils.hdf_data_caching(read, write, fm, (tumor_path, 'dist_tumor_full'), (0,1))
      else:
        raise RuntimeError('unknown field %s' % fieldname)  # typo fixed: was 'unkown'

      # optional 4th argument requests a 2d image slice instead of the array
      if len(args)>3 and args[3] == 'imslice':
        import plotBulkTissue
        return plotBulkTissue.imslice(data)
      else:
        return np.asarray(data)

    if dataname == 'fieldvariable_radial':
      property_name, tumorgroup, bins_spec, distance_distribution_name, cachelocation = args

      def write(gmeasure, groupname):
        # histogram the field values over the distance map bins
        distmap, ld = obtain_distmap_(dataman, tumorgroup, distance_distribution_name)
        data    = dataman.obtain_data('fieldvariable', tumorgroup.file, property_name, tumorgroup.name)
        bins    = bins_spec.arange()
        a = myutils.MeanValueArray.fromHistogram1d(bins, np.ravel(distmap), np.ravel(data))
        ds = a.write(gmeasure, groupname)
        ds.attrs['BINS_SPEC'] = str(bins_spec)

      def read(gmeasure, groupname):
        assert groupname == property_name
        return myutils.MeanValueArray.read(gmeasure, groupname)

      return HdfCacheRadialDistribution((read, write), property_name, bins_spec, distance_distribution_name, cachelocation, 1)

    if dataname == 'approximate_tumor_radius':
      tumorgroup, = args
      def write(gmeasure, groupname):
        rad = ApproximateTumorRadius(dataman, tumorgroup)
        ds = gmeasure.create_dataset(groupname, data = rad)
      def read(gmeasure, groupname):
        return np.asscalar(gmeasure[groupname][()])
      return myutils.hdf_data_caching(read, write, tumorgroup, ('approximate_tumor_radius',), (1,))
コード例 #26
0
    def obtain_data(self, dataman, dataname, *args):
        """Interface (tumor boundary) based shape and curvature measures.

        args[0] is the HDF5 file; the remaining args are forwarded.
        Supported datanames:
          'field_interfacedelta'  -> smoothed delta function of the tumor
                                     interface (nonzero near dist_tumor == 0)
          'field_curvature'       -> curvature field of the distance map
          'field_curvatureradius' -> radius of curvature r = 2/kappa (masked
                                     where kappa == 0)
          'shape_metrics'         -> dict of area/volume/radius/sphericity
                                     etc., cached in the measurement file
          'curvature_samples'     -> curvature values and weights at the
                                     interface grid cells
        """
        f, args = args[0], args[1:]
        # shorthand: forward to dataman with the file inserted as 2nd arg
        obtain_data = lambda *args: dataman.obtain_data(args[0], f, *args[1:])
        ld = obtain_data('ld')
        if dataname == 'field_interfacedelta':
            group, = args
            distmap = obtain_data('fieldvariable', 'dist_tumor', group)
            return krebsutils.smooth_delta(distmap, 0.5 * ld.scale)
        ###
        if dataname == 'field_curvature':
            group, = args
            distmap = obtain_data('fieldvariable', 'dist_tumor', group)
            curvature = krebsutils.curvature(ld, distmap, False, False)
            return curvature
        ###
        if dataname == 'field_curvatureradius':
            group, = args
            distmap = obtain_data('fieldvariable', 'dist_tumor', group)
            curvature = krebsutils.curvature(ld, distmap, False, False)
            # BUGFIX: the mask used to be `curvature <> 0.`, which masked out
            # every *valid* (nonzero) entry and kept the zeros, so the
            # reciprocal below divided by zero.  Mask zero curvature instead
            # so 2/kappa is well defined.  (`<>` is also invalid in py3.)
            curvature = np.ma.array(curvature, mask=(curvature == 0.))
            # kappa = (1/r0 + 1/r1), r0=r1 -> r = 2/kappa
            curvature = 2. * np.ma.power(curvature, -1.)
            return curvature
        ###
        if dataname == 'shape_metrics':
            group, = args

            def read(gmeasure, groupname):
                # cached result: one scalar dataset per dict key
                return dict((k, float(v[...]))
                            for (k, v) in gmeasure[groupname].iteritems())

            def write(gmeasure, groupname):
                dim = 2 if ld.shape[2] == 1 else 3
                cellvol = ld.scale**dim
                if 0:
                    # disabled alternative: surface area via vtk extraction
                    vtkds, = extractVtkFields.Extractor(
                        statedata['tumor'], ['ls']).asVtkDataSets()
                    area = integrate_surface(vtkds)
                    del vtkds
                theta_tumor = obtain_data('fieldvariable', 'theta_tumor',
                                          group)
                vol = np.sum(theta_tumor) * cellvol
                delta = obtain_data('field_interfacedelta', group)
                # grid cells contributing to the interfacial area
                interface_indices = np.nonzero(delta)
                interface_weights = delta[interface_indices]
                # area estimate
                area = np.sum(delta) * cellvol
                # radius estimate: delta-weighted mean distance from origin
                radialmap = krebsutils.make_radial_field(ld)
                radius = np.average(radialmap[interface_indices],
                                    weights=interface_weights)
                # comparison with the area of a sphere/cylinder with the same volume as the shape
                if dim == 3:
                    sphere_equiv_radius = math.pow(vol * 3. / (4. * math.pi),
                                                   1. / 3.)
                    sphere_equiv_area = 4. * math.pi * (sphere_equiv_radius**2)
                    cylinder_equiv_radius = math.sqrt(
                        vol / ld.GetWorldSize()[2] / math.pi)
                    cylinder_equiv_area = (2. * math.pi *
                                           cylinder_equiv_radius *
                                           ld.GetWorldSize()[2])
                else:
                    # pi r^2 = A -> r = (A/pi)^(1/2)
                    sphere_equiv_radius = math.pow(vol / math.pi, 1. / 2.)
                    sphere_equiv_area = 2. * math.pi * sphere_equiv_radius
                    cylinder_equiv_radius = sphere_equiv_radius
                    cylinder_equiv_area = sphere_equiv_area
                # how much of a perfect sphere/cylinder the shape is
                sphericity = sphere_equiv_area / area
                cylindericity = cylinder_equiv_area / area

                res = dict(area=area,
                           volume=vol,
                           radius=radius,
                           sphericity=sphericity,
                           cylindericity=cylindericity,
                           sphere_equiv_radius=sphere_equiv_radius,
                           sphere_equiv_area=sphere_equiv_area,
                           cylinder_equiv_radius=cylinder_equiv_radius,
                           cylinder_equiv_area=cylinder_equiv_area)
                g = gmeasure.create_group(groupname)
                for k, v in res.iteritems():
                    g.create_dataset(k, data=v)
                gmeasure.file.flush()

            fm = myutils.MeasurementFile(f, h5files)
            ret = myutils.hdf_data_caching(read, write, fm,
                                           (group, 'shape_metrics'), (0, 1))
            return ret
        ###
        if dataname == 'curvature_samples':
            group, = args
            delta = obtain_data('field_interfacedelta', group)
            interface_indices = np.nonzero(delta)
            curv = obtain_data('field_curvature', group)
            return curv[interface_indices], delta[interface_indices]
コード例 #27
0
    def obtain_data(self, dataman, dataname, *args):
        if dataname == 'intervascular_map_common_ld':
            vesselgroup, tumorgroup = args
            fieldLd, fieldLdFine = self.makeLD(vesselgroup)
            return fieldLd, fieldLdFine

        if dataname == 'intervascular_map_tumor_mask':
            vesselgroup, tumorgroup, fieldLd, fieldLdFine = args
            print 'intervascular_map_tumor_mask', str(vesselgroup)

            def read(gmeasure, groupname):
                return gmeasure[groupname]

            def write(gmeasure, groupname):
                distmap, _ = analyzeGeneral.obtain_distmap_(
                    dataman, tumorgroup, 'levelset', fieldLd)
                mask = distmap < -fieldLd.scale - 100.
                gmeasure.create_dataset(groupname, data=mask, compression=9)

            myfile, mycachelocation, version = self.makeCacheLocation(
                dataname, args, 'mask', 3)
            return myutils.hdf_data_caching(read, write, myfile,
                                            mycachelocation, version)

        if dataname == 'local_mvd_map':
            vesselgroup, tumorgroup, fieldLd, fieldLdFine = args
            print 'local_mvd_map', str(vesselgroup)

            def read(gmeasure, groupname):
                return gmeasure[groupname]

            def write(gmeasure, groupname):
                weight = dataman.obtain_data('basic_vessel_samples', 'weight',
                                             vesselgroup, self.sample_length)
                flags = dataman.obtain_data('basic_vessel_samples', 'flags',
                                            vesselgroup, self.sample_length)
                position = dataman.obtain_data('basic_vessel_samples',
                                               'position', vesselgroup,
                                               self.sample_length)
                mask = myutils.bbitwise_and(flags, krebsutils.CIRCULATED)
                flags = flags[mask]
                position = position[mask, ...]
                weight = weight[mask]
                ####### put into bins
                eps = 1.0 - 1.e-15
                x0, x1, y0, y1, z0, z1 = fieldLd.worldBox
                ranges = [
                    np.arange(x0, x1, fieldLd.scale * eps),
                    np.arange(y0, y1, fieldLd.scale * eps),
                    np.arange(z0, z1, fieldLd.scale * eps),
                ]
                mvd, _ = np.histogramdd(position, bins=ranges, weights=weight)
                mvd *= 1.e6 / (fieldLd.scale**3)
                ####### save
                gmeasure.create_dataset(groupname,
                                        data=mvd,
                                        compression=9,
                                        dtype=np.float32)

            myfile, mycachelocation, version = self.makeCacheLocation(
                dataname, args, 'mvd')
            return myutils.hdf_data_caching(read, write, myfile,
                                            mycachelocation, version)

        if dataname == 'intervascular_pressure_map':
            vesselgroup, tumorgroup, fieldLd, fieldLdFine = args
            print 'intervascular_pressure_map', str(vesselgroup)

            def read(gmeasure, groupname):
                return gmeasure[groupname]

            def write(gmeasure, groupname):
                graph = dataman.obtain_data(
                    'vessel_graph', vesselgroup,
                    ['position', 'radius', 'flags', 'pressure'])
                graph = graph.get_filtered(
                    myutils.bbitwise_and(graph['flags'],
                                         krebsutils.CIRCULATED))
                edgevalues = graph['pressure']
                edgevalues = edgevalues[graph.edgelist]
                # main calculation
                thefield = krebsutils.CalcIntervascularInterpolationField(
                    graph.edgelist, graph['radius'], graph['position'],
                    edgevalues, fieldLdFine, 1.)
                del edgevalues
                # gradient of the interpolated blood pressure
                thegrad = scipy.ndimage.gaussian_filter(thefield,
                                                        1.0,
                                                        0,
                                                        mode='nearest')
                gradfield_ = krebsutils.field_gradient(thegrad)
                thegrad = np.sum([np.square(g) for g in gradfield_], axis=0)
                thegrad = np.sqrt(thegrad)
                del gradfield_, thefield
                # now we scale down the highres version, TODO: make it work with other than 3 dimensions
                # first local average
                m = self.fine_bin_subdivision
                kernel = np.ones((m, m, m), dtype=np.float32)
                thegrad = scipy.signal.fftconvolve(thegrad,
                                                   kernel,
                                                   mode='valid')
                # then pick every m'th which contains the average of m finer boxes combined
                thegrad = np.ascontiguousarray(thegrad[::m, ::m, ::m])
                assert all(thegrad.shape == fieldLd.shape)
                gmeasure.create_dataset(groupname,
                                        data=thegrad,
                                        compression=9,
                                        dtype=np.float32)

            myfile, mycachelocation, version = self.makeCacheLocation(
                dataname, args, 'grad')
            return myutils.hdf_data_caching(read, write, myfile,
                                            mycachelocation, version)

        if dataname == 'intervascular_map_correlations':
            listofgroups, = args

            def read(gmeasure, groupname):
                gmeasure = gmeasure[groupname]
                return gmeasure

            def write(gmeasure, groupname):
                # Cache-miss path: gather flattened per-voxel samples
                # (fileindex, mask, mvd, grad) from every
                # (vessels-before, vessels-after, tumor-after) triple and
                # store them as four parallel 1D datasets plus a filename
                # mapping that 'fileindex' indexes into.
                # NOTE(review): Python-2-only names (`unicode`, `np.int`,
                # list-returning `zip`/`map`) — would need porting for Py3.
                allSamples = []
                filemapping = []
                for i, (vesselgroup_before, vesselgroup_after,
                        tumorgroup_after) in enumerate(listofgroups):
                    # lattice descriptors shared by all maps of this triple
                    fieldLd, fieldLdFine = dataman.obtain_data(
                        'intervascular_map_common_ld', vesselgroup_before,
                        None)
                    mask = dataman.obtain_data('intervascular_map_tumor_mask',
                                               vesselgroup_after,
                                               tumorgroup_after, fieldLd,
                                               fieldLdFine)
                    mvd = dataman.obtain_data('local_mvd_map',
                                              vesselgroup_after, None, fieldLd,
                                              fieldLdFine)
                    grad = dataman.obtain_data('intervascular_pressure_map',
                                               vesselgroup_before, None,
                                               fieldLd, fieldLdFine)
                    # flatten the 2D/3D maps so each voxel becomes one sample
                    mvd = np.asarray(mvd).ravel()
                    grad = np.asarray(grad).ravel()
                    mask = np.asarray(mask).ravel()
                    # one row per voxel: (group index i, mask, mvd, grad)
                    data = (i * np.ones(mask.shape, dtype=np.int), mask, mvd,
                            grad)
                    allSamples += zip(*data)
                    # remember which files/groups index i refers to
                    stuff = map(
                        unicode.encode,
                        (vesselgroup_before.file.filename,
                         vesselgroup_before.name, vesselgroup_after.name))
                    filemapping += stuff
                # transpose the row list back into four parallel columns
                allSamples = zip(*allSamples)
                gmeasure = gmeasure.create_group(groupname)
                gmeasure.create_dataset(
                    'fileindex',
                    data=allSamples[0])  # index into filemapping array
                gmeasure.create_dataset(
                    'mask', data=allSamples[1]
                )  # equal true for samples within tumor (?)
                gmeasure.create_dataset('mvd', data=allSamples[2])
                gmeasure.create_dataset('grad', data=allSamples[3])
                gmeasure.create_dataset('filemapping',
                                        data=np.asarray(filemapping))

            # Resolve the cache file and hierarchical location for this
            # ensemble, then delegate to the HDF caching helper, which calls
            # the read/write closures defined above on hit/miss.
            myfile, mycachelocation, = self.cachelocationEnsembleFactory(
                dataname, listofgroups)
            # Version tuple: only the leaf level of the cache path is
            # versioned (currently 4); intermediate levels are unversioned.
            version = (None, ) * (len(mycachelocation) - 1) + (4, )
            return myutils.hdf_data_caching(read, write, myfile,
                                            mycachelocation, version)

        if dataname == 'intervascular_global_correlations':
            # Reduce the per-voxel correlation samples to a single averaged
            # (mvd, grad) pair per (fileindex, mask) combination, i.e. one
            # value per source file and per inside/outside-tumor region.
            listofgroups, = args
            group = dataman.obtain_data('intervascular_map_correlations',
                                        listofgroups)
            sorteddata = collections.defaultdict(list)
            # load the four parallel 1D sample arrays
            # (Python 2: map/zip return lists here)
            stuff = map(lambda s: np.asarray(group[s]),
                        'fileindex mask mvd grad'.split())
            stuff = zip(*stuff)  # transposed
            # group (mvd, grad) pairs by (fileindex, mask)
            for fileindex, mask, mvd, grad in stuff:
                sorteddata[fileindex, mask].append((mvd, grad))
            result = []
            for (fileindex, mask), mvd_grad in sorteddata.iteritems():
                mvd_grad = np.average(
                    mvd_grad, axis=0)  # result: 2 elements: (mvd, grad)
                result.append((fileindex, mask, mvd_grad[0], mvd_grad[1]))
            # deterministic order: sort by fileindex, then re-transpose into
            # four parallel arrays keyed by name
            result = sorted(result, key=lambda t: t[0])
            result = zip(*result)
            result = dict(
                zip('fileindex mask mvd grad'.split(), map(np.asarray,
                                                           result)))
            return result  # returns dict of fileindex, mask, mvd, grad
コード例 #28
0
ファイル: __init__.py プロジェクト: Amenhotep19/tumorcode
    def obtain_data(self, dataman, dataname, *args):
        """Compute or fetch cached detailed-PO2 derived quantities.

        Dispatches on ``dataname``:
          'detailedPO2Parameters'    -> parameter dict of the po2 group
          'detailedPO2'              -> (po2vessels, lattice, po2field, params)
          'detailedPO2_consumption'  -> O2 uptake field
          'detailedPO2_samples' /
          'detailedPO2_total_fluxes' -> per-sample vessel data / flux dict
          'detailedPO2_global'       -> scalar (avg, std) per property
          'detailedPO2_radial'       -> radial distributions around the tumor

        Remaining ``*args`` are branch specific (see the unpacking at the top
        of each branch).  Heavy results are memoized in HDF5 through
        ``myutils.hdf_data_caching``.  NOTE(review): Python 2 code
        (``iteritems``, list-returning ``filter``/``map``).
        """
        if dataname == 'detailedPO2Parameters':
            po2group, = args
            return detailedo2.readParameters(po2group)

        if dataname == 'detailedPO2':
            po2group, = args
            a = np.asarray(po2group['po2vessels'])
            # normalize to shape (2, N): one PO2 value per vessel end
            # ('!=' replaces the legacy '<>' operator; same semantics)
            if a.shape[0] != 2: a = np.transpose(a)
            po2field = po2group['po2field']
            ld = krebsutils.read_lattice_data_from_hdf(po2group['field_ld'])
            parameters = dataman.obtain_data('detailedPO2Parameters', po2group)
            return a, ld, po2field, parameters

        if dataname == 'detailedPO2_consumption':
            po2group, tumorgroup = args
            return detailedo2.computeO2Uptake(po2group, tumorgroup)

        if dataname == 'detailedPO2_samples' or dataname == 'detailedPO2_total_fluxes':
            prop, po2group, sample_length, every, cachelocation = args

            def read(gmeasure, name):
                gmeasure = gmeasure[name]
                if dataname == 'detailedPO2_samples':
                    if prop == 'gtv':
                        # transvascular conductance: jtv divided by a
                        # diffusion*solubility factor; falls back to the
                        # legacy parameter names kD_tissue / alpha_t
                        parameters = dataman.obtain_data(
                            'detailedPO2Parameters', po2group)
                        gvessels, gtumor = detailedo2.OpenVesselAndTumorGroups(
                            po2group)
                        jtv = dataman.obtain_data('detailedPO2_samples', 'jtv',
                                                  *args[1:])
                        radius = (
                            60.e-4 *
                            parameters.get('D_tissue',
                                           parameters.get('kD_tissue', None)) *
                            parameters.get('solubility_tissue',
                                           parameters.get('alpha_t', None)))
                        gtv = jtv / radius
                        return gtv
                    elif prop == 'sat':
                        # saturation is derived from the cached po2 samples
                        po2 = dataman.obtain_data('detailedPO2_samples', 'po2',
                                                  *args[1:])
                        parameters = dataman.obtain_data(
                            'detailedPO2Parameters', po2group)
                        return detailedo2.PO2ToSaturation(po2, parameters)
                    else:
                        # plain stored samples, optionally strided by 'every'
                        ds = gmeasure['smpl_' + prop]
                        ds = ds[...] if every is None else ds[::every]
                        return ds
                else:
                    # collect the stored flux_* scalars and derive the
                    # relative conservation errors e1..e3 (in percent)
                    keys = filter(lambda k: k.startswith('flux_'),
                                  gmeasure.keys())
                    fluxes = dict(map(lambda k: (k[5:], gmeasure[k][()]),
                                      keys))
                    fluxes['e1'] = abs(
                        100. * (fluxes['Jin_root'] - fluxes['Jout_root'] -
                                fluxes['Jout_tv']) / fluxes['Jin_root'])
                    fluxes['e2'] = abs(
                        100. *
                        (fluxes['Jin_root'] - fluxes['Jout_root'] -
                         fluxes['Jout_cons'] - fluxes.get('tv_cons', 0.)) /
                        fluxes['Jin_root'])
                    fluxes['e3'] = abs(
                        100. * (fluxes['Jout_tv'] - fluxes['Jout_cons'] -
                                fluxes.get('tv_cons', 0.)) / fluxes['Jout_tv'])
                    return fluxes

            def write(gmeasure, name):
                # do the sampling
                gvessels, gtumor = detailedo2.OpenVesselAndTumorGroups(
                    po2group)
                smpl, fluxes = detailedo2.sampleVessels(
                    po2group, gvessels, gtumor, sample_length)
                cons = dataman.obtain_data('detailedPO2_consumption', po2group,
                                           gtumor)
                # get the raw data, just for the consumption flux
                po2vessels, ld, po2field, parameters = dataman.obtain_data(
                    'detailedPO2', po2group)
                avgCons = np.asarray(myutils.largeDatasetAverage(cons),
                                     dtype=np.float64)
                # total consumption = mean uptake * voxel count * voxel
                # volume; np.prod replaces the deprecated np.product alias
                # (consistent with the usage in the 'mro2_by_j' branch)
                fluxes['Jout_cons'] = avgCons * np.prod(
                    cons.shape) * (ld.scale * 1.e-4)**3
                del po2vessels, po2field, ld

                gmeasure = gmeasure.create_group(name)
                for k, v in smpl.iteritems():
                    gmeasure.create_dataset('smpl_' + k,
                                            data=v,
                                            compression=9,
                                            dtype=np.float32)
                for k, v in fluxes.iteritems():
                    gmeasure.create_dataset('flux_' + k,
                                            data=v)  # scalar dataset

            version_id = myutils.checksum(sample_length, 3, getuuid_(po2group))
            ret = myutils.hdf_data_caching(
                read, write, cachelocation[0],
                (cachelocation[1], 'samples_and_fluxes'), (None, version_id))
            return ret

        if dataname == 'detailedPO2_global':
            prop, po2group, sample_length, cachelocation = args
            samplelocation = MakeSampleLocation(po2group)

            def write(gmeasure, measurename):
                assert prop == measurename
                gvessels, gtumor = detailedo2.OpenVesselAndTumorGroups(
                    po2group)
                if prop in ['po2', 'sat', 'gtv', 'jtv']:
                    # weighted average over all vessel samples
                    w = dataman.obtain_data('basic_vessel_samples', 'weight',
                                            gvessels, sample_length)
                    d = dataman.obtain_data('detailedPO2_samples', prop,
                                            po2group, sample_length, 1,
                                            samplelocation)
                    gmeasure.create_dataset(prop,
                                            data=myutils.WeightedAverageStd(
                                                d, weights=w))
                elif prop in ['sat_vein', 'sat_capi', 'sat_art']:
                    # saturation restricted to one vessel class, circulated
                    # and outside the tumor
                    w = dataman.obtain_data('basic_vessel_samples', 'weight',
                                            gvessels, sample_length)
                    d = dataman.obtain_data('detailedPO2_samples', 'sat',
                                            po2group, sample_length, 1,
                                            samplelocation)
                    f = dataman.obtain_data('basic_vessel_samples', 'flags',
                                            gvessels, sample_length)
                    mask = ~myutils.bbitwise_and(
                        f, krebsutils.WITHIN_TUMOR) & myutils.bbitwise_and(
                            f, krebsutils.CIRCULATED)
                    m = {
                        'sat_vein': krebsutils.VEIN,
                        'sat_capi': krebsutils.CAPILLARY,
                        'sat_art': krebsutils.ARTERY
                    }
                    mask &= myutils.bbitwise_and(f, m[prop])
                    d, w = d[mask], w[mask]
                    gmeasure.create_dataset(prop,
                                            data=myutils.WeightedAverageStd(
                                                d, weights=w))
                elif prop in [
                        'e1', 'e2', 'e3', 'Jin_root', 'Jout_root', 'Jout_tv',
                        'tv_cons', 'Jout_cons'
                ]:
                    # scalar fluxes; stored as (value, 0) to mimic (avg, std)
                    d = dataman.obtain_data('detailedPO2_total_fluxes', prop,
                                            po2group, sample_length, 1,
                                            samplelocation)
                    gmeasure.create_dataset(prop, data=[d[prop], 0])
                elif prop == 'po2_tissue':
                    _, po2ld, po2field, parameters = dataman.obtain_data(
                        'detailedPO2', po2group)
                    d = myutils.largeDatasetAverageAndStd(po2field)
                    gmeasure.create_dataset(prop, data=d)
                elif prop == 'mro2':
                    uptakefield = detailedo2.computeO2Uptake(po2group, gtumor)
                    d = myutils.largeDatasetAverageAndStd(uptakefield)
                    gmeasure.create_dataset(prop, data=d)
                elif prop in ('sat_via_hb_ratio', 'vfhb_oxy', 'vfhb_deoxy',
                              'vfhb'):
                    # hemoglobin volume per sample: cylinder volume
                    # (weight * r^2 * pi) times hematocrit; normalized either
                    # by total hb volume (sat ratio) or by domain volume
                    weight = dataman.obtain_data('basic_vessel_samples',
                                                 'weight', gvessels,
                                                 sample_length)
                    flags = dataman.obtain_data('basic_vessel_samples',
                                                'flags', gvessels,
                                                sample_length)
                    mask = myutils.bbitwise_and(flags, krebsutils.CIRCULATED)
                    hema = dataman.obtain_data('basic_vessel_samples',
                                               'hematocrit', gvessels,
                                               sample_length)[mask]
                    sat = dataman.obtain_data('detailedPO2_samples', 'sat',
                                              po2group, sample_length, None,
                                              samplelocation)[mask]
                    rad = dataman.obtain_data('basic_vessel_samples', 'radius',
                                              gvessels, sample_length)[mask]
                    weight = weight[mask]
                    hbvolume = weight * rad * rad * math.pi * hema
                    ld = krebsutils.read_lattice_data_from_hdf(
                        po2group['field_ld'])
                    # np.prod replaces the deprecated np.product alias
                    volume = np.prod(ld.GetWorldSize())
                    if prop == 'sat_via_hb_ratio':
                        result = np.sum(hbvolume * sat) / np.sum(hbvolume)
                    elif prop == 'vfhb_oxy':
                        result = np.sum(hbvolume * sat) / volume
                    elif prop == 'vfhb_deoxy':
                        result = np.sum(hbvolume * (1. - sat)) / volume
                    elif prop == 'vfhb':
                        result = np.sum(hbvolume) / volume
                    gmeasure.create_dataset(prop, data=[result, 0.])
                elif prop in ('chb_oxy', 'chb_deoxy', 'chb'):
                    # hemoglobin concentration = volume fraction * chb_of_rbcs
                    m = {
                        'chb_oxy': 'vfhb_oxy',
                        'chb_deoxy': 'vfhb_deoxy',
                        'chb': 'vfhb'
                    }
                    result = dataman.obtain_data('detailedPO2_global', m[prop],
                                                 po2group, sample_length,
                                                 cachelocation)
                    result = result * detailedo2.chb_of_rbcs
                    gmeasure.create_dataset(prop, data=[result, 0.])
                elif prop == 'mro2_by_j':
                    # MRO2 estimated from the transvascular flux per volume.
                    # FIX: samplelocation was missing from this call; the
                    # 'detailedPO2_total_fluxes' branch unpacks five args and
                    # would raise ValueError on four (the sibling 'oef'
                    # branch below already passes it).
                    fluxes = dataman.obtain_data('detailedPO2_total_fluxes',
                                                 prop, po2group, sample_length,
                                                 1, samplelocation)
                    ld = krebsutils.read_lattice_data_from_hdf(
                        po2group['field_ld'])
                    worldbb = ld.worldBox
                    result = fluxes['Jout_tv'] / np.prod(worldbb[1] -
                                                         worldbb[0]) * 1.e12
                    gmeasure.create_dataset(prop, data=[result, 0.])
                elif prop == 'oef':
                    # oxygen extraction fraction from root in/out fluxes
                    fluxes = dataman.obtain_data('detailedPO2_total_fluxes',
                                                 prop, po2group, sample_length,
                                                 1, samplelocation)
                    result = (fluxes['Jin_root'] -
                              fluxes['Jout_root']) / fluxes['Jin_root']
                    gmeasure.create_dataset(prop, data=[result, 0.])

                else:
                    assert False

            def read(gmeasure, measurename):
                d = np.asarray(gmeasure[measurename])
                if measurename in ('chb_oxy', 'chb', 'chb_deoxy'):
                    # scale factor matches the one applied in the radial
                    # branch's read() for the chb_* quantities
                    d *= 1.e6
                return d[0]  # its a tuple (avg, std), we want avg now.

            version_num = collections.defaultdict(lambda: 3)
            version_id = myutils.checksum(sample_length, version_num[prop],
                                          getuuid_(po2group))
            return myutils.hdf_data_caching(read, write, cachelocation[0],
                                            ('global', cachelocation[1], prop),
                                            (1, 1, version_id))

        if dataname == 'detailedPO2_radial':
            po2group, sample_length, bins_spec, distance_distribution_name, cachelocation = args
            samplelocation = MakeSampleLocation(po2group)

            # we assume that there is a tumor. without this measurement makes little sense

            def read(gmeasure, name):
                d = dict(gmeasure[name].items())
                d = dict(
                    (k, myutils.MeanValueArray.read(v)) for k, v in d.items())
                # derive total hb, the saturation from the oxy/total ratio,
                # and the chb_* concentrations from the stored vfhb curves
                hbo = d['vfhb_oxy']
                hbd = d['vfhb_deoxy']
                hb = myutils.MeanValueArray(hbo.cnt, hbo.sum + hbd.sum,
                                            hbo.sqr + hbd.sqr)
                d['vfhb'] = hb
                sat = hbo.avg / hb.avg
                d['sat_via_hb_ratio'] = myutils.MeanValueArray(
                    np.ones_like(hb.cnt), sat, sat * sat)
                d['chb_oxy'] = d['vfhb_oxy'] * detailedo2.chb_of_rbcs * 1.e6
                d['chb_deoxy'] = d['vfhb_deoxy'] * detailedo2.chb_of_rbcs * 1.e6
                d['chb'] = d['vfhb'] * detailedo2.chb_of_rbcs * 1.e6
                return d

            def write(gmeasure, name):
                gvessels, gtumor = detailedo2.OpenVesselAndTumorGroups(
                    po2group)
                weight_smpl = dataman.obtain_data('basic_vessel_samples',
                                                  'weight', gvessels,
                                                  sample_length)
                flags = dataman.obtain_data('basic_vessel_samples', 'flags',
                                            gvessels, sample_length)
                # get the radial distance function (either distance from tumor border or distance from center)
                dist_smpl, distmap, mask, tumor_ld = dataman.obtain_data(
                    'distancemap_samples', gvessels, gtumor, sample_length,
                    distance_distribution_name, None)
                # tumor_ld might actually be a unrelated lattice

                # filter uncirculated
                mask = mask & myutils.bbitwise_and(flags,
                                                   krebsutils.CIRCULATED)
                dist_smpl = dist_smpl[mask]
                weight_smpl = weight_smpl[mask]

                bins = bins_spec.arange()
                gmeasure = gmeasure.create_group(name)

                # radial histograms of the basic per-sample quantities
                # (loop variable renamed from 'name' so it no longer shadows
                # the enclosing parameter)
                for smpl_name in ['po2', 'extpo2', 'jtv', 'sat', 'gtv', 'dS_dx']:
                    smpl = dataman.obtain_data('detailedPO2_samples', smpl_name,
                                               po2group, sample_length, None,
                                               samplelocation)
                    myutils.MeanValueArray.fromHistogram1d(
                        bins, dist_smpl, smpl[mask],
                        w=weight_smpl).write(gmeasure, smpl_name)
                del smpl

                # tissue po2: resample the field onto the tumor lattice, then
                # histogram it against the distance map
                _, po2ld, po2field, parameters = dataman.obtain_data(
                    'detailedPO2', po2group)
                po2field = krebsutils.resample_field(np.asarray(po2field),
                                                     po2ld.worldBox,
                                                     tumor_ld.shape,
                                                     tumor_ld.worldBox,
                                                     order=1,
                                                     mode='nearest')
                myutils.MeanValueArray.fromHistogram1d(bins, distmap.ravel(),
                                                       po2field.ravel()).write(
                                                           gmeasure,
                                                           'po2_tissue')
                del po2field

                # same treatment for the O2 uptake field (mro2)
                uptakefield = detailedo2.computeO2Uptake(po2group, gtumor)
                uptakefield = krebsutils.resample_field(uptakefield,
                                                        po2ld.worldBox,
                                                        tumor_ld.shape,
                                                        tumor_ld.worldBox,
                                                        order=1,
                                                        mode='nearest')
                myutils.MeanValueArray.fromHistogram1d(
                    bins, distmap.ravel(),
                    uptakefield.ravel()).write(gmeasure, 'mro2')
                del uptakefield

                # hemoglobin volume per radial bin, normalized by each bin's
                # tissue volume (vol_per_bin overrides the sample counts)
                hema = dataman.obtain_data('basic_vessel_samples',
                                           'hematocrit', gvessels,
                                           sample_length)[mask]
                sat = dataman.obtain_data('detailedPO2_samples', 'sat',
                                          po2group, sample_length, None,
                                          samplelocation)[mask]
                rad = dataman.obtain_data('basic_vessel_samples', 'radius',
                                          gvessels, sample_length)[mask]
                hbvolume = weight_smpl * rad * rad * math.pi * hema
                vol_per_bin = myutils.MeanValueArray.fromHistogram1d(
                    bins, distmap.ravel(), np.ones_like(
                        distmap.ravel())).cnt * (tumor_ld.scale**3)
                tmp = myutils.MeanValueArray.fromHistogram1d(
                    bins, dist_smpl, hbvolume * sat)
                tmp.cnt = vol_per_bin.copy()
                tmp.write(gmeasure, 'vfhb_oxy')
                tmp = myutils.MeanValueArray.fromHistogram1d(
                    bins, dist_smpl, hbvolume * (1. - sat))
                tmp.cnt = vol_per_bin.copy()
                tmp.write(gmeasure, 'vfhb_deoxy')
                del tmp, hbvolume, vol_per_bin

            version = getuuid_(po2group)
            ret = analyzeGeneral.HdfCacheRadialDistribution(
                (read, write), 'po2', bins_spec, distance_distribution_name,
                cachelocation, version)
            return ret
        # unknown dataname
        assert False
コード例 #29
0
    for k,v in params.iteritems():
      pgroup.attrs[k] = v
    pgroup2 = pgroup.create_group('calcflow')
    for k,v in bfparams.iteritems():
      pgroup2.attrs[k] = v
    # ----
    detailedo2.computePO2_(po2group, vesselgroup, None, po2params)
    # generated samples here as well for completeness and ease of debugging. put the sample data under gmeasure/name/samples.
    fluxes = dataman.obtain_data('detailedPO2_total_fluxes', '', po2group, sample_length, 1, (gmeasure.parent, name))

  def read(gmeasure, name):
    """Cache hit: hand back the stored HDF5 item for `name` unchanged."""
    cached = gmeasure[name]
    return cached


  return myutils.hdf_data_caching(read, write, f,
                                  (config_name,),
                                  (version,))



def GenerateSingleCapillarySamples(dataman, po2group, cachelocation, properties = ['po2','sat', 'extpo2']):
  # Sample the requested per-vessel properties of the (single-capillary)
  # network in `po2group` and compute an ordering by x-position.
  # NOTE(review): `sample_length` is a free name here — presumably a
  # module-level constant; confirm it is defined at module scope.
  # NOTE(review): the mutable default `properties` is only iterated, never
  # mutated, so it is harmless as written.
  # NOTE(review): this definition appears truncated in this chunk (it ends
  # right after computing `ordering`).
  vesselgroup, _ = detailedo2.OpenVesselAndTumorGroups(po2group)
  smpl = dict(
    weight = dataman.obtain_data('basic_vessel_samples', 'weight', vesselgroup, sample_length),
    pos = dataman.obtain_data('basic_vessel_samples', 'position', vesselgroup, sample_length),
  )
  for prop in properties:
    smpl[prop] = dataman.obtain_data('detailedPO2_samples', prop, po2group, sample_length, 1, cachelocation)
  # sort by x-position
  x = smpl['pos'][:,0]
  ordering = np.argsort(x)
コード例 #30
0
ファイル: __init__.py プロジェクト: Amenhotep19/tumorcode
def ObtainOxygenExtractionFraction(dataman, po2group, cachelocation):
    """Compute and HDF5-cache oxygen flow totals and extraction fractions.

    Sums convective O2 flow (blood flow * O2 concentration) over vessels
    crossing the tumor boundary and over root-attached vessels of the whole
    network, then derives extraction fractions (in - out)/in with first-order
    error estimates.  Returns a dict of floats (keys: see ``res`` in
    ``write``).  NOTE(review): Python 2 code (``iteritems``).
    """
    def read(gmeasure, groupname):
        # cached scalar datasets -> plain python floats
        return dict(
            (k, float(v[...])) for (k, v) in gmeasure[groupname].iteritems())

    def write(gmeasure, groupname):
        gvessels, gtumor = detailedo2.OpenVesselAndTumorGroups(po2group)
        parameters = dataman.obtain_data('detailedPO2Parameters', po2group)
        vessels = dataman.obtain_data(
            'vessel_graph', gvessels,
            ['flow', 'pressure', 'position', 'flags', 'hematocrit'])
        pos = vessels['position']
        press = vessels['pressure']
        flow = vessels['flow']
        flags = vessels['flags']
        hema = vessels['hematocrit']
        po2vessels = np.asarray(po2group['po2vessels'])
        # duplicate hematocrit for both vessel ends so it matches the
        # two-column layout of po2vessels
        hema = np.column_stack((hema, hema))
        conc = detailedo2.PO2ToConcentration(po2vessels, hema, parameters)
        #sat  = detailedo2.PO2ToSaturation(po2vessels, parameters)
        # per-vessel O2 flow = blood flow * mean concentration of the two
        # ends; half the end-to-end concentration difference serves as an
        # uncertainty estimate (flow_diff)
        conc_diff = np.abs(conc[:, 1] - conc[:, 0])
        conc = np.average(conc, axis=1)
        flow_diff = 0.5 * flow * conc_diff
        flow = flow * conc
        roots = set(gvessels['nodes/roots'][...])
        del hema
        del conc
        del conc_diff
        del po2vessels

        if gtumor:
            # sample the tumor indicator field at the node positions;
            # after subtracting 0.5 the sign marks inside/outside
            ldtumor = dataman.obtain_data('ld', gtumor.file)
            dist = dataman.obtain_data('fieldvariable', gtumor.file,
                                       'theta_tumor', gtumor.name)
            dist = krebsutils.sample_field(pos,
                                           dist,
                                           ldtumor,
                                           linear_interpolation=True)
            dist = dist - 0.5  # is greater 0 inside the tumor?!

        total_flow_in, total_flow_out = 0., 0.
        flow_in, flow_out = 0., 0.
        flow_in_err, flow_out_err, total_flow_in_err, total_flow_out_err = 0., 0., 0., 0.
        for i, (a, b) in enumerate(vessels.edgelist):
            if not (flags[i] & krebsutils.CIRCULATED): continue
            # tumor boundary crossing: the pressure gradient decides whether
            # this edge carries O2 into or out of the tumor
            if gtumor and dist[a] < 0 and dist[b] > 0:  # b is in the tumor
                if press[a] < press[b]:
                    flow_out += flow[i]
                    flow_out_err += flow_diff[i]
                else:
                    flow_in += flow[i]
                    #print 'tumor inflow of sat', sat[i]
                    flow_in_err += flow_diff[i]
            elif gtumor and dist[a] > 0 and dist[b] < 0:  # a is in the tumor
                if press[a] > press[b]:
                    flow_out += flow[i]
                    flow_out_err += flow_diff[i]
                else:
                    flow_in += flow[i]
                    #print 'tumor inflow of sat', sat[i]
                    flow_in_err += flow_diff[i]
            # network boundary: root-attached arteries feed O2 in,
            # root-attached veins drain it out
            if (a in roots or b in roots):
                if flags[i] & krebsutils.ARTERY:
                    #print 'total inflow of sat', sat[i]
                    total_flow_in += flow[i]
                    total_flow_in_err += flow_diff[i]
                elif flags[i] & krebsutils.VEIN:
                    #print 'total inflow of sat', sat[i]
                    total_flow_out += flow[i]
                    total_flow_out_err += flow_diff[i]

        # first-order error propagation for f = (a - b)/a given
        # uncertainties da, db
        Err = lambda a, b, da, db: abs(1.0 / a - (a - b) / a / a) * da + abs(
            1.0 / a) * db

        res = dict(tumor_o2_in=flow_in,
                   tumor_o2_out=flow_out,
                   total_o2_out=total_flow_out,
                   total_o2_in=total_flow_in,
                   oef_total=(total_flow_in - total_flow_out) / total_flow_in,
                   oef_tumor=(flow_in - flow_out) / flow_in,
                   flow_in_err=flow_in_err,
                   flow_out_err=flow_out_err,
                   total_flow_in_err=total_flow_in_err,
                   total_flow_out_err=total_flow_out_err,
                   oef_total_err=Err(total_flow_in, total_flow_out,
                                     total_flow_in_err, total_flow_out_err),
                   oef_err=Err(flow_in, flow_out, flow_in_err, flow_out_err))
        #print 'computed oef: '
        #pprint.pprint(res)
        # store every scalar as its own dataset under the cache group
        g = gmeasure.create_group(groupname)
        for k, v in res.iteritems():
            g.create_dataset(k, data=v)

    # version tuple: only the leaf cache level is versioned (currently 8)
    ret = myutils.hdf_data_caching(
        read, write, cachelocation[0],
        ('global', cachelocation[1], 'oxygen_extraction'), (
            None,
            None,
            8,
        ))
    return ret