def __init__(self, estimator=None):
    self.estimator = estimator

    self.data_raw = np.array([float("NaN")])   # linear data storage
    self.error_raw = np.array([float("NaN")])  # linear data storage

    self.name = ""
    self.unit = ""

    self.dettyp = None  # Dose, Fluence, LET etc...

    # optional first differential axis
    self.diff_axis1 = MeshAxis(n=1,
                               min_val=float("NaN"),
                               max_val=float("NaN"),
                               name="",
                               unit="",
                               binning=MeshAxis.BinningType.linear)

    # optional second differential axis
    self.diff_axis2 = MeshAxis(n=1,
                               min_val=float("NaN"),
                               max_val=float("NaN"),
                               name="",
                               unit="",
                               binning=MeshAxis.BinningType.linear)
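
# Illustrative sketch (not part of the reader code): the MeshAxis objects used above behave
# like an immutable namedtuple, so axes are updated by building replacements with _replace()
# rather than being mutated in place. The stand-in definition below only mirrors the field
# names seen in this file; the real pymchelper class may carry extra behaviour.
from collections import namedtuple

DemoMeshAxis = namedtuple("DemoMeshAxis", ["n", "min_val", "max_val", "name", "unit", "binning"])

axis = DemoMeshAxis(n=1, min_val=float("NaN"), max_val=float("NaN"), name="", unit="", binning="linear")
axis = axis._replace(n=10, min_val=0.0, max_val=5.0, name="Z", unit="cm")  # returns a new tuple
print(axis.n, axis.min_val, axis.max_val)  # -> 10 0.0 5.0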
def parse_usrbin(self, estimator):
    """
    USRBIN scores the distribution of one of several quantities in a regular spatial
    structure (binning detector) independent of the geometry.

    :param estimator: an Estimator object, will be modified here and filled with data
    """
    try:
        usr_object = Usrbin(self.filename)

        # loop over all detectors (pages) in the USRBIN object
        for det_no, detector in enumerate(usr_object.detector):
            page = Page(estimator=estimator)
            page.title = detector.name

            # USRBIN doesn't support differential binning, only spatial binning is allowed
            estimator.x = MeshAxis(n=detector.nx, min_val=detector.xlow, max_val=detector.xhigh,
                                   name="X", unit="cm", binning=MeshAxis.BinningType.linear)
            estimator.y = MeshAxis(n=detector.ny, min_val=detector.ylow, max_val=detector.yhigh,
                                   name="Y", unit="cm", binning=MeshAxis.BinningType.linear)
            estimator.z = MeshAxis(n=detector.nz, min_val=detector.zlow, max_val=detector.zhigh,
                                   name="Z", unit="cm", binning=MeshAxis.BinningType.linear)

            page.name = "scorer {}".format(detector.score)
            page.unit = ""

            # unpack detector data
            # TODO cross-check if reshaping is needed
            page.data_raw = np.array(unpackArray(usr_object.readData(det_no)))
            page.error_raw = np.empty_like(page.data_raw)

            estimator.add_page(page)
        return usr_object
    except IOError:
        return None
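
# Sketch related to the "cross-check if reshaping is needed" TODO above: if the flat
# page.data_raw array ever needs to be viewed as a 3-D cube, the axis sizes stored in
# estimator.x/y/z can drive the reshape. This assumes the data is written with the X index
# varying fastest (Fortran-style ordering, typical for FLUKA output, but worth verifying).
import numpy as np

def usrbin_cube(flat_data, nx, ny, nz):
    """Return an (nx, ny, nz) view of a flat USRBIN score array (assumption: Fortran order)."""
    return np.asarray(flat_data).reshape((nx, ny, nz), order="F")

cube = usrbin_cube(np.arange(2 * 3 * 4, dtype=float), nx=2, ny=3, nz=4)
print(cube.shape)  # -> (2, 3, 4)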
def __init__(self):
    """
    Create dummy estimator object.

    >>> e = Estimator()
    """
    self.x = MeshAxis(n=1,
                      min_val=float("NaN"),
                      max_val=float("NaN"),
                      name="",
                      unit="",
                      binning=MeshAxis.BinningType.linear)
    self.y = self.x
    self.z = self.x

    self.number_of_primaries = 0  # number of histories simulated
    self.file_counter = 0         # number of files read
    self.file_corename = ""       # common core for paths of contributing files
    self.file_format = ""         # binary file format of the input files
    self.error_type = ErrorEstimate.none
    self.geotyp = None            # MSH, CYL, etc...
    self.pages = ()               # empty tuple of pages at the beginning
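
# Sketch (an assumption, not the actual pymchelper implementation): the readers below call
# estimator.add_page(page) and then use estimator.pages[-1], which is consistent with
# add_page appending to the immutable pages tuple and linking the page to its estimator.
# A minimal stand-alone illustration of that pattern:
class _DemoEstimator:
    def __init__(self):
        self.pages = ()  # empty tuple of pages at the beginning

    def add_page(self, page):
        page.estimator = self   # let the page know which estimator owns it
        self.pages += (page,)   # tuples are immutable, so rebuild with the new page appended


class _DemoPage:
    def __init__(self, estimator=None):
        self.estimator = estimator


demo = _DemoEstimator()
demo.add_page(_DemoPage())
assert demo.pages[-1].estimator is demo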
def read_data(self, estimator):
    logger.debug("Reading: " + self.filename)

    with open(self.filename, "rb") as f:
        d1 = np.dtype([('magic', 'S6'), ('end', 'S2'), ('vstr', 'S16')])
        _x = np.fromfile(f, dtype=d1, count=1)  # read the file header into numpy
        logger.debug("Magic : " + _x['magic'][0].decode('ASCII'))
        logger.debug("Endianness: " + _x['end'][0].decode('ASCII'))
        logger.debug("VerStr: " + _x['vstr'][0].decode('ASCII'))

        # set to True once axis units have been read from an ASCII token,
        # so that unit ids read later do not overwrite them
        _has_geo_units_in_ascii = False

        while f:
            token = read_next_token(f)
            if token is None:
                break

            token_id, token_type, payload_len, raw_payload = token
            payload = [None] * payload_len

            # decode all strings (currently there will never be more than one per token)
            if 'S' in token_type.decode('ASCII'):
                for i, _j in enumerate(raw_payload):
                    payload[i] = raw_payload[i].decode('ASCII').strip()
            else:
                payload = raw_payload
            if payload_len == 1:
                payload = payload[0]

            try:
                token_name = SHBDOTagID(token_id).name
                logger.debug("Read token {:s} (0x{:02x}) value {} type {:s} length {:d}".format(
                    token_name, token_id, raw_payload, token_type.decode('ASCII'), payload_len))
            except ValueError:
                logger.info("Found unknown token (0x{:02x}) value {} type {:s} length {:d}, skipping".format(
                    token_id, raw_payload, token_type.decode('ASCII'), payload_len))

            # geometry type
            if SHBDOTagID.geometry_type == token_id:
                estimator.geotyp = SHGeoType[payload.strip().lower()]

            if SHBDOTagID.geo_n_bins == token_id:
                estimator.x = estimator.x._replace(n=payload[0])
                estimator.y = estimator.y._replace(n=payload[1])
                estimator.z = estimator.z._replace(n=payload[2])

            if SHBDOTagID.geo_p_start == token_id:
                estimator.x = estimator.x._replace(min_val=payload[0])
                estimator.y = estimator.y._replace(min_val=payload[1])
                estimator.z = estimator.z._replace(min_val=payload[2])

            if SHBDOTagID.geo_q_stop == token_id:
                estimator.x = estimator.x._replace(max_val=payload[0])
                estimator.y = estimator.y._replace(max_val=payload[1])
                estimator.z = estimator.z._replace(max_val=payload[2])

            if SHBDOTagID.geo_unit_ids == token_id and not _has_geo_units_in_ascii:
                estimator.x = estimator.x._replace(unit=unit_name_from_unit_id.get(payload[0], ""))
                estimator.y = estimator.y._replace(unit=unit_name_from_unit_id.get(payload[1], ""))
                estimator.z = estimator.z._replace(unit=unit_name_from_unit_id.get(payload[2], ""))

            # Units may also be given as pure ASCII directly from the new SHIELD-HIT12A .bdo format.
            # If available, use the units embedded in the .bdo file instead of the ones set by pymchelper.
            if SHBDOTagID.geo_units == token_id:
                _units = payload.split(";")
                if len(_units) == 3:
                    estimator.x = estimator.x._replace(unit=_units[0])
                    estimator.y = estimator.y._replace(unit=_units[1])
                    estimator.z = estimator.z._replace(unit=_units[2])
                    _has_geo_units_in_ascii = True

            # page (detector) type, it begins a new page block
            if SHBDOTagID.detector_type == token_id:
                # here a new page is added to the estimator structure
                estimator.add_page(Page())
                logger.debug("Setting page.dettyp = {} ({})".format(SHDetType(payload), SHDetType(payload).name))
                estimator.pages[-1].dettyp = SHDetType(payload)

            # page (detector) data is the last page-related thing saved in the binary file,
            # at this point all other page-related tags should already be processed
            if SHBDOTagID.data_block == token_id:
                logger.debug("Setting page data = {}".format(np.asarray(payload)))
                estimator.pages[-1].data_raw = np.asarray(payload)

            # read tokens based on the tag <-> name mapping for the detector
            if token_id in detector_name_from_bdotag:
                logger.debug("Setting detector.{} = {}".format(detector_name_from_bdotag[token_id], payload))
                setattr(estimator, detector_name_from_bdotag[token_id], payload)

            # read tokens based on the tag <-> name mapping for pages
            if token_id in page_tags_to_save:
                logger.debug("Setting page.{} = {}".format(SHBDOTagID(token_id).name, payload))
                setattr(estimator.pages[-1], SHBDOTagID(token_id).name, payload)

        # The loop over the file ends here.
        # Check if we have differential scoring, i.e. data dimension is larger than 1:
        for page in estimator.pages:
            try:
                page.diff_axis1 = MeshAxis(n=page.page_diff_size[0],
                                           min_val=page.page_diff_start[0],
                                           max_val=page.page_diff_stop[0],
                                           name="",
                                           unit=page.page_diff_units.split(";")[0],
                                           binning=MeshAxis.BinningType.linear)
            except AttributeError:
                logger.info("Lack of data for first-level differential scoring")
            except IndexError:
                logger.info("Lack of units for first-level differential scoring")

            try:
                page.diff_axis2 = MeshAxis(n=page.page_diff_size[1],
                                           min_val=page.page_diff_start[1],
                                           max_val=page.page_diff_stop[1],
                                           name="",
                                           unit=page.page_diff_units.split(";")[1],
                                           binning=MeshAxis.BinningType.linear)
            except AttributeError:
                logger.info("Lack of data for second-level differential scoring")
            except IndexError:
                logger.info("Lack of units for second-level differential scoring")

        # Copy the SH12A-specific units into the general placeholders:
        for page in estimator.pages:
            page.unit = page.detector_unit
            # In the future a user may optionally give a more specific name in the SH12A detect.dat file,
            # which may then be written to the .bdo file. If the name is not set,
            # use the official detector name instead:
            if not page.name:
                page.name = str(page.dettyp)

    estimator.file_format = 'bdo2019'

    logger.debug("Done reading bdo file.")
    return True
def parse_usrbdx(self, estimator):
    """
    USRBDX defines a detector for a boundary-crossing fluence or current estimator.

    :param estimator: an Estimator object, will be modified here and filled with data
    """
    try:
        usr_object = Usrbdx(self.filename)

        # loop over all detectors (pages) in the USRBDX object
        for det_no, detector in enumerate(usr_object.detector):
            page = Page(estimator=estimator)
            page.title = detector.name
            page.area = detector.area  # area of the detector in cm**2

            if detector.nb == 1:
                energy_binning = MeshAxis.BinningType.linear
                angle_binning = MeshAxis.BinningType.linear
            elif detector.nb == -1:
                energy_binning = MeshAxis.BinningType.logarithmic
                angle_binning = MeshAxis.BinningType.linear
            elif detector.nb == 2:
                energy_binning = MeshAxis.BinningType.linear
                angle_binning = MeshAxis.BinningType.logarithmic
            elif detector.nb == -2:
                energy_binning = MeshAxis.BinningType.logarithmic
                angle_binning = MeshAxis.BinningType.logarithmic
            else:
                return Exception("Invalid binning type")

            # USRBDX doesn't support spatial (XYZ) binning.
            # USRBDX provides double-differential binning; the first axis is kinetic energy (in GeV)
            page.diff_axis1 = MeshAxis(
                n=detector.ne,           # number of energy intervals for scoring
                min_val=detector.elow,   # minimum kinetic energy for scoring (GeV)
                max_val=detector.ehigh,  # maximum kinetic energy for scoring (GeV)
                name="kinetic energy",
                unit="GeV",
                binning=energy_binning)

            # the second axis is solid angle (in steradians)
            page.diff_axis2 = MeshAxis(
                n=detector.na,           # number of angular bins
                min_val=detector.alow,   # minimum solid angle for scoring
                max_val=detector.ahigh,  # maximum solid angle for scoring
                name="solid angle",
                unit="sr",
                binning=angle_binning)

            # detector.fluence corresponds to i2 in WHAT(1) of the first USRBDX card
            if detector.fluence == 1:
                page.name = "fluence"
            elif detector.fluence == 0:
                page.name = "current"
            else:
                page.name = ""

            page.unit = "cm-2 GeV-1 sr-1"

            # TODO If the generalised particle is 208.0 (ENERGY) or 211.0 (EM-ENRGY),
            # the quantity scored is differential energy fluence (if cosine-weighted)
            # or differential energy current (energy crossing the surface).
            # In both cases the quantity will be expressed in GeV per cm2
            # per energy unit per steradian per primary.

            # unpack detector data
            # TODO cross-check if reshaping is needed
            page.data_raw = np.array(unpackArray(usr_object.readData(det_no)))
            page.error_raw = np.empty_like(page.data_raw)

            estimator.add_page(page)
        return usr_object
    except IOError:
        return None
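
# Usage note (sketch): the if/elif ladder above maps detector.nb to an (energy, angle)
# binning pair; the same mapping can be expressed as a lookup table, which makes the four
# supported codes (1, -1, 2, -2) explicit. The binning names below are stand-ins for
# MeshAxis.BinningType members.
_BINNING_FROM_NB = {
    1: ("linear", "linear"),
    -1: ("logarithmic", "linear"),
    2: ("linear", "logarithmic"),
    -2: ("logarithmic", "logarithmic"),
}

def binning_from_nb(nb):
    """Return (energy_binning, angle_binning) for a USRBDX nb code, or None if unsupported."""
    return _BINNING_FROM_NB.get(nb)

print(binning_from_nb(-2))  # -> ('logarithmic', 'logarithmic')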
def parse_usrtrack(self, estimator):
    """
    USRTRACK defines a detector for a track-length fluence estimator.

    :param estimator: an Estimator object, will be modified here and filled with data
    """
    try:
        usr_object = UsrTrack(self.filename)

        # loop over all detectors (pages) in the USRTRACK object
        for det_no, detector in enumerate(usr_object.detector):
            page = Page(estimator=estimator)
            page.title = detector.name
            page.volume = detector.volume  # volume of the detector in cm**3

            # USRTRACK doesn't support spatial (XYZ) binning
            if detector.type == 1:
                energy_binning = MeshAxis.BinningType.linear
            elif detector.type == -1:
                energy_binning = MeshAxis.BinningType.logarithmic
            else:
                return Exception("Invalid binning type")

            # USRTRACK provides single-differential binning, with the differential axis in kinetic energy (in GeV)
            page.diff_axis1 = MeshAxis(
                n=detector.ne,           # number of energy intervals for scoring
                min_val=detector.elow,   # minimum kinetic energy for scoring (GeV)
                max_val=detector.ehigh,  # maximum kinetic energy for scoring (GeV)
                name="kinetic energy",
                unit="GeV",
                binning=energy_binning)

            page.name = "fluence"
            page.unit = "cm-2 GeV-1"

            # TODO IMPORTANT! The results of USRTRACK are always given as DIFFERENTIAL
            # distributions of fluence (or tracklength, if the detector region volume is
            # not specified) in energy, in units of cm-2 GeV-1 (or cm GeV-1) per incident
            # primary unit weight. Thus, for example, when requesting a fluence energy
            # spectrum, to obtain INTEGRAL BINNED results (fluence in cm-2 or tracklength
            # in cm PER ENERGY BIN per primary) one must multiply the value of each energy
            # bin by the width of the bin (even for logarithmic binning).

            # TODO If the generalised particle is 208 (ENERGY) or 211 (EM-ENRGY), the
            # quantity scored is differential energy fluence (or tracklength, if the
            # detector region volume is not specified), expressed in GeV per cm2 (or
            # cm GeV) per energy unit per primary. That can sometimes lead to confusion
            # since GeV cm-2 GeV-1 = cm-2, where energy does not appear. Note that
            # integrating over energy one gets GeV/cm2.

            # unpack detector data
            # TODO cross-check if reshaping is needed
            page.data_raw = np.array(unpackArray(usr_object.readData(det_no)))
            page.error_raw = np.empty_like(page.data_raw)

            estimator.add_page(page)
        return usr_object
    except IOError:
        return None
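
# Worked sketch for the "IMPORTANT" TODO above: converting the differential fluence
# (cm-2 GeV-1) returned by USRTRACK into integral per-bin fluence (cm-2) means multiplying
# each bin by its energy width, which for logarithmic binning must be computed from the
# log-spaced bin edges. The helper name and its arguments are illustrative only.
import numpy as np

def integral_per_bin(diff_values, e_low, e_high, n_bins, log_binning=False):
    """Multiply differential values by their bin widths (linear or logarithmic energy grid)."""
    if log_binning:
        edges = np.logspace(np.log10(e_low), np.log10(e_high), n_bins + 1)
    else:
        edges = np.linspace(e_low, e_high, n_bins + 1)
    widths = np.diff(edges)  # bin widths in GeV
    return np.asarray(diff_values) * widths

# e.g. a flat differential spectrum of 2.0 cm-2 GeV-1 over 0.1-10 GeV in 5 logarithmic bins
print(integral_per_bin([2.0] * 5, e_low=0.1, e_high=10.0, n_bins=5, log_binning=True))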
def read_header(self, estimator):
    logger.info("Reading header: " + self.filename)

    estimator.tripdose = 0.0
    estimator.tripntot = -1  # effective read

    # first figure out if this is a VOXSCORE card
    header_dtype = np.dtype([('__fo1', '<i4'), ('geotyp', 'S10')])
    header = np.fromfile(self.filename, header_dtype, count=1)
    if not header:
        print("File {:s} has unknown format".format(self.filename))
        return None

    if 'VOXSCORE' in header['geotyp'][0].decode('ascii'):
        header_dtype = np.dtype([('__fo1', '<i4'),      # 0x00
                                 ('geotyp', 'S10'),     # 0x04
                                 ('__fo2', '<i4'),      # 0x0E
                                 ('__fo3', '<i4'),      # 0x12
                                 ('nstat', '<i4'),      # 0x16 : nstat
                                 ('__fo4', '<i4'),      # 0x1A
                                 ('__foo1', '<i4'),     # 0x1E
                                 ('tds', '<f4'),        # 0x22 : tripdose
                                 ('__foo2', '<i4'),     # 0x26
                                 ('__foo3', '<i4'),     # 0x2A
                                 ('tnt', '<i8'),        # 0x2E : tripntot
                                 ('__foo4', '<i4'),     # 0x36
                                 ('__fo5', '<i4'),      # 0x3A
                                 # DET has 8x float64
                                 ('det', ('<f8', 8)),   # 0x3E : DET
                                 ('__fo6', '<i4'),      # 0x7E
                                 ('__fo7', '<i4'),      # 0x82
                                 # IDET has 11x int32
                                 ('idet', '<i4', 11),   # 0x86 : IDET
                                 ('__fo8', '<i4'),      # 0xB2
                                 ('reclen', '<i4')])    # 0xB6
        # payload starts at 0xBA (186)
        estimator.payload_offset = 186
    else:
        # first figure out the length
        header_dtype = np.dtype([('__fo1', '<i4'),
                                 ('geotyp', 'S10'),
                                 ('__fo2', '<i4'),
                                 ('__fo3', '<i4'),
                                 ('nstat', '<i4'),
                                 ('__fo4', '<i4'),
                                 ('__fo5', '<i4'),
                                 # DET has 8x float64
                                 ('det', ('<f8', 8)),   # DET
                                 ('__fo6', '<i4'),
                                 ('__fo7', '<i4'),
                                 # IDET has 11x int32
                                 ('idet', '<i4', 11),   # IDET
                                 ('__fo8', '<i4'),
                                 ('reclen', '<i4')])
        # payload starts at 0x9E (158)
        estimator.payload_offset = 158

    header = np.fromfile(self.filename, header_dtype, count=1)
    estimator.rec_size = header['reclen'][0] // 8

    if 'VOXSCORE' in header['geotyp'][0].decode('ascii'):
        estimator.tripdose = header['tds'][0]
        estimator.tripntot = header['tnt'][0]

    # map the 11-element IDET table to a namedtuple, for easier access
    # here is a description of the IDET table, assuming Fortran-style numbering
    # (arrays starting from 1)
    # IDET(1) : Number of bins in first dimension, x or r or zones
    # IDET(2) : Number of bins in second dimension, y or theta
    # IDET(3) : Number of bins in third dimension, z
    # IDET(4) : Particle type requested for scoring
    # IDET(5) : Detector type (see INITDET)
    # IDET(6) : Z of particle to be scored
    # IDET(7) : A of particle to be scored (only integers here)
    # IDET(8) : Detector material parameter
    # IDET(9) : Number of energy/amu (or LET) differential bins,
    #           negative if logarithmic binning
    # IDET(10): Type of differential scoring, either LET, E/amu
    #           or polar angle
    # IDET(11): Starting zone of scoring for zone scoring
    DetectorAttributes = namedtuple('DetectorAttributes',
                                    ['dim_1_bins', 'dim_2_bins', 'dim_3_bins',
                                     'particle_type', 'det_type', 'particle_z',
                                     'particle_a', 'det_material',
                                     'diff_bins_no', 'diff_scoring_type',
                                     'starting_zone'])

    det_attribs = DetectorAttributes(*header['idet'][0])

    nx = det_attribs.dim_1_bins
    ny = det_attribs.dim_2_bins
    nz = det_attribs.dim_3_bins

    # DET(1-3): start positions for x y z or r theta z
    # DET(4-6): stop positions for x y z or r theta z
    # DET(7)  : start differential grid
    # DET(8)  : stop differential grid
    estimator.det = header['det']
    estimator.particle = det_attribs.particle_type

    try:
        estimator.geotyp = SHGeoType[header['geotyp'][0].decode('ascii').strip().lower()]
    except Exception:
        estimator.geotyp = SHGeoType.unknown
    estimator.number_of_primaries = header['nstat'][0]

    if estimator.geotyp not in {SHGeoType.zone, SHGeoType.dzone}:
        xmin = header['det'][0][0]
        ymin = header['det'][0][1]
        zmin = header['det'][0][2]

        xmax = header['det'][0][3]
        ymax = header['det'][0][4]
        zmax = header['det'][0][5]
    else:
        # special case for zone scoring, x min and max will be zone numbers
        xmin = det_attribs.starting_zone
        xmax = xmin + nx - 1
        ymin = 0.0
        ymax = 0.0
        zmin = 0.0
        zmax = 0.0

    if estimator.geotyp in {SHGeoType.plane, SHGeoType.dplane}:
        # special case for plane scoring, according to the documentation we have:
        #   xmin, ymin, zmin = Sx, Sy, Sz (point on the plane)
        #   xmax, ymax, zmax = nx, ny, nz (normal vector)
        # to avoid a situation where e.g. xmax < xmin (corresponds to nx < Sx)
        # we store only the point on the plane
        estimator.sx, estimator.sy, estimator.sz = xmin, ymin, zmin
        estimator.nx, estimator.ny, estimator.nz = xmax, ymax, zmax
        xmax = xmin
        ymax = ymin
        zmax = zmin

    xunit, xname = _get_mesh_units(estimator, 0)
    yunit, yname = _get_mesh_units(estimator, 1)
    zunit, zname = _get_mesh_units(estimator, 2)

    estimator.x = MeshAxis(n=np.abs(nx), min_val=xmin, max_val=xmax,
                           name=xname, unit=xunit, binning=_bintyp(nx))
    estimator.y = MeshAxis(n=np.abs(ny), min_val=ymin, max_val=ymax,
                           name=yname, unit=yunit, binning=_bintyp(ny))
    estimator.z = MeshAxis(n=np.abs(nz), min_val=zmin, max_val=zmax,
                           name=zname, unit=zunit, binning=_bintyp(nz))

    page = Page(estimator=estimator)
    page.dettyp = SHDetType(det_attribs.det_type)
    page.unit, page.name = _get_detector_unit(page.dettyp, estimator.geotyp)
    estimator.add_page(page)

    return True  # reading OK
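
# Illustrative sketch of the technique used in read_header(): a numpy structured dtype
# describes the fixed binary layout (Fortran record markers included as '__fo*' fields),
# and a single np.fromfile/np.frombuffer call decodes the whole header at once. The
# miniature layout below is made up for the demo; only the mechanism matches the reader.
import numpy as np

demo_dtype = np.dtype([('__fo1', '<i4'),    # leading Fortran record marker
                       ('geotyp', 'S10'),   # geometry name, space-padded ASCII
                       ('nstat', '<i4')])   # number of primaries

raw = np.array([(24, b'MSH       ', 1000)], dtype=demo_dtype).tobytes()
header = np.frombuffer(raw, dtype=demo_dtype, count=1)
print(header['geotyp'][0].decode('ascii').strip(), header['nstat'][0])  # -> MSH 1000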
def read_data(self, estimator):
    logger.debug("Reading: " + self.filename)

    with open(self.filename, "rb") as f:
        d1 = np.dtype([('magic', 'S6'), ('end', 'S2'), ('vstr', 'S16')])
        _x = np.fromfile(f, dtype=d1, count=1)  # read the file header into numpy
        logger.debug("Magic : " + _x['magic'][0].decode('ASCII'))
        logger.debug("Endian: " + _x['end'][0].decode('ASCII'))
        logger.debug("VerStr: " + _x['vstr'][0].decode('ASCII'))

        # if no pages are present, add the first one
        if not estimator.pages:
            estimator.add_page(Page())

        while f:
            token = read_next_token(f)
            if token is None:
                break

            pl_id, _pl_type, _pl_len, _pl = token
            pl = [None] * _pl_len

            # decode all strings (currently there will never be more than one per token)
            if 'S' in _pl_type.decode('ASCII'):
                for i, _j in enumerate(_pl):
                    pl[i] = _pl[i].decode('ASCII').strip()
            else:
                pl = _pl

            try:
                token_name = SHBDOTagID(pl_id).name
                logger.debug("Read token {:s} (0x{:02x}) value {} type {:s} length {:d}".format(
                    token_name, pl_id, _pl, _pl_type.decode('ASCII'), _pl_len))
            except ValueError:
                logger.info("Skipping token (0x{:02x}) value {} type {:s} length {:d}".format(
                    pl_id, _pl, _pl_type.decode('ASCII'), _pl_len))

            if SHBDOTagID.shversion == pl_id:
                estimator.mc_code_version = pl[0]
                logger.debug("MC code version: " + estimator.mc_code_version)

            if SHBDOTagID.filedate == pl_id:
                estimator.filedate = pl[0]

            if SHBDOTagID.user == pl_id:
                estimator.user = pl[0]

            if SHBDOTagID.host == pl_id:
                estimator.host = pl[0]

            if SHBDOTagID.rt_nstat == pl_id:
                estimator.number_of_primaries = pl[0]

            # beam configuration etc...
            if pl_id in detector_name_from_bdotag:
                setattr(estimator, detector_name_from_bdotag[pl_id], pl[0])

            # estimator block here ---
            if SHBDOTagID.det_geotyp == pl_id:
                estimator.geotyp = SHGeoType[pl[0].strip().lower()]

            if SHBDOTagID.ext_ptvdose == pl_id:
                estimator.tripdose = 0.0

            if SHBDOTagID.ext_nproj == pl_id:
                estimator.tripntot = -1

            # read a single detector
            if SHBDOTagID.det_dtype == pl_id:
                estimator.pages[0].dettyp = SHDetType(pl[0])

            if SHBDOTagID.det_part == pl_id:  # code of particle to be scored
                estimator.scored_particle_code = pl[0]

            if SHBDOTagID.det_partz == pl_id:  # Z of particle to be scored
                estimator.scored_particle_z = pl[0]

            if SHBDOTagID.det_parta == pl_id:  # A of particle to be scored
                estimator.scored_particle_a = pl[0]

            if SHBDOTagID.det_nbin == pl_id:
                nx = pl[0]
                ny = pl[1]
                nz = pl[2]

            if SHBDOTagID.det_xyz_start == pl_id:
                xmin = pl[0]
                ymin = pl[1]
                zmin = pl[2]

            if SHBDOTagID.det_xyz_stop == pl_id:
                xmax = pl[0]
                ymax = pl[1]
                zmax = pl[2]

            # partial support for differential scoring (only linear binning)
            # TODO add some support for DMSH, DCYL and DZONE
            # TODO add support for logarithmic binning
            diff_geotypes = {SHGeoType.dplane, SHGeoType.dmsh, SHGeoType.dcyl, SHGeoType.dzone}
            if hasattr(estimator, 'geotyp') and estimator.geotyp in diff_geotypes:
                if SHBDOTagID.det_dif_start == pl_id:
                    estimator.dif_min = pl[0]

                if SHBDOTagID.det_dif_stop == pl_id:
                    estimator.dif_max = pl[0]

                if SHBDOTagID.det_nbine == pl_id:
                    estimator.dif_n = pl[0]

                if SHBDOTagID.det_difftype == pl_id:
                    estimator.dif_type = pl[0]

            if SHBDOTagID.det_zonestart == pl_id:
                estimator.zone_start = pl[0]

            if SHBDOTagID.data_block == pl_id:
                estimator.pages[0].data_raw = np.asarray(pl)

        # TODO: it would be better not to overwrite x, y, z and make a proper case for ZONE scoring later.
        if estimator.geotyp in {SHGeoType.zone, SHGeoType.dzone}:
            # special case for zone scoring, x min and max will be zone numbers
            xmin = estimator.zone_start
            xmax = xmin + nx - 1
            ymin = 0.0
            ymax = 0.0
            zmin = 0.0
            zmax = 0.0
        elif estimator.geotyp in {SHGeoType.plane, SHGeoType.dplane}:
            # special case for plane scoring, according to the documentation we have:
            #   xmin, ymin, zmin = Sx, Sy, Sz (point on the plane)
            #   xmax, ymax, zmax = nx, ny, nz (normal vector)
            # to avoid a situation where e.g. xmax < xmin (corresponds to nx < Sx)
            # we store only the point on the plane
            estimator.sx, estimator.sy, estimator.sz = xmin, ymin, zmin
            estimator.nx, estimator.ny, estimator.nz = xmax, ymax, zmax
            xmax = xmin
            ymax = ymin
            zmax = zmin

        # check if the scored quantity is LET; if yes, then change units from [MeV/cm] to [keV/um]
        if hasattr(estimator, 'dif_type') and estimator.dif_type == 2:
            estimator.dif_min /= 10.0
            estimator.dif_max /= 10.0

        # differential scoring data replacement
        if hasattr(estimator, 'dif_min') and hasattr(estimator, 'dif_max') and hasattr(estimator, 'dif_n'):
            if nz == 1:
                # at most two axes (X or Y) are filled with the scored value and the Z axis is empty,
                # so we can put the differential quantity on the Z axis
                nz = estimator.dif_n
                zmin = estimator.dif_min
                zmax = estimator.dif_max
                estimator.dif_axis = 2
            elif ny == 1:
                # Z axis is filled with the scored value (X axis maybe also) and the Y axis is empty,
                # so we can put the differential quantity on the Y axis
                ny = estimator.dif_n
                ymin = estimator.dif_min
                ymax = estimator.dif_max
                estimator.dif_axis = 1
            elif nx == 1:
                nx = estimator.dif_n
                xmin = estimator.dif_min
                xmax = estimator.dif_max
                estimator.dif_axis = 0

        xunit, xname = _get_mesh_units(estimator, 0)
        yunit, yname = _get_mesh_units(estimator, 1)
        zunit, zname = _get_mesh_units(estimator, 2)

        estimator.x = MeshAxis(n=np.abs(nx), min_val=xmin, max_val=xmax,
                               name=xname, unit=xunit, binning=_bintyp(nx))
        estimator.y = MeshAxis(n=np.abs(ny), min_val=ymin, max_val=ymax,
                               name=yname, unit=yunit, binning=_bintyp(ny))
        estimator.z = MeshAxis(n=np.abs(nz), min_val=zmin, max_val=zmax,
                               name=zname, unit=zunit, binning=_bintyp(nz))

        estimator.pages[0].unit, estimator.pages[0].name = _get_detector_unit(estimator.pages[0].dettyp,
                                                                              estimator.geotyp)

        estimator.file_format = 'bdo2016'

        logger.debug("Done reading bdo file.")
        logger.debug("Detector data : " + str(estimator.pages[0].data))
        logger.debug("Detector nstat: " + str(estimator.number_of_primaries))
        logger.debug("Detector nx   : " + str(estimator.x.n))
        logger.debug("Detector ny   : " + str(estimator.y.n))
        logger.debug("Detector nz   : " + str(estimator.z.n))

    estimator.file_counter = 1
    super(SHReaderBDO2016, self).read_data(estimator)
    return True
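
# Sketch summarising the differential-axis placement above: the differential grid is parked
# on the first spatial axis that holds only a single bin (Z preferred, then Y, then X), and
# dif_axis records which one was used. A compact stand-alone version of that decision:
def pick_diff_axis(nx, ny, nz):
    """Return 2, 1 or 0 for the mesh axis that can host the differential grid, or None."""
    if nz == 1:
        return 2  # Z axis is free
    if ny == 1:
        return 1  # Y axis is free
    if nx == 1:
        return 0  # X axis is free
    return None   # no free axis; differential data cannot be mapped this way

print(pick_diff_axis(10, 1, 1))  # -> 2 (the Z axis is used first)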