def _load_metadata(self, MetaData):
    """Reads metadata from a dict or from a metadata file.
    The default configuration file is read as well; in case of duplicate entries,
    the information from MetaData takes precedence.

    Parameters
    ----------
    MetaData : dict or filename
    """
    default_settings = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config_MIfile.ini')
    if (type(MetaData) in [dict, collections.OrderedDict]):
        self.MetaData = cf.Config(None, defaultConfigFiles=[default_settings])
        self.MetaData.Import(MetaData, section_name='MIfile')
    else:
        self.MetaData = cf.Config(MetaData, defaultConfigFiles=[default_settings])
    self.MaxBufferSize = self.MetaData.Get('settings', 'max_buffer_size', 100000000, int)
    if (self.FileName is None):
        self.FileName = self.MetaData.Get('MIfile', 'filename', None)
    self.hdrSize = self.MetaData.Get('MIfile', 'hdr_len', 0, int)
    self.gapBytes = self.MetaData.Get('MIfile', 'gap_bytes', 0, int)
    self.Shape = self.MetaData.Get('MIfile', 'shape', [0, 0, 0], int)
    self.ImgNumber = self.Shape[0]
    self.ImgHeight = self.Shape[1]
    self.ImgWidth = self.Shape[2]
    self.PxPerImg = self.ImgHeight * self.ImgWidth
    self.PixelFormat = self.MetaData.Get('MIfile', 'px_format', 'B', str)
    self.PixelDepth = _data_depth[self.PixelFormat]
    self.PixelDataType = _data_types[self.PixelFormat]
    self.FPS = self.MetaData.Get('MIfile', 'fps', 1.0, float)
    self.PixelSize = self.MetaData.Get('MIfile', 'px_size', 1.0, float)

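# Illustrative sketch (not part of the original module): metadata for _load_metadata()
# expressed as a plain dict. The keys mirror the 'MIfile' options read above; all values
# are placeholders. max_buffer_size is instead read from the [settings] section of the
# default config_MIfile.ini.
_EXAMPLE_MIFILE_METADATA = {
    'filename'  : 'raw_movie.dat',    # hypothetical raw image file
    'hdr_len'   : 0,                  # header bytes to skip at the beginning of the file
    'gap_bytes' : 0,                  # bytes between consecutive images
    'shape'     : [1000, 512, 512],   # [number of images, image height, image width]
    'px_format' : 'B',                # pixel format code (default 'B')
    'fps'       : 100.0,              # acquisition frame rate
    'px_size'   : 1.0,                # physical pixel size
}
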
def Export(self, mi_filename, metadata_filename, zRange=None, cropROI=None):
    """Export a chunk of the MIfile to a second file

    Parameters
    ----------
    mi_filename       : filename of the exported MIfile
    metadata_filename : filename of the exported metadata
    zRange            : range of images to be exported. If None, all images will be exported
    cropROI           : ROI to be exported. If None, full images will be exported
    """
    self.OpenForWriting(mi_filename)
    mi_chunk = self.Read(zRange, cropROI)
    exp_meta = self.GetMetadata().copy()
    exp_meta['hdr_len'] = 0
    exp_meta['gap_bytes'] = 0
    exp_meta['shape'] = list(mi_chunk.shape)
    if ('fps' in exp_meta):
        val_zRange = self.Validate_zRange(zRange)
        exp_meta['fps'] = float(exp_meta['fps']) * 1.0 / val_zRange[2]
    exp_config = cf.Config()
    exp_config.Import(exp_meta, section_name='MIfile')
    exp_config.Export(metadata_filename)
    self.WriteData(mi_chunk)

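# Illustrative usage sketch (not part of the original module). File names are placeholders;
# the [start, stop, step] zRange convention is suggested by the fps rescaling above, and the
# [x, y, width, height] cropROI convention is inferred from the cropping code elsewhere in
# this package.
def _example_export_chunk():
    mi = MIfile('raw_movie.dat', 'raw_movie_metadata.ini')
    # export every other frame among the first 200, cropped to a 256x256 ROI
    mi.Export('subset.dat', 'subset_metadata.ini', zRange=[0, 200, 2], cropROI=[0, 0, 256, 256])
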
def LoadMetadata(self, MetaData=None, MetaDataSection=None):
    """Load metadata from dict or filename

    Parameters
    ----------
    MetaData        : string or dict
    MetaDataSection : if self.MetaData is a dictionary, load the given subsection of the configuration parameters
    """
    if (MetaData is not None):
        self.MetaData = MetaData
    assert (self.MetaData is not None), 'No Metadata to be loaded'
    self.MetaData = cf.LoadMetadata(self.MetaData, MetaDataSection)
    if 'MIfile' not in self.MetaData.GetSections():
        logging.warning('No MIfile section found in MIstack metadata (available sections: ' + str(self.MetaData.GetSections()) + ')')
    else:
        logging.debug('Now loading MIstack.MetaData from Config object. Available sections: ' + str(self.MetaData.GetSections()))
    self.MIshape = self.MetaData.Get('MIfile', 'shape', [0, 0, 0], int)
    self.hdrSize = self.MetaData.Get('MIfile', 'hdr_len', 0, int)
    self.gapBytes = self.MetaData.Get('MIfile', 'gap_bytes', 0, int)
    self.ImgsPerMIfile = self.MIshape[0]
    self.ImgHeight = self.MIshape[1]
    self.ImgWidth = self.MIshape[2]
    self.PxPerImg = self.ImgHeight * self.ImgWidth
    self.PixelFormat = self.MetaData.Get('MIfile', 'px_format', 'B', str)
    self.PixelDepth = MI._data_depth[self.PixelFormat]
    self.PixelDataType = MI._data_types[self.PixelFormat]
    self.FPS = self.MetaData.Get('MIfile', 'fps', 1.0, float)
    self.PixelSize = self.MetaData.Get('MIfile', 'px_size', 1.0, float)

def ExportConfiguration(self):
    """Export the current CorrMaps configuration to CorrMapsConfig.ini in the output folder"""
    cf.ExportDict({'imgs_metadata'      : self.MIinput.GetMetadata(section='MIfile'),
                   'corrmap_metadata'   : self.outMetaData,
                   'corrmap_parameters' : {'out_folder' : self.outFolder,
                                           'lags'       : self.lagList,
                                           'img_range'  : self.imgRange,
                                           'crop_roi'   : self.cropROI},
                   'kernel'             : self.Kernel.ToDict(),
                   }, os.path.join(self.outFolder, 'CorrMapsConfig.ini'))

def _load_metadata(self, meta_data):
    """Reads metadata from a dict or from a metadata file.
    The default configuration file is read as well; in case of duplicate entries,
    the information from meta_data takes precedence.

    Parameters
    ----------
    meta_data : dict or filename
    """
    if (type(meta_data) in [dict, collections.OrderedDict]):
        logging.debug('Now loading MIfile metadata (dict with ' + str(len(meta_data)) + ' keys)')
    else:
        logging.debug('Now loading MIfile metadata (from filename: ' + str(meta_data) + ')')
    default_settings = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config_MIfile.ini')
    self.MetaData = cf.LoadMetadata(meta_data, SectionName='MIfile', DefaultFiles=[default_settings])
    if 'MIfile' not in self.MetaData.GetSections():
        logging.warning('No MIfile section found in MIfile metadata (available sections: ' + str(self.MetaData.GetSections()) + ')')
    else:
        logging.debug('Now loading MIfile.MetaData from Config object. Available sections: ' + str(self.MetaData.GetSections()))
    self.MaxBufferSize = self.MetaData.Get('settings', 'max_buffer_size', 100000000, int)
    if (self.FileName is None):
        self.FileName = self.MetaData.Get('MIfile', 'filename', None)
    self.hdrSize = self.MetaData.Get('MIfile', 'hdr_len', 0, int)
    self.gapBytes = self.MetaData.Get('MIfile', 'gap_bytes', 0, int)
    self.Shape = self.MetaData.Get('MIfile', 'shape', [0, 0, 0], int)
    self.ImgNumber = self.Shape[0]
    self.ImgHeight = self.Shape[1]
    self.ImgWidth = self.Shape[2]
    self.PxPerImg = self.ImgHeight * self.ImgWidth
    self.PixelFormat = self.MetaData.Get('MIfile', 'px_format', 'B', str)
    self.PixelDepth = _data_depth[self.PixelFormat]
    self.PixelDataType = _data_types[self.PixelFormat]
    self.FPS = self.MetaData.Get('MIfile', 'fps', 1.0, float)
    self.PixelSize = self.MetaData.Get('MIfile', 'px_size', 1.0, float)

def ExportConfiguration(self):
    """Export the current NonAffMaps configuration to NaffMapsConfig.ini in the output folder"""
    cf.ExportDict({'fw_corrmap_metadata' : self.cmaps_fw.outMetaData,
                   'bk_corrmap_metadata' : self.cmaps_bk.outMetaData,
                   'naffmap_parameters'  : {'out_folder'      : self.outFolder,
                                            'lag_range'       : self.lag_range,
                                            'lags'            : self.lagList,
                                            'img_range'       : self.t_range,
                                            'crop_roi'        : self.cropROI,
                                            'norm_range'      : self.norm_range,
                                            'qz_fw'           : self.qz_fw,
                                            'qz_bk'           : self.qz_bk,
                                            'trans_bk_matrix' : self.trans_bk_matrix,
                                            'trans_bk_offset' : self.trans_bk_offset},
                   'smooth_kernel'       : self.smooth_kernel_specs
                   }, os.path.join(self.outFolder, 'NaffMapsConfig.ini'))

def LoadFromConfig(ConfigFile, outFolder=None):
    """Loads a CorrMaps object from a config file like the one exported by CorrMaps.ExportConfiguration()

    Parameters
    ----------
    ConfigFile : full path of the config file to read
    outFolder  : folder containing the correlation maps.
                 If None, the value from the config file will be used;
                 otherwise, the value from the config file will be discarded

    Returns
    -------
    a CorrMaps object with an "empty" image MIfile (containing metadata but no actual image data)
    """
    config = cf.Config(ConfigFile)
    if (outFolder is None):
        outFolder = config.Get('corrmap_parameters', 'out_folder')
    kernel_specs = DSH.Kernel.Kernel(config.ToDict(section='kernel'))
    return CorrMaps(MI.MIfile(None, config.ToDict(section='imgs_metadata')),
                    outFolder,
                    config.Get('corrmap_parameters', 'lags', [], int),
                    kernel_specs,
                    config.Get('corrmap_parameters', 'img_range', None, int),
                    config.Get('corrmap_parameters', 'crop_roi', None, int))

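# Illustrative usage sketch (not part of the original module): reload a CorrMaps analysis
# from a previously exported configuration file. The path is a placeholder; the file name
# CorrMapsConfig.ini is the one written by ExportConfiguration() above.
def _example_reload_corrmaps():
    cmaps = LoadFromConfig('/path/to/analysis/CorrMapsConfig.ini')
    # the correlation-map MIfiles can then be retrieved with cmaps.GetCorrMaps()
    return cmaps
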
def LoadMetadata(self, MetaData=None, MetaDataSection=None):
    """Load metadata from dict or filename

    Parameters
    ----------
    MetaData        : string or dict. If None, self.MetaData_init will be used, if available
    MetaDataSection : if self.MetaData is a string, load the given section of the configuration file
    """
    if (MetaData is not None):
        self.MetaData = MetaData
    assert (self.MetaData is not None), 'No Metadata to be loaded'
    self.MetaData = cf.LoadMetadata(self.MetaData, MetaDataSection)
    self.MIshape = self.MetaData.Get('MIfile', 'shape', [0, 0, 0], int)
    self.hdrSize = self.MetaData.Get('MIfile', 'hdr_len', 0, int)
    self.gapBytes = self.MetaData.Get('MIfile', 'gap_bytes', 0, int)
    self.ImgsPerMIfile = self.MIshape[0]
    self.ImgHeight = self.MIshape[1]
    self.ImgWidth = self.MIshape[2]
    self.PxPerImg = self.ImgHeight * self.ImgWidth
    self.PixelFormat = self.MetaData.Get('MIfile', 'px_format', 'B', str)
    self.PixelDepth = MI._data_depth[self.PixelFormat]
    self.PixelDataType = MI._data_types[self.PixelFormat]
    self.FPS = self.MetaData.Get('MIfile', 'fps', 1.0, float)
    self.PixelSize = self.MetaData.Get('MIfile', 'px_size', 1.0, float)

def MergeMIfiles(MergedFileName, MIfileList, MergedMetadataFile=None, MergeAxis=0, MoveAxes=[], FinalShape=None):
    """Merge multiple image files into one image file

    Parameters
    ----------
    MergedFileName     : full path of the destination merged MIfile
    MIfileList         : list of MIfile objects or of 2-element lists [MIfilename, MetaData].
                         Image shape and pixel format must be the same for all MIfiles
    MergedMetadataFile : if not None, export metadata of the merged file
    MergeAxis          : stitch MIfiles along this axis. Default is 0 (z axis, or time).
                         MIfiles need to have the same shape along the other axes
    MoveAxes           : list of pairs of indices, of the form (ax_pos, ax_dest).
                         If not empty, after merging and before writing to the output file,
                         perform a series of np.moveaxis(res, ax_pos, ax_dest) moves
    FinalShape         : if not None, reshape the final output (after moving axes, if any) to the given shape.
                         The size along the merged axis is disregarded and computed automatically
                         (it can be set to -1)

    Returns
    -------
    outMIfile : merged MIfile
    """
    if (len(MoveAxes) > 0):
        if (type(MoveAxes[0]) is int):
            MoveAxes = [MoveAxes]
    logging.info('MIfile.MergeMIfiles() procedure started. ' + str(len(MIfileList)) + ' MIfiles to be merged into ' + str(MergedFileName))
    strLog = 'Merging along Axis ' + str(MergeAxis)
    if len(MoveAxes) > 0:
        strLog += '; followed by np.moveaxis moves: ' + str(MoveAxes)
    if FinalShape is None:
        strLog += '. No final reshaping'
    else:
        strLog += '. Reshape output to ' + str(FinalShape)
    logging.info(strLog)

    # Load all MIfiles and generate output metadata
    mi_in_list = []
    out_meta = {'hdr_len'   : 0,
                'shape'     : [0, 0, 0],
                'px_format' : None,
                'fps'       : 0.0,
                'px_size'   : 0.0}
    for midx in range(len(MIfileList)):
        if (type(MIfileList[midx]) is list):
            add_mi = MIfile(MIfileList[midx][0], MIfileList[midx][1])
            logging.debug('MergeMIfiles(): adding new MIfile object with filename ' + str(MIfileList[midx][0]))
        else:
            add_mi = MIfileList[midx]
            logging.debug('MergeMIfiles(): adding existing MIfile object (' + str(MIfileList[midx].FileName) + ')')
        mi_in_list.append(add_mi)
        cur_format = add_mi.DataFormat()
        cur_MIshape = add_mi.GetShape()
        if (MergeAxis < 0):
            MergeAxis += len(cur_MIshape)
        out_meta['shape'][MergeAxis] += cur_MIshape[MergeAxis]
        if (midx == 0):
            for ax in range(len(cur_MIshape)):
                if ax != MergeAxis:
                    out_meta['shape'][ax] = cur_MIshape[ax]
            out_meta['px_format'] = cur_format
            out_meta['fps'] = add_mi.GetFPS()
            out_meta['px_size'] = add_mi.GetPixelSize()
        else:
            for ax in range(len(cur_MIshape)):
                if (ax != MergeAxis and out_meta['shape'][ax] != cur_MIshape[ax]):
                    raise IOError('Cannot merge MIfiles of shapes ' + str(out_meta['shape']) +
                                  ' and ' + str(cur_MIshape) + ' along axis ' + str(MergeAxis) +
                                  ' (sizes on axis ' + str(ax) + ' do not match)')
            assert out_meta['px_format'] == cur_format, 'MIfiles should all have the same pixel format'
        logging.debug('Current shape is ' + str(cur_MIshape) + '. Output shape updated to ' + str(out_meta['shape']))

    if (FinalShape is not None):
        re_shape = list(FinalShape.copy())
        re_shape[MergeAxis] = int(np.prod(out_meta['shape']) / (re_shape[MergeAxis - 1] * re_shape[MergeAxis - 2]))
        #for move in MoveAxes:
        #    re_shape = sf.MoveListElement(re_shape, move[0], move[1])
        assert np.prod(re_shape) == np.prod(out_meta['shape']), 'An error occurred trying to reshape MIfile of shape ' + str(out_meta['shape']) +\
                    ' into shape ' + str(re_shape) + ': pixel number is not conserved (' + str(np.prod(re_shape)) +\
                    '!=' + str(np.prod(out_meta['shape'])) + ')!'
        out_meta['shape'] = list(re_shape)
        logging.debug('Output shape should be ' + str(re_shape))

    if (MergedMetadataFile is not None):
        conf = cf.Config()
        conf.Import(out_meta, section_name='MIfile')
        conf.Export(MergedMetadataFile)

    outMIfile = MIfile(MergedFileName, out_meta)
    if (MergeAxis == 0 and len(MoveAxes) == 0):
        for cur_mifile in mi_in_list:
            outMIfile.WriteData(cur_mifile.Read(closeAfter=True), closeAfter=False)
            logging.debug('MIfile ' + str(cur_mifile.FileName) + ' read and directly transferred to output MIfile')
        outMIfile.Close()
    else:
        write_data = mi_in_list[0].Read(closeAfter=True)
        logging.debug('Writing buffer initialized with first MIfile (' + str(mi_in_list[0].FileName) + '). Shape is ' + str(write_data.shape))
        for midx in range(1, len(mi_in_list)):
            cur_buf = mi_in_list[midx].Read(closeAfter=True)
            write_data = np.append(write_data, cur_buf, axis=MergeAxis)
            logging.debug('MIfile #' + str(midx) + ' (' + str(mi_in_list[midx].FileName) + ') with shape ' + str(cur_buf.shape) +
                          ' appended to writing buffer along axis ' + str(MergeAxis) + '. Current shape is ' + str(write_data.shape))
        for move in MoveAxes:
            write_data = np.moveaxis(write_data, move[0], move[1])
            logging.debug('Axis ' + str(move[0]) + ' moved to position ' + str(move[1]) + '. Current shape is ' + str(write_data.shape))
        outMIfile.WriteData(write_data, closeAfter=True)
        logging.debug('Final buffer with shape ' + str(write_data.shape) + ' written to output MIfile ' + str(outMIfile.FileName))
    return outMIfile

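# Illustrative usage sketch (not part of the original module): merge two hypothetical movies
# along the time axis (MergeAxis=0) and also write the metadata of the merged file. File
# names are placeholders; the [filename, metadata] pair form of MIfileList is the one
# documented above.
def _example_merge_two_movies():
    merged = MergeMIfiles('merged.dat',
                          [['part1.dat', 'part1_metadata.ini'],
                           ['part2.dat', 'part2_metadata.ini']],
                          MergedMetadataFile='merged_metadata.ini',
                          MergeAxis=0)
    return merged
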
        else:
            g_params[param_kw] = sys.argv[argidx]
            param_kw = None

    if (len(inp_fnames) <= 0):
        inp_fnames = [os.path.join(os.path.dirname(os.path.abspath(__file__)), 'serial_corrmap_config.ini')]
    if ('-silent' not in cmd_list):
        print('\n\nBATCH CORRELATION MAP CALCULATOR\nWorking on {0} input files'.format(len(inp_fnames)))

    # Loop through all configuration files
    for cur_inp in inp_fnames:
        if ('-silent' not in cmd_list):
            print('Current input file: ' + str(cur_inp))

        # Read global section
        conf = Config.Config(cur_inp)
        num_proc = conf.Get('global', 'n_proc', 1, int)
        px_per_chunk = conf.Get('global', 'px_per_proc', 1, int)
        kernel_specs = conf.Get('global', 'kernel_specs')
        lag_list = conf.Get('global', 'lag_list', [], int)
        froot = conf.Get('global', 'root', '')

        if ('-skip_cmap' not in cmd_list) or ('-skip_vmap' not in cmd_list) or ('-skip_vmap_assemble' not in cmd_list) or ('-skip_displ' not in cmd_list) or ('-skip_grad' not in cmd_list):
            # Loop through all 'input_N' sections of the configuration file
            for cur_sec in conf.GetSections():
                if (cur_sec[:len('input_')] == 'input_'):
                    # Read current input section
                    mi_fname = os.path.join(froot, conf.Get(cur_sec, 'mi_file'))
                    if ('-silent' not in cmd_list):
def LoadFromConfig(ConfigFile, input_sect='input', outFolder=None):
    """Loads a SALS object from a config file like the one exported with VelMaps.ExportConfig()

    Parameters
    ----------
    ConfigFile : full path of the config file to read
    input_sect : name of the configuration section describing the input files (default: 'input')
    outFolder  : folder containing velocity and correlation maps.
                 If None, the value from the config file will be used;
                 otherwise, the value from the config file will be discarded

    Returns
    -------
    a SALS object, possibly with an "empty" image MIfile (containing metadata but no actual image data)
    """
    config = cf.Config(ConfigFile)
    froot = config.Get('global', 'root', '', str)
    miin_fname = config.Get(input_sect, 'mi_file', None, str)
    miin_meta_fname = config.Get(input_sect, 'meta_file', None, str)
    input_stack = False
    if (miin_fname is not None):
        # if miin_fname is a string, use a single MIfile as input.
        # Otherwise, it can be a list: in that case, use a MIstack as input
        if (isinstance(miin_fname, str)):
            miin_fname = os.path.join(froot, miin_fname)
        else:
            input_stack = True
            for i in range(len(miin_fname)):
                miin_fname[i] = os.path.join(froot, miin_fname[i])
    if (miin_meta_fname is not None):
        miin_meta_fname = os.path.join(froot, miin_meta_fname)
    elif input_stack:
        logging.error('SALS.LoadFromConfig ERROR: metadata filename must be specified when loading a MIstack')
        return None
    if input_stack:
        MIin = MIs.MIstack(miin_fname, miin_meta_fname, Load=True, StackType='t')
    else:
        MIin = MI.MIfile(miin_fname, miin_meta_fname)
    ctrPos = config.Get('SALS_parameters', 'center_pos', None, float)
    if (ctrPos is None):
        logging.error('SALS.LoadFromConfig ERROR: no SALS_parameters.center_pos parameter found in config file ' + str(ConfigFile))
        return None
    else:
        r_max = ppf.MaxRadius(MIin.ImageShape(), ctrPos)
        radRange = sf.ValidateRange(config.Get('SALS_parameters', 'r_range', None, float), r_max, MinVal=1, replaceNone=True)
        angRange = sf.ValidateRange(config.Get('SALS_parameters', 'a_range', None, float), 2 * np.pi, replaceNone=True)
        rSlices = np.geomspace(radRange[0], radRange[1], int(radRange[2]) + 1, endpoint=True)
        aSlices = np.linspace(angRange[0], angRange[1], int(angRange[2]) + 1, endpoint=True)
        if (outFolder is None):
            outFolder = config.Get(input_sect, 'out_folder', None, str)
            if (outFolder is not None):
                outFolder = os.path.join(config.Get('global', 'root', '', str), outFolder)
        mask = config.Get('SALS_parameters', 'px_mask', None, str)
        mask = MI.ReadBinary(sf.PathJoinOrNone(froot, config.Get(input_sect, 'px_mask', mask, str)),
                             MIin.ImageShape(), MIin.DataFormat(), 0)
        dark = MI.ReadBinary(sf.PathJoinOrNone(froot, config.Get(input_sect, 'dark_bkg', None, str)),
                             MIin.ImageShape(), MIin.DataFormat(), 0)
        opt = MI.ReadBinary(sf.PathJoinOrNone(froot, config.Get(input_sect, 'opt_bkg', None, str)),
                            MIin.ImageShape(), MIin.DataFormat(), 0)
        PD_data = sf.PathJoinOrNone(froot, config.Get(input_sect, 'pd_file', None, str))
        if (PD_data is not None):
            PD_data = np.loadtxt(PD_data, dtype=float)
        img_times = config.Get(input_sect, 'img_times', None, str)
        if img_times is not None:
            # if img_times is a string, use a single text file as input.
            # Otherwise, it can be a list: in that case, open each text file and append all results
            if (isinstance(img_times, str)):
                img_times = np.loadtxt(os.path.join(froot, img_times), dtype=float,
                                       usecols=config.Get('format', 'img_times_colidx', 0, int), skiprows=1)
            else:
                tmp_times = np.empty(shape=(0,), dtype=float)
                for cur_f in img_times:
                    tmp_times = np.append(tmp_times, np.loadtxt(os.path.join(froot, cur_f), dtype=float,
                                                                usecols=config.Get('format', 'img_times_colidx', 0, int), skiprows=1))
                img_times = tmp_times
        exp_times = sf.PathJoinOrNone(froot, config.Get(input_sect, 'exp_times', None, str))
        if (exp_times is not None):
            exp_times = np.unique(np.loadtxt(exp_times, dtype=float, usecols=config.Get('format', 'exp_times_colidx', 0, int)))
        dlsLags = config.Get('SALS_parameters', 'dls_lags', None, int)
        tavgT = config.Get('SALS_parameters', 'timeavg_T', None, int)
        return SALS(MIin, outFolder, ctrPos, [rSlices, aSlices], mask, [dark, opt, PD_data], exp_times, dlsLags, img_times, tavgT)

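# Illustrative sketch (not part of the original module): the configuration sections and keys
# that LoadFromConfig() reads above. Only the names are taken from the code; value types and
# list syntax depend on the Config parser, and optional entries (backgrounds, photodiode file,
# timestamps) may be omitted from a real file.
_EXAMPLE_SALS_CONFIG_KEYS = {
    'global'          : ['root'],
    'input'           : ['mi_file', 'meta_file', 'out_folder', 'px_mask', 'dark_bkg',
                         'opt_bkg', 'pd_file', 'img_times', 'exp_times'],
    'SALS_parameters' : ['center_pos', 'r_range', 'a_range', 'px_mask', 'dls_lags', 'timeavg_T'],
    'format'          : ['img_times_colidx', 'exp_times_colidx'],
}
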
def Compute(self):
    sf.CheckCreateFolder(self.outFolder)
    logging.info('NonAffMaps.Compute() started! Result will be saved in folder ' + str(self.outFolder))

    # Search for correlation map MIfiles, skip autocorrelation maps
    fw_mistack = self.cmaps_fw.GetCorrMaps(openMIfiles=True)
    bk_mistack = self.cmaps_bk.GetCorrMaps(openMIfiles=True)
    common_lags = list(set(fw_mistack.IdxList).intersection(bk_mistack.IdxList))
    if self.lag_range is None:
        if 0 in common_lags:
            common_lags.remove(0)
    else:
        if self.lag_range[1] < 0:
            self.lag_range[1] = np.max(common_lags) + 1
        common_lags = [lag for lag in common_lags if (lag != 0 and lag >= self.lag_range[0] and lag <= self.lag_range[1])]
    self.lagList = common_lags

    # Export configuration
    self.ExportConfiguration()

    if self.trans_bk_matrix is not None:
        tr_matrix = np.reshape(np.asarray(self.trans_bk_matrix), (2, 2))
        logging.debug('Backscattered correlation maps will be transformed using matrix ' + str(tr_matrix) + ' and offset ' + str(self.trans_bk_offset))

    # For each pair of correlation maps (with equal lagtime)
    for lidx in range(len(self.lagList)):
        logging.info('Now working on lagtime ' + str(lidx) + '/' + str(len(self.lagList)) + ' (d' + str(self.lagList[lidx]) + ')')
        fw_lidx = fw_mistack.IdxList.index(self.lagList[lidx])
        bk_lidx = bk_mistack.IdxList.index(self.lagList[lidx])

        # compute normalization factors, if needed
        if self.norm_range is not None:
            fw_norm_factor = np.mean(fw_mistack.MIfiles[fw_lidx].Read(zRange=self.norm_range[:2], cropROI=self.norm_range[2:], closeAfter=False))
            if self.trans_bk_matrix is None and self.trans_bk_offset is None:
                bk_norm_factor = np.mean(bk_mistack.MIfiles[bk_lidx].Read(zRange=self.norm_range[:2], cropROI=self.norm_range[2:], closeAfter=False))
            else:
                bk_norm_data = bk_mistack.MIfiles[bk_lidx].Read(zRange=self.norm_range[:2], cropROI=None, closeAfter=False)
                if len(bk_norm_data.shape) > 2:
                    bk_norm_data = np.mean(bk_norm_data, axis=0)
                logging.debug('shape before transformation: ' + str(bk_norm_data.shape))
                bk_norm_data = sp.ndimage.affine_transform(bk_norm_data, tr_matrix, offset=self.trans_bk_offset,
                                                           output_shape=bk_norm_data.shape, order=1, mode='constant', cval=1.0)
                norm_cropROI = MI.ValidateROI(self.norm_range[2:], bk_norm_data.shape, replaceNone=True)
                logging.debug('shape after transformation: ' + str(bk_norm_data.shape) + ' will be cropped with ROI ' + str(norm_cropROI))
                bk_norm_factor = np.mean(bk_norm_data[norm_cropROI[1]:norm_cropROI[1]+norm_cropROI[3],
                                                      norm_cropROI[0]:norm_cropROI[0]+norm_cropROI[2]])
                bk_norm_data = None
        else:
            fw_norm_factor, bk_norm_factor = 1, 1
        logging.info('Normalization factors: ' + str(fw_norm_factor) + ' (front) and ' + str(bk_norm_factor) + ' (back)')

        # load, normalize and optionally smooth correlation maps
        fw_data = np.true_divide(fw_mistack.MIfiles[fw_lidx].Read(zRange=self.t_range, cropROI=self.cropROI, closeAfter=True), fw_norm_factor)
        bk_data = np.true_divide(bk_mistack.MIfiles[bk_lidx].Read(zRange=self.t_range, cropROI=self.cropROI, closeAfter=True), bk_norm_factor)
        if self.smooth_kernel_specs is not None:
            Kernel3D = self.LoadKernel(self.smooth_kernel_specs)
            logging.debug('Smoothing with kernel with shape ' + str(Kernel3D.shape))
            fw_data = signal.convolve(fw_data, Kernel3D, mode='same')
            bk_data = signal.convolve(bk_data, Kernel3D, mode='same')

        # transform backscattered images
        if self.trans_bk_matrix is not None:
            tr_matrix3D = np.asarray([[1, 0, 0],
                                      [0, tr_matrix[0, 0], tr_matrix[0, 1]],
                                      [0, tr_matrix[1, 0], tr_matrix[1, 1]]])
            tr_offset3D = np.asarray([0, self.trans_bk_offset[0], self.trans_bk_offset[1]])
            bk_data = sp.ndimage.affine_transform(bk_data, tr_matrix3D, offset=tr_offset3D,
                                                  output_shape=fw_data.shape, order=1, mode='constant', cval=1.0)

        # sigma2 = ln(forward-scattering corr / backscattering corr) * 6 / (qz_bk^2 - qz_fw^2)
        sigma2 = np.log(np.true_divide(fw_data, bk_data)) * 6.0 / (self.qz_bk**2 - self.qz_fw**2)

        # For the first lagtime, generate and export metadata
        if (lidx == 0):
            out_meta = fw_mistack.MIfiles[fw_lidx].GetMetadata().copy()
            out_meta['hdr_len'] = 0
            out_meta['gap_bytes'] = 0
            out_meta['shape'] = list(sigma2.shape)
            if ('fps' in out_meta):
                val_tRange = fw_mistack.MIfiles[fw_lidx].Validate_zRange(self.t_range)
                out_meta['fps'] = float(out_meta['fps']) * 1.0 / val_tRange[2]
            exp_config = cf.Config()
            exp_config.Import(out_meta, section_name='MIfile')
            metadata_fname = os.path.join(self.outFolder, 'NAffMap_metadata.ini')
            exp_config.Export(metadata_fname)
            logging.info('Metadata exported to file ' + str(metadata_fname))

        # export data
        cur_fname = 'NaffMap_d' + str(self.lagList[lidx]).zfill(4) + '.dat'
        MI.MIfile(os.path.join(self.outFolder, cur_fname), metadata_fname).WriteData(sigma2)
        logging.info('Result saved to file ' + str(cur_fname))
        fw_mistack.MIfiles[fw_lidx].Close()
        bk_mistack.MIfiles[bk_lidx].Close()

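# Minimal standalone restatement (not part of the original module) of the formula used above:
# sigma2 = ln(c_fw / c_bk) * 6 / (qz_bk^2 - qz_fw^2), applied element-wise to the two
# normalized correlation maps. The argument names are placeholders.
def _example_sigma2(fw_corr, bk_corr, qz_fw, qz_bk):
    import numpy as np   # local import so the sketch is self-contained
    return np.log(np.true_divide(fw_corr, bk_corr)) * 6.0 / (qz_bk**2 - qz_fw**2)
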
def GetCorrMaps(self, openMIfiles=True, getAutocorr=True, check_lagtimes=False):
    """Searches for MIfile correlation maps

    Parameters
    ----------
    openMIfiles    : if True, open all MIfiles for reading
    getAutocorr    : if True, return d0 in the list of correlation maps;
                     otherwise, return None instead of the autocorrelation map
    check_lagtimes : if True, check that the lagtimes extracted from the filenames match self.lagList

    Returns
    -------
    corr_config  : configuration file for correlation maps
    corr_mifiles : list of correlation maps, one per time delay
    lag_list     : list of lagtimes
    """
    if not self._corrmaps_loaded:
        assert os.path.isdir(self.outFolder), 'Correlation map folder ' + str(self.outFolder) + ' not found.'
        config_fname = os.path.join(self.outFolder, 'CorrMapsConfig.ini')
        assert os.path.isfile(config_fname), 'Configuration file ' + str(config_fname) + ' not found'
        self.conf_cmaps = cf.Config(config_fname)
        all_cmap_fnames = sf.FindFileNames(self.outFolder, Prefix='CorrMap_d', Ext='.dat', Sort='ASC', AppendFolder=True)
        self.cmap_mifiles = []
        self.all_lagtimes = []
        for i in range(len(all_cmap_fnames)):
            cur_lag = sf.LastIntInStr(all_cmap_fnames[i])
            self.all_lagtimes.append(cur_lag)
            self.cmap_mifiles.append(MI.MIfile(all_cmap_fnames[i], self.conf_cmaps.ToDict(section='corrmap_metadata')))
            if openMIfiles:
                self.cmap_mifiles[-1].OpenForReading()

        # Check lagtimes for consistency
        if (check_lagtimes):
            print('These are all lagtimes. They should be already sorted and not contain 0:')
            print(self.all_lagtimes)
            for cur_lag in self.lagList:
                if (cur_lag not in self.all_lagtimes):
                    print('WARNING: no correlation map found for lagtime ' + str(cur_lag))
        self._corrmaps_loaded = True

    if (self.all_lagtimes[0] == 0 and getAutocorr == False):
        return self.conf_cmaps, [None] + self.cmap_mifiles[1:], self.all_lagtimes
    else:
        return self.conf_cmaps, self.cmap_mifiles, self.all_lagtimes

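# Illustrative usage sketch (not part of the original module), following the tuple returned
# above: skip the autocorrelation map (d0) and read the first available correlation map.
# The argument is assumed to be an object exposing GetCorrMaps(), e.g. a CorrMaps instance.
def _example_read_first_corrmap(corr_maps_obj):
    corr_config, corr_mifiles, lag_list = corr_maps_obj.GetCorrMaps(openMIfiles=True, getAutocorr=False)
    # with getAutocorr=False the d0 entry, if present, is replaced by None
    for cur_lag, cur_mifile in zip(lag_list, corr_mifiles):
        if cur_mifile is not None:
            return cur_lag, cur_mifile.Read(closeAfter=True)
    return None, None
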