def test_basename_datafile(self):
    # check that the right basename is returned
    pth = "a/b/c.nx.hdf"
    assert_(basename_datafile(pth) == "c")

    pth = "c.nx.hdf"
    assert_(basename_datafile(pth) == "c")
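# A minimal sketch of the helper exercised by the test above (illustrative
# only; the actual refnx.reduce implementation may differ): drop any leading
# directories and the trailing ".nx.hdf" extension from a NeXUS file path.
import os

def basename_datafile(pth):
    # "a/b/c.nx.hdf" --> "c", "c.nx.hdf" --> "c"
    basename = os.path.basename(pth)
    return basename.split(".nx.hdf")[0]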
def reducer(self, callback=None):
    """
    Reduce all the entries in reduction_entries

    Parameters
    ----------
    callback : callable
        Function, `f(percent_finished)` that is called with the current
        percentage progress of the reduction
    """
    # refnx.reduce.reduce needs you to be in the directory where you're
    # going to write files to
    if self.output_directory:
        os.chdir(self.output_directory)

    # if no data directory was specified then assume it's the cwd
    data_directory = self.data_directory
    if not data_directory:
        data_directory = "./"

    def full_path(fname):
        f = os.path.join(data_directory, fname)
        return f

    # if the streamed directory isn't mentioned then assume it's the same
    # as the data directory
    streamed_directory = self.streamed_directory
    if not os.path.isdir(streamed_directory):
        self.streamed_directory = data_directory

    logging.info("-------------------------------------------------------"
                 "\nStarting reduction run")
    logging.info(
        "data_folder={data_directory}, trim_trailing=True, "
        "lo_wavelength={low_wavelength}, "
        "hi_wavelength={high_wavelength}, "
        "rebin_percent={rebin_percent}, "
        "normalise={monitor_normalisation}, "
        "background={background_subtraction} "
        "eventmode={streamed_reduction} "
        "event_folder={streamed_directory}".format(**self.__dict__))

    # sets up time slices for event reduction
    if self.streamed_reduction:
        eventmode = np.arange(self.stream_start, self.stream_end,
                              self.stream_duration)
        eventmode = np.r_[eventmode, self.stream_end]
    else:
        eventmode = None

    # are you manual beamfinding?
    peak_pos = None
    if self.manual_beam_find and self.manual_beam_finder is not None:
        peak_pos = -1

    idx = 0
    cached_direct_beams = {}

    for row, val in self.reduction_entries.items():
        if not val["use"]:
            continue

        flood = None
        if val["flood"]:
            flood = full_path(val["flood"])

        combined_dataset = None

        # process entries one by one
        for ref, db in zip(
                ["reflect-1", "reflect-2", "reflect-3"],
                ["direct-1", "direct-2", "direct-3"]):
            reflect = val[ref]
            direct = val[db]

            # if the file doesn't exist there's no point continuing
            if (not os.path.isfile(full_path(reflect))) or (
                    not os.path.isfile(full_path(direct))):
                continue

            # which of the nspectra to reduce (or all)
            ref_pn = PlatypusNexus(full_path(reflect))

            if direct not in cached_direct_beams:
                cached_direct_beams[direct] = PlatypusReduce(
                    direct, data_folder=data_directory)

            reducer = cached_direct_beams[direct]

            try:
                reduced = reducer(
                    ref_pn,
                    scale=val["scale"],
                    h5norm=flood,
                    lo_wavelength=self.low_wavelength,
                    hi_wavelength=self.high_wavelength,
                    rebin_percent=self.rebin_percent,
                    normalise=self.monitor_normalisation,
                    background=self.background_subtraction,
                    manual_beam_find=self.manual_beam_finder,
                    peak_pos=peak_pos,
                    eventmode=eventmode,
                    event_folder=streamed_directory,
                )
            except Exception as e:
                # typical Exception would be ValueError for non overlapping
                # angles
                logging.info(e)
                continue

            logging.info("Reduced {} vs {}, scale={}, angle={}".format(
                reflect,
                direct,
                val["scale"],
                reduced[1]["omega"][0, 0],
            ))

            if combined_dataset is None:
                combined_dataset = ReflectDataset()

                fname = basename_datafile(reflect)
                fname_dat = os.path.join(self.output_directory,
                                         "c_{0}.dat".format(fname))
                fname_xml = os.path.join(self.output_directory,
                                         "c_{0}.xml".format(fname))

            try:
                combined_dataset.add_data(
                    reducer.data(),
                    requires_splice=True,
                    trim_trailing=True,
                )
            except ValueError as e:
                # datasets don't overlap
                logging.info(e)
                continue

        if combined_dataset is not None:
            # after you've finished reducing write a combined file.
            with open(fname_dat, "wb") as f:
                combined_dataset.save(f)
            with open(fname_xml, "wb") as f:
                combined_dataset.save_xml(f)
            logging.info("Written combined files: {} and {}".format(
                fname_dat, fname_xml))

        # can be used to create a progress bar
        idx += 1
        if callback is not None:
            ok = callback(100 * idx / len(self.reduction_entries))
            if not ok:
                break

    logging.info("\nFinished reduction run"
                 "-------------------------------------------------------")