def fftslice(self, s, df=0):
    """
    Get the Fourier-transformed data of a specific slice of a DataArray of the instance input.

    :param s: The slice, addressing a selection of the instance input data.
        Can be conveniently made with `numpy.s_[]`.
    :type s: slice

    :param df: An identifier (index or label) of the DataArray to transform.
    :type df: int or str

    :return: The Fourier-transformed data for the selection.
    :rtype: pint.Quantity
    """
    s = full_slice(s, self.indata.dimensions)
    if s[self.axis_to_transform_id] != np.s_[:]:
        warnings.warn("FFT of a slice that is not full along the FFT axis might return bad results")
    df_in = self.indata.get_datafield(df)
    timedata = df_in.data[s].magnitude
    freqdata = fftpack.fftshift(fftpack.fft(timedata, axis=self.axis_to_transform_id),
                                axes=self.axis_to_transform_id)
    return u.to_ureg(freqdata, df_in.get_unit())
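
# --- Usage sketch (hypothetical names): `fft` stands in for an instance of the class
# providing `fftslice`, with 3D input data and the FFT axis at index 0. ---
import numpy as np
# Full selection along the FFT axis, single pixel in the other two -> no warning:
spectrum = fft.fftslice(np.s_[:, 10, 20], df=0)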
def __getitem__(self, sel):
    # Get full addressed slice from selection.
    full_selection = full_slice(sel, len(self.data.shape))
    slicebase_wo_stackaxis = np.delete(full_selection, self.dstackAxisID)
    shifted_slice_list = []
    # Iterate over all selected elements along dstackAxis:
    for i in iterfy(np.arange(self.data.shape[self.dstackAxisID])[full_selection[self.dstackAxisID]]):
        # Generate full slice of data to shift, by inserting i into slicebase:
        subset_slice = tuple(np.insert(slicebase_wo_stackaxis, self.dstackAxisID, i))
        # Get shiftvector for the stack element i:
        shift = self.generate_shiftvector(i)
        # Get the shifted data from the Data_Handler method:
        shifted_data = self.data.get_datafield(0).data.shift_slice(subset_slice, shift,
                                                                   order=self.interpolation_order)
        # Attach data to list:
        shifted_slice_list.append(shifted_data)
    if len(shifted_slice_list) < 2:
        # We shifted only a single slice along the stackAxis:
        return shifted_slice_list[0]
    else:
        # We shifted several slices, so we have to stack them together again.
        return shifted_slice_list[0].__class__.stack(shifted_slice_list)
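
# --- The slice bookkeeping above builds on np.delete/np.insert over an object array of
# per-dimension selections. A minimal NumPy-only sketch of that pattern, assuming a 3D
# dataset with the stack axis at index 0: ---
import numpy as np
full_selection = np.array([np.s_[:]] * 3, dtype=object)  # (":", ":", ":")
slicebase = np.delete(full_selection, 0)                 # drop the stack axis
subset_slice = tuple(np.insert(slicebase, 0, 2))         # re-insert index 2 at the stack axis
assert subset_slice == (2, np.s_[:], np.s_[:])           # stack element 2, full in the rest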
def filteredslice(self, s, component, df=0):
    """
    Filtered slice of the instance input data.

    :param s: A slice addressing a region of the data to return.
        Can be conveniently made with `numpy.s_[]`.
    :type s: slice

    :param component: The frequency component to return.
    :type component: int

    :param df: The DataArray in the given DataSet to filter, can be specified if multiple are present.
        Given as a valid identifier (index or label).
    :type df: int or str

    :return: The filtered data.
    :rtype: numpy.ndarray
    """
    s = full_slice(s, self.indata.dimensions)
    if s[self.filter_axis_id] != np.s_[:]:
        warnings.warn("Frequency filtering a slice that is not full along the filter axis "
                      "might return bad results")
    df_in = self.indata.get_datafield(df)
    timedata = df_in.data[s]
    filtered_data = self.butters[component].filtered(timedata, axis=self.filter_axis_id)
    return u.to_ureg(filtered_data, df_in.get_unit())
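
# --- Usage sketch (hypothetical names): `ff` stands in for an instance of the filtering
# class providing `filteredslice`; component 0 addresses the first of its Butterworth
# band filters. ---
import numpy as np
lowband = ff.filteredslice(np.s_[:, 5, 5], component=0, df=0)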
def source_slice_from_target_slice(self, target_slice):
    """
    Builds a slice addressing the input data from a slice addressing the result data,
    by inserting a full selection along the fit axis.

    :param target_slice: A slice addressing a selection on the result data.

    :return: A slice addressing the corresponding selection in the input data.
    """
    slice_list = list(full_slice(target_slice))
    slice_list.insert(self.fitaxis_ID, np.s_[:])
    return tuple(slice_list)
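
# --- A worked example of the slice translation, assuming the fit collapsed axis 1
# (fitaxis_ID == 1) of a 3D input: ---
import numpy as np
target = (3, np.s_[0:5])        # selection on the 2D result data
slice_list = list(target)
slice_list.insert(1, np.s_[:])  # re-insert a full selection along the fit axis
assert tuple(slice_list) == (3, np.s_[:], np.s_[0:5])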
def corrected_slice(self, sel, dtype=None):
    """
    Return the shifted data for a selection (slice) of the data.

    :param sel: A selection (slice) addressing a range of the data.

    :param dtype: The numeric data type of the corrected data. By default, the type of the
        original data is kept. This means that for an approximation to non-integer values,
        a float type must be given here.
    :type dtype: numpy.dtype or castable

    :return: DataArray containing the shifted data.
    """
    # Get full addressed slice from selection.
    full_selection = full_slice(sel, len(self.data.shape))
    slicebase_wo_stackaxis = np.delete(full_selection, self.dstackAxisID)
    shifted_slice_list = []
    # Iterate over all selected elements along dstackAxis:
    for i in iterfy(np.arange(self.data.shape[self.dstackAxisID])[full_selection[self.dstackAxisID]]):
        # Generate full slice of data to shift, by inserting i into slicebase:
        subset_slice = tuple(np.insert(slicebase_wo_stackaxis, self.dstackAxisID, i))
        # Get shiftvector for the stack element i:
        shift = self.generate_shiftvector(i)
        # Get the shifted data from the Data_Handler method:
        shifted_data = self.data.get_datafield(0).data.shift_slice(subset_slice, shift,
                                                                   order=self.interpolation_order,
                                                                   output=dtype)
        # Attach data to list:
        shifted_slice_list.append(shifted_data)
    if len(shifted_slice_list) < 2:
        # We shifted only a single slice along the stackAxis:
        return shifted_slice_list[0]
    else:
        # We shifted several slices, so we have to stack them together again.
        return shifted_slice_list[0].__class__.stack(shifted_slice_list)
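
# --- Usage sketch (hypothetical names): `corrector` stands in for a driftcorrection
# instance. Requesting a float output keeps the subpixel interpolation from being
# truncated to an integer input type. ---
import numpy as np
corrected = corrector.corrected_slice(np.s_[0:10], dtype=np.float32)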
def corrected_data(self, h5target=None):
    """
    Return the full dataset with maxima-map corrected data.
    For that, the data in each xy pixel is shifted along the energy axis.
    """
    # Address the DataArray with all the data:
    fulldata = self.data.get_datafield(0)
    assert isinstance(fulldata, snomtools.data.datasets.DataArray)
    if h5target:
        # --- Prepare data as iterable slices in chunks, calculate driftcorrected data and write it to dh ---
        # Probe HDF5 initialization to optimize buffer size for an xy chunk along the full energy and time axes:
        chunk_size = snomtools.data.h5tools.probe_chunksize(shape=self.data.shape)
        min_cache_size = np.prod(self.data.shape, dtype=np.int64) // (self.data.shape[self.dxAxisID]) // \
                         (self.data.shape[self.dyAxisID]) * chunk_size[self.dxAxisID] * \
                         chunk_size[self.dyAxisID] * 4  # 32-bit floats require 4 bytes.
        use_cache_size = min_cache_size + 128 * 1024 ** 2  # Add 128 MB just to be sure.
        # Initialize data handler to write to:
        dh = snomtools.data.datasets.Data_Handler_H5(unit=str(self.data.datafields[0].units),
                                                     shape=self.data.shape,
                                                     chunk_cache_mem_size=use_cache_size)
        if verbose:
            import time
            start_time = time.time()
            print(time.ctime())
            xychunks = self.data.shape[self.dxAxisID] * self.data.shape[self.dyAxisID] // \
                       fulldata.data.chunks[self.dyAxisID] // fulldata.data.chunks[self.dxAxisID]
            chunks_done = 0
            print("Calculating {0} driftcorrected slices...".format(xychunks))
        # Get full slice for all the data in the xy chunk:
        full_selection = full_slice(np.s_[:], len(self.data.shape))
        # Delete the y axis to prepare insertion of the iteration variable for y:
        slicebase_wo_yaxis = np.delete(full_selection, self.dyAxisID)
        # Create a cache array with the full size in the energy and time axes,
        # therefore remove xy from fulldata.shape:
        datasize = list(fulldata.shape)
        xy_indexes = [self.dyAxisID, self.dxAxisID]
        xy_indexes.sort()
        xy_indexes.reverse()
        for dimension in xy_indexes:
            datasize.pop(dimension)
        # The cache array is later used for every xy to cache the shifted data of each xy pixel's stack:
        cache_array = np.empty(shape=tuple(datasize), dtype=np.float32)
        # Work on the slices that are contained in the same chunk for xy -> fast:
        for chunkslice in fulldata.data.iterchunkslices(dims=(self.dyAxisID, self.dxAxisID)):
            if verbose:
                step_starttime = time.time()
            # Create a big cache array in which the calculated cache arrays are buffered,
            # so only one write process per chunk occurs -> fast:
            bigger_cache_array = np.empty(shape=sliced_shape(chunkslice, fulldata.shape), dtype=np.float32)
            # Address the full data of the chunkslice as a numpy array:
            fulldata_chunk = snomtools.data.datasets.Data_Handler_np(fulldata.data.ds_data[chunkslice],
                                                                     fulldata.get_unit())
            # Define yslice as the y axis in chunkslice:
            yslice = chunkslice[self.dyAxisID]
            assert isinstance(yslice, slice)
            # Find the end of the y data: either the end of the slice, or the end of the
            # y axis if yslice.stop is not defined:
            if yslice.stop is None:
                upper_lim = fulldata.shape[self.dyAxisID]
            else:
                upper_lim = yslice.stop
            # Iterate over all elements along dyAxis in the chunkslice:
            for i in range(yslice.start, upper_lim):
                # Insert i as iterator into the slicebase without the y axis:
                intermediate_slice = np.insert(slicebase_wo_yaxis, self.dyAxisID, i)
                # Create a slice with relative coordinates: "yslice.start" is the absolute
                # position of the data and "i - yslice.start" the relative position in the slice.
                intermediate_slice_relative = np.insert(slicebase_wo_yaxis, self.dyAxisID,
                                                        i - yslice.start)
                # Delete the x axis, analogous to the y axis earlier:
                slicebase_wo_xyaxis = np.delete(intermediate_slice, self.dxAxisID)
                slicebase_wo_xyaxis_relative = np.delete(intermediate_slice_relative, self.dxAxisID)
                # Define xslice as the x axis in chunkslice:
                xslice = chunkslice[self.dxAxisID]
                assert isinstance(xslice, slice)
                # Find the end of the x data: either the end of the slice, or the end of the
                # x axis if xslice.stop is not defined:
                if xslice.stop is None:
                    upper_lim = fulldata.shape[self.dxAxisID]
                else:
                    upper_lim = xslice.stop
                # Iterate over all elements along dxAxis in the chunkslice:
                for j in range(xslice.start, upper_lim):
                    # subset_slice = tuple(np.insert(slicebase_wo_xyaxis, self.dxAxisID, j))
                    # Insert "j - xslice.start" as relative iteration variable at the
                    # x axis position in the slice:
                    subset_slice_relative = tuple(np.insert(slicebase_wo_xyaxis_relative,
                                                            self.dxAxisID, j - xslice.start))
                    # Get shiftvector for the stack element at y,x coordinates i,j:
                    shift = self.generate_shiftvector((i, j))
                    if self.subpixel:
                        # -- Calculate shifted data via .shift_slice --
                        # Get the shifted data from the Data_Handler method and put it into the cache array:
                        fulldata_chunk.shift_slice(subset_slice_relative, shift, output=cache_array,
                                                   order=self.interpolation_order)
                        # Write shifted data to the corresponding place in the bigger cache array:
                        bigger_cache_array[subset_slice_relative] = cache_array
                    else:
                        # -- Calculate shifted data via shifted numpy arrays. Integer shift only. --
                        # Cast the shift in the coordinate of the energy axis to int:
                        shift = np.rint(shift[self.deAxisID]).astype(int)
                        if shift == 0:
                            # If shift == 0, write the data in subset_slice_relative
                            # directly to the bigger cache array:
                            bigger_cache_array[subset_slice_relative] = \
                                fulldata_chunk.magnitude[subset_slice_relative]
                        else:
                            # Create slices to cut out the kept data, address its target
                            # position, and fill the rest with NaN:
                            sourceslice = list(subset_slice_relative)
                            targetslice = list(subset_slice_relative)
                            restslice = list(subset_slice_relative)
                            # Since the energy axis is shifted, the slices are changed along the deAxisID axis:
                            if shift < 0:
                                # shift < 0 -> data has to be shifted down:
                                s = abs(shift)
                                sourceslice[self.deAxisID] = np.s_[s:]    # Data from s to the end is kept.
                                targetslice[self.deAxisID] = np.s_[:-s]  # It lands at 0 to end-s.
                                restslice[self.deAxisID] = np.s_[-s:]    # Positions end-s to end become NaN.
                            else:
                                # shift > 0 -> data has to be shifted up:
                                s = abs(shift)
                                sourceslice[self.deAxisID] = np.s_[:-s]  # Data from 0 to end-s is kept.
                                targetslice[self.deAxisID] = np.s_[s:]   # It lands starting at s.
                                restslice[self.deAxisID] = np.s_[:s]     # Positions 0 to s become NaN.
                            # Remove the x and y dimensions so the size fits:
                            for dimension in xy_indexes:
                                targetslice.pop(dimension)
                                restslice.pop(dimension)
                            # Write the data using the generated slices, addressing the source
                            # in fulldata and the target in cache_array:
                            cache_array[tuple(restslice)] = np.nan
                            cache_array[tuple(targetslice)] = fulldata_chunk.magnitude[tuple(sourceslice)]
                            # Write cache_array to its subset_slice_relative position
                            # in the bigger_cache_array:
                            bigger_cache_array[subset_slice_relative] = cache_array
            # After the whole chunkslice is shifted, pass it to the h5 data handler:
            dh[chunkslice] = bigger_cache_array
            if verbose:
                chunks_done += 1
                print('data interpolated and written in {0:.2f} s'.format(time.time() - step_starttime))
                tpf = (time.time() - start_time) / float(chunks_done)
                etr = tpf * (xychunks - chunks_done)
                print("Slice {0:d} / {1:d}, Time/slice {3:.2f}s ETR: {2:.1f}s".format(
                    chunks_done, xychunks, etr, tpf))
        # Initialize DataArray with data from dh:
        newda = snomtools.data.datasets.DataArray(dh, label=fulldata.label, plotlabel=fulldata.plotlabel,
                                                  h5target=dh.h5target)
    else:
        # If no h5target is given:
        newda = snomtools.data.datasets.DataArray(self[:], label=fulldata.label,
                                                  plotlabel=fulldata.plotlabel)
    # Put all the shifted data and old axes together into the new DataSet:
    newds = snomtools.data.datasets.DataSet(self.data.label + " maximacorrected", (newda,),
                                            self.data.axes, self.data.plotconf, h5target=h5target)
    return newds
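
# --- The integer-shift branch above is the n-dimensional slice formulation of this 1D
# operation; a minimal NumPy sketch, not the class method itself: ---
import numpy as np

def int_shift_nan(arr, shift):
    """Shift a 1D float array by an integer number of bins, filling vacated bins with NaN."""
    out = np.full_like(arr, np.nan, dtype=float)
    if shift == 0:
        out[:] = arr
    elif shift < 0:
        s = abs(shift)
        out[:-s] = arr[s:]  # shift down: keep arr[s:] ("sourceslice"), NaN at the end ("restslice")
    else:
        s = abs(shift)
        out[s:] = arr[:-s]  # shift up: keep arr[:-s], NaN at the start
    return out

# int_shift_nan(np.array([1., 2., 3., 4.]), 1) -> [nan, 1., 2., 3.]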
def corrected_data(self, h5target=None, dtype=None):
    """
    Return the full driftcorrected dataset. 2D data is shifted for each position along the stack
    to negate the drift. Shifting is done with the Data_Handler_H5/_np shift_slice methods,
    which have scipy.ndimage.interpolation.shift under the hood.
    If h5target is given, the calculations are done chunk-wise for optimal performance.

    :param h5target: A hdf5 target (path for a hdf5 file or h5py Group) to write to.

    :param dtype: The numeric data type of the corrected data. By default, the type of the
        original data is kept. This means that for an approximation to non-integer values,
        a float type must be given here.
    :type dtype: numpy.dtype or castable

    :return: The driftcorrected DataSet.
    """
    oldda = self.data.get_datafield(0)
    if dtype is None:
        dtype = oldda.dtype
    else:
        dtype = np.dtype(dtype)
    if h5target:
        # TODO: Implement chunk-wise iteration. E.g. t,E,y,x resolved data has chunks
        #  (12,6,41,41) with dim (383,81,650,650) = 1.6 GB.
        # Optimize buffer size:
        use_cache_size = buffer_needed(self.data.shape,
                                       [np.s_[:] if dim != self.dstackAxisID else 0
                                        for dim in range(self.data.dimensions)],
                                       dtype=dtype)
        # Initialize data handler to write to:
        dh = snomtools.data.datasets.Data_Handler_H5(unit=str(self.data.datafields[0].units),
                                                     shape=self.data.shape,
                                                     chunk_cache_mem_size=use_cache_size,
                                                     dtype=dtype)
        # Calculate driftcorrected data and write it to dh:
        if verbose:
            import time
            start_time = time.time()
            print(time.ctime())
            print("Calculating {0} driftcorrected slices...".format(self.data.shape[self.dstackAxisID]))
        # Get full slice for all the data:
        full_selection = full_slice(np.s_[:], len(self.data.shape))
        slicebase_wo_stackaxis = np.delete(full_selection, self.dstackAxisID)
        # Iterate over all elements along dstackAxis:
        for i in range(self.data.shape[self.dstackAxisID]):
            # Generate full slice of data to shift, by inserting i into slicebase:
            subset_slice = tuple(np.insert(slicebase_wo_stackaxis, self.dstackAxisID, i))
            # Get shiftvector for the stack element i:
            shift = self.generate_shiftvector(i)
            if verbose:
                step_starttime = time.time()
            # Get the shifted data from the Data_Handler method:
            shifted_data = self.data.get_datafield(0).data.shift_slice(subset_slice, shift,
                                                                       order=self.interpolation_order,
                                                                       output=dtype)
            if verbose:
                print('interpolation done in {0:.2f} s'.format(time.time() - step_starttime))
                step_starttime = time.time()
            # Write shifted data to corresponding place in dh:
            dh[subset_slice] = shifted_data
            if verbose:
                print('data written in {0:.2f} s'.format(time.time() - step_starttime))
                tpf = (time.time() - start_time) / float(i + 1)
                etr = tpf * (self.data.shape[self.dstackAxisID] - (i + 1))
                print("Slice {0:d} / {1:d}, Time/slice {3:.2f}s ETR: {2:.1f}s".format(
                    i, self.data.shape[self.dstackAxisID], etr, tpf))
        # Initialize DataArray with data from dh:
        newda = snomtools.data.datasets.DataArray(dh, label=oldda.label, plotlabel=oldda.plotlabel,
                                                  h5target=dh.h5target)
    else:
        newda = snomtools.data.datasets.DataArray(self.corrected_slice(np.s_[:], dtype=dtype),
                                                  label=oldda.label, plotlabel=oldda.plotlabel)
    # Put all the shifted data and old axes together into the new DataSet:
    newds = snomtools.data.datasets.DataSet(self.data.label + " driftcorrected", (newda,),
                                            self.data.axes, self.data.plotconf, h5target=h5target)
    return newds
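
# --- Per the docstring above, shift_slice wraps scipy's ndimage shift. A standalone sketch
# of what happens to one 2D stack element (data and shiftvector are hypothetical): ---
import numpy as np
import scipy.ndimage

frame = np.random.rand(64, 64)  # one 2D slice of the stack
drift = (1.5, -0.75)            # (y, x) shiftvector for this stack element
corrected = scipy.ndimage.shift(frame, drift, order=1, cval=np.nan)  # linear interpolation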
def buffer_needed(shape=None, access=None, chunks=None, data=None, dtype=None, safety_margin=True):
    """
    Calculate the buffer size needed for an access pattern.
    The data to work on can be described by providing the Data_Handler_H5 itself,
    or by providing its shape and chunk size. The dtype can be given in the same way,
    or is assumed as the system-default float.
    If data and explicit parameters are given, only the missing parameters are taken from data.

    :param shape: The shape of the data to work on.
    :type shape: tuple of int

    :param access: A slice corresponding to the used access pattern.
    :type access: tuple **or** slice **or** int

    :param chunks: The chunk size of the data to work on.
    :type chunks: tuple of int

    :param data: Data to use as reference for the parameters. Must provide the attributes
        `shape` and `chunks` for any parameters not given explicitly.
    :type data: snomtools.data.datasets.Data_Handler_H5, or anything with corresponding attributes.

    :param dtype: The dtype of the data to work on.

    :param safety_margin: Add a safety margin to the calculated needed buffer size.
        Can be given explicitly in bytes, or if `True`, the default buffer size
        `chunk_cache_mem_size_default` is added.
    :type safety_margin: bool or int

    :return: The needed buffer size in bytes.
    :rtype: int
    """
    # Handle given parameters:
    if shape is not None:
        if dtype is None:
            dtype = float
        if chunks is None:
            chunks = probe_chunksize(shape, dtype=dtype)
    elif data is not None:
        shape = data.shape
        if chunks is None:
            chunks = data.chunks
        if dtype is None:
            dtype = data.dtype
    else:
        raise ValueError("Insufficient data given.")
    access = full_slice(access, len(shape))
    if not safety_margin:
        safety_margin = 0
    elif safety_margin is True:
        safety_margin = chunk_cache_mem_size_default

    # Calculate needed chunks:
    chunks_needed = [0 for dim in range(len(shape))]
    for dim in range(len(shape)):  # For each dimension:
        if access[dim] == np.s_[:]:
            # Full slice: All chunks, including a possible overhang.
            chunks_needed[dim] = shape[dim] // chunks[dim]
            if shape[dim] % chunks[dim]:
                chunks_needed[dim] += 1
        elif isinstance(access[dim], int):
            # A single index: Only one chunk.
            chunks_needed[dim] = 1
        else:
            # A fancier slice: Look at the chunk alignment and check for each chunk
            # if there is an element selected.
            chunk_alignment = np.array([i // chunks[dim] for i in range(shape[dim])])
            n_chunks = shape[dim] // chunks[dim]
            if shape[dim] % chunks[dim]:
                n_chunks += 1
            for c in range(n_chunks):
                if c in chunk_alignment[access[dim]]:
                    chunks_needed[dim] += 1
    # Total chunks needed is the product of the chunks needed along each dimension:
    chunks_needed = np.prod(chunks_needed, dtype=np.uint64)
    # Elements per chunk is the product of the chunk size:
    elements_needed = chunks_needed * np.prod(chunks, dtype=np.uint64)
    return int(elements_needed) * np.dtype(dtype).itemsize + safety_margin
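
# --- A worked example of the chunk arithmetic, assuming full_slice normalizes the tuple
# as in the calls above: selecting one stack element of a (100, 512, 512) float32 dataset
# chunked as (10, 64, 64) touches 1 * 8 * 8 = 64 chunks, each holding 10*64*64 elements
# of 4 bytes. ---
import numpy as np
needed = buffer_needed(shape=(100, 512, 512), access=np.s_[0, :, :],
                       chunks=(10, 64, 64), dtype=np.float32, safety_margin=False)
assert needed == 64 * (10 * 64 * 64) * 4  # 10485760 bytes, i.e. 10 MiB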
def corrected_data(self, h5target=None):
    """Return the full driftcorrected dataset."""
    oldda = self.data.get_datafield(0)
    assert isinstance(oldda, snomtools.data.datasets.DataArray)
    if h5target:
        # Probe HDF5 initialization to optimize buffer size:
        chunk_size = snomtools.data.h5tools.probe_chunksize(shape=self.data.shape)
        min_cache_size = np.prod(self.data.shape, dtype=np.int64) // (self.data.shape[self.dxAxisID]) // \
                         (self.data.shape[self.dyAxisID]) * chunk_size[self.dxAxisID] * \
                         chunk_size[self.dyAxisID] * 4  # 32-bit floats require 4 bytes.
        use_cache_size = min_cache_size + 128 * 1024 ** 2  # Add 128 MB just to be sure.
        # Initialize data handler to write to:
        dh = snomtools.data.datasets.Data_Handler_H5(unit=str(self.data.datafields[0].units),
                                                     shape=self.data.shape,
                                                     chunk_cache_mem_size=use_cache_size)
        # Calculate driftcorrected data and write it to dh:
        if verbose:
            import time
            start_time = time.time()
            print(time.ctime())
            xychunks = self.data.shape[self.dxAxisID] * self.data.shape[self.dyAxisID] // \
                       oldda.data.chunks[self.dyAxisID] // oldda.data.chunks[self.dxAxisID]
            chunks_done = 0
            print("Calculating {0} driftcorrected slices...".format(xychunks))
        # Get full slice for all the data:
        full_selection = full_slice(np.s_[:], len(self.data.shape))
        # Delete y axis:
        slicebase_wo_yaxis = np.delete(full_selection, self.dyAxisID)
        datasize = list(oldda.shape)
        xy_indexes = [self.dyAxisID, self.dxAxisID]
        xy_indexes.sort()
        xy_indexes.reverse()
        for dimension in xy_indexes:
            datasize.pop(dimension)
        cache_array = np.empty(shape=tuple(datasize), dtype=np.float32)
        for chunkslice in oldda.data.iterchunkslices(dims=(self.dyAxisID, self.dxAxisID)):
            if verbose:
                step_starttime = time.time()
            bigger_cache_array = np.empty(shape=sliced_shape(chunkslice, oldda.shape), dtype=np.float32)
            oldda_chunk = snomtools.data.datasets.Data_Handler_np(oldda.data.ds_data[chunkslice],
                                                                  oldda.get_unit())
            yslice = chunkslice[self.dyAxisID]
            assert isinstance(yslice, slice)
            if yslice.stop is None:
                upper_lim = oldda.shape[self.dyAxisID]
            else:
                upper_lim = yslice.stop
            # Iterate over all elements along dyAxis, inserting i as iterator into the slicebase:
            for i in range(yslice.start, upper_lim):
                intermediate_slice = np.insert(slicebase_wo_yaxis, self.dyAxisID, i)
                intermediate_slice_relative = np.insert(slicebase_wo_yaxis, self.dyAxisID,
                                                        i - yslice.start)
                # Delete x axis:
                slicebase_wo_xyaxis = np.delete(intermediate_slice, self.dxAxisID)
                slicebase_wo_xyaxis_relative = np.delete(intermediate_slice_relative, self.dxAxisID)
                xslice = chunkslice[self.dxAxisID]
                assert isinstance(xslice, slice)
                if xslice.stop is None:
                    upper_lim = oldda.shape[self.dxAxisID]
                else:
                    upper_lim = xslice.stop
                # Iterate over all elements along dxAxis, inserting j as iterator into the slicebase:
                for j in range(xslice.start, upper_lim):
                    subset_slice = tuple(np.insert(slicebase_wo_xyaxis, self.dxAxisID, j))
                    subset_slice_relative = tuple(np.insert(slicebase_wo_xyaxis_relative,
                                                            self.dxAxisID, j - xslice.start))
                    # Get shiftvector for the stack element at y,x coordinates i,j:
                    shift = self.generate_shiftvector((i, j))
                    if self.subpixel:
                        # Get the shifted data from the Data_Handler method:
                        oldda_chunk.shift_slice(subset_slice_relative, shift, output=cache_array,
                                                order=self.interpolation_order)
                        # Write shifted data to the corresponding place in the bigger cache array:
                        bigger_cache_array[subset_slice_relative] = cache_array
                    else:
                        shift = np.rint(shift[self.deAxisID]).astype(int)
                        if shift == 0:
                            bigger_cache_array[subset_slice_relative] = \
                                oldda_chunk.magnitude[subset_slice_relative]
                        else:
                            oldslice = list(subset_slice_relative)
                            newslice = list(subset_slice_relative)
                            restslice = list(subset_slice_relative)
                            if shift < 0:
                                s = abs(shift)
                                oldslice[self.deAxisID] = np.s_[s:]
                                newslice[self.deAxisID] = np.s_[:-s]
                                restslice[self.deAxisID] = np.s_[-s:]
                            else:
                                s = abs(shift)
                                oldslice[self.deAxisID] = np.s_[:-s]
                                newslice[self.deAxisID] = np.s_[s:]
                                restslice[self.deAxisID] = np.s_[:s]
                            for dimension in xy_indexes:
                                newslice.pop(dimension)
                                restslice.pop(dimension)
                            cache_array[tuple(restslice)] = np.nan
                            cache_array[tuple(newslice)] = oldda_chunk.magnitude[tuple(oldslice)]
                            bigger_cache_array[subset_slice_relative] = cache_array
            dh[chunkslice] = bigger_cache_array
            if verbose:
                chunks_done += 1
                print('data interpolated and written in {0:.2f} s'.format(time.time() - step_starttime))
                tpf = (time.time() - start_time) / float(chunks_done)
                etr = tpf * (xychunks - chunks_done)
                print("Slice {0:d} / {1:d}, Time/slice {3:.2f}s ETR: {2:.1f}s".format(
                    chunks_done, xychunks, etr, tpf))
        # Initialize DataArray with data from dh:
        newda = snomtools.data.datasets.DataArray(dh, label=oldda.label, plotlabel=oldda.plotlabel,
                                                  h5target=dh.h5target)
    else:
        newda = snomtools.data.datasets.DataArray(self[:], label=oldda.label, plotlabel=oldda.plotlabel)
    # Put all the shifted data and old axes together into the new DataSet:
    newds = snomtools.data.datasets.DataSet(self.data.label + " maximacorrected", (newda,),
                                            self.data.axes, self.data.plotconf, h5target=h5target)
    return newds
def corrected_data(self, h5target=None):
    """Return the full driftcorrected dataset."""
    oldda = self.data.get_datafield(0)
    if h5target:
        # Probe HDF5 initialization to optimize buffer size:
        chunk_size = snomtools.data.h5tools.probe_chunksize(shape=self.data.shape)
        min_cache_size = np.prod(self.data.shape, dtype=np.int64) // self.data.shape[self.dstackAxisID] * \
                         chunk_size[self.dstackAxisID] * 4  # 32-bit floats require 4 bytes.
        use_cache_size = min_cache_size + 128 * 1024 ** 2  # Add 128 MB just to be sure.
        # Initialize data handler to write to:
        dh = snomtools.data.datasets.Data_Handler_H5(unit=str(self.data.datafields[0].units),
                                                     shape=self.data.shape,
                                                     chunk_cache_mem_size=use_cache_size)
        # Calculate driftcorrected data and write it to dh:
        if verbose:
            import time
            start_time = time.time()
            print(time.ctime())
            print("Calculating {0} driftcorrected slices...".format(self.data.shape[self.dstackAxisID]))
        # Get full slice for all the data:
        full_selection = full_slice(np.s_[:], len(self.data.shape))
        slicebase_wo_stackaxis = np.delete(full_selection, self.dstackAxisID)
        # Iterate over all elements along dstackAxis:
        for i in range(self.data.shape[self.dstackAxisID]):
            # Generate full slice of data to shift, by inserting i into slicebase:
            subset_slice = tuple(np.insert(slicebase_wo_stackaxis, self.dstackAxisID, i))
            # Get shiftvector for the stack element i:
            shift = self.generate_shiftvector(i)
            if verbose:
                step_starttime = time.time()
            # Get the shifted data from the Data_Handler method:
            shifted_data = self.data.get_datafield(0).data.shift_slice(subset_slice, shift,
                                                                       order=self.interpolation_order)
            if verbose:
                print('interpolation done in {0:.2f} s'.format(time.time() - step_starttime))
                step_starttime = time.time()
            # Write shifted data to corresponding place in dh:
            dh[subset_slice] = shifted_data
            if verbose:
                print('data written in {0:.2f} s'.format(time.time() - step_starttime))
                tpf = (time.time() - start_time) / float(i + 1)
                etr = tpf * (self.data.shape[self.dstackAxisID] - (i + 1))
                print("Slice {0:d} / {1:d}, Time/slice {3:.2f}s ETR: {2:.1f}s".format(
                    i, self.data.shape[self.dstackAxisID], etr, tpf))
        # Initialize DataArray with data from dh:
        newda = snomtools.data.datasets.DataArray(dh, label=oldda.label, plotlabel=oldda.plotlabel,
                                                  h5target=dh.h5target)
    else:
        newda = snomtools.data.datasets.DataArray(self[:], label=oldda.label, plotlabel=oldda.plotlabel)
    # Put all the shifted data and old axes together into the new DataSet:
    newds = snomtools.data.datasets.DataSet(self.data.label + " driftcorrected", (newda,),
                                            self.data.axes, self.data.plotconf, h5target=h5target)
    return newds
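
# --- Usage sketch (hypothetical names): `corrector` stands in for a driftcorrection
# instance; giving an h5target makes the correction run slice-wise against the HDF5
# buffer sized above instead of in memory. ---
corrected_ds = corrector.corrected_data(h5target="driftcorrected.hdf5")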