def setup_method(self, method):
    """Create three signals sharing the same underlying data.

    Dimensions:
      s1  : <BaseSignal, dimensions: (4, 3, 2|2, 3)>
      s2  : <BaseSignal, dimensions: (2, 3|4, 3, 2)>
      s12 : <BaseSignal, dimensions: (2, 3|4, 3, 2)>
    where s12's data is transposed with respect to s2's.
    """
    raw = np.random.random((2, 3, 4, 3, 2))
    rolled = np.rollaxis(np.rollaxis(raw, -1), -1)
    s1 = signals.BaseSignal(raw.copy())
    s2 = signals.BaseSignal(rolled)
    s12 = signals.BaseSignal(raw.copy())
    # First three axes of s1 navigate, the rest are signal axes.
    for idx, axis in enumerate(s1.axes_manager._axes):
        axis.navigate = idx < 3
    # First two axes of s2 navigate.
    for idx, axis in enumerate(s2.axes_manager._axes):
        axis.navigate = idx < 2
    # s12: flags inverted relative to s1 (last two axes navigate).
    for idx, axis in enumerate(s12.axes_manager._axes):
        axis.navigate = idx >= 3
    self.s1 = s1
    self.s2 = s2
    self.s12 = s12
def test_error_axes():
    """Decomposition must refuse a BaseSignal with no navigation axes."""
    generator = np.random.RandomState(123)
    signal = signals.BaseSignal(generator.random((20, 100)))
    with pytest.raises(AttributeError,
                       match="not possible to decompose a dataset"):
        signal.decomposition()
def setup_method(self, method):
    """Attach dummy factors/loadings learning results to a trivial signal."""
    sig = signals.BaseSignal(np.zeros(1))
    self.factors = np.ones([2, 3])
    self.loadings = np.ones([2, 3])
    sig.learning_results.factors = self.factors.copy()
    sig.learning_results.loadings = self.loadings.copy()
    self.s = sig
def _create_signal(shape, dim, dtype, metadata):
    """Build a test signal of the requested shape, dimensionality and dtype.

    Parameters
    ----------
    shape : tuple of int
        Shape of the generated ``arange`` data.
    dim : int
        Signal dimensionality; 1 selects an EELS/EDS spectrum class,
        anything else a transposed BaseSignal.
    dtype : numpy dtype
        Data type the generated array is cast to.
    metadata : bool
        If True, populate microscope parameters / general metadata.

    Returns
    -------
    BaseSignal subclass instance with named, calibrated axes.
    """
    # BUGFIX: ``np.product`` is a deprecated alias removed in NumPy 2.0;
    # ``np.prod`` is the supported spelling and behaves identically.
    data = np.arange(np.prod(shape)).reshape(shape).astype(dtype)
    if dim == 1:
        if len(shape) > 2:
            s = signals.EELSSpectrum(data)
            if metadata:
                s.set_microscope_parameters(beam_energy=100.,
                                            convergence_angle=1.,
                                            collection_angle=10.)
        else:
            s = signals.EDSTEMSpectrum(data)
            if metadata:
                s.set_microscope_parameters(beam_energy=100.,
                                            live_time=1.,
                                            tilt_stage=2.,
                                            azimuth_angle=3.,
                                            elevation_angle=4.,
                                            energy_resolution_MnKa=5.)
    else:
        s = signals.BaseSignal(data).transpose(signal_axes=dim)
    if metadata:
        s.metadata.General.date = "2016-08-06"
        s.metadata.General.time = "10:55:00"
        s.metadata.General.title = "Test title"
    # Give every axis a distinct, 1-based calibration so round-trips
    # through file formats can be checked axis by axis.
    for i, axis in enumerate(s.axes_manager._axes):
        i += 1
        axis.offset = i * 0.5
        axis.scale = i * 100
        axis.name = "%i" % i
        if axis.navigate:
            axis.units = "m"
        else:
            axis.units = "eV"
    return s
def _read_signal_from_group(self, name, group, load_to_memory=True):
    """Read one signal (data, axes, metadata) from an EMD HDF5 group.

    Parameters
    ----------
    name : str
        Name under which the signal is registered via ``self.add_signal``.
    group : h5py.Group
        Group holding a ``data`` dataset and ``dim1`` ... ``dimN`` axes.
    load_to_memory : bool
        If True, materialise the dataset as a numpy array.
    """
    self._log.debug('Calling _read_signal_from_group')
    from hyperspy import signals
    # Extract essential data:
    data = group.get('data')
    if load_to_memory:
        data = np.asanyarray(data)
    # EMD does not have a standard way to describe the signal axis.
    # Therefore we return a BaseSignal
    signal = signals.BaseSignal(data)
    # BUGFIX: the original code *assigned* strings to
    # ``signal.set_signal_origin`` / ``signal.set_signal_type`` (shadowing
    # the bound methods) instead of calling them, so neither property was
    # ever applied.  NOTE(review): assumes these setter methods exist on
    # BaseSignal in this HyperSpy version — confirm against the API.
    signal.set_signal_origin(group.attrs.get('signal_origin', ''))
    signal.set_signal_type(group.attrs.get('signal_type', ''))
    # Iterate over all dimensions:
    for i in range(len(data.shape)):
        dim = group.get('dim{}'.format(i + 1))
        axis = signal.axes_manager._axes[i]
        axis.name = dim.attrs.get('name', '')
        # Raw string avoids the invalid-escape-sequence warning for ``\W``;
        # the pattern keeps word characters except underscores.
        units = re.findall(r'[^_\W]+', dim.attrs.get('units', ''))
        axis.units = ''.join(units)
        try:
            axis.scale = dim[1] - dim[0]
            axis.offset = dim[0]
        # Hyperspy then uses defaults (1.0 and 0.0)!
        except (IndexError, TypeError) as e:
            self._log.warning(
                'Could not calculate scale/offset of axis {}: {}'.format(
                    i, e))
    # Extract metadata:
    metadata = {}
    for key, value in group.attrs.items():
        metadata[key] = value
    # Add signal:
    self.add_signal(signal, name, metadata)
def setup_method(self, method):
    """Random six-axis signal, all axes signal dimensions, with variance."""
    self.s = signals.BaseSignal(np.random.rand(1, 2, 3, 4, 5, 6))
    # Name the axes explicitly, just to be safe in case defaults change.
    for axis, label in zip(self.s.axes_manager._axes, 'abcdef'):
        axis.name = label
    self.s.axes_manager.set_signal_dimension(6)
    self.s.estimate_poissonian_noise_variance()
def setUp(self):
    """Populate learning results, including BSS copies, on a trivial signal."""
    sig = signals.BaseSignal(np.zeros(1))
    self.factors = np.ones([2, 3])
    self.loadings = np.ones([2, 3])
    # Both the plain and the BSS slots get independent copies.
    for attr in ('factors', 'bss_factors'):
        setattr(sig.learning_results, attr, self.factors.copy())
    for attr in ('loadings', 'bss_loadings'):
        setattr(sig.learning_results, attr, self.loadings.copy())
    self.s = sig
def test_ragged_slicing(slice_):
    """inav slicing must agree between in-memory and lazy ragged signals."""
    ragged = np.empty((2, 3), dtype=object)
    for index in np.ndindex(ragged.shape):
        ragged[index] = np.random.randint(0, 20,
                                          size=np.random.randint(2, 10))
    sig = signals.BaseSignal(ragged, ragged=True)
    lazy_sig = sig.as_lazy()
    np.testing.assert_allclose(sig.inav[slice_].data[0],
                               lazy_sig.inav[slice_].data[0])
def test_broadcast_in_place(self):
    """An in-place += with broadcasting must modify the operand itself."""
    other = self.s1
    other.axes_manager.set_signal_dimension(1)  # (3|2)
    target = signals.BaseSignal(np.ones((4, 2, 4, 3)))
    alias = target
    target.axes_manager.set_signal_dimension(2)  # (3, 4| 2, 4)
    print(target)
    print(other)
    target += other
    assert_array_equal(target.data, np.full((4, 2, 4, 3), 2.0))
    # In-place addition must not have rebound the variable.
    nt.assert_is(target, alias)
def test_broadcast_in_place(self):
    """In-place += with broadcasting keeps the accumulating signal's identity."""
    other = self.s1.transpose(signal_axes=1)
    target = signals.BaseSignal(np.ones((4, 2, 4, 3))).transpose(
        signal_axes=2)
    alias = target
    print(target)
    print(other)
    target += other
    assert_array_equal(target.data, np.full((4, 2, 4, 3), 2.0))
    # In-place addition must not have rebound the variable.
    nt.assert_is(target, alias)
def _read_signal_from_group(self, name, group, lazy=False):
    """Read one signal (data, axes, metadata) from an EMD HDF5 group.

    Parameters
    ----------
    name : str
        Name under which the signal is registered via ``self.add_signal``.
    group : h5py.Group
        Group holding a ``data`` dataset and ``dim1`` ... ``dimN`` axes.
    lazy : bool
        If True, wrap the dataset in a dask array using the on-disk
        chunking instead of loading it into memory.
    """
    self._log.debug('Calling _read_signal_from_group')
    from hyperspy import signals
    # Extract essential data:
    data = group.get('data')
    if lazy:
        data = da.from_array(data, chunks=data.chunks)
    else:
        data = np.asanyarray(data)
    # EMD does not have a standard way to describe the signal axis.
    # Therefore we return a BaseSignal
    signal = signals.BaseSignal(data)
    # BUGFIX: the original code *assigned* strings to
    # ``signal.set_signal_origin`` / ``signal.set_signal_type`` (shadowing
    # the bound methods) instead of calling them, so neither property was
    # ever applied.  NOTE(review): assumes these setter methods exist on
    # BaseSignal in this HyperSpy version — confirm against the API.
    signal.set_signal_origin(group.attrs.get('signal_origin', ''))
    signal.set_signal_type(group.attrs.get('signal_type', ''))
    # Iterate over all dimensions:
    for i in range(len(data.shape)):
        dim = group.get('dim{}'.format(i + 1))
        axis = signal.axes_manager._axes[i]
        axis_name = dim.attrs.get('name', '')
        if isinstance(axis_name, bytes):
            axis_name = axis_name.decode('utf-8')
        axis.name = axis_name
        axis_units = dim.attrs.get('units', '')
        if isinstance(axis_units, bytes):
            axis_units = axis_units.decode('utf-8')
        # Keep word characters except underscores from the unit string.
        units = re.findall(r'[^_\W]+', axis_units)
        axis.units = ''.join(units)
        try:
            if len(dim) == 1:
                # A single-point axis has no spacing to derive a scale from.
                axis.scale = 1.
                self._log.warning(
                    'Could not calculate scale of axis {}. '
                    'Setting scale to 1'.format(i))
            else:
                axis.scale = dim[1] - dim[0]
            axis.offset = dim[0]
        # HyperSpy then uses defaults (1.0 and 0.0)!
        except (IndexError, TypeError) as e:
            self._log.warning(
                'Could not calculate scale/offset of '
                'axis {}: {}'.format(i, e))
    # Extract metadata:
    metadata = {}
    for key, value in group.attrs.items():
        metadata[key] = value
    # BUGFIX: ``np.object`` is a deprecated alias removed in NumPy 1.24;
    # the builtin ``object`` is the correct dtype spelling.
    if signal.data.dtype == object:
        self._log.warning('HyperSpy could not load the data in {}, '
                          'skipping it'.format(name))
    else:
        # Add signal:
        self.add_signal(signal, name, metadata)
def test_broadcast_in_place(self):
    """In-place += with broadcasting keeps the accumulating signal's identity."""
    if self.s1._lazy:
        pytest.skip("Inplace not supported by LazySignals")
    other = self.s1.transpose(signal_axes=1)
    target = signals.BaseSignal(np.ones((4, 2, 4, 3))).transpose(
        signal_axes=2)
    alias = target
    print(target)
    print(other)
    target += other
    assert_array_equal(target.data, np.full((4, 2, 4, 3), 2.0))
    # In-place addition must not have rebound the variable.
    assert target is alias
def setup_method(self, method):
    """Store a signal carrying a synthetic explained-variance-ratio curve."""
    sig = signals.BaseSignal(np.empty(1))
    sig.learning_results.explained_variance_ratio = np.asarray(
        [10e-1, 5e-2, 9e-3, 1e-3, 9e-5, 5e-5,
         3.0e-5, 2.2e-5, 1.9e-5, 1.8e-5, 1.7e-5, 1.6e-5])
    self.s = sig
def setup_method(self, method):
    """1D signal of 10 points with scale 0.5 / offset 0.25; keep a data copy."""
    self.signal = signals.BaseSignal(np.arange(10))
    self.signal.axes_manager.set_signal_dimension(1)
    axis = self.signal.axes_manager[0]
    axis.scale = 0.5
    axis.offset = 0.25
    self.data = self.signal.data.copy()
def setup_method(self, method):
    """(2, 3, 4) signal with only the middle axis set as navigation."""
    self.signal = signals.BaseSignal(np.arange(24).reshape((2, 3, 4)))
    self.data = self.signal.data.copy()
    for idx, flag in enumerate((False, True, False)):
        self.signal.axes_manager._axes[idx].navigate = flag
def denoised_data_to_signal(self):
    """Wrap the denoised matrix ``self.Y`` in a signal of the stored type.

    Returns a Signal1D for "spectrum", a Signal2D for "image", and
    implicitly None for any other ``self.signal_type``.
    """
    wrapped = signals.BaseSignal(self.Y)
    kind = self.signal_type
    if kind == "spectrum":
        return wrapped.as_signal1D(2)
    elif kind == "image":
        return wrapped.as_signal2D((1, 2))
    return None
def setUp(self):
    """Provide a bare one-element BaseSignal as the fixture."""
    self.s = signals.BaseSignal(np.empty(1))
def setup_method(self, method):
    """Provide a bare one-element BaseSignal as the fixture."""
    self.s = signals.BaseSignal(np.empty(1))
def setUp(self):
    """(2, 3, 4) signal with signal dimension zero; keep a pristine copy."""
    arr = np.arange(24).reshape((2, 3, 4))
    self.signal = signals.BaseSignal(arr)
    self.data = self.signal.data.copy()
    self.signal.axes_manager.set_signal_dimension(0)
def test_error_axes():
    """decomposition() must raise when no navigation axes are defined."""
    sig = signals.BaseSignal(generate_low_rank_matrix())
    with pytest.raises(AttributeError,
                       match="not possible to decompose a dataset"):
        sig.decomposition()
def test_lazy_to_device():
    """Moving a lazy signal to the device is unsupported and must raise."""
    lazy_sig = signals.BaseSignal(np.arange(10)).as_lazy()
    with pytest.raises(BaseException):
        lazy_sig.to_device()
def setup_method(self, method):
    """Transposed (2, 3, 4) signal; keep a pristine copy of its data."""
    base = signals.BaseSignal(np.arange(24).reshape((2, 3, 4)))
    self.signal = base.T
    self.data = self.signal.data.copy()
def setup_method(self, method):
    """Five-axis signal of shape (2,)*5 with a single signal dimension."""
    arr = np.arange(2 ** 5).reshape((2, 2, 2, 2, 2))
    self.signal = signals.BaseSignal(arr)
    self.signal.axes_manager.set_signal_dimension(1)
    self.data = self.signal.data.copy()
def test_squeeze(shape):
    """squeeze() must drop size-one axes exactly like numpy's squeeze."""
    sig = signals.BaseSignal(np.random.random(shape)).transpose(
        signal_axes=[0, 1, 2])
    squeezed = sig.squeeze()
    assert sig.data.squeeze().shape == squeezed.data.shape
    assert squeezed.axes_manager.shape == (8, 6, 2, 7)
def test_to_host():
    """to_host() must leave data already in host memory untouched."""
    data = np.arange(10)
    s = signals.BaseSignal(data)
    s.to_host()
    # BUGFIX: the original line ``s.data is data`` was a bare expression
    # with no effect — the identity check was never asserted.
    assert s.data is data
def estimate_parameters(self, signal, x1, x2, only_current=False, out=False):
    """Estimate the parameters by the two area method

    Parameters
    ----------
    signal : Signal1D instance
    x1 : float
        Defines the left limit of the spectral range to use for the
        estimation.
    x2 : float
        Defines the right limit of the spectral range to use for the
        estimation.
    only_current : bool
        If False, estimates the parameters for the full dataset.
    out : bool
        If True, returns the result arrays directly without storing in
        the parameter maps/values. The returned order is (A, r).

    Returns
    -------
    bool or tuple of values
        True/False on success/failure when storing; (A, r) when
        ``out=True``.
    """
    super(PowerLaw, self)._estimate_parameters(signal)
    axis = signal.axes_manager.signal_axes[0]
    i1, i2 = axis.value_range_to_indices(x1, x2)
    # The two-area method splits the range into two equal halves, so the
    # index span must be even (and non-empty).
    if not (i2 + i1) % 2 == 0:
        i2 -= 1
    if i2 == i1:
        i2 += 2
    i3 = (i2 + i1) // 2
    x1 = axis.index2value(i1)
    x2 = axis.index2value(i2)
    x3 = axis.index2value(i3)
    if only_current is True:
        s = signal.get_current_signal()
    else:
        s = signal
    if s._lazy:
        import dask.array as da
        log = da.log
        I1 = s.isig[i1:i3].integrate1D(2j).data
        I2 = s.isig[i3:i2].integrate1D(2j).data
    else:
        shape = s.data.shape[:-1]
        I1_s = signals.BaseSignal(np.empty(shape, dtype='float'))
        I2_s = signals.BaseSignal(np.empty(shape, dtype='float'))
        # Use the `out` parameters to avoid doing the deepcopy
        s.isig[i1:i3].integrate1D(2j, out=I1_s)
        s.isig[i3:i2].integrate1D(2j, out=I2_s)
        I1 = I1_s.data
        I2 = I2_s.data
        log = np.log
    with np.errstate(divide='raise'):
        try:
            # Two-area power-law solution: exponent from the area ratio,
            # amplitude from the right-hand area.
            r = 2 * log(I1 / I2) / log(x2 / x1)
            k = 1 - r
            A = k * I2 / (x2 ** k - x3 ** k)
            if s._lazy:
                r = r.map_blocks(np.nan_to_num)
                A = A.map_blocks(np.nan_to_num)
            else:
                r = np.nan_to_num(r)
                A = np.nan_to_num(A)
        except (RuntimeWarning, FloatingPointError):
            # BUGFIX: corrected the typo "paramaters" in the log message.
            _logger.warning('Power law parameters estimation failed '
                            'because of a "divide by zero" error.')
            return False
    if only_current is True:
        self.r.value = r
        self.A.value = A
        return True
    if out:
        return A, r
    else:
        if self.A.map is None:
            self._create_arrays()
        self.A.map['values'][:] = A
        self.A.map['is_set'][:] = True
        self.r.map['values'][:] = r
        self.r.map['is_set'][:] = True
        self.fetch_stored_values()
        return True