def test_compare_backends_sparse(lt_ctx, default_empad, buffered_empad):
    """Pick the same sparse set of frames from both backends and compare."""
    roi = np.zeros(default_empad.shape.nav, dtype=bool).reshape((-1,))
    # a scattered selection: first two frames, one mid-partition, the last one
    for frame_idx in (0, 1, 8, -1):
        roi[frame_idx] = True
    mmap_result = lt_ctx.run_udf(dataset=default_empad, udf=PickUDF(), roi=roi)
    buffered_result = lt_ctx.run_udf(dataset=buffered_empad, udf=PickUDF(), roi=roi)
    assert np.allclose(mmap_result['intensity'], buffered_result['intensity'])
async def test_fd_limit(async_executor):
    """Regression test: repeated async UDF runs must not leak file descriptors.

    Lowers the soft RLIMIT_NOFILE to just above the current fd count, then runs
    PickUDF 32 times; a per-run fd leak would exhaust the limit and raise.
    """
    import resource
    import psutil
    # set soft limit, throws errors but allows to raise it
    # again afterwards:
    proc = psutil.Process()
    oldlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
    # headroom of 24 fds above what the process currently holds
    resource.setrlimit(resource.RLIMIT_NOFILE, (proc.num_fds() + 24, oldlimit[1]))
    print("fds", proc.num_fds())
    try:
        data = _mk_random(size=(1, 16, 16), dtype='<u2')
        dataset = MemoryDataSet(data=data, tileshape=(1, 16, 16), num_partitions=1)
        roi = np.ones((1, ), dtype=bool)
        udf = PickUDF()
        # 32 runs > 24 fds of headroom, so even a one-fd-per-run leak trips
        for i in range(32):
            print(i)
            print(proc.num_fds())
            async for part in UDFRunner([udf]).run_for_dataset_async(
                dataset=dataset,
                executor=async_executor,
                cancel_id="42",
                roi=roi,
            ):
                pass
    finally:
        # always restore the original limit, even when the test fails
        resource.setrlimit(resource.RLIMIT_NOFILE, oldlimit)
def test_reshape_nav(default_frms6, lt_ctx):
    """Reshaping nav to equivalent shapes must yield the same picked frames."""
    udf = PickUDF()
    roi = np.zeros(default_frms6.shape.nav, dtype=bool)
    # select the first 8 frames in flat order as the reference
    reshaped_view(roi, -1)[:8] = True
    reference = lt_ctx.run_udf(dataset=default_frms6, udf=udf, roi=roi)
    for nav_shape in ((8, ), (2, 2, 2)):
        ds = lt_ctx.load(
            "frms6",
            path=FRMS6_TESTDATA_PATH,
            nav_shape=nav_shape,
            enable_offset_correction=True,
        )
        picked = lt_ctx.run_udf(dataset=ds, udf=udf)
        nav_check = lt_ctx.run_udf(dataset=ds, udf=SumSigUDF())
        assert nav_check['intensity'].data.shape == nav_shape
        assert np.allclose(
            picked['intensity'].raw_data, reference['intensity'].raw_data
        )
def _update_collection_index(axis, value):
    # Re-pick the displayed frame after the user changed the index on one
    # nav axis. `src`, `target`, `self`, `Registry`, `dataset` and
    # `UDFRunner` come from the enclosing scope — assumes a 2D nav shape
    # (only axis 0/1 are handled); TODO confirm against callers.
    libertem_metadata = copy.deepcopy(src.metadata.get('libertem-io'))
    if not libertem_metadata:
        return
    file_parameters = libertem_metadata['file_parameters']
    file_type = file_parameters.pop('type')
    # display_slice/start stores the flat index of the currently shown frame
    current_index = libertem_metadata['display_slice']['start']
    current_index = np.unravel_index(current_index, target.data.shape)
    if value == current_index[axis]:
        # unchanged on this axis — avoid a redundant dataset reload
        return
    executor = Registry.get_component('libertem_executor')
    if not executor:
        return
    executor = executor.ensure_sync()
    ds = dataset.load(file_type, executor, **file_parameters)
    # ROI selecting exactly the newly addressed frame
    roi = np.zeros(ds.shape.nav, dtype=bool)
    if axis == 0:
        roi[value, current_index[1]] = True
        current_index = (value, current_index[1])
    else:
        roi[current_index[0], value] = True
        current_index = (current_index[0], value)
    result = UDFRunner(PickUDF()).run_for_dataset(ds, executor, roi=roi)
    result_array = np.squeeze(np.array(result['intensity']))
    new_metadata = copy.deepcopy(src.metadata)
    # write the new flat display index back into the copied metadata
    new_display_slice = np.ravel_multi_index(current_index, target.data.shape)
    new_metadata['libertem-io']['display_slice'][
        'start'] = new_display_slice
    new_xdata = self.__api.create_data_and_metadata(
        result_array, metadata=new_metadata)
    src.set_data_and_metadata(new_xdata)
def test_reshape_nav(default_k2is, lt_ctx):
    """Equivalent nav reshapes of the k2is dataset must pick identical frames."""
    udf = PickUDF()
    roi = np.zeros(default_k2is.shape.nav, dtype=bool)
    # reference: the first 8 frames in flat order
    reshaped_view(roi, -1)[:8] = True
    reference = lt_ctx.run_udf(dataset=default_k2is, udf=udf, roi=roi)
    for nav_shape in ((8, ), (2, 2, 2)):
        ds = lt_ctx.load(
            "k2is",
            path=K2IS_TESTDATA_PATH,
            nav_shape=nav_shape,
        )
        picked = lt_ctx.run_udf(dataset=ds, udf=udf)
        nav_check = lt_ctx.run_udf(dataset=ds, udf=SumSigUDF())
        assert nav_check['intensity'].data.shape == nav_shape
        assert np.allclose(
            picked['intensity'].raw_data, reference['intensity'].raw_data
        )
def dataset_correction_verification(ds, roi, lt_ctx, exclude=None):
    """
    compare correct function w/ corrected pick
    """
    sig_tuple = tuple(ds.shape.sig)
    flat_shape = (-1, *sig_tuple)
    # reference: pick the raw frames without any correction applied
    uncorrected = CorrectionSet()
    raw = lt_ctx.run_udf(udf=PickUDF(), dataset=ds, roi=roi, corrections=uncorrected)
    gain = np.random.random(ds.shape.sig) + 1
    dark = np.random.random(ds.shape.sig) - 0.5
    if exclude is None:
        # one random excluded pixel coordinate per sig dimension
        exclude = [
            (np.random.randint(0, s), np.random.randint(0, s))
            for s in sig_tuple
        ]
    exclude_coo = sparse.COO(coords=exclude, data=True, shape=ds.shape.sig)
    corrset = CorrectionSet(dark=dark, gain=gain, excluded_pixels=exclude_coo)
    # This one uses native input data
    pick_res = lt_ctx.run_udf(udf=PickUDF(), dataset=ds, corrections=corrset, roi=roi)
    corrected = correct(
        buffer=raw['intensity'].raw_data.reshape(flat_shape),
        dark_image=dark,
        gain_map=gain,
        excluded_pixels=exclude,
        inplace=False,
    )
    print("Exclude: ", exclude)
    print(pick_res['intensity'].raw_data.dtype)
    print(corrected.dtype)
    assert np.allclose(pick_res['intensity'].raw_data.reshape(flat_shape), corrected)
def test_pick(lt_ctx):
    """PickUDF with a random ROI returns exactly the selected frames, dtype intact."""
    data = _mk_random(size=(16, 16, 16, 16), dtype="float32")
    dataset = MemoryDataSet(
        data=data, tileshape=(3, 7, 7), num_partitions=7, sig_dims=2,
    )
    roi = np.random.choice([True, False], size=dataset.shape.nav)
    res = lt_ctx.run_udf(dataset=dataset, udf=PickUDF(), roi=roi)
    picked = res['intensity'].data
    assert np.allclose(data[roi], picked)
    assert picked.dtype == data.dtype
def dataset_correction_masks(ds, roi, lt_ctx, exclude=None):
    """
    compare correction via sparse mask multiplication w/ correct function
    """
    sig_tuple = tuple(ds.shape.sig)
    flat_shape = (-1, *sig_tuple)
    # reference: pick the raw frames without any correction applied
    uncorrected = CorrectionSet()
    raw = lt_ctx.run_udf(udf=PickUDF(), dataset=ds, roi=roi, corrections=uncorrected)
    gain = np.random.random(ds.shape.sig) + 1
    dark = np.random.random(ds.shape.sig) - 0.5
    if exclude is None:
        # one random excluded pixel coordinate per sig dimension
        exclude = [
            (np.random.randint(0, s), np.random.randint(0, s))
            for s in sig_tuple
        ]
    exclude_coo = sparse.COO(coords=exclude, data=True, shape=ds.shape.sig)
    corrset = CorrectionSet(dark=dark, gain=gain, excluded_pixels=exclude_coo)

    def mask_factory():
        # identity "masks": one one-hot mask per sig pixel
        s = tuple(ds.shape.sig)
        return sparse.eye(np.prod(s)).reshape((-1, *s))

    # This one casts to float
    mask_res = lt_ctx.run_udf(
        udf=ApplyMasksUDF(mask_factory),
        dataset=ds,
        corrections=corrset,
        roi=roi,
    )
    # This one uses native input data
    corrected = correct(
        buffer=raw['intensity'].raw_data.reshape(flat_shape),
        dark_image=dark,
        gain_map=gain,
        excluded_pixels=exclude,
        inplace=False,
    )
    print("Exclude: ", exclude)
    print(mask_res['intensity'].raw_data.dtype)
    print(corrected.dtype)
    assert np.allclose(mask_res['intensity'].raw_data.reshape(flat_shape), corrected)
def test_pick_empty_roi(lt_ctx):
    """An all-False ROI must yield an empty result with matching shape and dtype."""
    data = _mk_random(size=(16, 16, 16, 16), dtype="float32")
    dataset = MemoryDataSet(
        data=data, tileshape=(3, 7, 7), num_partitions=7, sig_dims=2,
    )
    empty_roi = np.zeros(dataset.shape.nav, dtype=bool)
    res = lt_ctx.run_udf(dataset=dataset, udf=PickUDF(), roi=empty_roi)
    picked = res['intensity'].data
    expected = data[empty_roi]
    assert np.allclose(expected, picked)
    assert expected.shape == picked.shape
    assert picked.dtype == data.dtype
def test_positive_sync_offset(default_frms6, lt_ctx):
    """Loading with sync_offset=2 must match picking frames 2..9 directly."""
    udf = PickUDF()
    roi = np.zeros(default_frms6.shape.nav, dtype=bool)
    # reference: frames 2..9 in flat order, matching the offset below
    reshaped_view(roi, -1)[2:10] = True
    reference = lt_ctx.run_udf(dataset=default_frms6, udf=udf, roi=roi)
    shifted_ds = lt_ctx.load(
        "frms6",
        path=FRMS6_TESTDATA_PATH,
        nav_shape=(4, 2),
        sync_offset=2,
        enable_offset_correction=True,
    )
    shifted = lt_ctx.run_udf(dataset=shifted_ds, udf=udf)
    assert np.allclose(
        shifted['intensity'].raw_data, reference['intensity'].raw_data
    )
def read_data_and_metadata_from_stream(self, stream):
    """Load the first frame of the dataset referenced by ``stream`` and wrap
    it as xdata, embedding the file parameters for later re-picking.

    Returns None when no libertem executor is registered; otherwise the
    object produced by ``self.__api.create_data_and_metadata``.
    """
    executor = Registry.get_component('libertem_executor')
    if executor is None:
        logging.error(
            'No libertem executor could be retrieved from the Registry.')
        return
    executor = executor.ensure_sync()
    # auto-detect the file type; fall back to 'raw' with the stream as path
    file_parameters = dataset.detect(stream, executor=executor)
    file_type = file_parameters.pop('type', None)
    if file_type is None:
        file_type = 'raw'
        file_parameters = {'path': stream}
    file_params = dict()

    def params_callback(file_params_):
        # collects the parameters the user entered in the dialog
        file_params.update(file_params_)

    # show the parameter dialog on the UI thread, then block on two
    # separate events: dialog finished, then dialog closed
    self.__api.queue_task(
        lambda: self.show_file_param_dialog(file_type, params_callback))
    self.__show_file_param_dialog_finished_event.wait()
    self.__show_file_param_dialog_finished_event.clear()
    self.__file_param_dialog_closed_event.wait()
    # 'name' is display-only and not a dataset.load() parameter
    file_params.pop('name', None)
    file_parameters.update(file_params)
    ds = dataset.load(file_type, executor, **file_parameters)
    # pick only the very first frame for the initial display
    roi = np.zeros(ds.shape.nav, dtype=bool)
    roi_flat = roi.ravel()
    roi_flat[0] = True
    result = UDFRunner(PickUDF()).run_for_dataset(ds, executor, roi=roi)
    result_array = np.squeeze(np.array(result['intensity']))
    # re-attach the type so the metadata is sufficient to reload the dataset
    file_parameters['type'] = file_type
    metadata = {
        'libertem-io': {
            'file_parameters': file_parameters,
            'display_slice': {
                'start': 0,
                'stop': 0
            }
        }
    }
    return self.__api.create_data_and_metadata(result_array, metadata=metadata)
def test_negative_sync_offset(default_k2is, lt_ctx):
    """With sync_offset=-2 the data shifts forward; compensate via the ROI."""
    udf = PickUDF()
    # native_sync_offset is 250
    ref_roi = np.zeros(default_k2is.shape.nav, dtype=bool)
    reshaped_view(ref_roi, -1)[:8] = True
    reference = lt_ctx.run_udf(dataset=default_k2is, udf=udf, roi=ref_roi)
    # the same frames land at flat positions 252..259 after the offset
    shifted_roi = np.zeros(default_k2is.shape.nav, dtype=bool)
    reshaped_view(shifted_roi, -1)[252:260] = True
    ds = lt_ctx.load("k2is", path=K2IS_TESTDATA_PATH, sync_offset=-2)
    result = lt_ctx.run_udf(dataset=ds, udf=udf, roi=shifted_roi)
    assert np.allclose(
        result['intensity'].raw_data, reference['intensity'].raw_data
    )
def test_positive_sync_offset_1(default_k2is, lt_ctx):
    """A sync_offset past the native one (250 + 2) must match frames 2..9."""
    udf = PickUDF()
    # native_sync_offset is 250
    roi = np.zeros(default_k2is.shape.nav, dtype=bool)
    reshaped_view(roi, -1)[2:10] = True
    reference = lt_ctx.run_udf(dataset=default_k2is, udf=udf, roi=roi)
    ds = lt_ctx.load(
        "k2is",
        path=K2IS_TESTDATA_PATH,
        nav_shape=(4, 2),
        sync_offset=252,
    )
    result = lt_ctx.run_udf(dataset=ds, udf=udf)
    assert np.allclose(
        result['intensity'].raw_data, reference['intensity'].raw_data
    )
def test_pick(lt_ctx, delayed_ctx):
    """Immediate and delayed contexts must pick identical frames, dtype intact."""
    data = _mk_random(size=(16, 16, 16, 16), dtype="float32")
    dataset = MemoryDataSet(
        data=data, tileshape=(3, 7, 16), num_partitions=7, sig_dims=2,
    )
    roi = np.random.choice([True, False], size=dataset.shape.nav)
    # guarantee at least one selected frame
    roi[0] = True
    udf = PickUDF()
    immediate = lt_ctx.run_udf(dataset=dataset, udf=udf, roi=roi)
    delayed = delayed_ctx.run_udf(dataset=dataset, udf=udf, roi=roi)
    expected = data[roi]
    assert np.allclose(expected, immediate['intensity'].data)
    assert np.allclose(expected, delayed['intensity'].data)
    assert immediate['intensity'].data.dtype == data.dtype
    assert delayed['intensity'].data.dtype == data.dtype
def prime_numba_cache(ds):
    """Warm up JIT-compiled I/O kernels for the given dataset.

    Reads a single tile per (read dtype, correction dtype, UDF) combination so
    the compiled code paths are ready before interactive use.
    """
    # prime with and without an explicit read dtype
    dtypes = (np.float32, None)
    for dtype in dtypes:
        roi = np.zeros(ds.shape.nav, dtype=bool).reshape((-1, ))
        # select the first frame actually present, accounting for a
        # negative sync offset
        roi[max(-ds._meta.sync_offset, 0)] = True

        from libertem.udf.sum import SumUDF
        from libertem.udf.raw import PickUDF
        from libertem.io.corrections.corrset import CorrectionSet
        from libertem.io.dataset.base import Negotiator

        # need to have at least one UDF; here we run for both sum and pick
        # to reduce the initial latency when switching to pick mode
        udfs = [SumUDF(), PickUDF()]
        neg = Negotiator()
        for udf in udfs:
            # also prime both the corrected and the uncorrected code path
            for corr_dtype in (np.float32, None):
                if corr_dtype is not None:
                    corrections = CorrectionSet(
                        dark=np.zeros(ds.shape.sig, dtype=corr_dtype))
                else:
                    corrections = None
                found_first_tile = False
                for p in ds.get_partitions():
                    if found_first_tile:
                        # a tile was read in a previous partition — done
                        break
                    p.set_corrections(corrections)
                    tiling_scheme = neg.get_scheme(
                        udfs=[udf],
                        dataset=ds,
                        approx_partition_shape=p.shape,
                        read_dtype=dtype,
                        roi=roi,
                        corrections=corrections,
                    )
                    # reading one tile is enough to trigger compilation
                    for t in p.get_tiles(tiling_scheme=tiling_scheme, roi=roi):
                        found_first_tile = True
                        break
def test_negative_sync_offset(default_mrc, lt_ctx):
    """sync_offset=-2 on the 4-frame set: frames 0..1 appear at positions 2..3."""
    udf = PickUDF()
    roi = np.zeros(default_mrc.shape.nav, dtype=bool)
    # reference: the first two frames of the unshifted dataset
    reshaped_view(roi, -1)[:2] = True
    reference = lt_ctx.run_udf(dataset=default_mrc, udf=udf, roi=roi)
    ds_shifted = lt_ctx.load(
        "mrc",
        path=MRC_TESTDATA_PATH,
        nav_shape=(2, 2),
        sync_offset=-2,
    )
    shifted = lt_ctx.run_udf(dataset=ds_shifted, udf=udf)
    nav_check = lt_ctx.run_udf(dataset=ds_shifted, udf=SumSigUDF())
    print(shifted['intensity'].raw_data.shape)
    assert nav_check['intensity'].data.shape == (2, 2)
    assert np.allclose(
        shifted['intensity'].raw_data[2:],
        reference['intensity'].raw_data,
    )
def get_udf(self):
    """Return a fresh PickUDF instance for this analysis."""
    return PickUDF()
def test_with_roi(default_frms6, lt_ctx):
    """Picking a single frame via ROI yields a (1, 264, 264) result."""
    roi = np.zeros(default_frms6.shape.nav, dtype=bool)
    # select exactly the frame at nav position (0, 0)
    roi[0, 0] = True
    res = lt_ctx.run_udf(udf=PickUDF(), dataset=default_frms6, roi=roi)
    assert np.array(res['intensity']).shape == (1, 264, 264)
def test_with_roi(default_mib, lt_ctx):
    """Picking a single frame via ROI yields a (1, 256, 256) result.

    Bug fixes: the shape comparison was previously a bare expression whose
    result was discarded (missing ``assert``), so it could never fail; and
    ``roi[0] = 1`` would select a whole nav row on a multi-dimensional nav
    shape — select exactly one frame via a flat view instead.
    """
    udf = PickUDF()
    roi = np.zeros(default_mib.shape.nav, dtype=bool)
    # select exactly one frame regardless of nav dimensionality
    roi.reshape((-1,))[0] = True
    res = lt_ctx.run_udf(udf=udf, dataset=default_mib, roi=roi)
    assert np.array(res['intensity']).shape == (1, 256, 256)