def test_tileshape_adjustment_7():
    sig_shape = (123, 456)
    tile_shape = (14, 42)
    base_shape = (7, 1)
    excluded_coords = np.array([
        (14, ),
        (42, )
    ])
    excluded_pixels = sparse.COO(coords=excluded_coords, shape=sig_shape, data=True)
    corr = CorrectionSet(excluded_pixels=excluded_pixels)
    adjusted = corr.adjust_tileshape(
        tile_shape=tile_shape, sig_shape=sig_shape, base_shape=base_shape
    )
    assert adjusted == (21, 41)
    _validate(excluded_coords=excluded_coords, adjusted=adjusted, sig_shape=sig_shape)

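# `_validate` is a helper defined elsewhere in this test module and not shown here.
# The sketch below is a hypothetical illustration (not the actual helper) of the kind
# of invariant it is assumed to check: no excluded pixel may coincide with a tile
# boundary of the adjusted shape, so each damaged pixel keeps in-tile neighbours
# available for patching.
def _validate_sketch(excluded_coords, adjusted, sig_shape):
    for dim in range(len(excluded_coords)):
        # tile boundaries along this signal dimension for the adjusted tile shape
        boundaries = set(range(adjusted[dim], sig_shape[dim], adjusted[dim]))
        assert not set(excluded_coords[dim]) & boundaries
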
def test_tileshape_adjustment_10():
    sig_shape = (122, 455)
    tile_shape = (8, 1)
    base_shape = (2, 1)
    excluded_coords = np.array([
        (121, ),
        (454, )
    ])
    excluded_pixels = sparse.COO(coords=excluded_coords, shape=sig_shape, data=True)
    corr = CorrectionSet(excluded_pixels=excluded_pixels)
    adjusted = corr.adjust_tileshape(
        tile_shape=tile_shape, sig_shape=sig_shape, base_shape=base_shape
    )
    assert adjusted == (8, 3)
    _validate(excluded_coords=excluded_coords, adjusted=adjusted, sig_shape=sig_shape)

def test_tileshape_adjustment_6_3():
    sig_shape = (123, 456)
    tile_shape = (1, 1)
    base_shape = (1, 1)
    excluded_coords = np.array([
        range(123),
        range(0, 246, 2)
    ])
    excluded_pixels = sparse.COO(coords=excluded_coords, shape=sig_shape, data=True)
    corr = CorrectionSet(excluded_pixels=excluded_pixels)
    adjusted = corr.adjust_tileshape(
        tile_shape=tile_shape, sig_shape=sig_shape, base_shape=base_shape
    )
    assert adjusted == (123, 256)
    _validate(excluded_coords=excluded_coords, adjusted=adjusted, sig_shape=sig_shape)

def test_patch_pixels_only_excluded_pixels(lt_ctx, default_raw, default_raw_data):
    udf = SumUDF()
    excluded_pixels = sparse.COO(np.zeros((128, 128)))
    corr = CorrectionSet(excluded_pixels=excluded_pixels)
    res = lt_ctx.run_udf(dataset=default_raw, udf=udf, corrections=corr)
    assert np.allclose(res['intensity'], np.sum(default_raw_data, axis=(0, 1)))

def test_correction_size_overflow():
    data = _mk_random(size=(32, 1860, 2048))
    dataset = MemoryDataSet(
        data=data,
        num_partitions=1,
        sig_dims=2,
        base_shape=(1, 930, 16),
        force_need_decode=True,
    )
    neg = Negotiator()
    p = next(dataset.get_partitions())
    udf = UDFWithLargeDepth()

    excluded_coords = np.array([
        (930, ),
        (16, )
    ])
    excluded_pixels = sparse.COO(coords=excluded_coords, shape=dataset.shape.sig, data=True)
    corr = CorrectionSet(excluded_pixels=excluded_pixels)

    scheme = neg.get_scheme(
        udfs=[udf],
        approx_partition_shape=p.shape,
        dataset=dataset,
        read_dtype=np.float32,
        roi=None,
        corrections=corr,
    )
    print(scheme._debug)
    assert scheme._debug["need_decode"]
    assert scheme.shape.sig.dims == 2
    assert tuple(scheme.shape) == (4, 1860, 32)

def test_real_correction(self, shared_dist_ctx, large_raw_file, benchmark,
                         gain, dark, num_excluded):
    filename, shape, dtype = large_raw_file
    nav_dims = shape[:2]
    sig_dims = shape[2:]

    if gain == 'use gain':
        gain_map = (np.random.random(sig_dims) + 1).astype(np.float64)
    elif gain == 'no gain':
        gain_map = None
    else:
        raise ValueError

    if dark == 'use dark':
        dark_image = np.random.random(sig_dims).astype(np.float64)
    elif dark == 'no dark':
        dark_image = None
    else:
        raise ValueError

    if num_excluded > 0:
        excluded_coords = exclude_pixels(sig_dims=sig_dims, num_excluded=num_excluded)
        assert excluded_coords.shape[1] == num_excluded
        exclude = sparse.COO(coords=excluded_coords, shape=sig_dims, data=True)
    else:
        exclude = None

    print("Nav dims: ", nav_dims)
    print("Sig dims:", sig_dims)

    corrset = CorrectionSet(
        dark=dark_image,
        gain=gain_map,
        excluded_pixels=exclude,
    )

    udf = NoOpUDF()

    ds = shared_dist_ctx.load(
        'RAW',
        path=str(filename),
        scan_size=shape[:2],
        dtype=dtype,
        detector_size=shape[2:],
    )

    benchmark.pedantic(
        shared_dist_ctx.run_udf,
        kwargs=dict(
            dataset=ds,
            udf=udf,
            corrections=corrset,
        ),
        warmup_rounds=0,
        rounds=5,
        iterations=1,
    )

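# `exclude_pixels` comes from the shared test/benchmark helpers and is not shown here.
# The hypothetical stand-in below (not the actual helper) only captures the contract
# the test above relies on: an integer coordinate array of shape
# (len(sig_dims), num_excluded) with all coordinates inside the signal shape. The real
# helper presumably also ensures the chosen pixels can be repaired from their neighbours.
def exclude_pixels_sketch(sig_dims, num_excluded):
    rng = np.random.default_rng(seed=42)
    # one row of coordinates per signal dimension, one column per excluded pixel
    return np.stack([
        rng.integers(0, dim, size=num_excluded)
        for dim in sig_dims
    ])
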
def test_tileshape_adjustment_6_1():
    sig_shape = (123, 456)
    tile_shape = (122, 1)
    base_shape = (1, 1)
    excluded_coords = np.array([
        range(123),
        np.zeros(123, dtype=int)
    ])
    excluded_pixels = sparse.COO(coords=excluded_coords, shape=sig_shape, data=True)
    corr = CorrectionSet(excluded_pixels=excluded_pixels)
    adjusted = corr.adjust_tileshape(
        tile_shape=tile_shape, sig_shape=sig_shape, base_shape=base_shape
    )
    print(adjusted)
    assert adjusted == (123, 2)
    _validate(excluded_coords=excluded_coords, adjusted=adjusted, sig_shape=sig_shape)

def test_correction_set_dark_one(lt_ctx, default_raw, default_raw_data, gain, dark):
    udf = SumUDF()
    corr = CorrectionSet(dark=dark, gain=gain)
    res = lt_ctx.run_udf(dataset=default_raw, udf=udf, corrections=corr)
    assert np.allclose(res['intensity'], np.sum(default_raw_data - 1, axis=(0, 1)))

def test_patch_pixels(lt_ctx, default_raw, default_raw_data):
    udf = SumUDF()

    # test with empty excluded_pixels array
    corr = CorrectionSet(
        excluded_pixels=np.array([(), ()]).astype(np.int64),
        gain=np.ones((128, 128))
    )
    res = lt_ctx.run_udf(dataset=default_raw, udf=udf, corrections=corr)
    assert np.allclose(res['intensity'], np.sum(default_raw_data, axis=(0, 1)))

def test_tileshape_adjustment_8():
    sig_shape = (1014, 1024)
    tile_shape = (1, 1)
    base_shape = (1, 1)
    # These magic numbers are "worst case" to produce collisions
    # 2*3*4*5*6*7
    excluded_coords = np.array([
        (720, 210, 306),
        (120, 210, 210)
    ])
    excluded_pixels = sparse.COO(coords=excluded_coords, shape=sig_shape, data=True)
    corr = CorrectionSet(excluded_pixels=excluded_pixels)
    adjusted = corr.adjust_tileshape(
        tile_shape=tile_shape, sig_shape=sig_shape, base_shape=base_shape
    )
    print(adjusted)
    assert adjusted != (1014, 1024)
    _validate(excluded_coords=excluded_coords, adjusted=adjusted, sig_shape=sig_shape)

def test_comparison_roi(default_seq, default_seq_raw, lt_ctx_fast):
    corrset = CorrectionSet()
    roi = np.random.choice(
        [True, False],
        size=tuple(default_seq.shape.nav),
        p=[0.5, 0.5]
    )
    udf = ValidationUDF(reference=default_seq_raw[roi])
    lt_ctx_fast.run_udf(udf=udf, dataset=default_seq, roi=roi, corrections=corrset)

def test_tileshape_adjustment_many(large_raw, lt_ctx):
    udf = EarlyExitUDF()
    exclude = sparse.COO(
        coords=exclude_pixels(sig_dims=tuple(large_raw.shape.sig), num_excluded=1000),
        shape=tuple(large_raw.shape.sig),
        data=True
    )
    corr = CorrectionSet(excluded_pixels=exclude)
    with pytest.raises(EarlyExit):
        lt_ctx.run_udf(dataset=large_raw, udf=udf, corrections=corr)

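# `EarlyExit` and `EarlyExitUDF` are defined elsewhere in this module. A hypothetical
# minimal equivalent (not the actual definitions) aborts in the first process_tile()
# call, so the test above exercises tile shape negotiation and correction setup with
# many excluded pixels without running a full pass over the data.
class EarlyExitSketch(Exception):
    pass


class EarlyExitSketchUDF(SumUDF):
    def process_tile(self, tile):
        # abort as soon as the first tile arrives
        raise EarlyExitSketch()
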
def test_patch_corr_empty(lt_ctx):
    data = np.ones((13, 17, 19))
    excluded_coords = np.array([
        (1, 2, 3),
    ]).astype(np.int64)
    excluded_pixels = sparse.COO(coords=excluded_coords, shape=(19, ), data=True)
    ds = lt_ctx.load("memory", data=data, sig_dims=1)
    udf = SumUDF()
    with pytest.raises(RepairValueError):
        corr = CorrectionSet(
            excluded_pixels=excluded_pixels,
            gain=np.ones((19, )),
            dark=np.ones((19, ))
        )
    corr = CorrectionSet(
        excluded_pixels=excluded_pixels,
        gain=np.ones((19, )),
        dark=np.ones((19, )),
        allow_empty=True
    )
    res = lt_ctx.run_udf(dataset=ds, udf=udf, corrections=corr)
    # The value will be unpatched and remain 0 after gain and dark correction are applied
    assert np.allclose(res['intensity'], 0)

def test_tileshape_adjustment_bench(benchmark, base_shape, excluded_coords):
    sig_shape = (1024, 1024)
    tile_shape = base_shape
    excluded_pixels = sparse.COO(coords=excluded_coords, shape=sig_shape, data=True)
    corr = CorrectionSet(excluded_pixels=excluded_pixels)
    adjusted = benchmark(
        corr.adjust_tileshape,
        tile_shape=tile_shape,
        sig_shape=sig_shape,
        base_shape=base_shape
    )
    print("Base shape", base_shape)
    print("Excluded coords", excluded_coords)
    print("Adjusted", adjusted)

def test_tileshape_adjustment_fuzz():
    for n in range(10):
        sig_shape = (np.random.randint(1, 2**12), np.random.randint(1, 2**12))
        print("Sig shape", sig_shape)
        tile_shape = (1, 1)
        base_shape = (1, 1)
        size = max(1, max(sig_shape) // 10)
        excluded_coords = np.vstack([
            np.random.randint(0, sig_shape[0], size=size),
            np.random.randint(0, sig_shape[1], size=size),
        ])
        print("excluded_coords", excluded_coords.shape, excluded_coords)
        excluded_pixels = sparse.COO(coords=excluded_coords, shape=sig_shape, data=True)
        corr = CorrectionSet(excluded_pixels=excluded_pixels, allow_empty=True)
        adjusted = corr.adjust_tileshape(
            tile_shape=tile_shape, sig_shape=sig_shape, base_shape=base_shape
        )
        print(adjusted)
        _validate(excluded_coords=excluded_coords, adjusted=adjusted, sig_shape=sig_shape)

def test_patch_corr_odd(lt_ctx_fast):
    data = np.ones((13, 17, 19, 23, 29, 31))
    excluded_coords = np.array([
        (2, 5),
        (2, 5),
        (2, 5)
    ]).astype(np.int64)
    excluded_pixels = sparse.COO(coords=excluded_coords, shape=(23, 29, 31), data=True)
    ds = lt_ctx_fast.load("memory", data=data, sig_dims=3)
    udf = SumUDF()
    corr = CorrectionSet(
        excluded_pixels=excluded_pixels,
        gain=np.ones((23, 29, 31)),
        dark=np.ones((23, 29, 31))
    )
    res = lt_ctx_fast.run_udf(dataset=ds, udf=udf, corrections=corr)
    assert np.allclose(res['intensity'], 0)

def __init__(
    self, meta: DataSetMeta, partition_slice: Slice, fileset: FileSet,
    start_frame: int, num_frames: int, io_backend: IOBackend,
    decoder: Optional[Decoder] = None,
):
    super().__init__(
        meta=meta,
        partition_slice=partition_slice,
        io_backend=io_backend,
        decoder=decoder,
    )
    if start_frame < self.meta.image_count:
        self._fileset = fileset.get_for_range(
            max(0, start_frame), max(0, start_frame + num_frames - 1)
        )
    self._start_frame = start_frame
    self._num_frames = num_frames
    self._corrections = CorrectionSet()
    if num_frames <= 0:
        raise ValueError("invalid number of frames: %d" % num_frames)

def get_correction_data(self):
    return CorrectionSet(
        dark=self._dark_frame,
        gain=self._gain_map,
    )

def test_comparison(default_seq, default_seq_raw, lt_ctx_fast):
    corrset = CorrectionSet()
    udf = ValidationUDF(
        reference=reshaped_view(default_seq_raw, (-1, *tuple(default_seq.shape.sig)))
    )
    lt_ctx_fast.run_udf(udf=udf, dataset=default_seq, corrections=corrset)

def get_correction_data(self):
    return CorrectionSet(
        dark=self._dark,
        gain=self._gain,
        excluded_pixels=self._excluded_pixels,
    )

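# Datasets that carry their own calibration expose it through get_correction_data();
# the returned CorrectionSet can also be passed explicitly to run_udf(), as the tests
# above do with hand-built correction sets. A rough usage sketch, assuming `ctx` is a
# LiberTEM Context and `ds` is a dataset loaded through it:
#
# corr = ds.get_correction_data()   # dark/gain/excluded pixels bundled with the dataset
# ctx.run_udf(dataset=ds, udf=SumUDF(), corrections=corr)
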
def test_correction_set_zero_gain(lt_ctx, default_raw, gain, dark):
    udf = SumUDF()
    corr = CorrectionSet(dark=dark, gain=gain)
    res = lt_ctx.run_udf(dataset=default_raw, udf=udf, corrections=corr)
    assert np.allclose(res['intensity'], 0)