def test_crop_disks_from_frame():
    """
    Check the slice geometry returned by ``crop_disks_from_frame`` for peaks
    at the frame border and in the interior.

    NOTE(review): this function is shadowed by a later definition of the same
    name in this file, so pytest never collects this version. It also uses the
    older dict-based ``RadialGradient`` constructor and the module-level
    ``blobfinder.crop_disks_from_frame`` API — presumably left over from
    before an API change; consider removing or renaming it.
    """
    mask = blobfinder.RadialGradient({
        'radius': 2,
        'padding': 0,
    })
    # one peak in each interesting position: top-left corner, interior,
    # bottom-right corner of the 6x6 frame
    peaks = [
        [0, 0],
        [2, 2],
        [5, 5],
    ]
    frame = _mk_random(size=(6, 6), dtype="float32")
    crop_disks = list(blobfinder.crop_disks_from_frame(peaks, frame, mask))

    #
    # how is the region around the peak cropped? like this (x denotes the peak position),
    # this is an example for radius 2, padding 0 -> crop_size = 4
    #
    # ---------
    # | | | | |
    # |-|-|-|-|
    # | | | | |
    # |-|-|-|-|
    # | | |x| |
    # |-|-|-|-|
    # | | | | |
    # ---------

    # first peak: top-leftmost; only the bottom right part of the crop_buf should be filled:
    assert crop_disks[0][1] == (slice(2, 4), slice(2, 4))

    # second peak: the whole crop area fits into the frame -> use full crop_buf
    assert crop_disks[1][1] == (slice(0, 4), slice(0, 4))

    # third peak: bottom-rightmost; almost-symmetric to first case
    assert crop_disks[2][1] == (slice(0, 3), slice(0, 3))
def test_correlation_method_fullframe(lt_ctx, cls, dtype, kwargs):
    """
    Check that the correlation UDF ``cls`` refines all peak positions of a
    synthetic CBED frame to within 0.5 px, for every supported match pattern.

    ``cls``, ``dtype`` and ``kwargs`` are supplied by pytest parametrization:
    the UDF class under test, the dtype of the input peak array, and extra
    keyword arguments for the UDF constructor.

    Improvement over the previous version: the commented-out matplotlib
    debugging code was removed.
    """
    shape = np.array([128, 128])
    # ground-truth lattice: origin near the frame center, axes along x and y,
    # each randomly perturbed by up to one pixel
    zero = shape / 2 + np.random.uniform(-1, 1, size=2)
    a = np.array([34.3, 0.]) + np.random.uniform(-1, 1, size=2)
    b = np.array([0., 42.19]) + np.random.uniform(-1, 1, size=2)
    indices = np.mgrid[-2:3, -2:3]
    indices = np.concatenate(indices.T)
    radius = 8
    data, indices, peaks = cbed_frame(*shape, zero, a, b, indices, radius)
    dataset = MemoryDataSet(data=data, tileshape=(1, *shape),
                            num_partitions=1, sig_dims=2)
    # template sized to hold a full disk of the given radius plus a 1 px rim
    template = m.radial_gradient(
        centerX=radius + 1, centerY=radius + 1,
        imageSizeX=2 * radius + 2, imageSizeY=2 * radius + 2,
        radius=radius,
    )
    match_patterns = [
        blobfinder.RadialGradient(radius=radius, search=radius * 1.5),
        blobfinder.BackgroundSubtraction(
            radius=radius, radius_outer=radius * 1.5, search=radius * 1.8),
        blobfinder.RadialGradientBackgroundSubtraction(
            radius=radius, radius_outer=radius * 1.5, search=radius * 1.8),
        blobfinder.UserTemplate(template=template, search=radius * 1.5),
    ]

    # print the ground truth so failures can be diagnosed from the log
    print("zero: ", zero)
    print("a: ", a)
    print("b: ", b)

    for match_pattern in match_patterns:
        print("refining using template %s" % type(match_pattern))
        udf = cls(match_pattern=match_pattern, peaks=peaks.astype(dtype), **kwargs)
        res = lt_ctx.run_udf(dataset=dataset, udf=udf)
        # residuals between ground truth and refined positions, for the log
        print(peaks - res['refineds'].data[0])
        # every refined position must land within half a pixel of ground truth
        assert np.allclose(res['refineds'].data[0], peaks, atol=0.5)
def test_run_refine_affinematch(lt_ctx):
    """
    Refine a synthetic CBED frame with ``match='affine'`` and check that the
    recovered affine transform stays close to identity.

    NOTE(review): this function is shadowed by a later definition of the same
    name in this file, so pytest never collects this version — consider
    removing or renaming it.
    """
    # single-iteration outer loop; presumably kept so the count can be raised
    # for stress-testing the randomized inputs — TODO confirm
    for i in range(1):
        try:
            shape = np.array([128, 128])
            # ground-truth lattice, randomly perturbed by up to one pixel
            zero = shape / 2 + np.random.uniform(-1, 1, size=2)
            a = np.array([27.17, 0.]) + np.random.uniform(-1, 1, size=2)
            b = np.array([0., 29.19]) + np.random.uniform(-1, 1, size=2)
            indices = np.mgrid[-2:3, -2:3]
            indices = np.concatenate(indices.T)
            radius = 10
            data, indices, peaks = cbed_frame(*shape, zero, a, b, indices, radius)
            dataset = MemoryDataSet(data=data, tileshape=(1, *shape),
                                    num_partitions=1, sig_dims=2)
            matcher = grm.Matcher()
            match_pattern = blobfinder.RadialGradient(radius=radius)
            # for affine matching, indices are peak coordinates relative to
            # the zero point rather than integer lattice indices
            affine_indices = peaks - zero
            # five independent randomized starting guesses near identity
            for j in range(5):
                zzero = zero + np.random.uniform(-1, 1, size=2)
                aa = np.array([1, 0]) + np.random.uniform(-0.05, 0.05, size=2)
                bb = np.array([0, 1]) + np.random.uniform(-0.05, 0.05, size=2)
                (res, real_indices) = blobfinder.run_refine(
                    ctx=lt_ctx,
                    dataset=dataset,
                    zero=zzero,
                    a=aa,
                    b=bb,
                    indices=affine_indices,
                    matcher=matcher,
                    match_pattern=match_pattern,
                    match='affine')
                # refined zero must be close to ground truth; the affine
                # transform itself must stay close to identity
                assert np.allclose(res['zero'].data[0], zero, atol=0.5)
                assert np.allclose(res['a'].data[0], [1, 0], atol=0.05)
                assert np.allclose(res['b'].data[0], [0, 1], atol=0.05)
        except Exception:
            # on failure, dump the random parameters so the failing
            # configuration can be reproduced, then re-raise
            print("zero = np.array([%s, %s])" % tuple(zero))
            print("a = np.array([%s, %s])" % tuple(a))
            print("b = np.array([%s, %s])" % tuple(b))
            print("zzero = np.array([%s, %s])" % tuple(zzero))
            print("aa = np.array([%s, %s])" % tuple(aa))
            print("bb = np.array([%s, %s])" % tuple(bb))
            raise
def test_run_refine_fastmatch(lt_ctx):
    """
    Refine a synthetic CBED frame with randomly dropped peaks, once per
    available match pattern, and check that zero/a/b are recovered.
    """
    shape = np.array([128, 128])
    # ground-truth lattice, each vector perturbed by up to one pixel
    zero = shape / 2 + np.random.uniform(-1, 1, size=2)
    a = np.array([27.17, 0.]) + np.random.uniform(-1, 1, size=2)
    b = np.array([0., 29.19]) + np.random.uniform(-1, 1, size=2)
    indices = np.mgrid[-2:3, -2:3]
    indices = np.concatenate(indices.T)
    # randomly keep ~90% of the lattice points to simulate missing peaks
    keep_mask = np.random.choice([True, False], size=len(indices), p=[0.9, 0.1])
    indices = indices[keep_mask]
    radius = 10

    data, indices, peaks = cbed_frame(*shape, zero, a, b, indices, radius)
    dataset = MemoryDataSet(
        data=data, tileshape=(1, *shape), num_partitions=1, sig_dims=2,
    )
    matcher = grm.Matcher()

    # template sized to hold a full disk of the given radius plus a 1 px rim
    template = m.radial_gradient(
        centerX=radius + 1,
        centerY=radius + 1,
        imageSizeX=2 * radius + 2,
        imageSizeY=2 * radius + 2,
        radius=radius,
    )
    match_patterns = [
        blobfinder.RadialGradient(radius=radius),
        blobfinder.Circular(radius=radius),
        blobfinder.BackgroundSubtraction(radius=radius),
        blobfinder.RadialGradientBackgroundSubtraction(radius=radius),
        blobfinder.UserTemplate(template=template),
    ]

    # log the ground truth for post-mortem diagnosis
    print("zero: ", zero)
    print("a: ", a)
    print("b: ", b)

    for match_pattern in match_patterns:
        print("refining using template %s" % type(match_pattern))
        # start from a guess perturbed by up to one pixel per component
        zero_guess = zero + np.random.uniform(-1, 1, size=2)
        a_guess = a + np.random.uniform(-1, 1, size=2)
        b_guess = b + np.random.uniform(-1, 1, size=2)
        (res, real_indices) = blobfinder.run_refine(
            ctx=lt_ctx,
            dataset=dataset,
            zero=zero_guess,
            a=a_guess,
            b=b_guess,
            matcher=matcher,
            match_pattern=match_pattern,
        )
        refined_zero = res['zero'].data[0]
        refined_a = res['a'].data[0]
        refined_b = res['b'].data[0]
        # residuals between ground truth and refined lattice, for the log
        print(peaks - grm.calc_coords(refined_zero, refined_a, refined_b, indices))
        assert np.allclose(refined_zero, zero, atol=0.5)
        assert np.allclose(refined_a, a, atol=0.2)
        assert np.allclose(refined_b, b, atol=0.2)
def test_smoke(lt_ctx):
    """
    Smoke test: run the blobfinder analysis end-to-end and only verify that
    no exception is raised.
    """
    frames = _mk_random(size=(16 * 16, 16, 16), dtype="float32")
    ds = MemoryDataSet(
        data=frames, tileshape=(1, 16, 16), num_partitions=2, sig_dims=2,
    )
    pattern = blobfinder.RadialGradient(radius=4)
    blobfinder.run_blobfinder(
        ctx=lt_ctx, dataset=ds, num_peaks=1, match_pattern=pattern,
    )
def test_crop_disks_from_frame():
    """
    Check that ``crop_disks_from_frame`` crops correctly for peaks at the
    frame border (partially-filled buffer, rest zero) and in the interior
    (fully-filled buffer).

    Improvement over the previous version: removed a stray debug ``print``.
    """
    match_pattern = blobfinder.RadialGradient(radius=2, search=2)
    crop_size = match_pattern.get_crop_size()
    # one peak in each interesting position of the 6x6 frame:
    # top-left corner, interior, bottom-right corner
    peaks = [
        [0, 0],
        [2, 2],
        [5, 5],
    ]
    frame = _mk_random(size=(6, 6), dtype="float32")
    crop_buf = np.zeros((len(peaks), 2 * crop_size, 2 * crop_size))
    blobfinder.correlation.crop_disks_from_frame(
        peaks=np.array(peaks), frame=frame, crop_size=crop_size,
        out_crop_bufs=crop_buf,
    )

    #
    # how is the region around the peak cropped? like this (x denotes the peak position),
    # this is an example for radius 2, padding 0 -> crop_size = 4
    #
    # ---------
    # | | | | |
    # |-|-|-|-|
    # | | | | |
    # |-|-|-|-|
    # | | |x| |
    # |-|-|-|-|
    # | | | | |
    # ---------

    # first peak: top-leftmost; only the bottom right part of the crop_buf should be filled:
    assert np.all(crop_buf[0] == [
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, frame[0, 0], frame[0, 1]],
        [0, 0, frame[1, 0], frame[1, 1]],
    ])

    # second peak: the whole crop area fits into the frame -> use full crop_buf
    assert np.all(crop_buf[1] == frame[0:4, 0:4])

    # third peak: bottom-rightmost; almost-symmetric to first case
    assert np.all(crop_buf[2] == [
        [frame[3, 3], frame[3, 4], frame[3, 5], 0],
        [frame[4, 3], frame[4, 4], frame[4, 5], 0],
        [frame[5, 3], frame[5, 4], frame[5, 5], 0],
        [0, 0, 0, 0],
    ])
def test_run_refine_blocktests(lt_ctx, cls):
    """
    Run the refinement UDF ``cls`` under a range of buffer size limits —
    boundary values around multiples of the per-peak crop-buffer size plus a
    few random ones — and check the refined positions each time.
    """
    shape = np.array([128, 128])
    zero = shape / 2
    a = np.array([27.17, 0.])
    b = np.array([0., 29.19])
    indices = np.mgrid[-2:3, -2:3]
    indices = np.concatenate(indices.T)
    radius = 7

    match_pattern = blobfinder.RadialGradient(radius=radius)
    crop_size = match_pattern.get_crop_size()

    data, indices, peaks = cbed_frame(
        *shape, zero=zero, a=a, b=b, indices=indices,
        radius=radius, margin=crop_size,
    )
    dataset = MemoryDataSet(
        data=data, tileshape=(1, *shape), num_partitions=1, sig_dims=2,
    )

    # The crop buffer is float32
    # FIXME adapt as soon as UDFs have dtype support
    nbytes = (2 * crop_size)**2 * np.dtype(np.float32).itemsize
    n_peaks = len(peaks)

    # boundary values around 1x, (n-1)x and nx the per-peak buffer size
    boundary_limits = [
        1,
        nbytes - 1,
        nbytes,
        nbytes + 1,
        (n_peaks - 1) * nbytes - 1,
        (n_peaks - 1) * nbytes,
        (n_peaks - 1) * nbytes + 1,
        n_peaks * nbytes - 1,
        n_peaks * nbytes,
        n_peaks * nbytes + 1,
    ]
    random_limits = np.random.randint(
        low=1, high=n_peaks * nbytes + 3, size=5,
    )

    for limit in (*boundary_limits, *random_limits):
        udf = cls(peaks=peaks, match_pattern=match_pattern, __limit=limit)
        res = lt_ctx.run_udf(udf=udf, dataset=dataset)
        # log enough detail to diagnose a failing limit value
        print(limit)
        print(res['refineds'].data[0])
        print(peaks)
        print(peaks - res['refineds'].data[0])
        assert np.allclose(res['refineds'].data[0], peaks, atol=0.5)
def test_run_refine_fullframe(lt_ctx):
    """
    Refine a synthetic CBED frame using the 'fullframe' correlation method
    and check that zero/a/b are recovered within tolerance.
    """
    shape = np.array([128, 128])
    # ground-truth lattice, each vector perturbed by up to one pixel
    zero = shape / 2 + np.random.uniform(-1, 1, size=2)
    a = np.array([27.17, 0.]) + np.random.uniform(-1, 1, size=2)
    b = np.array([0., 29.19]) + np.random.uniform(-1, 1, size=2)
    indices = np.mgrid[-2:3, -2:3]
    indices = np.concatenate(indices.T)
    radius = 10

    data, indices, peaks = cbed_frame(*shape, zero, a, b, indices, radius)
    dataset = MemoryDataSet(
        data=data, tileshape=(1, *shape), num_partitions=1, sig_dims=2,
    )
    matcher = grm.Matcher()
    match_pattern = blobfinder.RadialGradient(radius=radius)

    # log the ground truth for post-mortem diagnosis
    print("zero: ", zero)
    print("a: ", a)
    print("b: ", b)

    # start from a guess perturbed by up to half a pixel per component
    zero_guess = zero + np.random.uniform(-0.5, 0.5, size=2)
    a_guess = a + np.random.uniform(-0.5, 0.5, size=2)
    b_guess = b + np.random.uniform(-0.5, 0.5, size=2)
    (res, real_indices) = blobfinder.run_refine(
        ctx=lt_ctx,
        dataset=dataset,
        zero=zero_guess,
        a=a_guess,
        b=b_guess,
        matcher=matcher,
        match_pattern=match_pattern,
        correlation='fullframe',
    )

    refined_zero = res['zero'].data[0]
    refined_a = res['a'].data[0]
    refined_b = res['b'].data[0]
    # residuals between ground truth and refined lattice, for the log
    print(peaks - grm.calc_coords(refined_zero, refined_a, refined_b, indices))

    assert np.allclose(refined_zero, zero, atol=0.5)
    assert np.allclose(refined_a, a, atol=0.2)
    assert np.allclose(refined_b, b, atol=0.2)
def test_run_refine_affinematch(lt_ctx):
    """
    Refine a synthetic CBED frame with ``match='affine'`` and check that the
    recovered zero point matches ground truth while the affine transform
    stays close to identity.
    """
    shape = np.array([256, 256])
    # ground-truth lattice, each vector perturbed by up to one pixel
    zero = shape / 2 + np.random.uniform(-1, 1, size=2)
    a = np.array([27.17, 0.]) + np.random.uniform(-1, 1, size=2)
    b = np.array([0., 29.19]) + np.random.uniform(-1, 1, size=2)
    indices = np.mgrid[-3:4, -3:4]
    indices = np.concatenate(indices.T)
    radius = 10

    data, indices, peaks = cbed_frame(*shape, zero, a, b, indices, radius)
    dataset = MemoryDataSet(
        data=data, tileshape=(1, *shape), num_partitions=1, sig_dims=2,
    )
    matcher = grm.Matcher()
    match_pattern = blobfinder.RadialGradient(radius=radius)
    # for affine matching, indices are peak coordinates relative to the
    # zero point rather than integer lattice indices
    affine_indices = peaks - zero

    # log the ground truth for post-mortem diagnosis
    print("zero: ", zero)
    print("a: ", a)
    print("b: ", b)

    # start from a perturbed zero and a near-identity transform
    zero_guess = zero + np.random.uniform(-1, 1, size=2)
    a_guess = np.array([1, 0]) + np.random.uniform(-0.05, 0.05, size=2)
    b_guess = np.array([0, 1]) + np.random.uniform(-0.05, 0.05, size=2)
    (res, real_indices) = blobfinder.run_refine(
        ctx=lt_ctx,
        dataset=dataset,
        zero=zero_guess,
        a=a_guess,
        b=b_guess,
        indices=affine_indices,
        matcher=matcher,
        match_pattern=match_pattern,
        match='affine',
    )

    assert np.allclose(res['zero'].data[0], zero, atol=0.5)
    assert np.allclose(res['a'].data[0], [1, 0], atol=0.05)
    assert np.allclose(res['b'].data[0], [0, 1], atol=0.05)