def rendJitTriang(x, y, n, jsig, mcp, imageBounds, pixelSize):
    """Render a jittered triangulation image, averaged over n renderings.

    Work is split across one worker process per CPU core; each worker
    accumulates into a shared-memory image (`shmarray`).

    Parameters
    ----------
    x, y : ndarray
        point positions [nm]
    n : int
        total number of jittered renderings to average
    jsig : ndarray or scalar float
        jitter standard deviation(s) [nm] — presumably per-point when an
        array; TODO confirm against rendJitTri
    mcp : float
        Monte Carlo sampling probability
    imageBounds : object with x0, x1, y0, y1 attributes
        rendering bounds [nm]
    pixelSize : float
        output pixel size [nm]

    Returns
    -------
    ndarray
        (sizeX, sizeY) averaged rendering.
    """
    sizeX = int((imageBounds.x1 - imageBounds.x0) / pixelSize)
    sizeY = int((imageBounds.y1 - imageBounds.y0) / pixelSize)

    # shared-memory output so all worker processes accumulate into one image
    im = shmarray.zeros((sizeX, sizeY))

    x = shmarray.create_copy(x)
    y = shmarray.create_copy(y)
    if isinstance(jsig, numpy.ndarray):
        jsig = shmarray.create_copy(jsig)

    nCPUs = multiprocessing.cpu_count()

    # NB: floor division. Under Python 3, `n / nCPUs` is a float and would
    # promote the task-count array to float64, passing float iteration counts
    # to the workers (the sibling rendJitTriang2 / rendJitTet already use the
    # integer form). The remainder is spread one-per-task below so the counts
    # still sum to n.
    tasks = (n // nCPUs) * numpy.ones(nCPUs, 'i')
    tasks[:(n % nCPUs)] += 1

    processes = [multiprocessing.Process(target=rendJitTri,
                                         args=(im, x, y, jsig, mcp, imageBounds, pixelSize, nIt))
                 for nIt in tasks]

    for p in processes:
        p.start()
    for p in processes:
        p.join()

    return im / n
def rendJitTriang2(x, y, n, jsig, mcp, imageBounds, pixelSize):
    """Render a jittered triangulation image, normalising by a second
    accumulator image.

    When `multiProc` is enabled (and we are not already inside a daemon
    worker, which may not spawn children), the n renderings are split across
    one process per CPU core, accumulating into shared-memory arrays.
    Otherwise everything runs in-process.

    Parameters
    ----------
    x, y : ndarray
        point positions [nm]
    n : int
        total number of jittered renderings
    jsig : ndarray or scalar float
        jitter standard deviation(s) [nm]
    mcp : float
        Monte Carlo sampling probability
    imageBounds : object with x0, x1, y0, y1 attributes
        rendering bounds [nm]
    pixelSize : float
        output pixel size [nm]

    Returns
    -------
    ndarray
        im / (im1 + 1). `im1` is a second accumulator filled by
        rendJitTri2 — presumably a per-pixel weight/count image;
        TODO confirm against rendJitTri2.
    """
    sizeX = int((imageBounds.x1 - imageBounds.x0) / pixelSize)
    sizeY = int((imageBounds.y1 - imageBounds.y0) / pixelSize)

    if multiProc and not multiprocessing.current_process().daemon:
        # shared-memory accumulators visible to all workers
        im = shmarray.zeros((sizeX, sizeY))
        im1 = shmarray.zeros((sizeX, sizeY))

        x = shmarray.create_copy(x)
        y = shmarray.create_copy(y)
        if isinstance(jsig, np.ndarray):
            jsig = shmarray.create_copy(jsig)

        nCPUs = multiprocessing.cpu_count()

        # split n renderings as evenly as possible across the workers
        tasks = int(n / nCPUs) * np.ones(nCPUs, 'i')
        tasks[:int(n % nCPUs)] += 1

        processes = [multiprocessing.Process(target=rendJitTri2,
                                             args=(im, im1, x, y, jsig, mcp, imageBounds, pixelSize, nIt))
                     for nIt in tasks]

        for p in processes:
            p.start()
        for p in processes:
            p.join()
    else:
        # single-process fallback
        im = np.zeros((sizeX, sizeY))
        im1 = np.zeros((sizeX, sizeY))

        rendJitTri2(im, im1, x, y, jsig, mcp, imageBounds, pixelSize, n)

    imn = im / (im1 + 1)  # n
    return imn
def rendJitTet(x, y, z, n, jsig, jsigz, mcp, imageBounds, pixelSize, sliceSize=100):
    # FIXME - signature now differs from visHelpersMin
    """Render a jittered 3D tetrahedral image, averaged over n renderings.

    Positions are converted from [nm] to pixel/slice units before rendering.
    When `multiProc` is enabled (and we are not already a daemon worker),
    work is split across one process per CPU core, accumulating into a
    shared-memory volume; otherwise everything runs in-process.

    Parameters
    ----------
    x, y, z : ndarray
        point positions [nm]
    n : int
        total number of jittered renderings to average
    jsig, jsigz : ndarray or scalar float
        lateral / axial jitter standard deviations [nm]
    mcp : float
        Monte Carlo sampling probability
    imageBounds : object with x0, x1, y0, y1, z0, z1 attributes
        rendering bounds [nm]
    pixelSize : float
        lateral pixel size [nm]
    sliceSize : float
        axial slice thickness [nm]

    Returns
    -------
    ndarray
        (sizeX, sizeY, sizeZ) Fortran-ordered averaged volume.
    """
    #import gen3DTriangs
    sizeX = int((imageBounds.x1 - imageBounds.x0) / pixelSize)
    sizeY = int((imageBounds.y1 - imageBounds.y0) / pixelSize)
    sizeZ = int((imageBounds.z1 - imageBounds.z0) / sliceSize)

    # convert from [nm] to [pixels] / [slices]
    x = (x - imageBounds.x0) / pixelSize
    y = (y - imageBounds.y0) / pixelSize
    z = (z - imageBounds.z0) / sliceSize
    jsig = jsig / pixelSize
    jsigz = jsigz / sliceSize

    if multiProc and not multiprocessing.current_process().daemon:
        im = shmarray.zeros((sizeX, sizeY, sizeZ), order='F')

        x = shmarray.create_copy(x)
        y = shmarray.create_copy(y)
        z = shmarray.create_copy(z)
        if isinstance(jsig, np.ndarray):
            jsig = shmarray.create_copy(jsig)
        if isinstance(jsigz, np.ndarray):
            jsigz = shmarray.create_copy(jsigz)

        nCPUs = multiprocessing.cpu_count()

        # split n renderings as evenly as possible across the workers
        tasks = int(n / nCPUs) * np.ones(nCPUs, 'i')
        tasks[:int(n % nCPUs)] += 1

        # NOTE: rendJTet is called with (y, x) swapped relative to this
        # function's signature — preserved as in the original; presumably
        # intentional axis ordering, verify against rendJTet.
        processes = [multiprocessing.Process(target=rendJTet,
                                             args=(im, y, x, z, jsig, jsigz, mcp, nIt))
                     for nIt in tasks]

        for p in processes:
            p.start()
        for p in processes:
            p.join()

        return im / n
    else:
        im = np.zeros((sizeX, sizeY, sizeZ), order='F')

        rendJTet(im, y, x, z, jsig, jsigz, mcp, n)

        return im / n
def rendJitTriang(x, y, n, jsig, mcp, imageBounds, pixelSize, seeds=None, geometric_mean=True, mdh=None):
    """
    Parameters
    ----------
    x : ndarray
        x positions [nm]
    y : ndarray
        y positions [nm]
    n : number of jittered renderings to average into final rendering
    jsig : ndarray (or scalar float)
        standard deviations [nm] of normal distributions to sample when jittering for each point
    mcp : float
        Monte Carlo sampling probability (0, 1]
    imageBounds : PYME.IO.ImageBounds
        ImageBounds instance - range in each dimension should ideally be an integer multiple of pixelSize.
    pixelSize : float
        size of pixels to be rendered [nm]
    seeds : ndarray [optional]
        supplied seeds if we want to strictly reconstruct a previously generated image
    geometric_mean : bool [optional]
        Flag to scale intensity by geometric mean (True) or [localizations / um^2] (False)
    mdh: PYME.IO.MetaDataHandler.MDHandlerBase or subclass [optional]
        metadata handler to store seeds to

    Returns
    -------
    im : ndarray
        2D Jittered Triangulation rendering.

    Notes
    -----
    Triangles which reach outside of the image bounds are dropped and not included in the rendering.
    """
    sizeX = int((imageBounds.x1 - imageBounds.x0) / pixelSize)
    sizeY = int((imageBounds.y1 - imageBounds.y0) / pixelSize)

    # select the per-task rendering function up front
    if geometric_mean:
        fcn = _rend_jit_tri_geometric
    else:
        fcn = rendJitTri

    if multiProc and not multiprocessing.current_process().daemon:
        # shared-memory output so all worker processes accumulate into one image
        im = shmarray.zeros((sizeX, sizeY))

        x = shmarray.create_copy(x)
        y = shmarray.create_copy(y)
        if isinstance(jsig, numpy.ndarray):
            jsig = shmarray.create_copy(jsig)

        # We will generate 1 process for each seed, defaulting to generating a seed for each
        # CPU core if seeds are not passed explicitly. Rendering with explicitly passed seeds
        # will be deterministic, but performance will not be optimal unless n_seeds = n_CPUs
        seeds = _generate_subprocess_seeds(multiprocessing.cpu_count(), mdh, seeds)
        iterations = _iterations_per_task(n, len(seeds))

        processes = [multiprocessing.Process(target=fcn,
                                             args=(im, x, y, jsig, mcp, imageBounds, pixelSize, nIt, s))
                     for nIt, s in zip(iterations, seeds)]

        for p in processes:
            p.start()
        for p in processes:
            p.join()
    else:
        im = numpy.zeros((sizeX, sizeY))

        # Technically we could just call fcn( ....,n), but we replicate the logic above and
        # divide into groups of tasks so that we can reproduce a previously generated image
        seeds = _generate_subprocess_seeds(1, mdh, seeds)
        iterations = _iterations_per_task(n, len(seeds))

        for nIt, s in zip(iterations, seeds):
            # NB - in normal usage, this loop only evaluates once, with nIt=n
            fcn(im, x, y, jsig, mcp, imageBounds, pixelSize, nIt, seed=s)

    if geometric_mean:
        # geometric-mean scaling; pixels hit fewer than n times are zeroed
        return (1.e6 / (im / n + 1)) * (im > n)
    else:
        return im / n