Example 1
def rendJitTriang2(x,y,n,jsig, mcp, imageBounds, pixelSize):
    sizeX = int((imageBounds.x1 - imageBounds.x0) / pixelSize)
    sizeY = int((imageBounds.y1 - imageBounds.y0) / pixelSize)
    
    if multiProc and not multiprocessing.current_process().daemon:
        im = shmarray.zeros((sizeX, sizeY))
        im1 = shmarray.zeros((sizeX, sizeY))

        x = shmarray.create_copy(x)
        y = shmarray.create_copy(y)
        if type(jsig) == np.ndarray:
            jsig = shmarray.create_copy(jsig)


        nCPUs = multiprocessing.cpu_count()

        tasks = int(n / nCPUs) * np.ones(nCPUs, 'i')
        tasks[:int(n%nCPUs)] += 1

        processes = [multiprocessing.Process(target = rendJitTri2, args=(im, im1, x, y, jsig, mcp, imageBounds, pixelSize, nIt)) for nIt in tasks]

        for p in processes:
            p.start()

        for p in processes:
            p.join()

    else:
        im = np.zeros((sizeX, sizeY))
        im1 = np.zeros((sizeX, sizeY))

        rendJitTri2(im, im1, x, y, jsig, mcp, imageBounds, pixelSize, n)

    imn =  im/(im1+1) #n
    return imn
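The multiProc / daemon check above falls back to a single-process render when the caller is a daemonic worker, since daemonic processes are not allowed to spawn children. A minimal standalone sketch of that check (not from the source):

import multiprocessing

def can_spawn_workers():
    # daemonic processes may not create child processes
    return not multiprocessing.current_process().daemon

if __name__ == '__main__':
    print(can_spawn_workers())  # True when run from an ordinary main process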
Example 2
def rendJitTriang(x, y, n, jsig, mcp, imageBounds, pixelSize):
    sizeX = int((imageBounds.x1 - imageBounds.x0) / pixelSize)
    sizeY = int((imageBounds.y1 - imageBounds.y0) / pixelSize)

    im = shmarray.zeros((sizeX, sizeY))

    x = shmarray.create_copy(x)
    y = shmarray.create_copy(y)
    if type(jsig) == numpy.ndarray:
        jsig = shmarray.create_copy(jsig)

    nCPUs = multiprocessing.cpu_count()

    tasks = int(n / nCPUs) * numpy.ones(nCPUs, 'i')
    tasks[:int(n % nCPUs)] += 1

    processes = [
        multiprocessing.Process(target=rendJitTri,
                                args=(im, x, y, jsig, mcp, imageBounds,
                                      pixelSize, nIt)) for nIt in tasks
    ]

    for p in processes:
        p.start()

    for p in processes:
        p.join()

    return im / n
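The tasks array above spreads the n jitter iterations as evenly as possible over the workers, giving the remainder to the first few. A minimal standalone sketch with toy numbers:

import numpy as np

n, nCPUs = 10, 4
tasks = int(n / nCPUs) * np.ones(nCPUs, 'i')
tasks[:int(n % nCPUs)] += 1
print(tasks)  # [3 3 2 2] - sums to n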
Example 3
def fit_quad_surfaces_P(data, radius, fitPos=False, NFits=0):
    from scipy.spatial import cKDTree
    #kdt = kdtree.KDTree(pts)
    kdt = cKDTree(data)

    if NFits == 0:
        NFits = data.shape[0]

    #surfs = []

    #def _task(i):
    #    return fitPtA(kdt, pts, i)

    nCPUs = multiprocessing.cpu_count()
    #nCPUs = 1

    #if fitPos:
    nParams = 8
    res = shmarray.zeros((nParams, NFits))
    pos = shmarray.zeros((3, NFits))
    nPs = shmarray.zeros(NFits)
    #rt = shmarray.zeros((2,NFits))

    fnums = range(NFits)

    processes = [
        multiprocessing.Process(target=fit_quad_surfaces_t,
                                args=(data, kdt, fnums[i::nCPUs], res, pos,
                                      nPs)) for i in range(nCPUs)
    ]

    for p in processes:
        print(p)
        p.start()

    for p in processes:
        print(p)
        p.join()

    return res, pos, nPs
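The fnums[i::nCPUs] slicing above hands each worker every nCPUs-th fit index, interleaving the work rather than splitting it into contiguous blocks. A minimal standalone sketch with toy values:

NFits, nCPUs = 10, 3
fnums = range(NFits)
chunks = [list(fnums[i::nCPUs]) for i in range(nCPUs)]
print(chunks)  # [[0, 3, 6, 9], [1, 4, 7], [2, 5, 8]]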
Example 4
def rendJitTet(x,y,z,n,jsig, jsigz, mcp, imageBounds, pixelSize, sliceSize=100):
    # FIXME - signature now differs from visHelpersMin
    
    #import gen3DTriangs
    sizeX = int((imageBounds.x1 - imageBounds.x0) / pixelSize)
    sizeY = int((imageBounds.y1 - imageBounds.y0) / pixelSize)
    sizeZ = int((imageBounds.z1 - imageBounds.z0) / sliceSize)

    # convert from [nm] to [pixels]
    x = (x - imageBounds.x0) / pixelSize
    y = (y - imageBounds.y0) / pixelSize
    z = (z - imageBounds.z0) / sliceSize

    jsig = jsig / pixelSize
    jsigz = jsigz / sliceSize
    
    
    if multiProc and not multiprocessing.current_process().daemon:
        im = shmarray.zeros((sizeX, sizeY, sizeZ), order='F')

        x = shmarray.create_copy(x)
        y = shmarray.create_copy(y)
        z = shmarray.create_copy(z)

        if type(jsig) == np.ndarray:
            jsig = shmarray.create_copy(jsig)

        if type(jsigz) == np.ndarray:
            jsigz = shmarray.create_copy(jsigz)


        nCPUs = multiprocessing.cpu_count()

        tasks = int(n / nCPUs) * np.ones(nCPUs, 'i')
        tasks[:int(n % nCPUs)] += 1

        processes = [multiprocessing.Process(target = rendJTet, args=(im, y, x,z, jsig, jsigz, mcp, nIt)) for nIt in tasks]

        for p in processes:
            p.start()

        for p in processes:
            p.join()

        return im/n

    else:
        im = np.zeros((sizeX, sizeY, sizeZ), order='F')

        rendJTet(im, y, x, z, jsig, jsigz, mcp, n)

        return im/n
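The conversion above maps coordinates in nm onto fractional voxel indices by subtracting the lower bound and dividing by the voxel size. A minimal standalone sketch with toy bounds:

import numpy as np

x_nm = np.array([0.0, 35.0, 140.0])   # positions [nm]
x0, pixelSize = 0.0, 70.0             # lower bound and pixel size [nm]
print((x_nm - x0) / pixelSize)        # [0.  0.5 2. ]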
Example 5
def fitPtsAP(pts, NFits=0):
    from scipy.spatial import cKDTree
    #kdt = kdtree.KDTree(pts)
    kdt = cKDTree(pts)

    if NFits == 0:
        NFits = pts.shape[0]

    #surfs = []

    #def _task(i):
    #    return fitPtA(kdt, pts, i)

    nCPUs = multiprocessing.cpu_count()
    nCPUs = 1

    res = shmarray.zeros((10, NFits))
    pos = shmarray.zeros((3, NFits))
    nPs = shmarray.zeros(NFits)
    #rt = shmarray.zeros((2,NFits))

    fnums = range(NFits)

    processes = [
        multiprocessing.Process(target=fitPtsAt,
                                args=(pts, kdt, fnums[i::nCPUs], res, pos,
                                      nPs)) for i in range(nCPUs)
    ]

    for p in processes:
        print(p)
        p.start()

    for p in processes:
        print(p)
        p.join()

    return res, pos, nPs
Example 6
    def __init__(self, T, shm=False, extraSpaceFactor=1.5, calcDistances=True):
        self.Nverts = len(T.x)
        if shm:
            from PYME.util.shmarray import shmarray
            self.edgeArray = shmarray.zeros(
                int(extraSpaceFactor * self.Nverts), dtype)
        else:
            self.edgeArray = numpy.zeros(int(extraSpaceFactor * self.Nverts),
                                         dtype)

        #record how many vertices there are
        self.edgeArray[-1]['numIncidentEdges'] = self.Nverts

        #say where we can start adding extra rows
        self.edgeArray[-1]['nextRecordIndex'] = self.Nverts + 1

        addEdges(self.edgeArray, T.edges)

        if calcDistances:
            self.calcDistances((T.x, T.y))
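The edge array above is a structured array whose last row is reserved for bookkeeping. A minimal standalone sketch of that pattern; the field names follow the snippet, but the real dtype in the library has additional fields:

import numpy as np

edge_dtype = np.dtype([('numIncidentEdges', 'i4'), ('nextRecordIndex', 'i4')])
edgeArray = np.zeros(8, edge_dtype)
edgeArray[-1]['numIncidentEdges'] = 5   # record how many vertices there are
edgeArray[-1]['nextRecordIndex'] = 6    # where extra rows can start being added
print(edgeArray[-1])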
Example 7
def fit_quad_surfaces_Pr(data, radius, fitPos=False, NFits=0):
    """
    Fits quadratic surfaces to each point in the data set using the neighbouring points within a radius r to define the
    surface. This version distributes the processing across multiple processes in order to speed things up.
    
    Parameters
    ----------
    data: [N,3] ndarray
        The point positions
        
    radius: float
        The radius in nm around each point to use as support for the surface through that point. The surface patch will
        be fit to all points within this radius. Implicitly this sets the scale of the smoothing - i.e. the scale over
        which the true object surface can be assumed to have quadratic form.
        
    fitPos: bool
        Should the surface be allowed to depart from the control point (i.e. the point which was used to define the fit
        support neighbourhood). A value of False constrains the surface such that it always passes through the control
        point, whereas True lets the surface move. True should give more accurate surfaces when the point density is high
        and surfaces are well separated, False results in a better constrained fit and deals somewhat better with the case
        when multiple surfaces are close by (forces the fit into the local minimum corresponding to the surface that the
        control point is on).
    NFits: int
        Only fit the first NFits points. Largely exists for debugging to allow faster and more interactive computation.

    Returns
    -------

    a numpy array of results with the dtype surfit.SURF_PATCH_FLAT
    """
    from scipy.spatial import cKDTree
    #generate a kdtree to allow us to rapidly find a point's neighbours
    kdt = cKDTree(data)
    
    if NFits == 0:
        NFits = data.shape[0]
    
    nCPUs = multiprocessing.cpu_count()
    #nCPUs = 1
    
    #generate a results array in shared memory. The worker processes will each write their chunk of results into this array.
    #this makes the calling semantics of the actual call below pass-by-reference for the results
    #there is a bit of magic going on behind the scenes for this to work - see PYME.util.shmarray
    results = shmarray.zeros(NFits, SURF_PATCH_DTYPE_FLAT)
    
    #calculate a list of points at which to fit a surface
    fnums = range(NFits)
    
    #create a process for each CPU and assign each a chunk of fits
    #note that the slicing of fnums[i::nCPUs] effectively interleaves the fit allocations - i.e. one process works on every
    #nCPUs-th point. This was done as a simple way of allocating the tasks evenly, but might not be optimal in terms of e.g.
    #cache coherency. The process creation here will be significantly more efficient on *nix platforms, which use copy-on-write
    #forking, when compared to Windows, which will end up copying both the data and kdt structures.
    if multiprocessing.current_process().name == 'MainProcess':  # avoid potentially trying to spawn children from daemon
        processes = [multiprocessing.Process(target=fit_quad_surfaces_tr,
                                             args=(data, kdt, fnums[i::nCPUs], results, radius, fitPos)) for i in range(nCPUs)]
        # launch all the processes
        logger.debug('launching quadratic surface patch fitting processes')
        for p in processes:
            p.start()
        # wait for them to complete
        for p in processes:
            p.join()
    else:
        logger.debug('fitting quadratic surface patches in main process')
        fit_quad_surfaces_tr(data, kdt, fnums, results, radius, fitPos)
    # each process should have written their results into our shared memory array, return this
    return results
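A hedged usage sketch of the function above; the point cloud, radius and fitPos values are illustrative, not from the source:

import numpy as np

points = np.random.rand(1000, 3) * 1e3           # [N,3] point positions in nm
patches = fit_quad_surfaces_Pr(points, radius=100.0, fitPos=False)
print(patches.shape, patches.dtype)              # one SURF_PATCH_FLAT record per fitted point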
Example 8
def tsp_chunk_two_opt_multiproc(positions,
                                epsilon,
                                points_per_chunk,
                                n_proc=1):
    # divide points spatially
    positions = positions.astype(np.float32)
    section, n_sections = split_points_kmeans(positions, points_per_chunk)
    I = np.argsort(section)
    section = section[I]
    positions = positions[I, :]

    # split out points
    n_cpu = n_proc if n_proc > 0 else multiprocessing.cpu_count()
    tasks = int(n_sections / n_cpu) * np.ones(n_cpu, 'i')
    tasks[:int(n_sections % n_cpu)] += 1

    if positions.shape[0] < np.iinfo(np.uint16).max:
        route = shmarray.zeros(positions.shape[0], dtype=np.uint16)
    else:
        route = shmarray.zeros(positions.shape[0], dtype='i')

    uni, counts = np.unique(section, return_counts=True)
    logger.debug('%d points total, section counts: %s' % (counts.sum(),
                                                          (counts, )))
    if (counts > 1000).any():
        logger.warning(
            '%d counts in a bin, traveling salesperson algorithm may be very slow'
            % counts.max())

    ind_task_start = 0
    ind_pos_start = 0
    processes = []

    cumcount = counts.cumsum()
    cumtasks = tasks.cumsum()
    print('%d tasks' % cumtasks[-1])
    t = time.time()
    if n_cpu == 1:
        two_opt_section(positions, 0, counts, tasks[0], epsilon, route)
    else:
        for ci in range(n_cpu):
            ind_task_end = cumtasks[ci]
            ind_pos_end = cumcount[ind_task_end - 1]

            subcounts = counts[ind_task_start:ind_task_end]

            p = multiprocessing.Process(
                target=two_opt_section,
                args=(positions[ind_pos_start:ind_pos_end, :], ind_pos_start,
                      subcounts, tasks[ci], epsilon, route))
            p.start()
            processes.append(p)
            ind_task_start = ind_task_end
            ind_pos_start = ind_pos_end

        [p.join() for p in processes]
    print('Chunked TSPs finished after ~%.2f s, connecting chunks' %
          (time.time() - t))

    sorted_pos = positions[route, :]
    # make cuts at the corner of each section
    new_sections = np.empty_like(section)
    n_new_sections = 0
    start = 0
    cut_positions = []
    for sind in range(n_sections):
        section_count = counts[sind]
        pos = sorted_pos[start:start + section_count]
        corner0, corner1 = np.argsort(pos[:, 0] + pos[:, 1])[np.array(
            [0, -1])] + start
        corner2, corner3 = np.argsort(pos[:, 0] - pos[:, 1])[np.array(
            [0, -1])] + start
        corners = np.sort([corner0, corner1, corner2, corner3])
        cut_positions.extend([
            corners[0], corners[1] - 1, corners[1], corners[2] - 1, corners[2],
            corners[3] - 1
        ])
        for ci in range(3):
            new_sections[corners[ci]:corners[ci + 1] + 1] = n_new_sections
            n_new_sections += 1

        start += section_count
        # label_start +=
    cut_positions[-1] += 1  # move the last corner to the end of the whole route
    cut_positions = np.sort(cut_positions)
    # np.testing.assert_array_equal(new_sections[cut_positions], np.repeat(np.arange(n_new_sections), 2))

    t = time.time()
    print('linking sections')
    linked_route = link_route(sorted_pos, cut_positions, new_sections, epsilon)
    print('sections linked in %.2f s' % (time.time() - t))

    # np.testing.assert_array_equal(sorted_pos[linked_route], positions[route][linked_route])
    # import matplotlib.pyplot as plt
    # from matplotlib import cm
    # colors = cm.get_cmap('prism', n_new_sections)
    # plt.figure()
    # plt.plot(sorted_pos[linked_route, 0], sorted_pos[linked_route, 1], color='k')
    # for pi in range(len(section)):
    #     plt.scatter(sorted_pos[pi, 0], sorted_pos[pi, 1], marker='$' + str(new_sections[pi]) + '$',
    #                 color=colors(new_sections[pi]))
    # plt.show()

    # don't forget the linked route sorts the first-pass route, which sorts the section-sorted positions -> take care of that here
    return I[route][linked_route]
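The route array above is allocated as uint16 only while that type can index every point, otherwise a wider integer type is used. A minimal standalone sketch of that choice:

import numpy as np

n_points = 70000
dtype = np.uint16 if n_points < np.iinfo(np.uint16).max else 'i'
route = np.zeros(n_points, dtype=dtype)
print(route.dtype)  # int32, since 70000 exceeds the uint16 maximum of 65535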
Example 9
def rendJitTriang(x,
                  y,
                  n,
                  jsig,
                  mcp,
                  imageBounds,
                  pixelSize,
                  seeds=None,
                  geometric_mean=True,
                  mdh=None):
    """

    Parameters
    ----------
    x : ndarray
        x positions [nm]
    y : ndarray
        y positions [nm]
    n : int
        number of jittered renderings to average into the final rendering
    jsig : ndarray (or scalar float)
        standard deviations [nm] of normal distributions to sample when jittering for each point
    mcp : float
        Monte Carlo sampling probability (0, 1]
    imageBounds : PYME.IO.ImageBounds
        ImageBounds instance - range in each dimension should ideally be an integer multiple of pixelSize.
    pixelSize : float
        size of pixels to be rendered [nm]
    seeds : ndarray
        [optional] supplied seeds if we want to strictly reconstruct a previously generated image
    geometric_mean : bool
        [optional] Flag to scale intensity by geometric mean (True) or [localizations / um^2] (False)
    mdh: PYME.IO.MetaDataHandler.MDHandlerBase or subclass
        [optional] metadata handler to store seeds to

    Returns
    -------
    im : ndarray
        2D Jittered Triangulation rendering.

    Notes
    -----
    Triangles which reach outside of the image bounds are dropped and not included in the rendering.
    """
    sizeX = int((imageBounds.x1 - imageBounds.x0) / pixelSize)
    sizeY = int((imageBounds.y1 - imageBounds.y0) / pixelSize)

    if geometric_mean:
        fcn = _rend_jit_tri_geometric
    else:
        fcn = rendJitTri

    if multiProc and not multiprocessing.current_process().daemon:
        im = shmarray.zeros((sizeX, sizeY))

        x = shmarray.create_copy(x)
        y = shmarray.create_copy(y)
        if type(jsig) == numpy.ndarray:
            jsig = shmarray.create_copy(jsig)

        # We will generate 1 process for each seed, defaulting to generating a seed for each CPU core if seeds are not
        # passed explicitly. Rendering with explicitly passed seeds will be deterministic, but performance will not be
        # optimal unless n_seeds = n_CPUs
        seeds = _generate_subprocess_seeds(multiprocessing.cpu_count(), mdh,
                                           seeds)
        iterations = _iterations_per_task(n, len(seeds))

        processes = [
            multiprocessing.Process(target=fcn,
                                    args=(im, x, y, jsig, mcp, imageBounds,
                                          pixelSize, nIt, s))
            for nIt, s in zip(iterations, seeds)
        ]

        for p in processes:
            p.start()

        for p in processes:
            p.join()

    else:
        im = numpy.zeros((sizeX, sizeY))

        # Technically we could just call fcn( ....,n), but we replicate the logic above and divide into groups of tasks
        # so that we can reproduce a previously generated image
        seeds = _generate_subprocess_seeds(1, mdh, seeds)
        iterations = _iterations_per_task(n, len(seeds))

        for nIt, s in zip(iterations, seeds):
            # NB - in normal usage, this loop only evaluates once, with nIt=n
            fcn(im, x, y, jsig, mcp, imageBounds, pixelSize, nIt, seed=s)

    if geometric_mean:
        return (1.e6 / (im / n + 1)) * (im > n)
    else:
        return im / n
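A hedged usage sketch of the function above; the stand-in bounds object only carries the attributes the function reads (x0, y0, x1, y1) - in PYME a real PYME.IO.ImageBounds instance would be passed instead - and all numeric values are illustrative:

import numpy as np
from types import SimpleNamespace

x = np.random.uniform(0, 5000, 10000)   # x positions [nm]
y = np.random.uniform(0, 5000, 10000)   # y positions [nm]
bounds = SimpleNamespace(x0=0.0, y0=0.0, x1=5000.0, y1=5000.0)  # stand-in for ImageBounds

im = rendJitTriang(x, y, n=10, jsig=30.0, mcp=1.0,
                   imageBounds=bounds, pixelSize=5.0)
print(im.shape)  # (1000, 1000)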
Example 10
def tsp_chunk_two_opt_multiproc(positions,
                                epsilon,
                                points_per_chunk,
                                n_proc=1):
    # assume density is uniform
    x_min, y_min = positions.min(axis=0)
    x_max, y_max = positions.max(axis=0)

    sections_per_side = int(np.sqrt((positions.shape[0] / points_per_chunk)))
    size_x = (x_max - x_min) / sections_per_side
    size_y = (y_max - y_min) / sections_per_side

    # bin points into our "pixels"
    X = np.round(positions[:, 0] / size_x).astype(int)
    Y = np.round(positions[:, 1] / size_y).astype(int)

    # number the sections
    section = X + Y * (Y.max() + 1)
    # keep all section numbers positive, starting at zero
    section -= section.min()
    n_sections = int(section.max() + 1)
    I = np.argsort(section)
    section = section[I]
    positions = positions[I, :]

    # split out points
    n_cpu = n_proc if n_proc > 0 else multiprocessing.cpu_count()
    tasks = int(n_sections / n_cpu) * np.ones(n_cpu, 'i')
    tasks[:int(n_sections % n_cpu)] += 1

    route = shmarray.zeros(positions.shape[0], dtype='i')

    uni, counts = np.unique(section, return_counts=True)
    logger.debug('%d points total, section counts: %s' % (counts.sum(),
                                                          (counts, )))
    if (counts > 1000).any():
        logger.warning(
            '%d counts in a bin, traveling salesperson algorithm may be very slow'
            % counts.max())

    ind_task_start = 0
    ind_pos_start = 0
    processes = []

    cumcount = counts.cumsum()
    cumtasks = tasks.cumsum()
    t = time.time()
    if n_cpu == 1:
        two_opt_section(positions, 0, counts, tasks[0], epsilon, route)
        pivot_indices = np.sort(
            np.concatenate([[0], cumcount[:-1],
                            cumcount - 1]))  # get start/stop indices for each
    else:
        for ci in range(n_cpu):
            ind_task_end = cumtasks[ci]
            ind_pos_end = cumcount[ind_task_end - 1]

            subcounts = counts[ind_task_start:ind_task_end]

            p = multiprocessing.Process(
                target=two_opt_section,
                args=(positions[ind_pos_start:ind_pos_end, :], ind_pos_start,
                      subcounts, tasks[ci], epsilon, route))
            p.start()
            processes.append(p)
            ind_task_start = ind_task_end
            ind_pos_start = ind_pos_end

        # next we need to join our sections. Prepare for this while the other processes are executing
        pivot_indices = np.sort(
            np.concatenate([[0], cumcount[:-1],
                            cumcount - 1]))  # get start/stop indices for each

        [p.join() for p in processes]
    print('Chunked TSPs finished after ~%.2f s, connecting chunks' %
          (time.time() - t))

    # do a two-opt on just the section start/ends, with ability to reverse the section
    # pivot positions won't be correct unless they're already sorted. No need to sort section because it's the same
    pivot_positions = positions[route, :][pivot_indices]
    # set the exit criterion low since the cost is cheap and the gains are high
    section_order, reversals = reversal_two_opt(section[pivot_indices],
                                                pivot_positions, epsilon / 1e3)

    final_route = np.copy(route)
    start = cumcount[0]
    # new_pivot_inds = []  # uncomment for plotting
    for sind in range(1,
                      n_sections):  # we got section 0 for free with the copy
        cur_section = section_order[sind]
        section_count = counts[cur_section]
        if reversals[sind]:
            final_route[start:start + section_count] = route[
                cumcount[cur_section - 1]:cumcount[cur_section]][::-1]
        else:
            final_route[start:start + section_count] = route[
                cumcount[cur_section - 1]:cumcount[cur_section]]
        # new_pivot_inds.append(start)  # uncomment for plotting
        # new_pivot_inds.append(start + section_count - 1)  # uncomment for plotting
        start += section_count

    # ----------- uncomment for plotting
    # import matplotlib.pyplot as plt
    # from matplotlib import cm
    # colors = cm.get_cmap('prism', n_sections)
    # plt.figure()
    # sorted_pos = positions[route, :]
    # plt.plot(positions[final_route, 0], positions[final_route, 1], color='k')
    # plt.scatter(positions[final_route, 0][new_pivot_inds], positions[final_route, 1][new_pivot_inds], color='k')
    # for pi in range(len(section)):
    #     plt.scatter(sorted_pos[pi, 0], sorted_pos[pi, 1], marker='$' + str(section[pi]) + '$',
    #                 color=colors(section[pi]))
    # plt.show()

    # don't forget the final sort sorts the already section-sorted positions -> take care of that here
    return I[final_route]
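The section numbering above snaps each point onto a coarse grid and folds the 2D cell coordinates into a single section label. A minimal standalone sketch with toy values:

import numpy as np

positions = np.random.rand(20, 2) * 100.0
size_x = size_y = 25.0
X = np.round(positions[:, 0] / size_x).astype(int)
Y = np.round(positions[:, 1] / size_y).astype(int)
section = X + Y * (Y.max() + 1)   # combine grid coordinates into one label, as above
section -= section.min()          # shift so the labels start at zero
print(np.unique(section))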