Example 1
    def precomputeWeights(self, pw_id, tmpV_id, prjD_id, lw_id):
        if self.precomputed:
            return

        #Compute and invert the lineweights
        astra.data3d.store(lw_id, 0)
        astra.data3d.store(tmpV_id, 1)
        self.performFP(lw_id, tmpV_id)
        mpi.run(self.opInvert, [lw_id])

        #Compute and invert the pixelweights
        astra.data3d.store(pw_id, 0)
        astra.data3d.store(prjD_id, 1)
        self.performBP(prjD_id, pw_id)
        mpi.run(self.opInvert, [pw_id])
        self.precomputed = True
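The opInvert helper used above is not part of this excerpt. A minimal sketch of what an element-wise inversion of a distributed weight volume could look like, assuming numpy is available as np (as in the other examples) and that zero weights should map to zero rather than raise a division error:

    def opInvert(self, data_id):
        # Hypothetical sketch, not the plugin's actual implementation:
        # invert the locally shared block in place, guarding against
        # division by (near-)zero weights.
        data = astra.data3d.get_shared_local(data_id)
        data[:] = np.where(data > 1e-8, 1.0 / data, 0.0)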
Example 2
def grad3(src_id, gradX_id, gradY_id, gradZ_id, scale=None):
    """
    Compute discrete gradients in X, Y, Z directions.

    All four IDs must be mpi/distributed data3d volumes of the same
    dimensions.

    If scale is specified, the output is multiplied by scale.

    :param src_id: ID of input volume
    :type src_id: :class:`int`
    :param gradX_id: ID of output volume for gradient in X direction
    :type gradX_id: :class:`int`
    :param gradY_id: ID of output volume for gradient in Y direction
    :type gradY_id: :class:`int`
    :param gradZ_id: ID of output volume for gradient in Z direction
    :type gradZ_id: :class:`int`
    :param scale: if specified, multiply output by this
    :type scale: :class:`float`
    """
    def _grad3_internal(src_id, gradX_id, gradY_id, gradZ_id, scale):
        dataS = astra.data3d.get_shared_local(src_id)
        dataDX = astra.data3d.get_shared_local(gradX_id)
        dataDY = astra.data3d.get_shared_local(gradY_id)
        dataDZ = astra.data3d.get_shared_local(gradZ_id)
        dataDX[:] = dataS
        dataDX[1:, :, :] -= dataS[0:-1, :, :]
        if scale is not None:
            dataDX *= scale
        dataDY[:] = dataS
        dataDY[:, 1:, :] -= dataS[:, 0:-1, :]
        if scale is not None:
            dataDY *= scale
        dataDZ[:] = dataS
        dataDZ[:, :, 1:] -= dataS[:, :, 0:-1]
        if scale is not None:
            dataDZ *= scale

    mpi.run(_grad3_internal, [src_id, gradX_id, gradY_id, gradZ_id, scale])
    astra.data3d.sync(gradX_id)
    astra.data3d.sync(gradY_id)
    astra.data3d.sync(gradZ_id)
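A usage sketch, assuming vol_geom was obtained from mpi.create as in the reconstruction examples further down and that x_id is an existing distributed volume:

# Allocate one output volume per axis and take the scaled forward differences.
gx_id = astra.data3d.create('-vol', vol_geom)
gy_id = astra.data3d.create('-vol', vol_geom)
gz_id = astra.data3d.create('-vol', vol_geom)
grad3(x_id, gx_id, gy_id, gz_id, scale=0.5)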
Example 3
def linear_combination(out_id, a, id1, b, id2):
    """
    Evaluate out_id = a * id1 + b * id2

    All three IDs must be mpi/distributed data3d volumes of the same
    dimensions.

    out_id and/or id1 and/or id2 are allowed to be the same.
    """
    def _lincomb_internal(out_id, a, id1, b, id2):
        # TODO: Allow setting number of threads used by numexpr somehow
        import numexpr
        c = astra.data3d.get_shared_local(id1)
        d = astra.data3d.get_shared_local(id2)
        dst = astra.data3d.get_shared_local(out_id)
        numexpr.evaluate("a*c + b*d", out=dst)

    mpi.run(_lincomb_internal,
            [out_id, np.float32(a), id1,
             np.float32(b), id2])
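Because out_id may coincide with id1 or id2, updates can be written in place. For example, an over-relaxation step x <- x + 1.5 * (y - x), which equals -0.5 * x + 1.5 * y, for two hypothetical distributed volumes x_id and y_id:

# In-place over-relaxation: x_id serves as both an input and the output.
linear_combination(x_id, -0.5, x_id, 1.5, y_id)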
Example 4
    def run(self, iters):
        #Retrieve the geometry and use it to allocate temporary buffers
        vol_geom = astra.data3d.get_geometry(self.rec_id)
        proj_geom = astra.data3d.get_geometry(self.prj_id)

        pixelWeight_id = astra.data3d.create('-vol', vol_geom)
        tmpVolume_id = astra.data3d.create('-vol', vol_geom)
        projData_id = astra.data3d.create('-sino', proj_geom)
        lineWeight_id = astra.data3d.create('-sino', proj_geom)

        #Compute the weights before we start the iteration steps
        self.precomputeWeights(pixelWeight_id, tmpVolume_id, projData_id,
                               lineWeight_id)

        #Iterate
        for i in range(iters):
            #FP part
            astra.data3d.store(projData_id, 0)
            self.performFP(projData_id, self.rec_id)
            mpi.run(self.opAddScaledMulScalar,
                    [projData_id, self.prj_id, 1.0, -1.0])
            mpi.run(self.opMul, [projData_id, lineWeight_id])

            #BP part
            astra.data3d.store(tmpVolume_id, 0)
            self.performBP(projData_id, tmpVolume_id)
            mpi.run(self.opAddMul, [self.rec_id, tmpVolume_id, pixelWeight_id])
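The element-wise helpers opAddScaledMulScalar, opMul and opAddMul are not shown in this excerpt. Together with the inverted weights from precomputeWeights they realise the SIRT update x <- x + C * BP(R * (p - FP(x))), where R are the inverted line weights and C the inverted pixel weights. One plausible shape for these helpers, stated purely as an assumption about their semantics:

    def opAddScaledMulScalar(self, dst_id, src_id, a, b):
        # Assumed semantics: dst = a * src + b * dst.  With a=1.0 and
        # b=-1.0 this turns FP(x) stored in dst into the residual p - FP(x).
        dst = astra.data3d.get_shared_local(dst_id)
        src = astra.data3d.get_shared_local(src_id)
        dst *= b
        dst += a * src

    def opMul(self, dst_id, src_id):
        # Assumed semantics: element-wise dst *= src (apply line weights).
        dst = astra.data3d.get_shared_local(dst_id)
        dst *= astra.data3d.get_shared_local(src_id)

    def opAddMul(self, dst_id, src1_id, src2_id):
        # Assumed semantics: dst += src1 * src2 (weighted volume update).
        dst = astra.data3d.get_shared_local(dst_id)
        dst += astra.data3d.get_shared_local(src1_id) * \
               astra.data3d.get_shared_local(src2_id)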
Example 5
def grad3_adj(dst_id, gradX_id, gradY_id, gradZ_id, scale=None):
    """
    Compute adjoint of grad3.

    All four IDs must be mpi/distributed data3d volumes of the same
    dimensions.

    If scale is specified, the output is multiplied by scale.

    :param dst_id: ID of output volume
    :type dst_id: :class:`int`
    :param gradX_id: ID of input volume with gradient in X direction
    :type gradX_id: :class:`int`
    :param gradY_id: ID of input volume with gradient in Y direction
    :type gradY_id: :class:`int`
    :param gradZ_id: ID of input volume with gradient in Z direction
    :type gradZ_id: :class:`int`
    :param scale: if specified, multiply output by this
    :type scale: :class:`float`
    """
    def _grad3_adj_internal(dst_id, gradX_id, gradY_id, gradZ_id, scale):
        dataSX = astra.data3d.get_shared_local(gradX_id)
        dataSY = astra.data3d.get_shared_local(gradY_id)
        dataSZ = astra.data3d.get_shared_local(gradZ_id)
        dataD = astra.data3d.get_shared_local(dst_id)
        dataD[:] = dataSX
        dataD[0:-1, :, :] -= dataSX[1:, :, :]
        dataD += dataSY
        dataD[:, 0:-1, :] -= dataSY[:, 1:, :]
        dataD += dataSZ
        dataD[:, :, 0:-1] -= dataSZ[:, :, 1:]
        if scale is not None:
            dataD *= scale

    mpi.run(_grad3_adj_internal, [dst_id, gradX_id, gradY_id, gradZ_id, scale])
    astra.data3d.sync(dst_id)
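A quick consistency check for the pair: for a true adjoint, <grad3(x), y> should equal <x, grad3_adj(y)> up to rounding. A sketch using the dot helper from the next example, where all IDs are assumed to be distributed '-vol' buffers of matching size:

grad3(x_id, gx_id, gy_id, gz_id)
grad3_adj(tmp_id, yx_id, yy_id, yz_id)
lhs = dot(gx_id, yx_id) + dot(gy_id, yy_id) + dot(gz_id, yz_id)
rhs = dot(x_id, tmp_id)
print(abs(lhs - rhs))  # should be close to zero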
Example 6
def dot(id1, id2):
    """
    Compute dot product of the volumes id1, id2.

    Both IDs must be mpi/distributed data3d volumes of the same
    dimensions.

    :param id1: ID of first volume
    :type id1: :class:`int`
    :param id2: ID of second volume
    :type id2: :class:`int`
    :returns: :class:`float` -- The dot product of the two volumes
    """
    def _dot_internal(id1, id2):
        d1 = astra.data3d.get_shared_local(id1)
        d2 = astra.data3d.get_shared_local(id2)
        # Only look at the slices we're actually responsible
        # for, to avoid duplicating the overlapping slices.
        s = mpi.getObjectResponsibleSlices(id1)
        return np.dot(d1[s].ravel(), d2[s].ravel())

    return sum(mpi.run(_dot_internal, [id1, id2]))
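A typical use is an L2 norm or a convergence measure, e.g. for a hypothetical distributed volume x_id:

norm = np.sqrt(dot(x_id, x_id))  # ||x||_2 over the full distributed volume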
Example 7
    def run(self, iters):
        print("Hello World, running the Segmentation plugin")


        # Set up the SIRT reconstruction and run it
        cfg = astra.astra_dict('SIRT3D_CUDA')
        #cfg = astra.astra_dict('CGLS3D_CUDA')
        cfg['ReconstructionDataId'] = self.rec_id
        cfg['ProjectionDataId']     = self.prj_id
        alg_id = astra.algorithm.create(cfg)
        astra.algorithm.run(alg_id, self.sirta_iter)
        astra.algorithm.delete(alg_id)

        vol_geom  = astra.data3d.get_geometry(self.rec_id)
        proj_geom = astra.data3d.get_geometry(self.prj_id)

        mask_id     = astra.data3d.create('-vol', vol_geom)
        seg_id      = astra.data3d.create('-vol', vol_geom)
        tempVol_id  = astra.data3d.create('-vol', vol_geom)
        tempPrj_id  = astra.data3d.create('-proj3d', proj_geom)


        for _ in range(self.segment_iter):
            mpi.run(self.segmentData, [self.segment_rho, self.segment_tau, self.rec_id, seg_id])
            astra.data3d.sync(seg_id)

            print("Mark borders")
            mpi.run(self.markSegmentBorders2, [seg_id, mask_id])
            print("Mark borders, done")

            #TODO random mask pixels

            mpi.run(self.subMulArray,[mask_id, seg_id, tempVol_id, 1.0])
            astra.data3d.sync(tempVol_id)

            cfg = astra.astra_dict('FP3D_CUDA')
            cfg['VolumeDataId']     = tempVol_id
            cfg['ProjectionDataId'] = tempPrj_id
            alg_id = astra.algorithm.create(cfg)
            astra.algorithm.run(alg_id)
            astra.algorithm.delete(alg_id)

            mpi.run(self.opArray,[self.prj_id, tempPrj_id, tempPrj_id, operator.sub])
            astra.data3d.sync(tempPrj_id)

            mpi.run(self.opArray,[self.rec_id, mask_id, self.rec_id, operator.mul])
            astra.data3d.sync(self.rec_id)

            #tempPrj_id = sino - FP ( (1-mask_id) * seg_id)  #sino is prj_id
            #x = rec_id * (mask) 


            cfg = astra.astra_dict('SIRT3D_CUDA')
            cfg['ReconstructionDataId'] = self.rec_id
            cfg['ProjectionDataId']     = tempPrj_id
            cfg['option'] = {'ReconstructionMaskId' : mask_id }
            alg_id = astra.algorithm.create(cfg)
            astra.algorithm.run(alg_id, self.sirtb_iter)
            astra.algorithm.delete(alg_id)

            mpi.run(self.opArray,[tempVol_id, self.rec_id, self.rec_id, operator.add])
            astra.data3d.sync(self.rec_id)
                
            #rec = tempVol_id + rec_id
            #rec = seg_id *(1-mask_id) + rec_id

        mpi.run(self.segmentData, [self.segment_rho, self.segment_tau, self.rec_id, self.rec_id])
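segmentData, markSegmentBorders2, subMulArray and opArray belong to the plugin and are not shown in this excerpt. Purely as an illustration of the call signature used above, a simple single-threshold segmentation could look like the sketch below; the plugin's actual rule (e.g. multiple grey levels) may differ:

    def segmentData(self, rho, tau, src_id, dst_id):
        # Hypothetical sketch: voxels above the threshold tau are assigned
        # the grey value rho, everything else becomes background (0).
        src = astra.data3d.get_shared_local(src_id)
        dst = astra.data3d.get_shared_local(dst_id)
        dst[:] = np.where(src > tau, rho, 0.0)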
Example 8
astra.algorithm.run(alg_id, sirta_iter)

data3 = astra.data3d.get(rec_id)
pylab.figure(1)
pylab.imshow(data3[:, :, 65])

#Release SIRT memory
astra.algorithm.delete(alg_id)

mask_id = astra.data3d.create('-vol', vol_geom)
seg_id = astra.data3d.create('-vol', vol_geom)
tempVol_id = astra.data3d.create('-vol', vol_geom)
tempPrj_id = astra.data3d.create('-proj3d', proj_geom)

for _ in range(segment_iter):
    mpi.run(segmentData, [segment_rho, segment_tau, rec_id, seg_id])
    astra.data3d.sync(seg_id)

    #"Mark borders"
    mpi.run(markSegmentBorders2, [seg_id, mask_id])
    #Random mask pixels, not implemented in this example

    mpi.run(subMulArray, [mask_id, seg_id, tempVol_id, 1.0])
    astra.data3d.sync(tempVol_id)

    cfg = astra.astra_dict('FP3D_CUDA')
    cfg['VolumeDataId'] = tempVol_id
    cfg['ProjectionDataId'] = tempPrj_id
    alg_id = astra.algorithm.create(cfg)
    astra.algorithm.run(alg_id)
    astra.algorithm.delete(alg_id)
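In the plugin version of this loop (the Segmentation plugin above), the forward projection is followed by forming the residual sinogram and masking the current reconstruction before a masked SIRT run. In script form that step would look roughly as follows, assuming the sinogram ID is prj_id, the operator module is imported, and the same opArray helper is available:

mpi.run(opArray, [prj_id, tempPrj_id, tempPrj_id, operator.sub])
astra.data3d.sync(tempPrj_id)

mpi.run(opArray, [rec_id, mask_id, rec_id, operator.mul])
astra.data3d.sync(rec_id)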
Example 9
#Setup the MPI domain distribution
proj_geom, vol_geom = mpi.create(proj_geom,
                                 vol_geom,
                                 nGhostcellsVolume=5,
                                 nGhostcellsProjection=0)

proj_id = astra.data3d.create('-proj3d', proj_geom)
rec_id = astra.data3d.create('-vol', vol_geom)

#Use the downscaled size when reading source images that have already been downscaled
imSize2 = [imSize[0] // downScaleFactor, imSize[1] // downScaleFactor]
imSize2 = imSize  #Here the original, full-size source images are used

#Let each process read its own subset of data
mpi.run(readDistributedData, ['angles', filepath, filename, imSize2, proj_id])

# Set up the parameters for a reconstruction algorithm using the GPU
cfg = astra.astra_dict('SIRT3D_CUDA')
#cfg = astra.astra_dict('CGLS3D_CUDA')
cfg['ReconstructionDataId'] = rec_id
cfg['ProjectionDataId'] = proj_id

# Create the algorithm object from the configuration structure
alg_id = astra.algorithm.create(cfg)

# Run 150 iterations of the algorithm
astra.algorithm.run(alg_id, 150)

#Each process writes its own piece of result data
filepath = '/media/Data1/PUFoam_17.7um/VolumeJB/'
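The actual write call is not included in this excerpt. A hypothetical per-process writer, built only from the mpi/data3d calls used above (the helper name and file naming scheme are illustrative, not part of the toolbox):

def writeLocalResult(rec_id, filepath):
    # Hypothetical: each worker saves only the slab it is responsible for,
    # using its process ID to keep the file names unique.
    import os
    data = astra.data3d.get_shared_local(rec_id)
    s = mpi.getObjectResponsibleSlices(rec_id)
    np.save(os.path.join(filepath, 'rec_%d.npy' % os.getpid()), data[s])

mpi.run(writeLocalResult, [rec_id, filepath])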