Example #1
def mudcm2nii(datain, Cnt):
    '''DICOM mu-map to NIfTI'''
    mu, pos, ornt = nimpa.dcm2im(datain['mumapDCM'])
    mu *= 0.0001
    A = pos['AFFINE']
    A[0, 0] *= -1
    A[0, 3] *= -1
    A[1, 3] += A[1, 1]
    nimpa.array2nii(mu[:, ::-1, :], A,
                    os.path.join(os.path.dirname(datain['mumapDCM']), 'mu.nii.gz'))

    # ------ get data needed for creating a blank reference image (to which the mu-map is resampled) -----
    # gantry offset
    goff, tpo = mmraux.lm_pos(datain, Cnt)
    ihdr, csainfo = mmraux.hdr_lm(datain)
    # start horizontal bed position
    p = re.compile(r'start horizontal bed position.*\d{1,3}\.*\d*')
    m = p.search(ihdr)
    fi = ihdr[m.start():m.end()].find('=')
    hbedpos = 0.1 * float(ihdr[m.start() + fi + 1:m.end()])

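    # B is the affine for the blank reference image: voxel sizes are scaled from
    # cm to mm (factor of 10) and the origin is offset by half the image FOV,
    # the gantry offset and the horizontal bed position obtained above.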
    B = np.diag(np.array([-10 * Cnt['SO_VXX'], 10 * Cnt['SO_VXY'], 10 * Cnt['SO_VXZ'], 1]))
    B[0, 3] = 10 * (.5 * Cnt['SO_IMX'] * Cnt['SO_VXX'] + goff[0])
    B[1, 3] = 10 * ((-.5 * Cnt['SO_IMY'] + 1) * Cnt['SO_VXY'] - goff[1])
    B[2, 3] = 10 * ((-.5 * Cnt['SO_IMZ'] + 1) * Cnt['SO_VXZ'] - goff[2] + hbedpos)
    im = np.zeros((Cnt['SO_IMZ'], Cnt['SO_IMY'], Cnt['SO_IMX']), dtype=np.float32)
    nimpa.array2nii(im, B, os.path.join(os.path.dirname(datain['mumapDCM']), 'muref.nii.gz'))
    # -------------------------------------------------------------------------------------
    fmu = os.path.join(os.path.dirname(datain['mumapDCM']), 'mu_r.nii.gz')
    if os.path.isfile(Cnt['RESPATH']):
        run([
            Cnt['RESPATH'], '-ref',
            os.path.join(os.path.dirname(datain['mumapDCM']), 'muref.nii.gz'), '-flo',
            os.path.join(os.path.dirname(datain['mumapDCM']), 'mu.nii.gz'), '-res', fmu, '-pad',
            '0'])
    else:
        log.error('path to resampling executable is incorrect!')
        raise IOError('Error launching NiftyReg for image resampling.')

    return fmu
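
A minimal usage sketch (the path and the nipet helper calls below are assumptions, not part of the function above):

from niftypet import nipet

mMRpars = nipet.get_mmrparams()                         # scanner constants and LUTs (assumed helper)
datain = nipet.classify_input('/path/to/raw', mMRpars)  # input-data dictionary (assumed helper)
fmu = mudcm2nii(datain, mMRpars['Cnt'])                 # path to the resampled mu-map NIfTI
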
Example #2
def cropxy(im, imsize, datain, Cnt, store_pth=''):
    '''Crop image transaxially to the size in tuple <imsize>.
    Return the image and the affine matrix.
    '''
    if imsize[0] % 2 != 0 or imsize[1] % 2 != 0:
        print('e> image size has to be an even number!')
        return None

    # cropping indexes
    i0 = (Cnt['SO_IMX']-imsize[0])//2
    i1 = (Cnt['SO_IMY']+imsize[1])//2

    B = image_affine(datain, Cnt, gantry_offset=False)
    B[0,3] -= 10*Cnt['SO_VXX']*i0
    B[1,3] += 10*Cnt['SO_VXY']*(Cnt['SO_IMY']-i1)

    cim = im[:, i0:i1, i0:i1]

    if store_pth!='':
        nimpa.array2nii(cim[::-1,::-1,:], B, store_pth, descrip='cropped')
        if Cnt['VERBOSE']:  print('i> saved cropped image to:', store_pth)

    return cim, B
Example #3
def cropxy(im, imsize, datain, Cnt, store_pth=''):
    '''
    Crop image transaxially to the size in tuple <imsize>.
    Return the image and the affine matrix.
    '''
    if imsize[0] % 2 != 0 or imsize[1] % 2 != 0:
        log.error('image size has to be an even number!')
        return None

    # cropping indexes
    i0 = int((Cnt['SO_IMX'] - imsize[0]) / 2)
    i1 = int((Cnt['SO_IMY'] + imsize[1]) / 2)

    B = image_affine(datain, Cnt, gantry_offset=False)
    B[0, 3] -= 10 * Cnt['SO_VXX'] * i0
    B[1, 3] += 10 * Cnt['SO_VXY'] * (Cnt['SO_IMY'] - i1)

    cim = im[:, i0:i1, i0:i1]

    if store_pth != '':
        nimpa.array2nii(cim[::-1, ::-1, :], B, store_pth, descrip='cropped')
        log.info('saved cropped image to:\n{}'.format(store_pth))

    return cim, B
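
A brief sketch of cropping a reconstructed image transaxially to 256 x 256 voxels (here `im`, `datain`, `Cnt` and `outpath` are assumed to come from earlier processing steps):

cim, B = cropxy(im, (256, 256), datain, Cnt,
                store_pth=os.path.join(outpath, 'pet_cropped.nii.gz'))
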
Example #4
def mmrchain(
    datain,  # all input data in a dictionary
    scanner_params,  # all scanner parameters in one dictionary
    # containing constants, transaxial and axial
    # LUTs.
    outpath='',  # output path for results
    frames=['fluid', [0, 0]],  # definition of time frames.
    mu_h=[],  # hardware mu-map.
    mu_o=[],  # object mu-map.
    tAffine=[],  # affine transformations for the mu-map for
    # each time frame separately.
    itr=4,  # number of OSEM iterations
    fwhm=0.,  # Gaussian Smoothing FWHM
    recmod=-1,  # reconstruction mode: -1: undefined, chosen
    # automatically. 3: attenuation and scatter
    # correction, 1: attenuation correction
    # only, 0: no correction (randoms only).
    histo=[],  # input histogram (from list-mode data);
    # if not given, it will be performed.
    trim=False,
    trim_scale=2,
    trim_interp=1,  # interpolation for upsampling used in PVC
    trim_memlim=True,  # reduced use of memory for machines
    # with limited memory (slow though)
    pvcroi=[],  # ROI used for PVC.  If undefined no PVC
    # is performed.
    pvcreg_tool='niftyreg',  # the registration tool used in PVC
    store_rois=False,  # stores the image of PVC ROIs
    # as defined in pvcroi.
    psfkernel=[],
    pvcitr=5,
    fcomment='',  # text comment used in the file name of
    # generated image files
    ret_sinos=False,  # return prompt, scatter and randoms
    # sinograms for each reconstruction
    store_img=True,
    store_img_intrmd=False,
    store_itr=[],  # store any reconstruction iteration in
    # the list.  ignored if the list is empty.
    del_img_intrmd=False):
    log = logging.getLogger(__name__)

    # decompose all the scanner parameters and constants
    Cnt = scanner_params['Cnt']
    txLUT = scanner_params['txLUT']
    axLUT = scanner_params['axLUT']

    # -------------------------------------------------------------------------
    # FRAMES
    # check for the provided dynamic frames
    if isinstance(frames, list):
        # Can be given in three ways:
        # * a 1D list (duration of each frame is listed)
        # * a more concise 2D list--repetition and duration lists in
        #   each entry.  Must start with the 'def' entry.
        # * a 2D list with fluid timings: must start with the string
        #   'fluid' or 'timings'.  a 2D list with consecutive lists
        #   describing start and end of the time frame, [t0, t1];
        #   The number of time frames for this option is unlimited,
        #   provided the t0 and t1 are within the acquisition times.
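        # For illustration (the numbers are hypothetical), the three formats are:
        #   frames = [15, 15, 30, 30]                # 1D: frame durations in seconds
        #   frames = ['def', [4, 15], [2, 30]]       # 2D: 4 frames of 15 s, then 2 of 30 s
        #   frames = ['timings', [0, 30], [30, 90]]  # 2D: explicit [t0, t1] per frame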

        # 2D starting with entry 'fluid' or 'timings'
        if isinstance(frames[0], str) and (frames[0] == 'fluid' or frames[0] == 'timings') \
            and all([isinstance(t, list) and len(t) == 2 for t in frames[1:]]):
            t_frms = frames[1:]

        # if 2D definitions, starting with entry 'def':
        elif isinstance(frames[0], str) and frames[0] == 'def' \
            and all([isinstance(t,list) and len(t)==2 for t in frames[1:]]):
            # get total time and list of all time frames
            dfrms = dynamic_timings(frames)
            t_frms = dfrms[1:]

        # if 1D:
        elif all([isinstance(t, (int, np.integer)) for t in frames]):
            # get total time and list of all time frames
            dfrms = dynamic_timings(frames)
            t_frms = dfrms[1:]

        else:
            log.error(
                'osemdyn: frames definitions are not given in the correct list format: 1D [15,15,30,30,...] or 2D list [[2,15], [2,30], ...]'
            )
    else:
        log.error(
            'osemdyn: provided dynamic frames definitions are not in either a Python list or numpy array.'
        )
        raise TypeError('Wrong data type for dynamic frames')
    # number of dynamic time frames
    nfrm = len(t_frms)
    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # create folders for results
    if outpath == '':
        petdir = os.path.join(datain['corepath'], 'reconstructed')
        fmudir = os.path.join(datain['corepath'], 'mumap-obj')
        pvcdir = os.path.join(datain['corepath'], 'PRCL')
    else:
        petdir = os.path.join(outpath, 'PET')
        fmudir = os.path.join(outpath, 'mumap-obj')
        pvcdir = os.path.join(outpath, 'PRCL')

    # folder for co-registered mu-maps (for motion compensation)
    fmureg = os.path.join(fmudir, 'registered')
    # folder for affine transformation MR/CT->PET
    petaff = os.path.join(petdir, 'faffine')

    # folder for reconstructed images (dynamic or static depending on number of frames).
    if nfrm > 1:
        petimg = os.path.join(petdir, 'multiple-frames')
        pvcdir = os.path.join(pvcdir, 'multiple-frames')
    elif nfrm == 1:
        petimg = os.path.join(petdir, 'single-frame')
        pvcdir = os.path.join(pvcdir, 'single-frame')
    else:
        log.error('unrecognised number of time frames!')
        raise TypeError('Unrecognised time frames!')
    # create now the folder
    nimpa.create_dir(petimg)
    # create folder
    nimpa.create_dir(petdir)
    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # MU-MAPS
    # get the mu-maps, if given;  otherwise will use blank mu-maps.
    if tAffine:
        muod = obtain_image(mu_o, imtype='object mu-map')
    else:
        muod = obtain_image(mu_o, Cnt=Cnt, imtype='object mu-map')

    # hardware mu-map
    muhd = obtain_image(mu_h, Cnt, imtype='hardware mu-map')

    # choose the mode of reconstruction based on the provided (or not) mu-maps
    if recmod == -1:
        if muod['exists'] and muhd['exists']:
            recmod = 3
        elif muod['exists'] or muhd['exists']:
            recmod = 1
            log.warning('partial mu-map:  scatter correction is switched off.')
        else:
            recmod = 0
            log.warning(
                'no mu-map provided: scatter and attenuation corrections are switched off.'
            )
    # -------------------------------------------------------------------------

    #import pdb; pdb.set_trace()

    # output dictionary
    output = {}
    output['recmod'] = recmod
    output['frames'] = t_frms
    output['#frames'] = nfrm

    # if an affine transformation is given, the baseline mu-map has to be provided as a NIfTI file or a dictionary
    if not tAffine:
        log.debug('using the provided mu-map the same way for all frames.')
    else:
        if len(tAffine) != nfrm:
            log.error(
                'the number of affine transformations in the list has to be the same as the number of dynamic frames!'
            )
            raise IndexError('Inconsistent number of frames.')
        elif not isinstance(tAffine, list):
            log.error(
                'tAffine has to be a list of either 4x4 numpy arrays of affine transformations or a list of file path strings!'
            )
            raise IndexError('Expecting a list.')
        elif not 'fim' in muod:
            log.error(
                'when tAffine is given, the object mu-map has to be provided either as a dictionary or NIfTI file!'
            )
            raise NameError('No path to object mu-map.')

        # check if all are file path strings to the existing files
        if all([isinstance(t, str) for t in tAffine]):
            if all([os.path.isfile(t) for t in tAffine]):
                # the internal list of affine transformations
                faff_frms = tAffine
                log.debug(
                    'using provided paths to affine transformations for each dynamic frame.'
                )
            else:
                log.error('not all provided paths are valid!')
                raise IOError('Wrong paths.')
        # check if all are numpy arrays
        elif all([isinstance(t, (np.ndarray, np.generic)) for t in tAffine]):
            # create the folder for dynamic affine transformations
            nimpa.create_dir(petaff)
            faff_frms = []
            for i in range(nfrm):
                fout = os.path.join(petaff, 'affine_frame(' + str(i) + ').txt')
                np.savetxt(fout, tAffine[i], fmt='%3.9f')
                faff_frms.append(fout)
            log.debug(
                'using the provided numpy array affine transformations for each dynamic frame.'
            )
        else:
            raise ValueError(
                'Affine transformations for each dynamic frame could not be established.'
            )

        # -------------------------------------------------------------------------------------
        # get ref image for mu-map resampling
        # -------------------------------------------------------------------------------------
        if 'fmuref' in muod:
            fmuref = muod['fmuref']
            log.debug(
                'reusing the reference mu-map from the object mu-map dictionary.'
            )
        else:
            # create the folder if it doesn't exist
            nimpa.create_dir(fmudir)
            # ref file name
            fmuref = os.path.join(fmudir, 'muref.nii.gz')
            # ref affine
            B = image_affine(datain, Cnt, gantry_offset=False)
            # ref image (blank)
            im = np.zeros((Cnt['SO_IMZ'], Cnt['SO_IMY'], Cnt['SO_IMX']),
                          dtype=np.float32)
            # store ref image
            nimpa.array2nii(im, B, fmuref)
            log.debug('generated a reference mu-map in ' + fmuref)
        # -------------------------------------------------------------------------------------

        output['fmuref'] = fmuref
        output['faffine'] = faff_frms

    # output list of intermediate file names for mu-maps and PET images (useful for dynamic imaging)
    if tAffine: output['fmureg'] = []
    if store_img_intrmd: output['fpeti'] = []

    # dynamic images in one numpy array
    dynim = np.zeros((nfrm, Cnt['SO_IMZ'], Cnt['SO_IMY'], Cnt['SO_IMY']),
                     dtype=np.float32)
    #if asked, output only scatter+randoms sinogram for each frame
    if ret_sinos and itr > 1 and recmod > 2:
        dynmsk = np.zeros((nfrm, Cnt['NSN11'], Cnt['NSANGLES'], Cnt['NSBINS']),
                          dtype=np.float32)
        dynrsn = np.zeros((nfrm, Cnt['NSN11'], Cnt['NSANGLES'], Cnt['NSBINS']),
                          dtype=np.float32)
        dynssn = np.zeros((nfrm, Cnt['NSN11'], Cnt['NSANGLES'], Cnt['NSBINS']),
                          dtype=np.float32)
        dynpsn = np.zeros((nfrm, Cnt['NSN11'], Cnt['NSANGLES'], Cnt['NSBINS']),
                          dtype=np.float32)

    # import pdb; pdb.set_trace()

    # starting frame index with reasonable prompt data
    ifrmP = 0
    # iterate over frame index
    for ifrm in range(nfrm):
        # start time of a current (ifrm-th) dynamic frame
        t0 = int(t_frms[ifrm][0])
        # end time of a current (ifrm-th) dynamic frame
        t1 = int(t_frms[ifrm][1])
        # --------------
        # check if there is enough prompt data to do a reconstruction
        # --------------
        log.info('dynamic frame times t0, t1:%r, %r' % (t0, t1))
        if not histo:
            hst = mmrhist(datain, scanner_params, t0=t0, t1=t1)
        else:
            hst = histo
            log.info('using provided histogram')
        if np.sum(hst['dhc']) > 0.99 * np.sum(hst['phc']):
            log.warning(
                'random events constitute most of the prompt events => omitting reconstruction'
            )
            ifrmP = ifrm + 1
            continue
        # --------------------
        # transform the mu-map if given the affine transformation for each frame
        if tAffine:
            # create the folder for aligned (registered for motion compensation) mu-maps
            nimpa.create_dir(fmureg)
            # the converted nii image resampled to the reference size
            fmu = os.path.join(
                fmureg, 'mumap_dyn_frm' + str(ifrm) + fcomment + '.nii.gz')
            # command for resampling
            if os.path.isfile(Cnt['RESPATH']):
                cmd = [
                    Cnt['RESPATH'], '-ref', fmuref, '-flo', muod['fim'],
                    '-trans', faff_frms[ifrm], '-res', fmu, '-pad', '0'
                ]
                if log.getEffectiveLevel() > logging.DEBUG:
                    cmd.append('-voff')
                call(cmd)
            else:
                log.error(
                    'path to the executable for resampling is incorrect!')
                raise IOError('Incorrect NiftyReg (resampling) executable.')
            # get the new mu-map from the just resampled file
            muodct = nimpa.getnii(fmu, output='all')
            muo = muodct['im']
            A = muodct['affine']
            muo[muo < 0] = 0
            output['fmureg'].append(fmu)
        else:
            muo = muod['im']
        #---------------------

        # output image file name
        if nfrm > 1:
            frmno = '_frm' + str(ifrm)
        else:
            frmno = ''

        # run OSEM reconstruction of a single time frame
        recimg = mmrrec.osemone(datain, [muhd['im'], muo],
                                hst,
                                scanner_params,
                                recmod=recmod,
                                itr=itr,
                                fwhm=fwhm,
                                outpath=petimg,
                                frmno=frmno,
                                fcomment=fcomment + '_i',
                                store_img=store_img_intrmd,
                                store_itr=store_itr,
                                ret_sinos=ret_sinos)
        # form dynamic numpy array
        dynim[ifrm, :, :, :] = recimg.im
        if ret_sinos and itr > 1 and recmod > 2:
            dynpsn[ifrm, :, :, :] = hst['psino']
            dynssn[ifrm, :, :, :] = recimg.ssn
            dynrsn[ifrm, :, :, :] = recimg.rsn
            dynmsk[ifrm, :, :, :] = recimg.amsk

        if store_img_intrmd: output['fpeti'].append(recimg.fpet)
        if nfrm == 1: output['tuple'] = recimg

    output['im'] = np.squeeze(dynim)
    if ret_sinos and itr > 1 and recmod > 2:
        output['sinos'] = {
            'psino': dynpsn,
            'ssino': dynssn,
            'rsino': dynrsn,
            'amask': dynmsk
        }

    # ----------------------------------------------------------------------
    # trim the PET image
    # images have to be stored for PVC
    if pvcroi: store_img_intrmd = True
    if trim:
        # create file name
        if 'lm_dcm' in datain:
            fnm = os.path.basename(datain['lm_dcm'])[:20]
        elif 'lm_ima' in datain:
            fnm = os.path.basename(datain['lm_ima'])[:20]
        # trim PET and upsample
        petu = nimpa.trimim(dynim,
                            affine=image_affine(datain, Cnt),
                            scale=trim_scale,
                            int_order=trim_interp,
                            outpath=petimg,
                            fname=fnm,
                            fcomment=fcomment,
                            store_img_intrmd=store_img_intrmd,
                            memlim=trim_memlim,
                            verbose=log.getEffectiveLevel() < logging.INFO)

        output.update({
            'trimmed': {
                'im': petu['im'],
                'fpet': petu['fimi'],
                'affine': petu['affine']
            }
        })
    # ----------------------------------------------------------------------

    # ----------------------------------------------------------------------
    #run PVC if requested and required input given
    if pvcroi:
        if not os.path.isfile(datain['T1lbl']):
            log.error(
                'no label image from T1 parcellations and/or ROI definitions!')
            raise RuntimeError('No ROIs')
        else:
            # get the PSF kernel for PVC
            if not psfkernel:
                psfkernel = nimpa.psf_measured(scanner='mmr', scale=trim_scale)
            else:
                if isinstance(
                        psfkernel,
                    (np.ndarray, np.generic)) and psfkernel.shape != (3, 17):
                    log.error(
                        'the PSF kernel has to be a numpy array with the shape of (3, 17)!'
                    )
                    raise IndexError('PSF: wrong shape or not a matrix')

        #> file names for NIfTI images of PVC ROIs and PVC corrected PET
        froi = []
        fpvc = []

        #> perform PVC for each time frame
        dynpvc = np.zeros(petu['im'].shape, dtype=np.float32)
        for i in range(ifrmP, nfrm):
            # transform the parcellations (ROIs) if given the affine transformation for each frame
            if not tAffine:
                log.warning(
                    'affine transformations are not provided: will generate them for the time frame.'
                )
                faffpvc = ''
                #raise StandardError('No affine transformation')
            else:
                faffpvc = faff_frms[i]
            # choose the file name of individual PVC images
            if nfrm > 1:
                fcomment_pvc = '_frm' + str(i) + fcomment
            else:
                fcomment_pvc = fcomment
            #============================
            # perform PVC
            petpvc_dic = nimpa.pvc_iyang(petu['fimi'][i],
                                         datain,
                                         Cnt,
                                         pvcroi,
                                         psfkernel,
                                         tool=pvcreg_tool,
                                         itr=pvcitr,
                                         faff=faffpvc,
                                         fcomment=fcomment_pvc,
                                         outpath=pvcdir,
                                         store_rois=store_rois,
                                         store_img=store_img_intrmd)
            #============================
            if nfrm > 1:
                dynpvc[i, :, :, :] = petpvc_dic['im']
            else:
                dynpvc = petpvc_dic['im']

            fpvc.append(petpvc_dic['fpet'])

            if store_rois: froi.append(petpvc_dic['froi'])

        #> update output dictionary
        output.update({'impvc': dynpvc})
        if store_img_intrmd: output.update({'fpvc': fpvc})
        if store_rois: output.update({'froi': froi})
    # ----------------------------------------------------------------------

    if store_img:
        # description for saving NIFTI image
        # attenuation number: if only bed present then it is 0.5
        attnum = (1 * muhd['exists'] + 1 * muod['exists']) / 2.
        descrip =    'alg=osem'                     \
                    +';att='+str(attnum*(recmod>0)) \
                    +';sct='+str(1*(recmod>1))      \
                    +';spn='+str(Cnt['SPN'])        \
                    +';sub=14'                      \
                    +';itr='+str(itr)               \
                    +';fwhm='+str(fwhm)             \
                    +';nfrm='+str(nfrm)

        # squeeze the unneeded dimensions
        dynim = np.squeeze(dynim)

        # NIfTI file name for the full PET image (single or multiple frame)

        # save the image to NIfTI file
        if nfrm == 1:
            t0 = hst['t0']
            t1 = hst['t1']
            if t1 == t0:
                t0 = 0
                t1 = hst['dur']
            fpet = os.path.join(
                    petimg,
                    os.path.basename(recimg.fpet)[:8] \
                    +'_t-'+str(t0)+'-'+str(t1)+'sec' \
                    +'_itr-'+str(itr) )
            fpeto = fpet + fcomment + '.nii.gz'
            nimpa.prc.array2nii(dynim[::-1, ::-1, :],
                                recimg.affine,
                                fpeto,
                                descrip=descrip)
        else:
            fpet = os.path.join(
                    petimg,
                    os.path.basename(recimg.fpet)[:8]\
                    +'_nfrm-'+str(nfrm)+'_itr-'+str(itr)
                )
            fpeto = fpet + fcomment + '.nii.gz'
            nimpa.prc.array2nii(dynim[:, ::-1, ::-1, :],
                                recimg.affine,
                                fpeto,
                                descrip=descrip)

        # get output file names for trimmed/PVC images
        if trim:
            # folder for trimmed and dynamic
            pettrim = os.path.join(petimg, 'trimmed')
            # make folder
            nimpa.create_dir(pettrim)
            # trimming scale added to the NIfTI description
            descrip_trim = descrip + ';trim_scale=' + str(trim_scale)
            # file name for saving the trimmed image
            fpetu = os.path.join(
                pettrim,
                os.path.basename(fpet) + '_trimmed-upsampled-scale-' +
                str(trim_scale))
            # in case of PVC
            if pvcroi:
                # iterative Yang (iY) added to the NIfTI description
                descrip_pvc = descrip_trim + ';pvc=iY'
                # file name for saving the PVC NIfTI image
                fpvc = fpetu + '_PVC' + fcomment + '.nii.gz'
                output['trimmed']['fpvc'] = fpvc

            # update the trimmed image file name
            fpetu += fcomment + '.nii.gz'
            # store the file name in the output dictionary
            output['trimmed']['fpet'] = fpetu

        output['fpet'] = fpeto

        # save images
        if nfrm == 1:
            if trim:
                nimpa.prc.array2nii(petu['im'][::-1, ::-1, :],
                                    petu['affine'],
                                    fpetu,
                                    descrip=descrip_trim)
            if pvcroi:
                nimpa.prc.array2nii(dynpvc[::-1, ::-1, :],
                                    petu['affine'],
                                    fpvc,
                                    descrip=descrip_pvc)
        elif nfrm > 1:
            if trim:
                nimpa.prc.array2nii(petu['im'][:, ::-1, ::-1, :],
                                    petu['affine'],
                                    fpetu,
                                    descrip=descrip_trim)
            if pvcroi:
                nimpa.prc.array2nii(dynpvc[:, ::-1, ::-1, :],
                                    petu['affine'],
                                    fpvc,
                                    descrip=descrip_pvc)

    if del_img_intrmd:
        if pvcroi:
            for fi in fpvc:
                os.remove(fi)
        if trim:
            for fi in petu['fimi']:
                os.remove(fi)

    return output
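
A minimal static (single-frame) call sketch; `datain` and `mMRpars` are assumed to come from the usual nipet helpers, and `muhd`/`muod` are the hardware and object mu-map dictionaries obtained beforehand (all names are illustrative):

recon = mmrchain(
    datain, mMRpars,
    frames=['timings', [0, 600]],   # one 10-minute frame
    mu_h=muhd, mu_o=muod,
    itr=4, fwhm=0.,
    outpath='/path/to/output',
    store_img=True)
im = recon['im']                    # reconstructed image as a numpy array
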
Example #5
def mmrchain(
    datain,                 # all input data in a dictionary
    scanner_params,         # all scanner parameters in one dictionary
                            # containing constants, transaxial and axial
                            # LUTs.
    outpath=None,           # output path for results
    fout=None,              # full file name (any folders and extensions are disregarded)
    frames=None,            # definition of time frames, default: ['fluid', [0, 0]]
    mu_h=None,              # hardware mu-map.
    mu_o=None,              # object mu-map.
    tAffine=None,           # affine transformations for the mu-map for
                            # each time frame separately.
    itr=4,                  # number of OSEM iterations
    fwhm=0.,                # Gaussian Post-Smoothing FWHM
    psf=None,               # Resolution Modelling
    recmod=-1,              # reconstruction mode: -1: undefined, chosen
                            # automatically. 3: attenuation and scatter
                            # correction, 1: attenuation correction
                            # only, 0: no correction (randoms only).
    histo=None,             # input histogram (from list-mode data);
                            # if not given, it will be performed.
    decay_ref_time=None,    # decay corrects relative to the reference
                            # time provided; otherwise corrects to the scan
                            # start time.
    trim=False,
    trim_scale=2,
    trim_interp=0,          # interpolation for upsampling used in PVC
    trim_memlim=True,       # reduced use of memory for machines
                            # with limited memory (slow though)
    pvcroi=None,            # ROI used for PVC.  If undefined no PVC
                            # is performed.
    pvcreg_tool='niftyreg', # the registration tool used in PVC
    store_rois=False,       # stores the image of PVC ROIs
                            # as defined in pvcroi.
    pvcpsf=None,
    pvcitr=5,
    fcomment='',            # text comment used in the file name of
                            # generated image files
    ret_sinos=False,        # return prompt, scatter and randoms
                            # sinograms for each reconstruction
    ret_histo=False,        # return histogram (LM processing output) for
                            # each image frame
    store_img=True,
    store_img_intrmd=False,
    store_itr=None,         # store any reconstruction iteration in
                            # the list.  ignored if the list is empty.
    del_img_intrmd=False,
):
    if frames is None:
        frames = ['fluid', [0, 0]]
    if mu_h is None:
        mu_h = []
    if mu_o is None:
        mu_o = []
    if pvcroi is None:
        pvcroi = []
    if pvcpsf is None:
        pvcpsf = []
    if store_itr is None:
        store_itr = []

    # decompose all the scanner parameters and constants
    Cnt = scanner_params['Cnt']

    # -------------------------------------------------------------------------
    # HISTOGRAM PRECEDES FRAMES
    if histo is not None and 'psino' in histo:
        frames = ['fluid', [histo['t0'], histo['t1']]]
    elif histo is not None:
        histo = None
        log.warning(
            'the given histogram does not contain a prompt sinogram--will generate a histogram.')

    # FRAMES
    # check for the provided dynamic frames
    if isinstance(frames, list):
        # Can be given in three ways:
        # * a 1D list (duration of each frame is listed)
        # * a more concise 2D list--repetition and duration lists in
        #   each entry.  Must start with the 'def' entry.
        # * a 2D list with fluid timings: must start with the string
        #   'fluid' or 'timings'.  a 2D list with consecutive lists
        #   describing start and end of the time frame, [t0, t1];
        #   The number of time frames for this option is unlimited,
        #   provided the t0 and t1 are within the acquisition times.

        # 2D starting with entry 'fluid' or 'timings'
        if (isinstance(frames[0], str) and frames[0] in ('fluid', 'timings')
                and all(isinstance(t, list) and len(t) == 2 for t in frames[1:])):
            t_frms = frames[1:]
        # if 2D definitions, starting with entry 'def':
        elif (isinstance(frames[0], str) and frames[0] == 'def'
              and all(isinstance(t, list) and len(t) == 2 for t in frames[1:])):
            # get total time and list of all time frames
            dfrms = dynamic_timings(frames)
            t_frms = dfrms[1:]

        # if 1D:
        elif all(isinstance(t, Integral) for t in frames):
            # get total time and list of all time frames
            dfrms = dynamic_timings(frames)
            t_frms = dfrms[1:]

        else:
            log.error('osemdyn: frames definitions are not given in the correct list format: '
                      '1D [15,15,30,30,...] or 2D list [[2,15], [2,30], ...]')
    else:
        log.error(
            'provided dynamic frames definitions are incorrect (should be a list of definitions).')
        raise TypeError('Wrong data type for dynamic frames')
    # number of dynamic time frames
    nfrm = len(t_frms)
    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # create folders for results
    if outpath is None:
        petdir = os.path.join(datain['corepath'], 'reconstructed')
        fmudir = os.path.join(datain['corepath'], 'mumap-obj')
        pvcdir = os.path.join(datain['corepath'], 'PRCL')
    else:
        petdir = os.path.join(outpath, 'PET')
        fmudir = os.path.join(outpath, 'mumap-obj')
        pvcdir = os.path.join(outpath, 'PRCL')

    if fout is not None:
        # > get rid of folders
        fout = os.path.basename(fout)
        # > get rid of extension
        fout = fout.split('.')[0]

    # folder for co-registered mu-maps (for motion compensation)
    fmureg = os.path.join(fmudir, 'registered')
    # folder for affine transformation MR/CT->PET
    petaff = os.path.join(petdir, 'faffine')

    # folder for reconstructed images (dynamic or static depending on number of frames).
    if nfrm > 1:
        petimg = os.path.join(petdir, 'multiple-frames')
        pvcdir = os.path.join(pvcdir, 'multiple-frames')
    elif nfrm == 1:
        petimg = os.path.join(petdir, 'single-frame')
        pvcdir = os.path.join(pvcdir, 'single-frame')
    else:
        raise TypeError('Unrecognised/confusing time frames!')
    # create now the folder
    nimpa.create_dir(petimg)
    # create folder
    nimpa.create_dir(petdir)
    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # MU-MAPS
    # get the mu-maps, if given;  otherwise will use blank mu-maps.
    if tAffine is not None:
        muod = obtain_image(mu_o, imtype='object mu-map')
    else:
        muod = obtain_image(mu_o, Cnt=Cnt, imtype='object mu-map')

    # hardware mu-map
    muhd = obtain_image(mu_h, Cnt, imtype='hardware mu-map')

    # choose the mode of reconstruction based on the provided (or not) mu-maps
    if muod['exists'] and muhd['exists'] and recmod == -1:
        recmod = 3
    elif (muod['exists'] or muhd['exists']) and recmod == -1:
        recmod = 1
        log.warning('partial mu-map:  scatter correction is switched off.')
    else:
        if recmod == -1:
            recmod = 0
            log.warning(
                'no mu-map provided: scatter and attenuation corrections are switched off.')
    # -------------------------------------------------------------------------

    # import pdb; pdb.set_trace()

    # output dictionary
    output = {}
    output['recmod'] = recmod
    output['frames'] = t_frms
    output['#frames'] = nfrm

    # if affine transformation is given
    # the baseline mu-map in NIfTI file or dictionary has to be given
    if tAffine is None:
        log.info('using the provided mu-map the same way for all frames.')
    else:
        if len(tAffine) != nfrm:
            raise ValueError("the number of affine transformations in the list"
                             " has to be the same as the number of dynamic frames")
        elif not isinstance(tAffine, list):
            raise ValueError("tAffine has to be a list of either 4x4 numpy arrays"
                             " of affine transformations or a list of file path strings")
        elif 'fim' not in muod:
            raise NameError("when tAffine is given, the object mu-map has to be"
                            " provided either as a dictionary or NIfTI file")

        # check if all are file path strings to the existing files
        if all(isinstance(t, str) for t in tAffine):
            if all(os.path.isfile(t) for t in tAffine):
                # the internal list of affine transformations
                faff_frms = tAffine
                log.info('using provided paths to affine transformations for each dynamic frame.')
            else:
                raise IOError('not all provided paths are valid!')
        # check if all are numpy arrays
        elif all(isinstance(t, (np.ndarray, np.generic)) for t in tAffine):
            # create the folder for dynamic affine transformations
            nimpa.create_dir(petaff)
            faff_frms = []
            for i in range(nfrm):
                fout_ = os.path.join(petaff, 'affine_frame(' + str(i) + ').txt')
                np.savetxt(fout_, tAffine[i], fmt='%3.9f')
                faff_frms.append(fout_)
            log.info('using the provided numpy array affine transformations for each dynamic frame.')
        else:
            raise ValueError(
                'Affine transformations for each dynamic frame could not be established.')

        # -------------------------------------------------------------------------------------
        # get ref image for mu-map resampling
        # -------------------------------------------------------------------------------------
        if 'fmuref' in muod:
            fmuref = muod['fmuref']
            log.info('reusing the reference mu-map from the object mu-map dictionary.')
        else:
            # create the folder if it doesn't exist
            nimpa.create_dir(fmudir)
            # ref file name
            fmuref = os.path.join(fmudir, 'muref.nii.gz')
            # ref affine
            B = image_affine(datain, Cnt, gantry_offset=False)
            # ref image (blank)
            im = np.zeros((Cnt['SO_IMZ'], Cnt['SO_IMY'], Cnt['SO_IMX']), dtype=np.float32)
            # store ref image
            nimpa.array2nii(im, B, fmuref)
            log.info('generated a reference mu-map in:\n{}'.format(fmuref))
        # -------------------------------------------------------------------------------------

        output['fmuref'] = fmuref
        output['faffine'] = faff_frms

    # output list of intermediate file names for mu-maps and PET images
    # (useful for dynamic imaging)
    if tAffine is not None: output['fmureg'] = []

    if store_img_intrmd:
        output['fpeti'] = []
        if fwhm > 0:
            output['fsmoi'] = []

    # > number of 3D sinograms
    if Cnt['SPN'] == 1:
        snno = Cnt['NSN1']
    elif Cnt['SPN'] == 11:
        snno = Cnt['NSN11']
    else:
        raise ValueError('unrecognised span: {}'.format(Cnt['SPN']))

    # dynamic images in one numpy array
    dynim = np.zeros((nfrm, Cnt['SO_IMZ'], Cnt['SO_IMY'], Cnt['SO_IMY']), dtype=np.float32)
    # if asked, output only scatter+randoms sinogram for each frame
    if ret_sinos and itr > 1 and recmod > 2:
        dynmsk = np.zeros((nfrm, Cnt['NSEG0'], Cnt['NSANGLES'], Cnt['NSBINS']), dtype=np.float32)
        dynrsn = np.zeros((nfrm, snno, Cnt['NSANGLES'], Cnt['NSBINS']), dtype=np.float32)
        dynssn = np.zeros((nfrm, snno, Cnt['NSANGLES'], Cnt['NSBINS']), dtype=np.float32)
        dynpsn = np.zeros((nfrm, snno, Cnt['NSANGLES'], Cnt['NSBINS']), dtype=np.float32)

    # > returning dictionary of histograms if requested
    if ret_histo:
        hsts = {}

    # import pdb; pdb.set_trace()

    # starting frame index with reasonable prompt data
    ifrmP = 0
    # iterate over frame index
    for ifrm in range(nfrm):
        # start time of a current (ifrm-th) dynamic frame
        t0 = int(t_frms[ifrm][0])
        # end time of a current (ifrm-th) dynamic frame
        t1 = int(t_frms[ifrm][1])
        # --------------
        # check if there is enough prompt data to do a reconstruction
        # --------------
        log.info('dynamic frame times t0={}, t1={}:'.format(t0, t1))
        if histo is None:
            hst = mmrhist(datain, scanner_params, t0=t0, t1=t1)
        else:
            hst = histo
            log.info(
                dedent('''\
                ------------------------------------------------------
                using provided histogram
                ------------------------------------------------------'''))

        if ret_histo:
            hsts[str(t0) + '-' + str(t1)] = hst

        if np.sum(hst['dhc']) > 0.99 * np.sum(hst['phc']):
            log.warning(
                dedent('''\
                ===========================================================================
                amount of randoms is the greater part of prompts => omitting reconstruction
                ==========================================================================='''))
            ifrmP = ifrm + 1
            continue
        # --------------------
        # transform the mu-map if given the affine transformation for each frame
        if tAffine is not None:
            # create the folder for aligned (registered for motion compensation) mu-maps
            nimpa.create_dir(fmureg)
            # the converted nii image resampled to the reference size
            fmu = os.path.join(fmureg, 'mumap_dyn_frm' + str(ifrm) + fcomment + '.nii.gz')
            # command for resampling
            if os.path.isfile(Cnt['RESPATH']):
                cmd = [
                    Cnt['RESPATH'], '-ref', fmuref, '-flo', muod['fim'], '-trans', faff_frms[ifrm],
                    '-res', fmu, '-pad', '0']
                if log.getEffectiveLevel() > logging.INFO:
                    cmd.append('-voff')
                call(cmd)
            else:
                raise IOError('Incorrect path to NiftyReg (resampling) executable.')
            # get the new mu-map from the just resampled file
            muodct = nimpa.getnii(fmu, output='all')
            muo = muodct['im']
            muo[muo < 0] = 0
            output['fmureg'].append(fmu)
        else:
            muo = muod['im']
        # ---------------------

        # output image file name
        if nfrm > 1:
            frmno = '_frm' + str(ifrm)
        else:
            frmno = ''

        # run OSEM reconstruction of a single time frame
        recimg = mmrrec.osemone(datain, [muhd['im'], muo], hst, scanner_params,
                                decay_ref_time=decay_ref_time, recmod=recmod, itr=itr, fwhm=fwhm,
                                psf=psf, outpath=petimg, frmno=frmno, fcomment=fcomment + '_i',
                                store_img=store_img_intrmd, store_itr=store_itr, fout=fout,
                                ret_sinos=ret_sinos)

        # form dynamic Numpy array
        if fwhm > 0:
            dynim[ifrm, :, :, :] = recimg.imsmo
        else:
            dynim[ifrm, :, :, :] = recimg.im

        if ret_sinos and itr > 1 and recmod > 2:
            dynpsn[ifrm, :, :, :] = np.squeeze(hst['psino'])
            dynssn[ifrm, :, :, :] = np.squeeze(recimg.ssn)
            dynrsn[ifrm, :, :, :] = np.squeeze(recimg.rsn)
            dynmsk[ifrm, :, :, :] = np.squeeze(recimg.amsk)

        if store_img_intrmd:
            output['fpeti'].append(recimg.fpet)
            if fwhm > 0:
                output['fsmoi'].append(recimg.fsmo)

        if nfrm == 1: output['tuple'] = recimg

    output['im'] = np.squeeze(dynim)

    if ret_sinos and itr > 1 and recmod > 2:
        output['sinos'] = {
            'psino': np.squeeze(dynpsn), 'ssino': np.squeeze(dynssn), 'rsino': np.squeeze(dynrsn),
            'amask': np.squeeze(dynmsk)}

    if ret_histo:
        output['hst'] = hsts

    # ----------------------------------------------------------------------
    # trim the PET image
    # images have to be stored for PVC
    if pvcroi: store_img_intrmd = True
    if trim:
        # create file name
        if 'lm_dcm' in datain:
            fnm = os.path.basename(datain['lm_dcm'])[:20]
        elif 'lm_ima' in datain:
            fnm = os.path.basename(datain['lm_ima'])[:20]
        # trim PET and upsample
        petu = nimpa.imtrimup(dynim, affine=image_affine(datain, Cnt), scale=trim_scale,
                              int_order=trim_interp, outpath=petimg, fname=fnm, fcomment=fcomment,
                              store_img_intrmd=store_img_intrmd, memlim=trim_memlim,
                              verbose=log.getEffectiveLevel())

        output.update({
            'trimmed': {'im': petu['im'], 'fpet': petu['fimi'], 'affine': petu['affine']}})
    # ----------------------------------------------------------------------

    # ----------------------------------------------------------------------
    # run PVC if requested and required input given
    if pvcroi:
        if not os.path.isfile(datain['T1lbl']):
            raise Exception('No label image and/or ROI definitions found!')
        else:
            # get the PSF kernel for PVC
            if not pvcpsf:
                pvcpsf = nimpa.psf_measured(scanner='mmr', scale=trim_scale)
            else:
                if (
                    isinstance(pvcpsf, (np.ndarray, np.generic)) and
                    pvcpsf.shape != (3, 2 * Cnt['RSZ_PSF_KRNL'] + 1)
                ):  # yapf: disable
                    raise ValueError(
                        'the PSF kernel has to be an numpy array with the shape of ({},{})'.format(
                            3, 2 * Cnt['RSZ_PSF_KRNL'] + 1))

        # > file names for NIfTI images of PVC ROIs and PVC corrected PET
        froi = []
        fpvc = []

        # > perform PVC for each time frame
        dynpvc = np.zeros(petu['im'].shape, dtype=np.float32)
        for i in range(ifrmP, nfrm):
            # transform the parcellations (ROIs) if given the affine transformation for each frame
            if tAffine is None:
                log.warning(
                    'affine transformations are not provided: will generate them for the time frame.')
                faffpvc = None
                # raise StandardError('No affine transformation')
            else:
                faffpvc = faff_frms[i]

            # choose the file name of individual PVC images
            if nfrm > 1:
                fcomment_pvc = '_frm' + str(i) + fcomment
            else:
                fcomment_pvc = fcomment
            # ===========================
            # perform PVC
            petpvc_dic = nimpa.pvc_iyang(petu['fimi'][i], datain, Cnt, pvcroi, pvcpsf,
                                         tool=pvcreg_tool, itr=pvcitr, faff=faffpvc,
                                         fcomment=fcomment_pvc, outpath=pvcdir,
                                         store_rois=store_rois, store_img=store_img_intrmd)
            # ===========================
            if nfrm > 1:
                dynpvc[i, :, :, :] = petpvc_dic['im']
            else:
                dynpvc = petpvc_dic['im']
            fpvc.append(petpvc_dic['fpet'])

            if store_rois: froi.append(petpvc_dic['froi'])

        # > update output dictionary
        output.update({'impvc': dynpvc})
        output['fprc'] = petpvc_dic['fprc']
        output['imprc'] = petpvc_dic['imprc']

        if store_img_intrmd: output.update({'fpvc': fpvc})
        if store_rois: output.update({'froi': froi})
    # ----------------------------------------------------------------------

    if store_img:
        # description for saving NIFTI image
        # attenuation number: if only bed present then it is 0.5
        attnum = (1 * muhd['exists'] + 1 * muod['exists']) / 2.
        descrip = (f"alg=osem"
                   f";att={attnum*(recmod>0)}"
                   f";sct={1*(recmod>1)}"
                   f";spn={Cnt['SPN']}"
                   f";sub=14"
                   f";itr={itr}"
                   f";fwhm={fwhm}"
                   f";psf={psf}"
                   f";nfrm={nfrm}")

        # squeeze the unneeded dimensions
        dynim = np.squeeze(dynim)

        # NIfTI file name for the full PET image (single or multiple frame)

        # save the image to NIfTI file
        if nfrm == 1:
            t0 = hst['t0']
            t1 = hst['t1']
            if t1 == t0:
                t0 = 0
                t1 = hst['dur']
            # > --- file naming and saving ---
            if fout is None:
                fpet = os.path.join(
                    petimg,
                    os.path.basename(recimg.fpet)[:8] + f'_t-{t0}-{t1}sec_itr-{itr}')
                fpeto = f"{fpet}{fcomment}.nii.gz"
            else:
                fpeto = os.path.join(petimg, os.path.basename(fout) + '.nii.gz')

            nimpa.prc.array2nii(dynim[::-1, ::-1, :], recimg.affine, fpeto, descrip=descrip)
            # > --- ---
        else:
            if fout is None:
                fpet = os.path.join(petimg,
                                    os.path.basename(recimg.fpet)[:8] + f'_nfrm-{nfrm}_itr-{itr}')
                fpeto = f"{fpet}{fcomment}.nii.gz"
            else:
                fpeto = os.path.join(petimg, os.path.basename(fout) + f'_nfrm-{nfrm}.nii.gz')

            nimpa.prc.array2nii(dynim[:, ::-1, ::-1, :], recimg.affine, fpeto, descrip=descrip)

        output['fpet'] = fpeto

        # get output file names for trimmed/PVC images
        if trim:
            # folder for trimmed and dynamic
            pettrim = os.path.join(petimg, 'trimmed')
            # make folder
            nimpa.create_dir(pettrim)
            # trimming scale added to the NIfTI description
            descrip_trim = f'{descrip};trim_scale={trim_scale}'
            # file name for saving the trimmed image
            if fout is None:
                fpetu = os.path.join(
                    pettrim,
                    os.path.basename(fpet) + f'_trimmed-upsampled-scale-{trim_scale}')
            else:
                fpetu = os.path.join(
                    pettrim,
                    os.path.basename(fout) + f'_trimmed-upsampled-scale-{trim_scale}')
            # in case of PVC
            if pvcroi:
                # iterative Yang (iY) added to the NIfTI description
                descrip_pvc = f'{descrip_trim};pvc=iY'
                # file name for saving the PVC NIfTI image
                fpvc = f"{fpetu}_PVC{fcomment}.nii.gz"
                output['trimmed']['fpvc'] = fpvc

            # update the trimmed image file name
            fpetu += f'{fcomment}.nii.gz'
            # store the file name in the output dictionary
            output['trimmed']['fpet'] = fpetu

        # save images
        if nfrm == 1:
            if trim:
                nimpa.prc.array2nii(petu['im'][::-1, ::-1, :], petu['affine'], fpetu,
                                    descrip=descrip_trim)
            if pvcroi:
                nimpa.prc.array2nii(dynpvc[::-1, ::-1, :], petu['affine'], fpvc,
                                    descrip=descrip_pvc)
        elif nfrm > 1:
            if trim:
                nimpa.prc.array2nii(petu['im'][:, ::-1, ::-1, :], petu['affine'], fpetu,
                                    descrip=descrip_trim)
            if pvcroi:
                nimpa.prc.array2nii(dynpvc[:, ::-1, ::-1, :], petu['affine'], fpvc,
                                    descrip=descrip_pvc)

    if del_img_intrmd:
        if pvcroi:
            for fi in fpvc:
                os.remove(fi)
        if trim:
            for fi in petu['fimi']:
                os.remove(fi)

    return output
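
For this newer signature, a dynamic-reconstruction sketch (again purely illustrative; `datain`, `mMRpars`, `muhd` and `muod` as before):

recon = mmrchain(
    datain, mMRpars,
    frames=['def', [4, 30], [8, 60]],   # 4 x 30 s frames followed by 8 x 60 s frames
    mu_h=muhd, mu_o=muod,
    itr=4, fwhm=0., psf=None,
    ret_histo=True,                     # also return the per-frame histograms
    outpath='/path/to/output')
dynim = recon['im']                     # 4D array: one volume per time frame
hsts = recon['hst']                     # histograms keyed by 't0-t1' strings
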
Example #6
def osemone(datain,
            mumaps,
            hst,
            scanner_params,
            recmod=3,
            itr=4,
            fwhm=0.,
            psf=None,
            mask_radius=29.,
            decay_ref_time=None,
            attnsino=None,
            sctsino=None,
            randsino=None,
            normcomp=None,
            emmskS=False,
            frmno='',
            fcomment='',
            outpath=None,
            fout=None,
            store_img=False,
            store_itr=None,
            ret_sinos=False):
    '''
    OSEM image reconstruction with several modes
    (with/without scatter and/or attenuation correction)

    Args:
      psf: Reconstruction with PSF, passed to `psf_config`
    '''

    # > Get particular scanner parameters: Constants, transaxial and axial LUTs
    Cnt = scanner_params['Cnt']
    txLUT = scanner_params['txLUT']
    axLUT = scanner_params['axLUT']

    # ---------- sort out OUTPUT ------------
    # -output file name for the reconstructed image
    if outpath is None:
        opth = os.path.join(datain['corepath'], 'reconstructed')
    else:
        opth = outpath

    # > file output name (the path is ignored if given)
    if fout is not None:
        # > get rid of folders
        fout = os.path.basename(fout)
        # > get rid of extension
        fout = fout.split('.')[0]

    if store_img is True or store_itr is not None:
        mmraux.create_dir(opth)

    return_ssrb, return_mask = ret_sinos, ret_sinos

    # ----------

    log.info('reconstruction in mode: %d', recmod)

    # get object and hardware mu-maps
    muh, muo = mumaps

    # get the GPU version of the image dims
    mus = mmrimg.convert2dev(muo + muh, Cnt)

    # remove gaps from the prompt sino
    psng = mmraux.remgaps(hst['psino'], txLUT, Cnt)

    # ========================================================================
    # GET NORM
    # -------------------------------------------------------------------------
    if normcomp is None:
        ncmp, _ = mmrnorm.get_components(datain, Cnt)
    else:
        ncmp = normcomp
        log.warning('using user-defined normalisation components')
    nsng = mmrnorm.get_norm_sino(datain,
                                 scanner_params,
                                 hst,
                                 normcomp=ncmp,
                                 gpu_dim=True)
    # ========================================================================

    # ========================================================================
    # ATTENUATION FACTORS FOR COMBINED OBJECT AND BED MU-MAP
    # -------------------------------------------------------------------------
    # > combine attenuation and norm together depending on reconstruction mode
    if recmod == 0:
        asng = np.ones(psng.shape, dtype=np.float32)
    else:
        # > check if the attenuation sino is given as an array
        if isinstance(attnsino, np.ndarray) \
                and attnsino.shape==(Cnt['NSN11'], Cnt['NSANGLES'], Cnt['NSBINS']):
            asng = mmraux.remgaps(attnsino, txLUT, Cnt)
            log.info('using provided attenuation factor sinogram')
        elif isinstance(attnsino, np.ndarray) \
                and attnsino.shape==(Cnt['Naw'], Cnt['NSN11']):
            asng = attnsino
            log.info('using provided attenuation factor sinogram')
        else:
            asng = cu.zeros(psng.shape, dtype=np.float32)
            petprj.fprj(asng.cuvec,
                        cu.asarray(mus).cuvec, txLUT, axLUT,
                        np.array([-1], dtype=np.int32), Cnt, 1)
    # > combine attenuation and normalisation
    ansng = asng * nsng
    # ========================================================================

    # ========================================================================
    # Randoms
    # -------------------------------------------------------------------------
    if isinstance(randsino, np.ndarray) \
            and randsino.shape==(Cnt['NSN11'], Cnt['NSANGLES'], Cnt['NSBINS']):
        rsino = randsino
        rsng = mmraux.remgaps(randsino, txLUT, Cnt)
    else:
        rsino, snglmap = randoms(hst, scanner_params)
        rsng = mmraux.remgaps(rsino, txLUT, Cnt)
    # ========================================================================

    # ========================================================================
    # SCAT
    # -------------------------------------------------------------------------
    if recmod == 2:
        if sctsino is not None:
            ssng = mmraux.remgaps(sctsino, txLUT, Cnt)
        elif sctsino is None and os.path.isfile(datain['em_crr']):
            emd = nimpa.getnii(datain['em_crr'])
            ssn = vsm(
                datain,
                mumaps,
                emd['im'],
                scanner_params,
                histo=hst,
                rsino=rsino,
                prcnt_scl=0.1,
                emmsk=False,
            )
            ssng = mmraux.remgaps(ssn, txLUT, Cnt)
        else:
            raise ValueError(
                "No emission image available for scatter estimation! " +
                " Check if it's present or the path is correct.")
    else:
        ssng = np.zeros(rsng.shape, dtype=rsng.dtype)
    # ========================================================================

    log.info('------ OSEM (%d) -------', itr)
    # ------------------------------------
    Sn = 14  # number of subsets

    # -get one subset to get number of projection bins in a subset
    Sprj, s = get_subsets14(0, scanner_params)
    Nprj = len(Sprj)
    # -init subset array and sensitivity image for a given subset
    sinoTIdx = np.zeros((Sn, Nprj + 1), dtype=np.int32)
    # -init sensitivity images for each subset
    imgsens = np.zeros((Sn, Cnt['SZ_IMY'], Cnt['SZ_IMX'], Cnt['SZ_IMZ']),
                       dtype=np.float32)
    tmpsens = cu.zeros((Cnt['SZ_IMY'], Cnt['SZ_IMX'], Cnt['SZ_IMZ']),
                       dtype=np.float32)
    for n in range(Sn):
        # first number of projection for the given subset
        sinoTIdx[n, 0] = Nprj
        sinoTIdx[n, 1:], s = get_subsets14(n, scanner_params)
        # sensitivity image
        petprj.bprj(tmpsens.cuvec,
                    cu.asarray(ansng[sinoTIdx[n, 1:], :]).cuvec, txLUT, axLUT,
                    sinoTIdx[n, 1:], Cnt)
        imgsens[n] = tmpsens
    del tmpsens
    # -------------------------------------

    # -mask for reconstructed image.  anything outside it is set to zero
    msk = mmrimg.get_cylinder(
        Cnt, rad=mask_radius, xo=0, yo=0, unival=1, gpu_dim=True) > 0.9

    # -init image
    img = np.ones((Cnt['SZ_IMY'], Cnt['SZ_IMX'], Cnt['SZ_IMZ']),
                  dtype=np.float32)

    # -decay correction
    lmbd = np.log(2) / resources.riLUT[Cnt['ISOTOPE']]['thalf']
    if Cnt['DCYCRR'] and 't0' in hst and 'dur' in hst:
        # > decay correct to the reference time (e.g., injection time) if provided
        # > otherwise correct relative to the scan start time (using the time
        # > elapsed from the scan start to the start of the time frame)
        if decay_ref_time is not None:
            tref = decay_ref_time
        else:
            tref = hst['t0']

        dcycrr = np.exp(
            lmbd * tref) * lmbd * hst['dur'] / (1 - np.exp(-lmbd * hst['dur']))
        # apply quantitative correction to the image
        qf = ncmp['qf'] / resources.riLUT[Cnt['ISOTOPE']]['BF'] / float(
            hst['dur'])
        qf_loc = ncmp['qf_loc']

    elif not Cnt['DCYCRR'] and 't0' in hst and 'dur' in hst:
        dcycrr = 1.
        # apply quantitative correction to the image
        qf = ncmp['qf'] / resources.riLUT[Cnt['ISOTOPE']]['BF'] / float(
            hst['dur'])
        qf_loc = ncmp['qf_loc']

    else:
        dcycrr = 1.
        qf = 1.
        qf_loc = 1.

    # -affine matrix for the reconstructed images
    B = mmrimg.image_affine(datain, Cnt)

    # resolution modelling
    psfkernel = psf_config(psf, Cnt)

    # -time it
    stime = time.time()

    # import pdb; pdb.set_trace()

    # ========================================================================
    # OSEM RECONSTRUCTION
    # -------------------------------------------------------------------------
    with trange(itr,
                desc="OSEM",
                disable=log.getEffectiveLevel() > logging.INFO,
                leave=log.getEffectiveLevel() <= logging.INFO) as pbar:

        for k in pbar:

            petprj.osem(img, psng, rsng, ssng, nsng, asng, sinoTIdx, imgsens,
                        msk, psfkernel, txLUT, axLUT, Cnt)

            if np.nansum(img) < 0.1:
                log.warning(
                    'it seems there is not enough true data to render a reasonable image'
                )
                # img[:]=0
                itr = k
                break
            if recmod >= 3 and k < itr - 1 and itr > 1:
                sct_time = time.time()
                sct = vsm(datain,
                          mumaps,
                          mmrimg.convert2e7(img, Cnt),
                          scanner_params,
                          histo=hst,
                          rsino=rsino,
                          emmsk=emmskS,
                          return_ssrb=return_ssrb,
                          return_mask=return_mask)

                if isinstance(sct, dict):
                    ssn = sct['sino']
                else:
                    ssn = sct

                ssng = mmraux.remgaps(ssn, txLUT, Cnt)
                pbar.set_postfix(scatter="%.3gs" % (time.time() - sct_time))
            # save images during reconstruction if requested
            if store_itr and (k + 1) in store_itr:
                im = mmrimg.convert2e7(img * (dcycrr * qf * qf_loc), Cnt)

                if fout is None:
                    fpet = os.path.join(opth, (
                        os.path.basename(datain['lm_bf'])[:16].replace(
                            '.', '-') +
                        f"{frmno}_t{hst['t0']}-{hst['t1']}sec_itr{k+1}{fcomment}_inrecon.nii.gz"
                    ))
                else:
                    fpet = os.path.join(
                        opth, fout + f'_itr{k+1}{fcomment}_inrecon.nii.gz')

                nimpa.array2nii(im[::-1, ::-1, :], B, fpet)

    log.info('recon time: %.3g', time.time() - stime)
    # ========================================================================

    log.info('applying decay correction of: %r', dcycrr)
    log.info('applying quantification factor: %r to the whole image', qf)
    log.info('for the frame duration of: %r', hst['dur'])

    # additional factor for making it quantitative in absolute terms (derived from measurements)
    img *= dcycrr * qf * qf_loc

    # ---- save images -----
    # -first convert to standard mMR image size
    im = mmrimg.convert2e7(img, Cnt)

    # -description text to NIfTI
    # -attenuation number: if only bed present then it is 0.5
    attnum = (1 * (np.sum(muh) > 0.5) + 1 * (np.sum(muo) > 0.5)) / 2.
    descrip = (f"alg=osem"
               f";sub=14"
               f";att={attnum*(recmod>0)}"
               f";sct={1*(recmod>1)}"
               f";spn={Cnt['SPN']}"
               f";itr={itr}"
               f";fwhm=0"
               f";t0={hst['t0']}"
               f";t1={hst['t1']}"
               f";dur={hst['dur']}"
               f";qf={qf}")

    # > file name of the output reconstructed image
    # > (maybe used later even if not stored now)
    if fout is None:
        fpet = os.path.join(
            opth,
            (os.path.basename(datain['lm_bf']).split('.')[0] +
             f"{frmno}_t{hst['t0']}-{hst['t1']}sec_itr{itr}{fcomment}.nii.gz"))
    else:
        fpet = os.path.join(opth, fout + f'_itr{itr}{fcomment}.nii.gz')

    if store_img:
        log.info('saving image to: %s', fpet)
        nimpa.array2nii(im[::-1, ::-1, :], B, fpet, descrip=descrip)

    im_smo = None
    fsmo = None
    if fwhm > 0:
        im_smo = ndi.filters.gaussian_filter(im,
                                             fwhm2sig(fwhm,
                                                      voxsize=Cnt['SZ_VOXY'] *
                                                      10),
                                             mode='mirror')

        if store_img:
            fsmo = fpet.split('.nii.gz')[0] + '_smo-' + str(fwhm).replace(
                '.', '-') + 'mm.nii.gz'
            log.info('saving smoothed image to: ' + fsmo)
            descrip = descrip.replace(';fwhm=0', f';fwhm={fwhm}')
            nimpa.array2nii(im_smo[::-1, ::-1, :], B, fsmo, descrip=descrip)

    # returning:
    # (0) E7 image [can be smoothed];
    # (1) file name of saved E7 image
    # (2) [optional] scatter sino
    # (3) [optional] single slice rebinned scatter
    # (4) [optional] mask for scatter scaling based on attenuation data
    # (5) [optional] random sino
    # if ret_sinos and recmod>=3:
    #     recout = namedtuple('recout', 'im, fpet, ssn, sssr, amsk, rsn')
    #     recout.im   = im
    #     recout.fpet = fout
    #     recout.ssn  = ssn
    #     recout.sssr = sssr
    #     recout.amsk = amsk
    #     recout.rsn  = rsino
    # else:
    #     recout = namedtuple('recout', 'im, fpet')
    #     recout.im   = im
    #     recout.fpet = fout

    if ret_sinos and recmod >= 3 and itr > 1:
        RecOut = namedtuple(
            'RecOut', 'im, fpet, imsmo, fsmo, affine, ssn, sssr, amsk, rsn')
        recout = RecOut(im, fpet, im_smo, fsmo, B, ssn, sct['ssrb'],
                        sct['mask'], rsino)
    else:
        RecOut = namedtuple('RecOut', 'im, fpet, imsmo, fsmo, affine')
        recout = RecOut(im, fpet, im_smo, fsmo, B)

    return recout
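
The decay-correction branch above can also be checked in isolation. Below is a minimal, standalone sketch (not part of the library) that evaluates the same dcycrr expression for a hypothetical F-18 frame; the half-life value and the frame timing are illustrative assumptions.

import numpy as np

# illustrative assumptions for this sketch: F-18 half-life [s] and one time frame
thalf = 6586.2            # ~109.8 min (F-18)
tref, dur = 600., 300.    # frame starts 600 s after the reference time and lasts 300 s

lmbd = np.log(2) / thalf
# same expression as in the code above: decay back to the reference time,
# combined with the average-activity correction over the frame duration
dcycrr = np.exp(lmbd * tref) * lmbd * dur / (1 - np.exp(-lmbd * dur))
print(dcycrr)             # ~1.08 for these numbers
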
Example #7
def osemone(datain,
            mumaps,
            hst,
            scanner_params,
            recmod=3,
            itr=4,
            fwhm=0.,
            mask_radius=29.,
            sctsino=np.array([]),
            outpath='',
            store_img=False,
            frmno='',
            fcomment='',
            store_itr=[],
            emmskS=False,
            ret_sinos=False,
            attnsino=None,
            randsino=None,
            normcomp=None):

    #---------- sort out OUTPUT ------------
    #-output file name for the reconstructed image, initially assume n/a
    fout = 'n/a'
    if store_img or store_itr:
        if outpath == '':
            opth = os.path.join(datain['corepath'], 'reconstructed')
        else:
            opth = outpath
        mmraux.create_dir(opth)

    if ret_sinos:
        return_ssrb = True
        return_mask = True
    else:
        return_ssrb = False
        return_mask = False

    #----------

    # Get particular scanner parameters: Constants, transaxial and axial LUTs
    Cnt = scanner_params['Cnt']
    txLUT = scanner_params['txLUT']
    axLUT = scanner_params['axLUT']

    import time
    from niftypet import nipet
    # from niftypet.nipet.sct import mmrsct
    # from niftypet.nipet.prj import mmrhist

    if Cnt['VERBOSE']: print 'i> reconstruction in mode', recmod

    # get object and hardware mu-maps
    muh, muo = mumaps

    # get the GPU version of the image dims
    mus = mmrimg.convert2dev(muo + muh, Cnt)

    if Cnt['SPN'] == 1:
        snno = Cnt['NSN1']
    elif Cnt['SPN'] == 11:
        snno = Cnt['NSN11']

    # remove gaps from the prompt sino
    psng = mmraux.remgaps(hst['psino'], txLUT, Cnt)

    #=========================================================================
    # GET NORM
    #-------------------------------------------------------------------------
    if normcomp == None:
        ncmp, _ = mmrnorm.get_components(datain, Cnt)
    else:
        ncmp = normcomp
        print 'w> using user-defined normalisation components'
    nsng = mmrnorm.get_sinog(datain, hst, axLUT, txLUT, Cnt, normcomp=ncmp)
    #=========================================================================

    #=========================================================================
    # ATTENUATION FACTORS FOR COMBINED OBJECT AND BED MU-MAP
    #-------------------------------------------------------------------------
    #> combine attenuation and norm together depending on reconstruction mode
    if recmod == 0:
        asng = np.ones(psng.shape, dtype=np.float32)
    else:
        #> check if the attenuation sino is given as an array
        if isinstance(attnsino, np.ndarray) \
                and attnsino.shape==(Cnt['NSN11'], Cnt['NSANGLES'], Cnt['NSBINS']):
            asng = mmraux.remgaps(attnsino, txLUT, Cnt)
            print 'i> using provided attenuation factor sinogram'
        elif isinstance(attnsino, np.ndarray) \
                and attnsino.shape==(Cnt['Naw'], Cnt['NSN11']):
            asng = attnsino
            print 'i> using provided attenuation factor sinogram'
        else:
            asng = np.zeros(psng.shape, dtype=np.float32)
            petprj.fprj(asng, mus, txLUT, axLUT,
                        np.array([-1], dtype=np.int32), Cnt, 1)
    #> combine attenuation and normalisation
    ansng = asng * nsng
    #=========================================================================

    #=========================================================================
    # Randoms
    #-------------------------------------------------------------------------
    if isinstance(randsino, np.ndarray):
        rsino = randsino
        rsng = mmraux.remgaps(randsino, txLUT, Cnt)
    else:
        rsino, snglmap = nipet.randoms(hst, scanner_params)
        rsng = mmraux.remgaps(rsino, txLUT, Cnt)
    #=========================================================================

    #=========================================================================
    # SCAT
    #-------------------------------------------------------------------------
    if recmod == 2:
        if sctsino.size > 0:
            ssng = mmraux.remgaps(sctsino, txLUT, Cnt)
        elif sctsino.size == 0 and os.path.isfile(datain['em_crr']):
            emd = nimpa.getnii(datain['em_crr'])
            ssn = nipet.vsm(datain,
                            mumaps,
                            emd['im'],
                            hst,
                            rsino,
                            scanner_params,
                            prcnt_scl=0.1,
                            emmsk=False)
            ssng = mmraux.remgaps(ssn, txLUT, Cnt)
        else:
            print "e> no emission image available for scatter estimation! check if it's present or the path is correct."
            sys.exit()
    else:
        ssng = np.zeros(rsng.shape, dtype=rsng.dtype)
    #=========================================================================

    if Cnt['VERBOSE']:
        print '\n>------ OSEM (', itr, ') -------\n'
    #------------------------------------
    Sn = 14  # number of subsets
    #-get one subset to get number of projection bins in a subset
    Sprj, s = get_subsets14(0, scanner_params)
    Nprj = len(Sprj)
    #-init subset array and sensitivity image for a given subset
    sinoTIdx = np.zeros((Sn, Nprj + 1), dtype=np.int32)
    #-init sensitivity images for each subset
    imgsens = np.zeros((Sn, Cnt['SZ_IMY'], Cnt['SZ_IMX'], Cnt['SZ_IMZ']),
                       dtype=np.float32)
    for n in range(Sn):
        sinoTIdx[n, 0] = Nprj  #first number of projection for the given subset
        sinoTIdx[n, 1:], s = get_subsets14(n, scanner_params)
        # sensitivity image
        petprj.bprj(imgsens[n, :, :, :], ansng[sinoTIdx[n, 1:], :], txLUT,
                    axLUT, sinoTIdx[n, 1:], Cnt)
    #-------------------------------------

    #-mask for reconstructed image.  anything outside it is set to zero
    msk = mmrimg.get_cylinder(
        Cnt, rad=mask_radius, xo=0, yo=0, unival=1, gpu_dim=True) > 0.9

    #-init image
    img = np.ones((Cnt['SZ_IMY'], Cnt['SZ_IMX'], Cnt['SZ_IMZ']),
                  dtype=np.float32)

    #-decay correction
    lmbd = np.log(2) / resources.riLUT[Cnt['ISOTOPE']]['thalf']
    if Cnt['DCYCRR'] and 't0' in hst and 'dur' in hst:
        dcycrr = np.exp(lmbd * hst['t0']) * lmbd * hst['dur'] / (
            1 - np.exp(-lmbd * hst['dur']))
        # apply quantitative correction to the image
        qf = ncmp['qf'] / resources.riLUT[Cnt['ISOTOPE']]['BF'] / float(
            hst['dur'])
        qf_loc = ncmp['qf_loc']
    elif not Cnt['DCYCRR'] and 't0' in hst and 'dur' in hst:
        dcycrr = 1.
        # apply quantitative correction to the image
        qf = ncmp['qf'] / resources.riLUT[Cnt['ISOTOPE']]['BF'] / float(
            hst['dur'])
        qf_loc = ncmp['qf_loc']
    else:
        dcycrr = 1.
        qf = 1.
        qf_loc = 1.

    #-affine matrix for the reconstructed images
    B = mmrimg.image_affine(datain, Cnt)

    #-time it
    stime = time.time()

    # import pdb; pdb.set_trace()

    #=========================================================================
    # OSEM RECONSTRUCTION
    #-------------------------------------------------------------------------
    for k in trange(itr, disable=not Cnt['VERBOSE'], desc="OSEM"):
        petprj.osem(img, msk, psng, rsng, ssng, nsng, asng, imgsens, txLUT,
                    axLUT, sinoTIdx, Cnt)
        if np.nansum(img) < 0.1:
            print '---------------------------------------------------------------------'
            print 'w> it seems there is not enough true data to render reasonable image.'
            print '---------------------------------------------------------------------'
            #img[:]=0
            itr = k
            break
        if recmod >= 3 and (((k < itr - 1) and (itr > 1))):  # or (itr==1)
            sct_time = time.time()

            sct = nipet.vsm(datain,
                            mumaps,
                            mmrimg.convert2e7(img, Cnt),
                            hst,
                            rsino,
                            scanner_params,
                            emmsk=emmskS,
                            return_ssrb=return_ssrb,
                            return_mask=return_mask)

            if isinstance(sct, dict):
                ssn = sct['sino']
            else:
                ssn = sct

            ssng = mmraux.remgaps(ssn, txLUT, Cnt)

            if Cnt['VERBOSE']:
                print 'i> scatter time:', (time.time() - sct_time)

        # save images during reconstruction if requested
        if store_itr and k in store_itr:
            im = mmrimg.convert2e7(img * (dcycrr * qf * qf_loc), Cnt)
            fout =  os.path.join(opth, os.path.basename(datain['lm_bf'])[:8] \
                + frmno +'_t'+str(hst['t0'])+'-'+str(hst['t1'])+'sec' \
                +'_itr'+str(k)+fcomment+'_inrecon.nii.gz')
            nimpa.array2nii(im[::-1, ::-1, :], B, fout)

    if Cnt['VERBOSE']: print 'i> recon time:', (time.time() - stime)
    #=========================================================================

    if Cnt['VERBOSE']:
        print 'i> applying decay correction of', dcycrr
        print 'i> applying quantification factor', qf, 'to the whole image for the frame duration of :', hst[
            'dur']

    img *= dcycrr * qf * qf_loc  #additional factor for making it quantitative in absolute terms (derived from measurements)

    #---- save images -----
    #-first convert to standard mMR image size
    im = mmrimg.convert2e7(img, Cnt)

    #-description text to NIfTI
    #-attenuation number: if only bed present then it is 0.5
    attnum = (1 * (np.sum(muh) > 0.5) + 1 * (np.sum(muo) > 0.5)) / 2.
    descrip =   'alg=osem'+ \
                ';sub=14'+ \
                ';att='+str(attnum*(recmod>0))+ \
                ';sct='+str(1*(recmod>1))+ \
                ';spn='+str(Cnt['SPN'])+ \
                ';itr='+str(itr) +\
                ';fwhm='+str(fwhm) +\
                ';t0='+str(hst['t0']) +\
                ';t1='+str(hst['t1']) +\
                ';dur='+str(hst['dur']) +\
                ';qf='+str(qf)

    if fwhm > 0:
        im = ndi.filters.gaussian_filter(im,
                                         fwhm2sig(fwhm, Cnt),
                                         mode='mirror')
    if store_img:
        fout =  os.path.join(opth, os.path.basename(datain['lm_bf'])[:8] \
                + frmno +'_t'+str(hst['t0'])+'-'+str(hst['t1'])+'sec' \
                +'_itr'+str(itr)+fcomment+'.nii.gz')
        if Cnt['VERBOSE']: print 'i> saving image to: ', fout
        nimpa.array2nii(im[::-1, ::-1, :], B, fout, descrip=descrip)

    # returning:
    # (0) E7 image [can be smoothed];
    # (1) file name of saved E7 image
    # (2) [optional] scatter sino
    # (3) [optional] single slice rebinned scatter
    # (4) [optional] mask for scatter scaling based on attenuation data
    # (5) [optional] random sino
    # if ret_sinos and recmod>=3:
    #     recout = namedtuple('recout', 'im, fpet, ssn, sssr, amsk, rsn')
    #     recout.im   = im
    #     recout.fpet = fout
    #     recout.ssn  = ssn
    #     recout.sssr = sssr
    #     recout.amsk = amsk
    #     recout.rsn  = rsino
    # else:
    #     recout = namedtuple('recout', 'im, fpet')
    #     recout.im   = im
    #     recout.fpet = fout
    if ret_sinos and recmod >= 3 and itr > 1:
        RecOut = namedtuple('RecOut', 'im, fpet, affine, ssn, sssr, amsk, rsn')
        recout = RecOut(im, fout, B, ssn, sct['ssrb'], sct['mask'], rsino)
    else:
        RecOut = namedtuple('RecOut', 'im, fpet, affine')
        recout = RecOut(im, fout, B)

    return recout
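
Both versions of the OSEM loop above build the same sinoTIdx layout for petprj.osem: row n stores the subset size in column 0, followed by the transaxial projection indices of subset n. A minimal sketch of that layout, using a made-up number of projection angles and a simple interleaved split instead of get_subsets14, assuming 14 subsets:

import numpy as np

Sn, nangles = 14, 252                  # assumptions: 14 subsets over 252 made-up angles
subsets = [np.arange(n, nangles, Sn, dtype=np.int32) for n in range(Sn)]

Nprj = len(subsets[0])
sinoTIdx = np.zeros((Sn, Nprj + 1), dtype=np.int32)
for n in range(Sn):
    sinoTIdx[n, 0] = Nprj              # first entry: number of projections in the subset
    sinoTIdx[n, 1:] = subsets[n]       # remaining entries: projection indices
print(sinoTIdx.shape)                  # (14, 19) for these assumptions
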
Example #8
def pct_mumap(datain, scanner_params, hst=None, t0=0, t1=0, itr=2, petopt='ac', faff='', fpet='',
              fcomment='', outpath='', store_npy=False, store=False, verbose=False):
    '''
    GET THE MU-MAP from a pCT IMAGE (which is in T1w space)
    * the mu-map will be registered to a PET image reconstructed for the time frame t0-t1
    * if t0 and t1 are not given, the whole LM dataset will be reconstructed
    * the reconstructed PET can be attenuation and scatter corrected or not, depending on petopt
    '''
    if hst is None:
        hst = []

    # constants, transaxial and axial LUTs are extracted
    Cnt = scanner_params['Cnt']

    if not os.path.isfile(faff):
        from niftypet.nipet.prj import mmrrec

        # histogram the list data if needed
        if not hst:
            from niftypet.nipet.lm import mmrhist
            hst = mmrhist(datain, scanner_params, t0=t0, t1=t1)

    # get hardware mu-map
    if datain.get("hmumap", "").endswith(".npz") and os.path.isfile(datain["hmumap"]):
        muh = np.load(datain["hmumap"], allow_pickle=True)["hmu"]
        (log.info if verbose else log.debug)('loaded hardware mu-map from file:\n{}'.format(
            datain['hmumap']))
    elif outpath:
        hmupath = os.path.join(outpath, "mumap-hdw", "hmumap.npz")
        if os.path.isfile(hmupath):
            muh = np.load(hmupath, allow_pickle=True)["hmu"]
            datain['hmumap'] = hmupath
        else:
            raise IOError('Invalid path to the hardware mu-map')
    else:
        log.error('The hardware mu-map is required first.')
        raise IOError('Could not find the hardware mu-map!')

    if not {'MRT1W#', 'T1nii', 'T1bc'}.intersection(datain):
        log.error('no MR T1w images required for co-registration!')
        raise IOError('Missing MR data')
    # ----------------------------------

    # output dictionary
    mu_dct = {}
    if not os.path.isfile(faff):
        # first recon pet to get the T1 aligned to it
        if petopt == 'qnt':
            # ---------------------------------------------
            # OPTION 1 (quantitative recon with all corrections using MR-based mu-map)
            # get UTE object mu-map (may not be in register with the PET data)
            mudic = obj_mumap(datain, Cnt)
            muo = mudic['im']
            # reconstruct PET image with UTE mu-map to which co-register T1w
            recout = mmrrec.osemone(datain, [muh, muo], hst, scanner_params, recmod=3, itr=itr,
                                    fwhm=0., fcomment=fcomment + '_qntUTE',
                                    outpath=os.path.join(outpath, 'PET',
                                                         'positioning'), store_img=True)
        elif petopt == 'nac':
            # ---------------------------------------------
            # OPTION 2 (recon without any corrections for scatter and attenuation)
            # reconstruct PET image with UTE mu-map to which co-register T1w
            muo = np.zeros(muh.shape, dtype=muh.dtype)
            recout = mmrrec.osemone(datain, [muh, muo], hst, scanner_params, recmod=1, itr=itr,
                                    fwhm=0., fcomment=fcomment + '_NAC',
                                    outpath=os.path.join(outpath, 'PET',
                                                         'positioning'), store_img=True)
        elif petopt == 'ac':
            # ---------------------------------------------
            # OPTION 3 (recon with attenuation correction only but no scatter)
            # reconstruct PET image with UTE mu-map to which co-register T1w
            mudic = obj_mumap(datain, Cnt, outpath=outpath)
            muo = mudic['im']
            recout = mmrrec.osemone(datain, [muh, muo], hst, scanner_params, recmod=1, itr=itr,
                                    fwhm=0., fcomment=fcomment + '_AC',
                                    outpath=os.path.join(outpath, 'PET',
                                                         'positioning'), store_img=True)

        fpet = recout.fpet
        mu_dct['fpet'] = fpet

        # ------------------------------
        # get the affine transformation
        ft1w = nimpa.pick_t1w(datain)
        try:
            regdct = nimpa.coreg_spm(fpet, ft1w,
                                     outpath=os.path.join(outpath, 'PET', 'positioning'))
        except Exception:
            regdct = nimpa.affine_niftyreg(
                fpet,
                ft1w,
                outpath=os.path.join(outpath, 'PET', 'positioning'), # fcomment=fcomment,
                executable=Cnt['REGPATH'],
                omp=multiprocessing.cpu_count() / 2,
                rigOnly=True,
                affDirect=False,
                maxit=5,
                speed=True,
                pi=50,
                pv=50,
                smof=0,
                smor=0,
                rmsk=True,
                fmsk=True,
                rfwhm=15.,                                           # millilitres
                rthrsh=0.05,
                ffwhm=15.,                                           # millilitres
                fthrsh=0.05,
                verbose=verbose)

        faff = regdct['faff']
        # ------------------------------

    # pCT file name
    if outpath == '':
        pctdir = os.path.dirname(datain['pCT'])
    else:
        pctdir = os.path.join(outpath, 'mumap-obj')
    mmraux.create_dir(pctdir)
    fpct = os.path.join(pctdir, 'pCT_r_tmp' + fcomment + '.nii.gz')

    # > call the resampling routine to get the pCT in place
    if os.path.isfile(Cnt['RESPATH']):
        cmd = [
            Cnt['RESPATH'], '-ref', fpet, '-flo', datain['pCT'], '-trans', faff, '-res', fpct,
            '-pad', '0']
        if log.getEffectiveLevel() > logging.INFO:
            cmd.append('-voff')
        run(cmd)
    else:
        log.error('path to resampling executable is incorrect!')
        raise IOError('Incorrect path to executable!')

    # get the NIfTI of the pCT
    nim = nib.load(fpct)
    A = nim.get_sform()
    pct = nim.get_fdata(dtype=np.float32)
    pct = pct[:, ::-1, ::-1]
    pct = np.transpose(pct, (2, 1, 0))
    # convert the HU units to mu-values
    mu = hu2mu(pct)
    # get rid of negatives
    mu[mu < 0] = 0

    # return image dictionary with the image itself and other parameters
    mu_dct['im'] = mu
    mu_dct['affine'] = A
    mu_dct['faff'] = faff

    if store:
        # now save to numpy array and NIfTI in this folder
        if outpath == '':
            pctumapdir = os.path.join(datain['corepath'], 'mumap-obj')
        else:
            pctumapdir = os.path.join(outpath, 'mumap-obj')
        mmraux.create_dir(pctumapdir)
        # > Numpy
        if store_npy:
            fnp = os.path.join(pctumapdir, "mumap-pCT.npz")
            np.savez(fnp, mu=mu, A=A)

        # > NIfTI
        fmu = os.path.join(pctumapdir, 'mumap-pCT' + fcomment + '.nii.gz')
        nimpa.array2nii(mu[::-1, ::-1, :], A, fmu)
        mu_dct['fim'] = fmu
        datain['mumapCT'] = fmu

    return mu_dct
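
A typical call of the function above might look as follows. This is only a sketch: it assumes the usual NiftyPET helpers nipet.get_mmrparams() and nipet.classify_input() for building scanner_params and datain, a datain dictionary that already contains the 'pCT', T1w, 'mumapDCM' and hardware mu-map entries used above, and placeholder paths.

from niftypet import nipet

mMRpars = nipet.get_mmrparams()                            # scanner constants and LUTs
datain = nipet.classify_input('/path/to/study', mMRpars)   # placeholder study folder

# align the pCT-derived mu-map to a 0-600 s attenuation-corrected recon (assumed options)
mud = pct_mumap(datain, mMRpars, t0=0, t1=600, petopt='ac',
                outpath='/path/to/output', store=True)
mu, A = mud['im'], mud['affine']                           # mu-map array and its affine
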
Example #9
def align_mumap(
    datain,
    scanner_params=None,
    outpath='',
    reg_tool='niftyreg',
    use_stored=False,
    hst=None,
    t0=0,
    t1=0,
    itr=2,
    faff='',
    fpet='',
    fcomment='',
    store=False,
    store_npy=False,
    petopt='ac',
    musrc='ute',         # another option is pct for mu-map source
    ute_name='UTE2',
    del_auxilary=True,
    verbose=False,
):
    '''
    Align a pCT- or MR-derived mu-map to a PET image reconstructed to the chosen
    specifications (e.g., with/without attenuation and scatter corrections).

    use_stored only works if hst or t0/t1 are given, but not when faff is provided.
    '''
    if scanner_params is None:
        scanner_params = {}

    # > output folder
    if outpath == '':
        opth = os.path.join(datain['corepath'], 'mumap-obj')
    else:
        opth = os.path.join(outpath, 'mumap-obj')

    # > create the folder, if not existent
    nimpa.create_dir(opth)

    # > get the timing of PET if affine not given
    if faff == '' and hst is not None and isinstance(hst, dict) and 't0' in hst:
        t0 = hst['t0']
        t1 = hst['t1']

    # > file name for the output mu-map
    fnm = 'mumap-' + musrc.upper()

    # > output dictionary
    mu_dct = {}

    # ---------------------------------------------------------------------------
    # > use the stored mu-map if requested
    if use_stored:
        fmu_stored = fnm + '-aligned-to_t'\
                     + str(t0)+'-'+str(t1)+'_'+petopt.upper()\
                     + fcomment
        fmupath = os.path.join(opth, fmu_stored + '.nii.gz')

        if os.path.isfile(fmupath):
            mudct_stored = nimpa.getnii(fmupath, output='all')
            # > create output dictionary
            mu_dct['im'] = mudct_stored['im']
            mu_dct['affine'] = mudct_stored['affine']
            # mu_dct['faff'] = faff
            return mu_dct
    # ---------------------------------------------------------------------------

    # > tmp folder for not aligned mu-maps
    tmpdir = os.path.join(opth, 'tmp')
    nimpa.create_dir(tmpdir)

    # > three ways of passing scanner constants <Cnt> are here decoded
    if 'Cnt' in scanner_params:
        Cnt = scanner_params['Cnt']
    elif 'SO_IMZ' in scanner_params:
        Cnt = scanner_params
    else:
        Cnt = rs.get_mmr_constants()

    # > if affine not provided histogram the LM data for recon and registration
    if not os.path.isfile(faff):
        from niftypet.nipet.prj import mmrrec

        # -histogram the list data if needed
        if hst is None:
            from niftypet.nipet import mmrhist
            if 'txLUT' in scanner_params:
                hst = mmrhist(datain, scanner_params, t0=t0, t1=t1)
            else:
                raise ValueError('Full scanner parameters not provided '
                                 'but are required for histogramming.')

    # ========================================================
    # -get hardware mu-map
    if 'hmumap' in datain and os.path.isfile(datain['hmumap']):
        muh = np.load(datain['hmumap'], allow_pickle=True)["hmu"]
        (log.info if verbose else log.debug)('loaded hardware mu-map from file:\n{}'.format(
            datain['hmumap']))
    elif outpath != '':
        hmupath = os.path.join(outpath, "mumap-hdw", "hmumap.npz")
        if os.path.isfile(hmupath):
            muh = np.load(hmupath, allow_pickle=True)["hmu"]
            datain["hmumap"] = hmupath
        else:
            raise IOError('Invalid path to the hardware mu-map')
    else:
        log.error('the hardware mu-map is required first.')
        raise IOError('Could not find the hardware mu-map!')
    # ========================================================
    # -check if T1w image is available
    if not {'MRT1W#', 'T1nii', 'T1bc', 'T1N4'}.intersection(datain):
        log.error('no MR T1w images required for co-registration!')
        raise IOError('T1w image could not be obtained!')
    # ========================================================

    # -if the affine is not given,
    # -it will be generated by reconstructing PET image, with some or no corrections
    if not os.path.isfile(faff):
        # first recon pet to get the T1 aligned to it
        if petopt == 'qnt':
            # ---------------------------------------------
            # OPTION 1 (quantitative recon with all corrections using MR-based mu-map)
            # get UTE object mu-map (may not be in register with the PET data)
            mudic = obj_mumap(datain, Cnt, outpath=tmpdir, del_auxilary=del_auxilary)
            muo = mudic['im']
            # reconstruct PET image with UTE mu-map to which co-register T1w
            recout = mmrrec.osemone(datain, [muh, muo], hst, scanner_params, recmod=3, itr=itr,
                                    fwhm=0., fcomment=fcomment + '_QNT-UTE',
                                    outpath=os.path.join(outpath, 'PET',
                                                         'positioning'), store_img=True)
        elif petopt == 'nac':
            # ---------------------------------------------
            # OPTION 2 (recon without any corrections for scatter and attenuation)
            # reconstruct PET image with UTE mu-map to which co-register T1w
            muo = np.zeros(muh.shape, dtype=muh.dtype)
            recout = mmrrec.osemone(datain, [muh, muo], hst, scanner_params, recmod=1, itr=itr,
                                    fwhm=0., fcomment=fcomment + '_NAC',
                                    outpath=os.path.join(outpath, 'PET',
                                                         'positioning'), store_img=True)
        elif petopt == 'ac':
            # ---------------------------------------------
            # OPTION 3 (recon with attenuation correction only but no scatter)
            # reconstruct PET image with UTE mu-map to which co-register T1w
            mudic = obj_mumap(datain, Cnt, outpath=tmpdir, del_auxilary=del_auxilary)
            muo = mudic['im']

            recout = mmrrec.osemone(datain, [muh, muo], hst, scanner_params, recmod=1, itr=itr,
                                    fwhm=0., fcomment=fcomment + '_AC-UTE',
                                    outpath=os.path.join(outpath, 'PET',
                                                         'positioning'), store_img=True)

        fpet = recout.fpet
        mu_dct['fpet'] = fpet

        # ------------------------------
        if musrc == 'ute' and ute_name in datain and os.path.exists(datain[ute_name]):
            # change to NIfTI if the UTE sequence is in DICOM files (folder)
            if os.path.isdir(datain[ute_name]):
                fnew = os.path.basename(datain[ute_name])
                run([Cnt['DCM2NIIX'], '-f', fnew, datain[ute_name]])
                fute = glob.glob(os.path.join(datain[ute_name], fnew + '*nii*'))[0]
            elif os.path.isfile(datain[ute_name]):
                fute = datain[ute_name]

            # get the affine transformation
            if reg_tool == 'spm':
                regdct = nimpa.coreg_spm(fpet, fute,
                                         outpath=os.path.join(outpath, 'PET', 'positioning'))
            elif reg_tool == 'niftyreg':
                regdct = nimpa.affine_niftyreg(
                    fpet,
                    fute,
                    outpath=os.path.join(outpath, 'PET', 'positioning'),
                    executable=Cnt['REGPATH'],
                    omp=multiprocessing.cpu_count() / 2,                 # fcomment=fcomment,
                    rigOnly=True,
                    affDirect=False,
                    maxit=5,
                    speed=True,
                    pi=50,
                    pv=50,
                    smof=0,
                    smor=0,
                    rmsk=True,
                    fmsk=True,
                    rfwhm=15.,                                           # millilitres
                    rthrsh=0.05,
                    ffwhm=15.,                                           # millilitres
                    fthrsh=0.05,
                    verbose=verbose)
            else:
                raise ValueError('unknown registration tool requested')

            faff_mrpet = regdct['faff']

        elif musrc == 'pct':

            ft1w = nimpa.pick_t1w(datain)

            if reg_tool == 'spm':
                regdct = nimpa.coreg_spm(fpet, ft1w,
                                         outpath=os.path.join(outpath, 'PET', 'positioning'))
            elif reg_tool == 'niftyreg':
                regdct = nimpa.affine_niftyreg(
                    fpet,
                    ft1w,
                    outpath=os.path.join(outpath, 'PET', 'positioning'),
                    executable=Cnt['REGPATH'],
                    omp=multiprocessing.cpu_count() / 2,
                    rigOnly=True,
                    affDirect=False,
                    maxit=5,
                    speed=True,
                    pi=50,
                    pv=50,
                    smof=0,
                    smor=0,
                    rmsk=True,
                    fmsk=True,
                    rfwhm=15.,                                           # millilitres
                    rthrsh=0.05,
                    ffwhm=15.,                                           # millilitres
                    fthrsh=0.05,
                    verbose=verbose)
            else:
                raise ValueError('unknown registration tool requested')

            faff_mrpet = regdct['faff']

        else:
            raise IOError('Floating MR image not provided or is invalid.')

    else:
        faff_mrpet = faff
        regdct = {}
        if not os.path.isfile(fpet):
            raise IOError('e> the reference PET should be supplied with the affine.')

    # > output file name for the aligned mu-maps
    if musrc == 'pct':

        # > convert to mu-values before resampling to avoid artefacts with negative values
        nii = nib.load(datain['pCT'])
        img = nii.get_fdata(dtype=np.float32)
        img_mu = hu2mu(img)
        nii_mu = nib.Nifti1Image(img_mu, nii.affine)
        fflo = os.path.join(tmpdir, 'pct2mu-not-aligned.nii.gz')
        nib.save(nii_mu, fflo)

        freg = os.path.join(opth, 'pct2mu-aligned-' + fcomment + '.nii.gz')

    elif musrc == 'ute':
        freg = os.path.join(opth, 'UTE-res-tmp' + fcomment + '.nii.gz')
        if 'UTE' not in datain:
            fnii = 'converted-from-DICOM_'
            tstmp = nimpa.time_stamp(simple_ascii=True)
            # convert the DICOM mu-map images to nii
            if 'mumapDCM' not in datain:
                raise IOError('DICOM with the UTE mu-map are not given.')
            run([Cnt['DCM2NIIX'], '-f', fnii + tstmp, '-o', opth, datain['mumapDCM']])
            # files for the T1w, pick one:
            fflo = glob.glob(os.path.join(opth, '*' + fnii + tstmp + '*.nii*'))[0]
        else:
            if os.path.isfile(datain['UTE']):
                fflo = datain['UTE']
            else:
                raise IOError('The provided NIfTI UTE path is not valid.')

    # > call the resampling routine to get the pCT/UTE in place
    if reg_tool == "spm":
        nimpa.resample_spm(fpet, fflo, faff_mrpet, fimout=freg, del_ref_uncmpr=True,
                           del_flo_uncmpr=True, del_out_uncmpr=True)
    else:
        nimpa.resample_niftyreg(fpet, fflo, faff_mrpet, fimout=freg, executable=Cnt['RESPATH'],
                                verbose=verbose)

    # -get the NIfTI of registered image
    nim = nib.load(freg)
    A = nim.affine
    imreg = nim.get_fdata(dtype=np.float32)
    imreg = imreg[:, ::-1, ::-1]
    imreg = np.transpose(imreg, (2, 1, 0))

    # -convert to mu-values; sort out the file name too.
    if musrc == 'pct':
        mu = imreg
    elif musrc == 'ute':
        mu = np.float32(imreg) / 1e4
        # -remove the converted file from DICOMs
        os.remove(fflo)
    else:
        raise NameError('Confused o_O')

    # > get rid of negatives and nans
    mu[mu < 0] = 0
    mu[np.isnan(mu)] = 0

    # > return image dictionary with the image itself and other parameters
    mu_dct['im'] = mu
    mu_dct['affine'] = A
    mu_dct['faff'] = faff_mrpet

    if store or store_npy:
        nimpa.create_dir(opth)
        if faff == '':
            fname = fnm + '-aligned-to_t'\
                    + str(t0)+'-'+str(t1)+'_'+petopt.upper()\
                    + fcomment
        else:
            fname = fnm + '-aligned-to-given-affine' + fcomment
    if store_npy:
        fnp = os.path.join(opth, fname + ".npz")
        np.savez(fnp, mu=mu, A=A)
    if store:
        # > NIfTI
        fmu = os.path.join(opth, fname + '.nii.gz')
        nimpa.array2nii(mu[::-1, ::-1, :], A, fmu)
        mu_dct['fim'] = fmu

    if del_auxilary:
        os.remove(freg)

        if musrc == 'ute' and not os.path.isfile(faff):
            os.remove(fute)
        shutil.rmtree(tmpdir)

    return mu_dct
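
With use_stored=True, the function above only looks for a previously written NIfTI whose name follows the pattern built near the top of the function. A small sketch of that naming convention, with made-up timing and options:

import os

musrc, t0, t1, petopt, fcomment = 'ute', 0, 600, 'ac', ''   # illustrative assumptions
opth = '/path/to/output/mumap-obj'                          # placeholder output folder

fnm = 'mumap-' + musrc.upper()
fmu_stored = fnm + '-aligned-to_t' + str(t0) + '-' + str(t1) + '_' + petopt.upper() + fcomment
fmupath = os.path.join(opth, fmu_stored + '.nii.gz')
print(fmupath)   # .../mumap-UTE-aligned-to_t0-600_AC.nii.gz
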
Example #10
def obj_mumap(
    datain,
    params=None,
    outpath='',
    comment='',
    store=False,
    store_npy=False,
    gantry_offset=True,
    del_auxilary=True,
):
    '''Get the object mu-map from DICOM images'''
    if params is None:
        params = {}

    # three ways of passing scanner constants <Cnt> are here decoded
    if 'Cnt' in params:
        Cnt = params['Cnt']
    elif 'SO_IMZ' in params:
        Cnt = params
    else:
        Cnt = rs.get_mmr_constants()

    # output folder
    if outpath == '':
        fmudir = os.path.join(datain['corepath'], 'mumap-obj')
    else:
        fmudir = os.path.join(outpath, 'mumap-obj')
    nimpa.create_dir(fmudir)

    # > ref file name
    fmuref = os.path.join(fmudir, 'muref.nii.gz')

    # > ref affine
    B = image_affine(datain, Cnt, gantry_offset=gantry_offset)

    # > ref image (blank)
    im = np.zeros((Cnt['SO_IMZ'], Cnt['SO_IMY'], Cnt['SO_IMX']), dtype=np.float32)

    # > store ref image
    nimpa.array2nii(im, B, fmuref)

    # check if the object DICOM files for the MR-based mu-map exist
    if 'mumapDCM' not in datain or not os.path.isdir(datain['mumapDCM']):
        log.error('DICOM folder for the mu-map does not exist.')
        return None

    fnii = 'converted-from-object-DICOM_'
    tstmp = nimpa.time_stamp(simple_ascii=True)

    # find residual(s) from previous runs and delete them
    resdcm = glob.glob(os.path.join(fmudir, '*' + fnii + '*.nii*'))
    for d in resdcm:
        os.remove(d)

    # convert the DICOM mu-map images to nii
    run([Cnt['DCM2NIIX'], '-f', fnii + tstmp, '-o', fmudir, datain['mumapDCM']])
    # files for the T1w, pick one:
    fmunii = glob.glob(os.path.join(fmudir, '*' + fnii + tstmp + '*.nii*'))[0]
    # fmunii = glob.glob( os.path.join(datain['mumapDCM'], '*converted*.nii*') )
    # fmunii = fmunii[0]

    # the converted nii image resample to the reference size
    fmu = os.path.join(fmudir, comment + 'mumap_tmp.nii.gz')
    if os.path.isfile(Cnt['RESPATH']):
        cmd = [Cnt['RESPATH'], '-ref', fmuref, '-flo', fmunii, '-res', fmu, '-pad', '0']
        if log.getEffectiveLevel() > logging.INFO:
            cmd.append('-voff')
        run(cmd)
    else:
        log.error('path to resampling executable is incorrect!')
        raise IOError('Path to executable is incorrect!')

    nim = nib.load(fmu)
    # get the affine transform
    A = nim.get_sform()
    mu = nim.get_fdata(dtype=np.float32)
    mu = np.transpose(mu[:, ::-1, ::-1], (2, 1, 0))
    # convert to mu-values
    mu = np.float32(mu) / 1e4
    mu[mu < 0] = 0

    # > return image dictionary with the image itself and some other stats
    mu_dct = {'im': mu, 'affine': A}
    if not del_auxilary:
        mu_dct['fmuref'] = fmuref

    # > store the mu-map if requested
    if store_npy:
        # to numpy array
        fnp = os.path.join(fmudir, "mumap-from-DICOM.npz")
        np.savez(fnp, mu=mu, A=A)

    if store:
        # with this file name
        fmumap = os.path.join(fmudir, 'mumap-from-DICOM_no-alignment' + comment + '.nii.gz')
        nimpa.array2nii(mu[::-1, ::-1, :], A, fmumap)
        mu_dct['fim'] = fmumap

    if del_auxilary:
        os.remove(fmuref)
        os.remove(fmunii)
        os.remove(fmu)

        if [f for f in os.listdir(fmudir)
                if not f.startswith('.') and not f.endswith('.json')] == []:
            shutil.rmtree(fmudir)

    return mu_dct
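
The core conversion applied above to the resampled DICOM-derived volume can be reproduced on its own: the stored voxel values are the attenuation coefficients scaled by 1e4, and the array is flipped and transposed into the (z, y, x) orientation used by the rest of the package. A minimal sketch, assuming fmu is the NIfTI file produced by the resampling step:

import nibabel as nib
import numpy as np

fmu = '/path/to/mumap_tmp.nii.gz'        # placeholder: output of the resampling step
nim = nib.load(fmu)
A = nim.get_sform()                      # affine of the resampled image

vol = nim.get_fdata(dtype=np.float32)
mu = np.transpose(vol[:, ::-1, ::-1], (2, 1, 0))   # reorder to (z, y, x) as above
mu = mu / 1e4                            # stored values are mu scaled by 1e4
mu[mu < 0] = 0                           # clip negative interpolation artefacts
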
Example #11
def get_hmupos(datain, parts, Cnt, outpath=''):
    # check if registration executable exists
    if not os.path.isfile(Cnt['RESPATH']):
        raise IOError('No registration executable found!')
    # ----- get positions from the DICOM list-mode file -----
    ihdr, csainfo = mmraux.hdr_lm(datain, Cnt)
    # table position origin
    fi = csainfo.find(b'TablePositionOrigin')
    tpostr = csainfo[fi:fi + 200]
    tpo = re.sub(b'[^a-zA-Z0-9.\\-]', b'', tpostr).split(b'M')
    tpozyx = np.array([float(tpo[-1]), float(tpo[-2]), float(tpo[-3])]) / 10
    log.info('table position (z,y,x) (cm): {}'.format(tpozyx))
    # --------------------------------------------------------

    # ------- get positions from the DICOM mu-map file -------
    csamu, dhdr = hdr_mu(datain, Cnt)
    # > get the indices where the table offset may reside:
    idxs = [m.start() for m in re.finditer(b'GantryTableHomeOffset(?!_)', csamu)]
    # > loop over the indices and find those which are correct
    found_off = False
    for i in idxs:
        gtostr1 = csamu[i:i + 300]
        gtostr2 = re.sub(b'[^a-zA-Z0-9.\\-]', b'', gtostr1)
        # gantry table offset, through conversion of string to float
        gtoxyz = re.findall(b'(?<=M)-*[\\d]{1,4}\\.[\\d]{6,9}', gtostr2)
        gtozyx = np.float32(gtoxyz)[::-1] / 10
        if len(gtoxyz) > 3:
            log.warning('the gantry table offset has more than 3 entries; a check is needed.')
            gtozyx = gtozyx[-3:]
        if abs(gtozyx[0]) > 20 and abs(gtozyx[1]) < 20 and abs(gtozyx[2]) < 2:
            found_off = True
            break

    if found_off:
        log.info('gantry table offset (z,y,x) (cm): {}'.format(gtozyx))
    else:
        raise ValueError('Could not find the gantry table offset or the offset is unusual.')
    # --------------------------------------------------------

    # create the folder for hardware mu-maps
    if outpath == '':
        dirhmu = os.path.join(datain['corepath'], 'mumap-hdw')
    else:
        dirhmu = os.path.join(outpath, 'mumap-hdw')
    mmraux.create_dir(dirhmu)
    # get the reference nii image
    fref = os.path.join(dirhmu, 'hmuref.nii.gz')

    # start horizontal bed position
    p = re.compile(r'start horizontal bed position.*\d{1,3}\.*\d*')
    m = p.search(ihdr)
    fi = ihdr[m.start():m.end()].find('=')
    hbedpos = 0.1 * float(ihdr[m.start() + fi + 1:m.end()])

    # start vertical bed position
    p = re.compile(r'start vertical bed position.*\d{1,3}\.*\d*')
    m = p.search(ihdr)
    fi = ihdr[m.start():m.end()].find('=')
    vbedpos = 0.1 * float(ihdr[m.start() + fi + 1:m.end()])

    log.info('creating reference NIfTI image for resampling')
    B = np.diag(np.array([-10 * Cnt['SO_VXX'], 10 * Cnt['SO_VXY'], 10 * Cnt['SO_VXZ'], 1]))
    B[0, 3] = 10 * (.5 * Cnt['SO_IMX']) * Cnt['SO_VXX']
    B[1, 3] = 10 * (-.5 * Cnt['SO_IMY'] + 1) * Cnt['SO_VXY']
    B[2, 3] = 10 * ((-.5 * Cnt['SO_IMZ'] + 1) * Cnt['SO_VXZ'] + hbedpos)
    nimpa.array2nii(np.zeros((Cnt['SO_IMZ'], Cnt['SO_IMY'], Cnt['SO_IMX']), dtype=np.float32), B,
                    fref)

    # define a dictionary of all positions/offsets of hardware mu-maps
    hmupos = [None] * 5
    hmupos[0] = {
        'TabPosOrg': tpozyx, # from DICOM of LM file
        'GanTabOff': gtozyx, # from DICOM of mMR mu-map file
        'HBedPos': hbedpos,  # from Interfile of LM file [cm]
        'VBedPos': vbedpos,  # from Interfile of LM file [cm]
        'niipath': fref}

    # --------------------------------------------------------------------------
    # iteratively go through the mu-maps and add them as needed
    for i in parts:
        fh = os.path.join(Cnt['HMUDIR'], Cnt['HMULIST'][i - 1])
        # get the interfile header and binary data
        hdr, im = rd_hmu(fh)
        # get shape, origin, offset and voxel size
        s = hmu_shape(hdr)
        im.shape = s
        # get the origin, offset and voxel size for the mu-map interfile data
        org = hmu_origin(hdr)
        off = hmu_offset(hdr)
        vs = hmu_voxsize(hdr)
        # corner voxel position for the interfile image data
        vpos = (-org * vs + off + gtozyx - tpozyx)
        # add to the dictionary
        hmupos[i] = {
            'vpos': vpos,
            'shape': s,   # from interfile
            'iorg': org,  # from interfile
            'ioff': off,  # from interfile
            'ivs': vs,    # from interfile
            'img': im,    # from interfile
            'niipath': os.path.join(dirhmu, '_' + Cnt['HMULIST'][i - 1].split('.')[0] + '.nii.gz')}
        log.info('creating mu-map for: {}'.format(Cnt['HMULIST'][i - 1]))
        A = np.diag(np.append(10 * vs[::-1], 1))
        A[0, 0] *= -1
        A[0, 3] = 10 * (-vpos[2])
        A[1, 3] = -10 * ((s[1] - 1) * vs[1] + vpos[1])
        A[2, 3] = -10 * ((s[0] - 1) * vs[0] - vpos[0])
        nimpa.array2nii(im[::-1, ::-1, :], A, hmupos[i]['niipath'])

        # resample using niftyreg
        fout = os.path.join(os.path.dirname(hmupos[0]['niipath']),
                            'r' + os.path.basename(hmupos[i]['niipath']).split('.')[0] + '.nii.gz')
        cmd = [
            Cnt['RESPATH'], '-ref', hmupos[0]['niipath'], '-flo', hmupos[i]['niipath'], '-res',
            fout, '-pad', '0']
        if log.getEffectiveLevel() > logging.INFO:
            cmd.append('-voff')
        run(cmd)

    return hmupos
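
The Interfile parsing used above for the bed positions is easy to exercise on its own. The header fragment below is made up for illustration only; the regular expression and the mm-to-cm scaling are the same as in the code.

import re

ihdr = 'start horizontal bed position (mm) := 1234.5\n'   # made-up header line

p = re.compile(r'start horizontal bed position.*\d{1,3}\.*\d*')
m = p.search(ihdr)
fi = ihdr[m.start():m.end()].find('=')
hbedpos = 0.1 * float(ihdr[m.start() + fi + 1:m.end()])   # mm -> cm
print(hbedpos)   # ~123.45
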
Example #12
def get_hmupos(datain, parts, Cnt, outpath=''):

    # check if registration executable exists
    if not os.path.isfile(Cnt['RESPATH']):
        print 'e> no registration executable found!'
        sys.exit()

    #----- get positions from the DICOM list-mode file -----
    ihdr, csainfo = mmraux.hdr_lm(datain, Cnt)
    #table position origin
    fi = csainfo.find('TablePositionOrigin')
    tpostr = csainfo[fi:fi+200]
    tpo = re.sub(r'[^a-zA-Z0-9\-\.]', '', tpostr).split('M')
    tpozyx = np.array([float(tpo[-1]), float(tpo[-2]), float(tpo[-3])]) / 10
    if Cnt['VERBOSE']: print 'i> table position (z,y,x) (cm):', tpozyx
    #--------------------------------------------------------

    #------- get positions from the DICOM mu-map file -------
    csamu, dhdr = hdr_mu(datain, Cnt)
    tmp = re.search('GantryTableHomeOffset(?!_)', csamu)
    gtostr1  = csamu[ tmp.start():tmp.start()+300 ]
    gtostr2 = re.sub(r'[^a-zA-Z0-9\-\.]', '', gtostr1)
    # gantry table offset, through conversion of string to float
    gtoxyz = re.findall(r'(?<=M)-*[\d]{1,4}\.[\d]{6,9}', gtostr2)
    gtozyx = np.float32(gtoxyz)[::-1]/10
    #--------------------------------------------------------

    if Cnt['VERBOSE']: print 'i> gantry table offset (z,y,x) (cm):', gtozyx

    ## ----
    ## old II
    # csamu, dhdr = nipet.img.mmrimg.hdr_mu(datain, Cnt)
    # tmp = re.search('GantryTableHomeOffset(?!_)', csamu)
    # gtostr = csamu[ tmp.start():tmp.start()+300 ]
    # gto = re.sub(r'[^a-zA-Z0-9\-\.]', '', gtostr).split('M')
    # # get the first three numbers
    # zyx = np.zeros(3, dtype=np.float32)
    # c = 0
    # for i in range(len(gto)):
    #     if re.search(r'[\d]{1,3}\.[\d]{6}', gto[i])!=None and c<3:
    #         zyx[c] = np.float32(re.sub(r'[^0-9\-\.]', '', gto[i]))
    #         c+=1
    # #gantry table offset
    # gtozyx = zyx[::-1]/10
    ## ----

    ## ----
    ## old I: only worked for syngo MR B20P
    # fi = csamu.find('GantryTableHomeOffset')
    # gtostr =csamu[fi:fi+300]
    # if dhdr[0x0018, 0x1020].value == 'syngo MR B20P':
    #     gto = re.sub(r'[^a-zA-Z0-9\-\.]', '', gtostr).split('M')
    #     # get the first three numbers
    #     zyx = np.zeros(3, dtype=np.float32)
    #     c = 0
    #     for i in range(len(gto)):
    #         if re.search(r'[\d]', gto[i])!=None and c<3:
    #             zyx[c] = np.float32(re.sub(r'[^0-9\-\.]', '', gto[i]))
    #             c+=1
    #     #gantry table offset
    #     gtozyx = zyx[::-1]/10
    #     if Cnt['VERBOSE']: print 'i> gantry table offset (z,y,x) (cm):', gtozyx
    # # older scanner version
    # elif dhdr[0x0018, 0x1020].value == 'syngo MR B18P':
    #     zyx = np.zeros(3, dtype=np.float32)
    #     for k in range(3):
    #         tmp = re.search(r'\{\s*[\-0-9.]*\s*\}', gtostr)
    #         i0 = tmp.start()
    #         i1 = tmp.end()
    #         if gtostr[i0+1:i1-1]!=' ':  zyx[k] = np.float32(gtostr[i0+1:i1-1])
    #         gtostr = gtostr[i1:]
    #     #gantry table offset
    #     gtozyx = zyx[::-1]/10
    #     if Cnt['VERBOSE']: print 'i> gantry table offset (z,y,x) (cm):', gtozyx
    ## -----

    # create the folder for hardware mu-maps
    if outpath=='':
        dirhmu = os.path.join( datain['corepath'], 'mumap-hdw')
    else:
        dirhmu = os.path.join( outpath, 'mumap-hdw')
    mmraux.create_dir(dirhmu)
    # get the reference nii image
    fref = os.path.join(dirhmu, 'hmuref.nii.gz')

    #start horizontal bed position
    p = re.compile(r'start horizontal bed position.*\d{1,3}\.*\d*')
    m = p.search(ihdr)
    fi = ihdr[m.start():m.end()].find('=')
    hbedpos = 0.1*float(ihdr[m.start()+fi+1:m.end()])

    #start vertical bed position
    p = re.compile(r'start vertical bed position.*\d{1,3}\.*\d*')
    m = p.search(ihdr)
    fi = ihdr[m.start():m.end()].find('=')
    vbedpos = 0.1*float(ihdr[m.start()+fi+1:m.end()])

    if Cnt['VERBOSE']: print 'i> creating reference nii image for resampling'
    B = np.diag(np.array([-10*Cnt['SO_VXX'], 10*Cnt['SO_VXY'], 10*Cnt['SO_VXZ'], 1]))
    B[0,3] = 10*(.5*Cnt['SO_IMX'])*Cnt['SO_VXX']
    B[1,3] = 10*( -.5*Cnt['SO_IMY']+1)*Cnt['SO_VXY']
    B[2,3] = 10*((-.5*Cnt['SO_IMZ']+1)*Cnt['SO_VXZ'] + hbedpos )
    nimpa.array2nii(  np.zeros((Cnt['SO_IMZ'], Cnt['SO_IMY'], Cnt['SO_IMX']), dtype=np.float32), B, fref)

    #define a dictionary of all positions/offsets of hardware mu-maps
    hmupos = [None]*5
    hmupos[0] = {
        'TabPosOrg' :   tpozyx, #from DICOM of LM file
        'GanTabOff' :   gtozyx, #from DICOM of mMR mu-map file
        'HBedPos'   :   hbedpos, #from Interfile of LM file [cm]
        'VBedPos'   :   vbedpos, #from Interfile of LM file [cm]
        'niipath'   :   fref
        }

    #--------------------------------------------------------------------------
    # iteratively go through the mu-maps and add them as needed
    for i in parts:
        fh = os.path.join(Cnt['HMUDIR'], Cnt['HMULIST'][i-1])
        # get the interfile header and binary data
        hdr, im = rd_hmu(fh)
        #get shape, origin, offset and voxel size
        s = hmu_shape(hdr)
        im.shape = s
        # get the origin, offset and voxel size for the mu-map interfile data
        org = hmu_origin(hdr)
        off = hmu_offset(hdr)
        vs  = hmu_voxsize(hdr)
        # corner voxel position for the interfile image data
        vpos = (-org*vs + off + gtozyx - tpozyx)
        #add to the dictionary
        hmupos[i] = {
            'vpos'    :   vpos,
            'shape'   :   s,   #from interfile
            'iorg'    :   org, #from interfile
            'ioff'    :   off, #from interfile
            'ivs'     :   vs,  #from interfile
            'img'     :   im, #from interfile
            'niipath' :   os.path.join(dirhmu, '_'+Cnt['HMULIST'][i-1].split('.')[0]+'.nii.gz')
        }
        #save to NIfTI
        if Cnt['VERBOSE']: print 'i> creating mu-map for:', Cnt['HMULIST'][i-1]
        A = np.diag(np.append(10*vs[::-1], 1))
        A[0,0] *= -1
        A[0,3] =  10*(-vpos[2])
        A[1,3] = -10*((s[1]-1)*vs[1] + vpos[1])
        A[2,3] = -10*((s[0]-1)*vs[0] - vpos[0])
        nimpa.array2nii(im[::-1,::-1,:], A, hmupos[i]['niipath'])

        # resample using niftyreg
        fout = os.path.join(    os.path.dirname (hmupos[0]['niipath']),
                                'r'+os.path.basename(hmupos[i]['niipath']).split('.')[0]+'.nii.gz' )
        cmd = [ Cnt['RESPATH'],
                '-ref', hmupos[0]['niipath'],
                '-flo', hmupos[i]['niipath'],
                '-res', fout,
                '-pad', '0']
        if not Cnt['VERBOSE']: cmd.append('-voff')
        call(cmd)

    return hmupos
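
For reference, the corner-voxel arithmetic shared by both versions of get_hmupos can be sketched with made-up numbers; the origin, offset, voxel size and table/gantry offsets below are illustrative assumptions only, while the vpos and affine expressions mirror the code above.

import numpy as np

# made-up hardware mu-map geometry (z, y, x), all lengths in cm
org = np.array([64., 96., 96.])        # image origin in voxels (assumption)
vs = np.array([0.2, 0.2, 0.2])         # voxel size (assumption)
off = np.array([-12.0, -9.6, -9.6])    # offset from the Interfile header (assumption)
gtozyx = np.array([108.3, -0.4, 0.1])  # gantry table home offset (assumption)
tpozyx = np.array([108.0, -0.5, 0.0])  # table position origin (assumption)

# corner voxel position, as in the code above
vpos = -org * vs + off + gtozyx - tpozyx

# affine passed to array2nii, mirroring the construction inside the loop above
s = (128, 192, 192)                    # assumed image shape (z, y, x)
A = np.diag(np.append(10 * vs[::-1], 1))
A[0, 0] *= -1
A[0, 3] = 10 * (-vpos[2])
A[1, 3] = -10 * ((s[1] - 1) * vs[1] + vpos[1])
A[2, 3] = -10 * ((s[0] - 1) * vs[0] - vpos[0])
print(vpos, A[:3, 3])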