Example #1
def obtain_image(img, Cnt=[], imtype='', verbose=False):
    ''' 
    Obtain the image (hardware or object mu-map) from a file,
    numpy array, dictionary or empty list (a blank image is then assumed).
    The image has to match the dimensions of the PET image as given in Cnt['SO_IM[X-Z]'].
    '''

    if Cnt: verbose = Cnt['VERBOSE']
    # establish whether and in what form the image has been provided
    # all findings go to the output dictionary
    output = {}
    if isinstance(img, dict):
        if Cnt and img['im'].shape != (Cnt['SO_IMZ'], Cnt['SO_IMY'],
                                       Cnt['SO_IMX']):
            print 'e> provided ' + imtype + ' via the dictionary has inconsistent dimensions compared to Cnt.'
            raise ValueError('Wrong dimensions of the mu-map')
        else:
            output['im'] = img['im']
            output['exists'] = True
            output['fim'] = img['fim']
            if 'faff' in img: output['faff'] = img['faff']
            if 'fmuref' in img: output['fmuref'] = img['fmuref']
            if 'affine' in img: output['affine'] = img['affine']
            if verbose: print 'i> using ' + imtype + ' from dictionary.'

    elif isinstance(img, (np.ndarray, np.generic)):
        if Cnt and img.shape != (Cnt['SO_IMZ'], Cnt['SO_IMY'], Cnt['SO_IMX']):
            print 'e> provided ' + imtype + ' via the numpy array has inconsistent dimensions compared to Cnt.'
            raise ValueError('Wrong dimensions of the mu-map')
        else:
            output['im'] = img
            output['exists'] = True
            output['fim'] = ''
            if verbose: print 'i> using ' + imtype + ' from numpy array.'

    elif isinstance(img, basestring):
        if os.path.isfile(img):
            imdct = nimpa.getnii(img, output='all')
            output['im'] = imdct['im']
            output['affine'] = imdct['affine']
            if Cnt and output['im'].shape != (Cnt['SO_IMZ'], Cnt['SO_IMY'],
                                              Cnt['SO_IMX']):
                print 'e> provided ' + imtype + ' via file has inconsistent dimensions compared to Cnt.'
                raise ValueError('Wrong dimensions of the mu-map')
            else:
                output['exists'] = True
                output['fim'] = img
                if verbose: print 'i> using ' + imtype + ' from NIfTI file.'
        else:
            print 'e> provided ' + imtype + ' path is invalid.'
            return None
    elif isinstance(img, list):
        output['im'] = np.zeros((Cnt['SO_IMZ'], Cnt['SO_IMY'], Cnt['SO_IMX']),
                                dtype=np.float32)
        if verbose:
            print 'w> ' + imtype + ' has not been provided -> using blank.'
        output['fim'] = ''
        output['exists'] = False
    #------------------------------------------------------------------------
    return output
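
A minimal usage sketch (not from the source): Cnt below is a hypothetical scanner-constants dictionary with assumed mMR-like image dimensions, and the file path is a placeholder.

import numpy as np

Cnt = {'SO_IMZ': 127, 'SO_IMY': 344, 'SO_IMX': 344, 'VERBOSE': True}   # assumed example dimensions

# 1) from a NIfTI file (placeholder path)
muo = obtain_image('/data/mumap-obj/mumap.nii.gz', Cnt=Cnt, imtype='object mu-map')

# 2) from a numpy array with matching dimensions
muh = obtain_image(np.zeros((Cnt['SO_IMZ'], Cnt['SO_IMY'], Cnt['SO_IMX']), dtype=np.float32),
                   Cnt=Cnt, imtype='hardware mu-map')

# 3) from an empty list -> blank (all-zero) mu-map with output['exists'] set to False
mublank = obtain_image([], Cnt=Cnt, imtype='object mu-map')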
Example #2
def mmrchain(
    datain,  # all input data in a dictionary
    scanner_params,  # all scanner parameters in one dictionary
    # containing constants, transaxial and axial
    # LUTs.
    outpath='',  # output path for results
    frames=['fluid', [0, 0]],  # definition of time frames.
    mu_h=[],  # hardware mu-map.
    mu_o=[],  # object mu-map.
    tAffine=[],  # affine transformations for the mu-map for
    # each time frame separately.
    itr=4,  # number of OSEM iterations
    fwhm=0.,  # Gaussian Smoothing FWHM
    recmod=-1,  # reconstruction mode: -1: undefined, chosen
    # automatically. 3: attenuation and scatter
    # correction, 1: attenuation correction
    # only, 0: no correction (randoms only).
    histo=[],  # input histogram (from list-mode data);
    # if not given, it will be performed.
    trim=False,
    trim_scale=2,
    trim_interp=1,  # interpolation for upsampling used in PVC
    trim_memlim=True,  # reduced use of memory for machines
    # with limited memory (slow though)
    pvcroi=[],  # ROI used for PVC.  If undefined no PVC
    # is performed.
    pvcreg_tool='niftyreg',  # the registration tool used in PVC
    store_rois=False,  # stores the image of PVC ROIs
    # as defined in pvcroi.
    psfkernel=[],
    pvcitr=5,
    fcomment='',  # text comment used in the file name of
    # generated image files
    ret_sinos=False,  # return prompt, scatter and randoms
    # sinograms for each reconstruction
    store_img=True,
    store_img_intrmd=False,
    store_itr=[],  # store any reconstruction iteration in
    # the list.  ignored if the list is empty.
    del_img_intrmd=False):
    log = logging.getLogger(__name__)

    # decompose all the scanner parameters and constants
    Cnt = scanner_params['Cnt']
    txLUT = scanner_params['txLUT']
    axLUT = scanner_params['axLUT']

    # -------------------------------------------------------------------------
    # FRAMES
    # check for the provided dynamic frames
    if isinstance(frames, list):
        # Can be given in three ways:
        # * a 1D list (duration of each frame is listed)
        # * a more concise 2D list--repetition and duration lists in
        #   each entry.  Must start with the 'def' entry.
        # * a 2D list with fluid timings: must start with the string
        #   'fluid' or 'timings'.  a 2D list with consecutive lists
        #   describing start and end of the time frame, [t0, t1];
        #   The number of time frames for this option is unlimited,
        #   provided the t0 and t1 are within the acquisition times.
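        # For example (hypothetical timings, in seconds):
        #   frames = [15, 15, 30, 30]                # 1D: four frames with the listed durations
        #   frames = ['def', [4, 15], [2, 30]]       # 2D 'def': 4 x 15 s frames, then 2 x 30 s frames
        #   frames = ['timings', [0, 30], [30, 90]]  # 2D 'fluid'/'timings': explicit [t0, t1] pairs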

        # 2D starting with entry 'fluid' or 'timings'
        if  isinstance(frames[0], basestring) and (frames[0]=='fluid' or frames[0]=='timings') \
            and all([isinstance(t,list) and len(t)==2 for t in frames[1:]]):
            t_frms = frames[1:]

        # if 2D definitions, starting with entry 'def':
        elif isinstance(frames[0], basestring) and frames[0]=='def' \
            and all([isinstance(t,list) and len(t)==2 for t in frames[1:]]):
            # get total time and list of all time frames
            dfrms = dynamic_timings(frames)
            t_frms = dfrms[1:]

        # if 1D:
        elif all([isinstance(t, (int, np.integer)) for t in frames]):
            # get total time and list of all time frames
            dfrms = dynamic_timings(frames)
            t_frms = dfrms[1:]

        else:
            log.error(
                'osemdyn: frames definitions are not given in the correct list format: 1D [15,15,30,30,...] or 2D list [[2,15], [2,30], ...]'
            )
    else:
        log.error(
            'osemdyn: provided dynamic frames definitions are neither a Python list nor a numpy array.'
        )
        raise TypeError('Wrong data type for dynamic frames')
    # number of dynamic time frames
    nfrm = len(t_frms)
    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # create folders for results
    if outpath == '':
        petdir = os.path.join(datain['corepath'], 'reconstructed')
        fmudir = os.path.join(datain['corepath'], 'mumap-obj')
        pvcdir = os.path.join(datain['corepath'], 'PRCL')
    else:
        petdir = os.path.join(outpath, 'PET')
        fmudir = os.path.join(outpath, 'mumap-obj')
        pvcdir = os.path.join(outpath, 'PRCL')

    # folder for co-registered mu-maps (for motion compensation)
    fmureg = os.path.join(fmudir, 'registered')
    # folder for affine transformation MR/CT->PET
    petaff = os.path.join(petdir, 'faffine')

    # folder for reconstructed images (dynamic or static depending on number of frames).
    if nfrm > 1:
        petimg = os.path.join(petdir, 'multiple-frames')
        pvcdir = os.path.join(pvcdir, 'multiple-frames')
    elif nfrm == 1:
        petimg = os.path.join(petdir, 'single-frame')
        pvcdir = os.path.join(pvcdir, 'single-frame')
    else:
        log.error('unrecognised time frame definitions!')
        raise TypeError('Unrecognised time frames!')
    # create now the folder
    nimpa.create_dir(petimg)
    # create folder
    nimpa.create_dir(petdir)
    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # MU-MAPS
    # get the mu-maps, if given;  otherwise will use blank mu-maps.
    if tAffine:
        muod = obtain_image(mu_o, imtype='object mu-map')
    else:
        muod = obtain_image(mu_o, Cnt=Cnt, imtype='object mu-map')

    # hardware mu-map
    muhd = obtain_image(mu_h, Cnt, imtype='hardware mu-map')

    # choose the mode of reconstruction based on the provided (or not) mu-maps
    if recmod == -1:
        if muod['exists'] and muhd['exists']:
            recmod = 3
        elif muod['exists'] or muhd['exists']:
            recmod = 1
            log.warning('partial mu-map:  scatter correction is switched off.')
        else:
            recmod = 0
            log.warning(
                'no mu-map provided: scatter and attenuation corrections are switched off.'
            )
    # -------------------------------------------------------------------------

    #import pdb; pdb.set_trace()

    # output dictionary
    output = {}
    output['recmod'] = recmod
    output['frames'] = t_frms
    output['#frames'] = nfrm

    # if an affine transformation is given, the baseline mu-map has to be provided as a NIfTI file or dictionary
    if not tAffine:
        log.debug('using the provided mu-map the same way for all frames.')
    else:
        if len(tAffine) != nfrm:
            log.error(
                'the number of affine transformations in the list has to be the same as the number of dynamic frames!'
            )
            raise IndexError('Inconsistent number of frames.')
        elif not isinstance(tAffine, list):
            log.error(
                'tAffine has to be a list of either 4x4 numpy arrays of affine transformations or a list of file path strings!'
            )
            raise IndexError('Expecting a list.')
        elif not 'fim' in muod:
            log.error(
                'when tAffine is given, the object mu-map has to be provided either as a dictionary or NIfTI file!'
            )
            raise NameError('No path to object mu-map.')

        # check if all are file path strings to the existing files
        if all([isinstance(t, basestring) for t in tAffine]):
            if all([os.path.isfile(t) for t in tAffine]):
                # the internal list of affine transformations
                faff_frms = tAffine
                log.debug(
                    'using provided paths to affine transformations for each dynamic frame.'
                )
            else:
                log.error('not all provided paths are valid!')
                raise IOError('Wrong paths.')
        # check if all are numpy arrays
        elif all([isinstance(t, (np.ndarray, np.generic)) for t in tAffine]):
            # create the folder for dynamic affine transformations
            nimpa.create_dir(petaff)
            faff_frms = []
            for i in range(nfrm):
                fout = os.path.join(petaff, 'affine_frame(' + str(i) + ').txt')
                np.savetxt(fout, tAffine[i], fmt='%3.9f')
                faff_frms.append(fout)
            log.debug(
                'using provided numpy arrays of affine transformations for each dynamic frame.'
            )
        else:
            raise StandardError(
                'Affine transformations for each dynamic frame could not be established.'
            )

        # -------------------------------------------------------------------------------------
        # get ref image for mu-map resampling
        # -------------------------------------------------------------------------------------
        if 'fmuref' in muod:
            fmuref = muod['fmuref']
            log.debug(
                'reusing the reference mu-map from the object mu-map dictionary.'
            )
        else:
            # create folder if it doesn't exist
            nimpa.create_dir(fmudir)
            # ref file name
            fmuref = os.path.join(fmudir, 'muref.nii.gz')
            # ref affine
            B = image_affine(datain, Cnt, gantry_offset=False)
            # ref image (blank)
            im = np.zeros((Cnt['SO_IMZ'], Cnt['SO_IMY'], Cnt['SO_IMX']),
                          dtype=np.float32)
            # store ref image
            nimpa.array2nii(im, B, fmuref)
            log.debug('generated a reference mu-map in ' + fmuref)
        # -------------------------------------------------------------------------------------

        output['fmuref'] = fmuref
        output['faffine'] = faff_frms

    # output list of intermediate file names for mu-maps and PET images (useful for dynamic imaging)
    if tAffine: output['fmureg'] = []
    if store_img_intrmd: output['fpeti'] = []

    # dynamic images in one numpy array
    dynim = np.zeros((nfrm, Cnt['SO_IMZ'], Cnt['SO_IMY'], Cnt['SO_IMX']),
                     dtype=np.float32)
    # if requested, also store the prompt, scatter and randoms sinograms (and scatter mask) for each frame
    if ret_sinos and itr > 1 and recmod > 2:
        dynmsk = np.zeros((nfrm, Cnt['NSN11'], Cnt['NSANGLES'], Cnt['NSBINS']),
                          dtype=np.float32)
        dynrsn = np.zeros((nfrm, Cnt['NSN11'], Cnt['NSANGLES'], Cnt['NSBINS']),
                          dtype=np.float32)
        dynssn = np.zeros((nfrm, Cnt['NSN11'], Cnt['NSANGLES'], Cnt['NSBINS']),
                          dtype=np.float32)
        dynpsn = np.zeros((nfrm, Cnt['NSN11'], Cnt['NSANGLES'], Cnt['NSBINS']),
                          dtype=np.float32)

    # import pdb; pdb.set_trace()

    # starting frame index with reasonable prompt data
    ifrmP = 0
    # iterate over frame index
    for ifrm in range(nfrm):
        # start time of a current (ifrm-th) dynamic frame
        t0 = int(t_frms[ifrm][0])
        # end time of a current (ifrm-th) dynamic frame
        t1 = int(t_frms[ifrm][1])
        # --------------
        # check if there is enough prompt data to do a reconstruction
        # --------------
        log.info('dynamic frame times t0, t1:%r, %r' % (t0, t1))
        if not histo:
            hst = mmrhist(datain, scanner_params, t0=t0, t1=t1)
        else:
            hst = histo
            log.info('using provided histogram')
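        # hst['phc'] and hst['dhc'] are (as produced by mmrhist) the prompt and
        # delayed-coincidence head curves; if delayeds amount to more than 99% of
        # prompts, the frame is essentially all randoms and reconstruction is skipped.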
        if np.sum(hst['dhc']) > 0.99 * np.sum(hst['phc']):
            log.warning(
                'random events constitute the vast majority of prompt events => omitting reconstruction'
            )
            ifrmP = ifrm + 1
            continue
        # --------------------
        # transform the mu-map if given the affine transformation for each frame
        if tAffine:
            # create the folder for aligned (registered for motion compensation) mu-maps
            nimpa.create_dir(fmureg)
            # the converted NIfTI image, resampled to the reference size
            fmu = os.path.join(
                fmureg, 'mumap_dyn_frm' + str(ifrm) + fcomment + '.nii.gz')
            # command for resampling
            if os.path.isfile(Cnt['RESPATH']):
                cmd = [
                    Cnt['RESPATH'], '-ref', fmuref, '-flo', muod['fim'],
                    '-trans', faff_frms[ifrm], '-res', fmu, '-pad', '0'
                ]
                if log.getEffectiveLevel() > logging.DEBUG:
                    cmd.append('-voff')
                call(cmd)
            else:
                log.error(
                    'path to the executable for resampling is incorrect!')
                raise IOError('Incorrect NiftyReg (resampling) executable.')
            # get the new mu-map from the just resampled file
            muodct = nimpa.getnii(fmu, output='all')
            muo = muodct['im']
            A = muodct['affine']
            muo[muo < 0] = 0
            output['fmureg'].append(fmu)
        else:
            muo = muod['im']
        #---------------------

        # output image file name
        if nfrm > 1:
            frmno = '_frm' + str(ifrm)
        else:
            frmno = ''

        # run OSEM reconstruction of a single time frame
        recimg = mmrrec.osemone(datain, [muhd['im'], muo],
                                hst,
                                scanner_params,
                                recmod=recmod,
                                itr=itr,
                                fwhm=fwhm,
                                outpath=petimg,
                                frmno=frmno,
                                fcomment=fcomment + '_i',
                                store_img=store_img_intrmd,
                                store_itr=store_itr,
                                ret_sinos=ret_sinos)
        # form dynamic numpy array
        dynim[ifrm, :, :, :] = recimg.im
        if ret_sinos and itr > 1 and recmod > 2:
            dynpsn[ifrm, :, :, :] = hst['psino']
            dynssn[ifrm, :, :, :] = recimg.ssn
            dynrsn[ifrm, :, :, :] = recimg.rsn
            dynmsk[ifrm, :, :, :] = recimg.amsk

        if store_img_intrmd: output['fpeti'].append(recimg.fpet)
        if nfrm == 1: output['tuple'] = recimg

    output['im'] = np.squeeze(dynim)
    if ret_sinos and itr > 1 and recmod > 2:
        output['sinos'] = {
            'psino': dynpsn,
            'ssino': dynssn,
            'rsino': dynrsn,
            'amask': dynmsk
        }

    # ----------------------------------------------------------------------
    # trim the PET image
    # images have to be stored for PVC
    if pvcroi: store_img_intrmd = True
    if trim:
        # create file name
        if 'lm_dcm' in datain:
            fnm = os.path.basename(datain['lm_dcm'])[:20]
        elif 'lm_ima' in datain:
            fnm = os.path.basename(datain['lm_ima'])[:20]
        # trim PET and upsample
        petu = nimpa.trimim(dynim,
                            affine=image_affine(datain, Cnt),
                            scale=trim_scale,
                            int_order=trim_interp,
                            outpath=petimg,
                            fname=fnm,
                            fcomment=fcomment,
                            store_img_intrmd=store_img_intrmd,
                            memlim=trim_memlim,
                            verbose=log.getEffectiveLevel() < logging.INFO)

        output.update({
            'trimmed': {
                'im': petu['im'],
                'fpet': petu['fimi'],
                'affine': petu['affine']
            }
        })
    # ----------------------------------------------------------------------

    # ----------------------------------------------------------------------
    #run PVC if requested and required input given
    if pvcroi:
        if not os.path.isfile(datain['T1lbl']):
            log.error(
                'no label image from T1 parcellations and/or ROI definitions!')
            raise StandardError('No ROIs')
        else:
            # get the PSF kernel for PVC
            if not psfkernel:
                psfkernel = nimpa.psf_measured(scanner='mmr', scale=trim_scale)
            else:
                if isinstance(
                        psfkernel,
                    (np.ndarray, np.generic)) and psfkernel.shape != (3, 17):
                    log.error(
                        'the PSF kernel has to be a numpy array with the shape of (3, 17)!'
                    )
                    raise IndexError('PSF: wrong shape or not a matrix')
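
        # A rough, hypothetical alternative to nimpa.psf_measured (not from the
        # source): a separable kernel with one normalised 1D Gaussian per axis;
        # the FWHM and voxel size below are assumed example values only.
        #   sig = 4.5 / (2 * np.sqrt(2 * np.log(2))) / 2.0   # ~4.5 mm FWHM, 2 mm voxels (assumed)
        #   x = np.arange(-8, 9)
        #   g = np.exp(-x**2 / (2. * sig**2)).astype(np.float32)
        #   psfkernel = np.tile(g / g.sum(), (3, 1))          # shape (3, 17)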

        #> file names for NIfTI images of PVC ROIs and PVC corrected PET
        froi = []
        fpvc = []

        #> perform PVC for each time frame
        dynpvc = np.zeros(petu['im'].shape, dtype=np.float32)
        for i in range(ifrmP, nfrm):
            # transform the parcellations (ROIs) if given the affine transformation for each frame
            if not tAffine:
                log.warning(
                    'affine transformations are not provided: they will be generated for each time frame.'
                )
                faffpvc = ''
                #raise StandardError('No affine transformation')
            else:
                faffpvc = faff_frms[i]
            # choose file name of individual PVC images
            if nfrm > 1:
                fcomment_pvc = '_frm' + str(i) + fcomment
            else:
                fcomment_pvc = fcomment
            #============================
            # perform PVC
            petpvc_dic = nimpa.pvc_iyang(petu['fimi'][i],
                                         datain,
                                         Cnt,
                                         pvcroi,
                                         psfkernel,
                                         tool=pvcreg_tool,
                                         itr=pvcitr,
                                         faff=faffpvc,
                                         fcomment=fcomment_pvc,
                                         outpath=pvcdir,
                                         store_rois=store_rois,
                                         store_img=store_img_intrmd)
            #============================
            if nfrm > 1:
                dynpvc[i, :, :, :] = petpvc_dic['im']
            else:
                dynpvc = petpvc_dic['im']

            fpvc.append(petpvc_dic['fpet'])

            if store_rois: froi.append(petpvc_dic['froi'])

        #> update output dictionary
        output.update({'impvc': dynpvc})
        if store_img_intrmd: output.update({'fpvc': fpvc})
        if store_rois: output.update({'froi': froi})
    # ----------------------------------------------------------------------

    if store_img:
        # description for saving NIFTI image
        # attenuation number: if only bed present then it is 0.5
        attnum = (1 * muhd['exists'] + 1 * muod['exists']) / 2.
        descrip =    'alg=osem'                     \
                    +';att='+str(attnum*(recmod>0)) \
                    +';sct='+str(1*(recmod>1))      \
                    +';spn='+str(Cnt['SPN'])        \
                    +';sub=14'                      \
                    +';itr='+str(itr)               \
                    +';fwhm='+str(fwhm)             \
                    +';nfrm='+str(nfrm)

        # squeeze the not needed dimensions
        dynim = np.squeeze(dynim)

        # NIfTI file name for the full PET image (single or multiple frame)

        # save the image to NIfTI file
        if nfrm == 1:
            t0 = hst['t0']
            t1 = hst['t1']
            if t1 == t0:
                t0 = 0
                t1 = hst['dur']
            fpet = os.path.join(
                    petimg,
                    os.path.basename(recimg.fpet)[:8] \
                    +'_t-'+str(t0)+'-'+str(t1)+'sec' \
                    +'_itr-'+str(itr) )
            fpeto = fpet + fcomment + '.nii.gz'
            nimpa.prc.array2nii(dynim[::-1, ::-1, :],
                                recimg.affine,
                                fpeto,
                                descrip=descrip)
        else:
            fpet = os.path.join(
                    petimg,
                    os.path.basename(recimg.fpet)[:8]\
                    +'_nfrm-'+str(nfrm)+'_itr-'+str(itr)
                )
            fpeto = fpet + fcomment + '.nii.gz'
            nimpa.prc.array2nii(dynim[:, ::-1, ::-1, :],
                                recimg.affine,
                                fpeto,
                                descrip=descrip)

        # get output file names for trimmed/PVC images
        if trim:
            # folder for trimmed and dynamic
            pettrim = os.path.join(petimg, 'trimmed')
            # make folder
            nimpa.create_dir(pettrim)
            # trimming scale added to NIfTI description
            descrip_trim = descrip + ';trim_scale=' + str(trim_scale)
            # file name for saving the trimmed image
            fpetu = os.path.join(
                pettrim,
                os.path.basename(fpet) + '_trimmed-upsampled-scale-' +
                str(trim_scale))
            # in case of PVC
            if pvcroi:
                # iterative Yang (iY) added to NIfTI description
                descrip_pvc = descrip_trim + ';pvc=iY'
                # file name for saving the PVC NIfTI image
                fpvc = fpetu + '_PVC' + fcomment + '.nii.gz'
                output['trimmed']['fpvc'] = fpvc

            # update the trimmed image file name
            fpetu += fcomment + '.nii.gz'
            # store the file name in the output dictionary
            output['trimmed']['fpet'] = fpetu

        output['fpet'] = fpeto

        # save images
        if nfrm == 1:
            if trim:
                nimpa.prc.array2nii(petu['im'][::-1, ::-1, :],
                                    petu['affine'],
                                    fpetu,
                                    descrip=descrip_trim)
            if pvcroi:
                nimpa.prc.array2nii(dynpvc[::-1, ::-1, :],
                                    petu['affine'],
                                    fpvc,
                                    descrip=descrip_pvc)
        elif nfrm > 1:
            if trim:
                nimpa.prc.array2nii(petu['im'][:, ::-1, ::-1, :],
                                    petu['affine'],
                                    fpetu,
                                    descrip=descrip_trim)
            if pvcroi:
                nimpa.prc.array2nii(dynpvc[:, ::-1, ::-1, :],
                                    petu['affine'],
                                    fpvc,
                                    descrip=descrip_pvc)

    if del_img_intrmd:
        if pvcroi:
            for fi in fpvc:
                os.remove(fi)
        if trim:
            for fi in petu['fimi']:
                os.remove(fi)

    return output
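
A hedged usage sketch of this (older) mmrchain interface; datain and scanner_params are assumed to have been prepared beforehand by the library's data and parameter helpers (not shown here), and all paths and timings below are placeholders.

# datain = {...}           # input data dictionary (list-mode, norm, mu-map files), prepared elsewhere
# scanner_params = {...}   # {'Cnt': ..., 'txLUT': ..., 'axLUT': ...}, prepared elsewhere

recon = mmrchain(
    datain, scanner_params,
    outpath='/data/output',                  # placeholder output folder
    frames=['def', [4, 300], [8, 600]],      # example: 4 x 5 min frames followed by 8 x 10 min frames
    mu_h='/data/mumap-hrdwr.nii.gz',         # placeholder hardware mu-map (file, array or dict)
    mu_o='/data/mumap-obj.nii.gz',           # placeholder object mu-map
    itr=4, fwhm=4.,
    store_img=True)

dyn = recon['im']          # reconstructed frames as a (squeezed) numpy array
tfrm = recon['frames']     # list of [t0, t1] frame timings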
Example #3
def mmrchain(
    datain,                 # all input data in a dictionary
    scanner_params,         # all scanner parameters in one dictionary
                            # containing constants, transaxial and axial
                            # LUTs.
    outpath=None,           # output path for results
    fout=None,              # full file name (any folders and extensions are disregarded)
    frames=None,            # definition of time frames, default: ['fluid', [0, 0]]
    mu_h=None,              # hardware mu-map.
    mu_o=None,              # object mu-map.
    tAffine=None,           # affine transformations for the mu-map for
                            # each time frame separately.
    itr=4,                  # number of OSEM iterations
    fwhm=0.,                # Gaussian Post-Smoothing FWHM
    psf=None,               # Resolution Modelling
    recmod=-1,              # reconstruction mode: -1: undefined, chosen
                            # automatically. 3: attenuation and scatter
                            # correction, 1: attenuation correction
                            # only, 0: no correction (randoms only).
    histo=None,             # input histogram (from list-mode data);
                            # if not given, it will be performed.
    decay_ref_time=None,    # decay corrects relative to the reference
                            # time provided; otherwise corrects to the scan
                            # start time.
    trim=False,
    trim_scale=2,
    trim_interp=0,          # interpolation for upsampling used in PVC
    trim_memlim=True,       # reduced use of memory for machines
                            # with limited memory (slow though)
    pvcroi=None,            # ROI used for PVC.  If undefined no PVC
                            # is performed.
    pvcreg_tool='niftyreg', # the registration tool used in PVC
    store_rois=False,       # stores the image of PVC ROIs
                            # as defined in pvcroi.
    pvcpsf=None,
    pvcitr=5,
    fcomment='',            # text comment used in the file name of
                            # generated image files
    ret_sinos=False,        # return prompt, scatter and randoms
                            # sinograms for each reconstruction
    ret_histo=False,        # return histogram (LM processing output) for
                            # each image frame
    store_img=True,
    store_img_intrmd=False,
    store_itr=None,         # store any reconstruction iteration in
                            # the list.  ignored if the list is empty.
    del_img_intrmd=False,
):
    if frames is None:
        frames = ['fluid', [0, 0]]
    if mu_h is None:
        mu_h = []
    if mu_o is None:
        mu_o = []
    if pvcroi is None:
        pvcroi = []
    if pvcpsf is None:
        pvcpsf = []
    if store_itr is None:
        store_itr = []

    # decompose all the scanner parameters and constants
    Cnt = scanner_params['Cnt']

    # -------------------------------------------------------------------------
    # HISTOGRAM PRECEDES FRAMES
    if histo is not None and 'psino' in histo:
        frames = ['fluid', [histo['t0'], histo['t1']]]
    else:
        histo = None
        log.warning(
            'the given histogram does not contain a prompt sinogram--will generate a histogram.')

    # FRAMES
    # check for the provided dynamic frames
    if isinstance(frames, list):
        # Can be given in three ways:
        # * a 1D list (duration of each frame is listed)
        # * a more concise 2D list--repetition and duration lists in
        #   each entry.  Must start with the 'def' entry.
        # * a 2D list with fluid timings: must start with the string
        #   'fluid' or 'timings'.  a 2D list with consecutive lists
        #   describing start and end of the time frame, [t0, t1];
        #   The number of time frames for this option is unlimited,
        #   provided the t0 and t1 are within the acquisition times.

        # 2D starting with entry 'fluid' or 'timings'
        if (isinstance(frames[0], str) and frames[0] in ('fluid', 'timings')
                and all(isinstance(t, list) and len(t) == 2 for t in frames[1:])):
            t_frms = frames[1:]
        # if 2D definitions, starting with entry 'def':
        elif (isinstance(frames[0], str) and frames[0] == 'def'
              and all(isinstance(t, list) and len(t) == 2 for t in frames[1:])):
            # get total time and list of all time frames
            dfrms = dynamic_timings(frames)
            t_frms = dfrms[1:]

        # if 1D:
        elif all(isinstance(t, Integral) for t in frames):
            # get total time and list of all time frames
            dfrms = dynamic_timings(frames)
            t_frms = dfrms[1:]

        else:
            log.error('osemdyn: frames definitions are not given in the correct list format:'
                      ' 1D [15,15,30,30,...] or 2D list [[2,15], [2,30], ...]')
    else:
        log.error(
            'provided dynamic frames definitions are incorrect (should be a list of definitions).')
        raise TypeError('Wrong data type for dynamic frames')
    # number of dynamic time frames
    nfrm = len(t_frms)
    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # create folders for results
    if outpath is None:
        petdir = os.path.join(datain['corepath'], 'reconstructed')
        fmudir = os.path.join(datain['corepath'], 'mumap-obj')
        pvcdir = os.path.join(datain['corepath'], 'PRCL')
    else:
        petdir = os.path.join(outpath, 'PET')
        fmudir = os.path.join(outpath, 'mumap-obj')
        pvcdir = os.path.join(outpath, 'PRCL')

    if fout is not None:
        # > get rid of folders
        fout = os.path.basename(fout)
        # > get rid of extension
        fout = fout.split('.')[0]

    # folder for co-registered mu-maps (for motion compensation)
    fmureg = os.path.join(fmudir, 'registered')
    # folder for affine transformation MR/CT->PET
    petaff = os.path.join(petdir, 'faffine')

    # folder for reconstructed images (dynamic or static depending on number of frames).
    if nfrm > 1:
        petimg = os.path.join(petdir, 'multiple-frames')
        pvcdir = os.path.join(pvcdir, 'multiple-frames')
    elif nfrm == 1:
        petimg = os.path.join(petdir, 'single-frame')
        pvcdir = os.path.join(pvcdir, 'single-frame')
    else:
        raise TypeError('Unrecognised/confusing time frames!')
    # create now the folder
    nimpa.create_dir(petimg)
    # create folder
    nimpa.create_dir(petdir)
    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # MU-MAPS
    # get the mu-maps, if given;  otherwise will use blank mu-maps.
    if tAffine is not None:
        muod = obtain_image(mu_o, imtype='object mu-map')
    else:
        muod = obtain_image(mu_o, Cnt=Cnt, imtype='object mu-map')

    # hardware mu-map
    muhd = obtain_image(mu_h, Cnt, imtype='hardware mu-map')

    # choose the mode of reconstruction based on the provided (or not) mu-maps
    if muod['exists'] and muhd['exists'] and recmod == -1:
        recmod = 3
    elif (muod['exists'] or muhd['exists']) and recmod == -1:
        recmod = 1
        log.warning('partial mu-map:  scatter correction is switched off.')
    else:
        if recmod == -1:
            recmod = 0
            log.warning(
                'no mu-map provided: scatter and attenuation corrections are switched off.')
    # -------------------------------------------------------------------------

    # import pdb; pdb.set_trace()

    # output dictionary
    output = {}
    output['recmod'] = recmod
    output['frames'] = t_frms
    output['#frames'] = nfrm

    # if an affine transformation is given,
    # the baseline mu-map has to be provided as a NIfTI file or dictionary
    if tAffine is None:
        log.info('using the provided mu-map the same way for all frames.')
    else:
        if len(tAffine) != nfrm:
            raise ValueError("the number of affine transformations in the list"
                             " has to be the same as the number of dynamic frames")
        elif not isinstance(tAffine, list):
            raise ValueError("tAffine has to be a list of either 4x4 numpy arrays"
                             " of affine transformations or a list of file path strings")
        elif 'fim' not in muod:
            raise NameError("when tAffine is given, the object mu-map has to be"
                            " provided either as a dictionary or NIfTI file")

        # check if all are file path strings to the existing files
        if all(isinstance(t, str) for t in tAffine):
            if all(os.path.isfile(t) for t in tAffine):
                # the internal list of affine transformations
                faff_frms = tAffine
                log.info('using provided paths to affine transformations for each dynamic frame.')
            else:
                raise IOError('not all provided paths are valid!')
        # check if all are numpy arrays
        elif all(isinstance(t, (np.ndarray, np.generic)) for t in tAffine):
            # create the folder for dynamic affine transformations
            nimpa.create_dir(petaff)
            faff_frms = []
            for i in range(nfrm):
                fout_ = os.path.join(petaff, 'affine_frame(' + str(i) + ').txt')
                np.savetxt(fout_, tAffine[i], fmt='%3.9f')
                faff_frms.append(fout_)
            log.info('using provided numpy arrays of affine transformations for each dynamic frame.')
        else:
            raise ValueError(
                'Affine transformations for each dynamic frame could not be established.')

        # -------------------------------------------------------------------------------------
        # get ref image for mu-map resampling
        # -------------------------------------------------------------------------------------
        if 'fmuref' in muod:
            fmuref = muod['fmuref']
            log.info('reusing the reference mu-map from the object mu-map dictionary.')
        else:
            # create folder if it doesn't exist
            nimpa.create_dir(fmudir)
            # ref file name
            fmuref = os.path.join(fmudir, 'muref.nii.gz')
            # ref affine
            B = image_affine(datain, Cnt, gantry_offset=False)
            # ref image (blank)
            im = np.zeros((Cnt['SO_IMZ'], Cnt['SO_IMY'], Cnt['SO_IMX']), dtype=np.float32)
            # store ref image
            nimpa.array2nii(im, B, fmuref)
            log.info('generated a reference mu-map in:\n{}'.format(fmuref))
        # -------------------------------------------------------------------------------------

        output['fmuref'] = fmuref
        output['faffine'] = faff_frms

    # output list of intermediate file names for mu-maps and PET images
    # (useful for dynamic imaging)
    if tAffine is not None: output['fmureg'] = []

    if store_img_intrmd:
        output['fpeti'] = []
        if fwhm > 0:
            output['fsmoi'] = []

    # > number of 3D sinograms
    if Cnt['SPN'] == 1:
        snno = Cnt['NSN1']
    elif Cnt['SPN'] == 11:
        snno = Cnt['NSN11']
    else:
        raise ValueError('unrecognised span: {}'.format(Cnt['SPN']))
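    # (for the Siemens mMR these are typically 4084 sinograms in span-1 and 837 in span-11 -- assumed values)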

    # dynamic images in one numpy array
    dynim = np.zeros((nfrm, Cnt['SO_IMZ'], Cnt['SO_IMY'], Cnt['SO_IMX']), dtype=np.float32)
    # if requested, also store the prompt, scatter and randoms sinograms (and scatter mask) for each frame
    if ret_sinos and itr > 1 and recmod > 2:
        dynmsk = np.zeros((nfrm, Cnt['NSEG0'], Cnt['NSANGLES'], Cnt['NSBINS']), dtype=np.float32)
        dynrsn = np.zeros((nfrm, snno, Cnt['NSANGLES'], Cnt['NSBINS']), dtype=np.float32)
        dynssn = np.zeros((nfrm, snno, Cnt['NSANGLES'], Cnt['NSBINS']), dtype=np.float32)
        dynpsn = np.zeros((nfrm, snno, Cnt['NSANGLES'], Cnt['NSBINS']), dtype=np.float32)

    # > returning dictionary of histograms if requested
    if ret_histo:
        hsts = {}

    # import pdb; pdb.set_trace()

    # starting frame index with reasonable prompt data
    ifrmP = 0
    # iterate over frame index
    for ifrm in range(nfrm):
        # start time of a current (ifrm-th) dynamic frame
        t0 = int(t_frms[ifrm][0])
        # end time of a current (ifrm-th) dynamic frame
        t1 = int(t_frms[ifrm][1])
        # --------------
        # check if there is enough prompt data to do a reconstruction
        # --------------
        log.info('dynamic frame times t0={}, t1={}:'.format(t0, t1))
        if histo is None:
            hst = mmrhist(datain, scanner_params, t0=t0, t1=t1)
        else:
            hst = histo
            log.info(
                dedent('''\
                ------------------------------------------------------
                using provided histogram
                ------------------------------------------------------'''))

        if ret_histo:
            hsts[str(t0) + '-' + str(t1)] = hst

        if np.sum(hst['dhc']) > 0.99 * np.sum(hst['phc']):
            log.warning(
                dedent('''\
                ===========================================================================
                amount of randoms is the greater part of prompts => omitting reconstruction
                ==========================================================================='''))
            ifrmP = ifrm + 1
            continue
        # --------------------
        # transform the mu-map if given the affine transformation for each frame
        if tAffine is not None:
            # create the folder for aligned (registered for motion compensation) mu-maps
            nimpa.create_dir(fmureg)
            # the converted NIfTI image, resampled to the reference size
            fmu = os.path.join(fmureg, 'mumap_dyn_frm' + str(ifrm) + fcomment + '.nii.gz')
            # command for resampling
            if os.path.isfile(Cnt['RESPATH']):
                cmd = [
                    Cnt['RESPATH'], '-ref', fmuref, '-flo', muod['fim'], '-trans', faff_frms[ifrm],
                    '-res', fmu, '-pad', '0']
                if log.getEffectiveLevel() > logging.INFO:
                    cmd.append('-voff')
                call(cmd)
            else:
                raise IOError('Incorrect path to NiftyReg (resampling) executable.')
            # get the new mu-map from the just resampled file
            muodct = nimpa.getnii(fmu, output='all')
            muo = muodct['im']
            muo[muo < 0] = 0
            output['fmureg'].append(fmu)
        else:
            muo = muod['im']
        # ---------------------

        # output image file name
        if nfrm > 1:
            frmno = '_frm' + str(ifrm)
        else:
            frmno = ''

        # run OSEM reconstruction of a single time frame
        recimg = mmrrec.osemone(datain, [muhd['im'], muo], hst, scanner_params,
                                decay_ref_time=decay_ref_time, recmod=recmod, itr=itr, fwhm=fwhm,
                                psf=psf, outpath=petimg, frmno=frmno, fcomment=fcomment + '_i',
                                store_img=store_img_intrmd, store_itr=store_itr, fout=fout,
                                ret_sinos=ret_sinos)

        # form dynamic Numpy array
        if fwhm > 0:
            dynim[ifrm, :, :, :] = recimg.imsmo
        else:
            dynim[ifrm, :, :, :] = recimg.im

        if ret_sinos and itr > 1 and recmod > 2:
            dynpsn[ifrm, :, :, :] = np.squeeze(hst['psino'])
            dynssn[ifrm, :, :, :] = np.squeeze(recimg.ssn)
            dynrsn[ifrm, :, :, :] = np.squeeze(recimg.rsn)
            dynmsk[ifrm, :, :, :] = np.squeeze(recimg.amsk)

        if store_img_intrmd:
            output['fpeti'].append(recimg.fpet)
            if fwhm > 0:
                output['fsmoi'].append(recimg.fsmo)

        if nfrm == 1: output['tuple'] = recimg

    output['im'] = np.squeeze(dynim)

    if ret_sinos and itr > 1 and recmod > 2:
        output['sinos'] = {
            'psino': np.squeeze(dynpsn), 'ssino': np.squeeze(dynssn), 'rsino': np.squeeze(dynrsn),
            'amask': np.squeeze(dynmsk)}

    if ret_histo:
        output['hst'] = hsts

    # ----------------------------------------------------------------------
    # trim the PET image
    # images have to be stored for PVC
    if pvcroi: store_img_intrmd = True
    if trim:
        # create file name
        if 'lm_dcm' in datain:
            fnm = os.path.basename(datain['lm_dcm'])[:20]
        elif 'lm_ima' in datain:
            fnm = os.path.basename(datain['lm_ima'])[:20]
        # trim PET and upsample
        petu = nimpa.imtrimup(dynim, affine=image_affine(datain, Cnt), scale=trim_scale,
                              int_order=trim_interp, outpath=petimg, fname=fnm, fcomment=fcomment,
                              store_img_intrmd=store_img_intrmd, memlim=trim_memlim,
                              verbose=log.getEffectiveLevel())

        output.update({
            'trimmed': {'im': petu['im'], 'fpet': petu['fimi'], 'affine': petu['affine']}})
    # ----------------------------------------------------------------------

    # ----------------------------------------------------------------------
    # run PVC if requested and required input given
    if pvcroi:
        if not os.path.isfile(datain['T1lbl']):
            raise Exception('No labels and/or ROIs image definitions found!')
        else:
            # get the PSF kernel for PVC
            if not pvcpsf:
                pvcpsf = nimpa.psf_measured(scanner='mmr', scale=trim_scale)
            else:
                if (
                    isinstance(pvcpsf, (np.ndarray, np.generic)) and
                    pvcpsf.shape != (3, 2 * Cnt['RSZ_PSF_KRNL'] + 1)
                ):  # yapf: disable
                    raise ValueError(
                        'the PSF kernel has to be a numpy array with the shape of ({},{})'.format(
                            3, 2 * Cnt['RSZ_PSF_KRNL'] + 1))

        # > file names for NIfTI images of PVC ROIs and PVC corrected PET
        froi = []
        fpvc = []

        # > perform PVC for each time frame
        dynpvc = np.zeros(petu['im'].shape, dtype=np.float32)
        for i in range(ifrmP, nfrm):
            # transform the parcellations (ROIs) if given the affine transformation for each frame
            if tAffine is None:
                log.warning(
                    'affine transformations are not provided: they will be generated for each time frame.')
                faffpvc = None
                # raise StandardError('No affine transformation')
            else:
                faffpvc = faff_frms[i]

            # choose file name of individual PVC images
            if nfrm > 1:
                fcomment_pvc = '_frm' + str(i) + fcomment
            else:
                fcomment_pvc = fcomment
            # ===========================
            # perform PVC
            petpvc_dic = nimpa.pvc_iyang(petu['fimi'][i], datain, Cnt, pvcroi, pvcpsf,
                                         tool=pvcreg_tool, itr=pvcitr, faff=faffpvc,
                                         fcomment=fcomment_pvc, outpath=pvcdir,
                                         store_rois=store_rois, store_img=store_img_intrmd)
            # ===========================
            if nfrm > 1:
                dynpvc[i, :, :, :] = petpvc_dic['im']
            else:
                dynpvc = petpvc_dic['im']
            fpvc.append(petpvc_dic['fpet'])

            if store_rois: froi.append(petpvc_dic['froi'])

        # > update output dictionary
        output.update({'impvc': dynpvc})
        output['fprc'] = petpvc_dic['fprc']
        output['imprc'] = petpvc_dic['imprc']

        if store_img_intrmd: output.update({'fpvc': fpvc})
        if store_rois: output.update({'froi': froi})
    # ----------------------------------------------------------------------

    if store_img:
        # description for saving NIFTI image
        # attenuation number: if only bed present then it is 0.5
        attnum = (1 * muhd['exists'] + 1 * muod['exists']) / 2.
        descrip = (f"alg=osem"
                   f";att={attnum*(recmod>0)}"
                   f";sct={1*(recmod>1)}"
                   f";spn={Cnt['SPN']}"
                   f";sub=14"
                   f";itr={itr}"
                   f";fwhm={fwhm}"
                   f";psf={psf}"
                   f";nfrm={nfrm}")

        # squeeze the not needed dimensions
        dynim = np.squeeze(dynim)

        # NIfTI file name for the full PET image (single or multiple frame)

        # save the image to NIfTI file
        if nfrm == 1:
            t0 = hst['t0']
            t1 = hst['t1']
            if t1 == t0:
                t0 = 0
                t1 = hst['dur']
            # > --- file naming and saving ---
            if fout is None:
                fpet = os.path.join(
                    petimg,
                    os.path.basename(recimg.fpet)[:8] + f'_t-{t0}-{t1}sec_itr-{itr}')
                fpeto = f"{fpet}{fcomment}.nii.gz"
            else:
                fpeto = os.path.join(petimg, os.path.basename(fout) + '.nii.gz')

            nimpa.prc.array2nii(dynim[::-1, ::-1, :], recimg.affine, fpeto, descrip=descrip)
            # > --- ---
        else:
            if fout is None:
                fpet = os.path.join(petimg,
                                    os.path.basename(recimg.fpet)[:8] + f'_nfrm-{nfrm}_itr-{itr}')
                fpeto = f"{fpet}{fcomment}.nii.gz"
            else:
                fpeto = os.path.join(petimg, os.path.basename(fout) + f'_nfrm-{nfrm}.nii.gz')

            nimpa.prc.array2nii(dynim[:, ::-1, ::-1, :], recimg.affine, fpeto, descrip=descrip)

        output['fpet'] = fpeto

        # get output file names for trimmed/PVC images
        if trim:
            # folder for trimmed and dynamic
            pettrim = os.path.join(petimg, 'trimmed')
            # make folder
            nimpa.create_dir(pettrim)
            # trimming scale added to NIfTI description
            descrip_trim = f'{descrip};trim_scale={trim_scale}'
            # file name for saving the trimmed image
            if fout is None:
                fpetu = os.path.join(
                    pettrim,
                    os.path.basename(fpet) + f'_trimmed-upsampled-scale-{trim_scale}')
            else:
                fpetu = os.path.join(
                    pettrim,
                    os.path.basename(fout) + f'_trimmed-upsampled-scale-{trim_scale}')
            # in case of PVC
            if pvcroi:
                # iterative Yang (iY) added to NIfTI description
                descrip_pvc = f'{descrip_trim};pvc=iY'
                # file name for saving the PVC NIfTI image
                fpvc = f"{fpetu}_PVC{fcomment}.nii.gz"
                output['trimmed']['fpvc'] = fpvc

            # update the trimmed image file name
            fpetu += f'{fcomment}.nii.gz'
            # store the file name in the output dictionary
            output['trimmed']['fpet'] = fpetu

        # save images
        if nfrm == 1:
            if trim:
                nimpa.prc.array2nii(petu['im'][::-1, ::-1, :], petu['affine'], fpetu,
                                    descrip=descrip_trim)
            if pvcroi:
                nimpa.prc.array2nii(dynpvc[::-1, ::-1, :], petu['affine'], fpvc,
                                    descrip=descrip_pvc)
        elif nfrm > 1:
            if trim:
                nimpa.prc.array2nii(petu['im'][:, ::-1, ::-1, :], petu['affine'], fpetu,
                                    descrip=descrip_trim)
            if pvcroi:
                nimpa.prc.array2nii(dynpvc[:, ::-1, ::-1, :], petu['affine'], fpvc,
                                    descrip=descrip_pvc)

    if del_img_intrmd:
        if pvcroi:
            for fi in fpvc:
                os.remove(fi)
        if trim:
            for fi in petu['fimi']:
                os.remove(fi)

    return output
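
A hedged sketch of the newer interface using a single pre-computed histogram for a static reconstruction; datain, scanner_params and the mu-map inputs are placeholders as above.

# histogram the list-mode data once over an example 0-600 s window
hst = mmrhist(datain, scanner_params, t0=0, t1=600)

recon = mmrchain(
    datain, scanner_params,
    histo=hst,                           # frame timing is then taken from hst['t0'] and hst['t1']
    mu_h='/data/mumap-hrdwr.nii.gz',     # placeholder hardware mu-map
    mu_o='/data/mumap-obj.nii.gz',       # placeholder object mu-map
    itr=4, fwhm=0., psf=None,
    ret_histo=True,                      # the histogram(s) are also returned in recon['hst']
    outpath='/data/output')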
Example #4
def osemone(datain,
            mumaps,
            hst,
            scanner_params,
            recmod=3,
            itr=4,
            fwhm=0.,
            psf=None,
            mask_radius=29.,
            decay_ref_time=None,
            attnsino=None,
            sctsino=None,
            randsino=None,
            normcomp=None,
            emmskS=False,
            frmno='',
            fcomment='',
            outpath=None,
            fout=None,
            store_img=False,
            store_itr=None,
            ret_sinos=False):
    '''
    OSEM image reconstruction with several modes
    (with/without scatter and/or attenuation correction)

    Args:
      psf: Reconstruction with PSF, passed to `psf_config`
    '''

    # > Get particular scanner parameters: Constants, transaxial and axial LUTs
    Cnt = scanner_params['Cnt']
    txLUT = scanner_params['txLUT']
    axLUT = scanner_params['axLUT']

    # ---------- sort out OUTPUT ------------
    # -output file name for the reconstructed image
    if outpath is None:
        opth = os.path.join(datain['corepath'], 'reconstructed')
    else:
        opth = outpath

    # > file output name (the path is ignored if given)
    if fout is not None:
        # > get rid of folders
        fout = os.path.basename(fout)
        # > get rid of extension
        fout = fout.split('.')[0]

    if store_img is True or store_itr is not None:
        mmraux.create_dir(opth)

    return_ssrb, return_mask = ret_sinos, ret_sinos

    # ----------

    log.info('reconstruction in mode: %d', recmod)

    # get object and hardware mu-maps
    muh, muo = mumaps

    # get the GPU version of the image dims
    mus = mmrimg.convert2dev(muo + muh, Cnt)

    # remove gaps from the prompt sino
    psng = mmraux.remgaps(hst['psino'], txLUT, Cnt)

    # ========================================================================
    # GET NORM
    # -------------------------------------------------------------------------
    if normcomp is None:
        ncmp, _ = mmrnorm.get_components(datain, Cnt)
    else:
        ncmp = normcomp
        log.warning('using user-defined normalisation components')
    nsng = mmrnorm.get_norm_sino(datain,
                                 scanner_params,
                                 hst,
                                 normcomp=ncmp,
                                 gpu_dim=True)
    # ========================================================================

    # ========================================================================
    # ATTENUATION FACTORS FOR COMBINED OBJECT AND BED MU-MAP
    # -------------------------------------------------------------------------
    # > combine attenuation and norm together depending on reconstruction mode
    if recmod == 0:
        asng = np.ones(psng.shape, dtype=np.float32)
    else:
        # > check if the attenuation sino is given as an array
        if isinstance(attnsino, np.ndarray) \
                and attnsino.shape==(Cnt['NSN11'], Cnt['NSANGLES'], Cnt['NSBINS']):
            asng = mmraux.remgaps(attnsino, txLUT, Cnt)
            log.info('using provided attenuation factor sinogram')
        elif isinstance(attnsino, np.ndarray) \
                and attnsino.shape==(Cnt['Naw'], Cnt['NSN11']):
            asng = attnsino
            log.info('using provided attenuation factor sinogram')
        else:
            asng = cu.zeros(psng.shape, dtype=np.float32)
            petprj.fprj(asng.cuvec,
                        cu.asarray(mus).cuvec, txLUT, axLUT,
                        np.array([-1], dtype=np.int32), Cnt, 1)
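            # the trailing flag appears to switch the forward projector into
            # attenuation mode, so asng ends up holding the attenuation factors
            # exp(-line integral of mu) for the combined object+bed mu-map (mus).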
    # > combine attenuation and normalisation
    ansng = asng * nsng
    # ========================================================================

    # ========================================================================
    # Randoms
    # -------------------------------------------------------------------------
    if isinstance(randsino, np.ndarray) \
            and randsino.shape==(Cnt['NSN11'], Cnt['NSANGLES'], Cnt['NSBINS']):
        rsino = randsino
        rsng = mmraux.remgaps(randsino, txLUT, Cnt)
    else:
        rsino, snglmap = randoms(hst, scanner_params)
        rsng = mmraux.remgaps(rsino, txLUT, Cnt)
    # ========================================================================

    # ========================================================================
    # SCAT
    # -------------------------------------------------------------------------
    if recmod == 2:
        if sctsino is not None:
            ssng = mmraux.remgaps(sctsino, txLUT, Cnt)
        elif sctsino is None and os.path.isfile(datain['em_crr']):
            emd = nimpa.getnii(datain['em_crr'])
            ssn = vsm(
                datain,
                mumaps,
                emd['im'],
                scanner_params,
                histo=hst,
                rsino=rsino,
                prcnt_scl=0.1,
                emmsk=False,
            )
            ssng = mmraux.remgaps(ssn, txLUT, Cnt)
        else:
            raise ValueError(
                "No emission image available for scatter estimation! " +
                " Check if it's present or the path is correct.")
    else:
        ssng = np.zeros(rsng.shape, dtype=rsng.dtype)
    # ========================================================================

    log.info('------ OSEM (%d) -------', itr)
    # ------------------------------------
    Sn = 14  # number of subsets

    # -get one subset to get number of projection bins in a subset
    Sprj, s = get_subsets14(0, scanner_params)
    Nprj = len(Sprj)
    # -init subset array and sensitivity image for a given subset
    sinoTIdx = np.zeros((Sn, Nprj + 1), dtype=np.int32)
    # -init sensitivity images for each subset
    imgsens = np.zeros((Sn, Cnt['SZ_IMY'], Cnt['SZ_IMX'], Cnt['SZ_IMZ']),
                       dtype=np.float32)
    tmpsens = cu.zeros((Cnt['SZ_IMY'], Cnt['SZ_IMX'], Cnt['SZ_IMZ']),
                       dtype=np.float32)
    for n in range(Sn):
        # first number of projection for the given subset
        sinoTIdx[n, 0] = Nprj
        sinoTIdx[n, 1:], s = get_subsets14(n, scanner_params)
        # sensitivity image
        petprj.bprj(tmpsens.cuvec,
                    cu.asarray(ansng[sinoTIdx[n, 1:], :]).cuvec, txLUT, axLUT,
                    sinoTIdx[n, 1:], Cnt)
        imgsens[n] = tmpsens
    del tmpsens
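    # each row of imgsens is the backprojection of the combined normalisation and
    # attenuation factors (ansng) over that subset's projections -- the sensitivity
    # (denominator) image used in the corresponding OSEM subset update.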
    # -------------------------------------

    # -mask for reconstructed image.  anything outside it is set to zero
    msk = mmrimg.get_cylinder(
        Cnt, rad=mask_radius, xo=0, yo=0, unival=1, gpu_dim=True) > 0.9

    # -init image
    img = np.ones((Cnt['SZ_IMY'], Cnt['SZ_IMX'], Cnt['SZ_IMZ']),
                  dtype=np.float32)

    # -decay correction
    lmbd = np.log(2) / resources.riLUT[Cnt['ISOTOPE']]['thalf']
    if Cnt['DCYCRR'] and 't0' in hst and 'dur' in hst:
        # > decay correct to the reference time (e.g., injection time) if provided
        # > otherwise correct in reference to the scan start time (using the time
        # > past from the start to the start time frame)
        if decay_ref_time is not None:
            tref = decay_ref_time
        else:
            tref = hst['t0']

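        # > decay-correction factor: decay back to tref combined with the
        # > average-activity correction over the frame duration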
        dcycrr = np.exp(
            lmbd * tref) * lmbd * hst['dur'] / (1 - np.exp(-lmbd * hst['dur']))
        # apply quantitative correction to the image
        qf = ncmp['qf'] / resources.riLUT[Cnt['ISOTOPE']]['BF'] / float(
            hst['dur'])
        qf_loc = ncmp['qf_loc']

    elif not Cnt['DCYCRR'] and 't0' in hst and 'dur' in hst:
        dcycrr = 1.
        # apply quantitative correction to the image
        qf = ncmp['qf'] / resources.riLUT[Cnt['ISOTOPE']]['BF'] / float(
            hst['dur'])
        qf_loc = ncmp['qf_loc']

    else:
        dcycrr = 1.
        qf = 1.
        qf_loc = 1.

    # -affine matrix for the reconstructed images
    B = mmrimg.image_affine(datain, Cnt)

    # resolution modelling
    psfkernel = psf_config(psf, Cnt)

    # -time it
    stime = time.time()

    # import pdb; pdb.set_trace()

    # ========================================================================
    # OSEM RECONSTRUCTION
    # -------------------------------------------------------------------------
    with trange(itr,
                desc="OSEM",
                disable=log.getEffectiveLevel() > logging.INFO,
                leave=log.getEffectiveLevel() <= logging.INFO) as pbar:

        for k in pbar:

            petprj.osem(img, psng, rsng, ssng, nsng, asng, sinoTIdx, imgsens,
                        msk, psfkernel, txLUT, axLUT, Cnt)

            if np.nansum(img) < 0.1:
                log.warning(
                    'it seems there is not enough true data to render a reasonable image'
                )
                # img[:]=0
                itr = k
                break
            if recmod >= 3 and k < itr - 1 and itr > 1:
                sct_time = time.time()
                sct = vsm(datain,
                          mumaps,
                          mmrimg.convert2e7(img, Cnt),
                          scanner_params,
                          histo=hst,
                          rsino=rsino,
                          emmsk=emmskS,
                          return_ssrb=return_ssrb,
                          return_mask=return_mask)

                if isinstance(sct, dict):
                    ssn = sct['sino']
                else:
                    ssn = sct

                ssng = mmraux.remgaps(ssn, txLUT, Cnt)
                pbar.set_postfix(scatter="%.3gs" % (time.time() - sct_time))
            # save images during reconstruction if requested
            if store_itr and (k + 1) in store_itr:
                im = mmrimg.convert2e7(img * (dcycrr * qf * qf_loc), Cnt)

                if fout is None:
                    fpet = os.path.join(opth, (
                        os.path.basename(datain['lm_bf'])[:16].replace(
                            '.', '-') +
                        f"{frmno}_t{hst['t0']}-{hst['t1']}sec_itr{k+1}{fcomment}_inrecon.nii.gz"
                    ))
                else:
                    fpet = os.path.join(
                        opth, fout + f'_itr{k+1}{fcomment}_inrecon.nii.gz')

                nimpa.array2nii(im[::-1, ::-1, :], B, fpet)

    log.info('recon time: %.3g', time.time() - stime)
    # ========================================================================

    log.info('applying decay correction of: %r', dcycrr)
    log.info('applying quantification factor: %r to the whole image', qf)
    log.info('for the frame duration of: %r', hst['dur'])

    # additional factor for making it quantitative in absolute terms (derived from measurements)
    img *= dcycrr * qf * qf_loc

    # ---- save images -----
    # -first convert to standard mMR image size
    im = mmrimg.convert2e7(img, Cnt)

    # -description text to NIfTI
    # -attenuation number: if only bed present then it is 0.5
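    # -(0: no mu-maps, 0.5: hardware/bed only, 1: hardware and object mu-maps)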
    attnum = (1 * (np.sum(muh) > 0.5) + 1 * (np.sum(muo) > 0.5)) / 2.
    descrip = (f"alg=osem"
               f";sub=14"
               f";att={attnum*(recmod>0)}"
               f";sct={1*(recmod>1)}"
               f";spn={Cnt['SPN']}"
               f";itr={itr}"
               f";fwhm=0"
               f";t0={hst['t0']}"
               f";t1={hst['t1']}"
               f";dur={hst['dur']}"
               f";qf={qf}")

    # > file name of the output reconstructed image
    # > (may be used later even if not stored now)
    if fout is None:
        fpet = os.path.join(
            opth,
            (os.path.basename(datain['lm_bf']).split('.')[0] +
             f"{frmno}_t{hst['t0']}-{hst['t1']}sec_itr{itr}{fcomment}.nii.gz"))
    else:
        fpet = os.path.join(opth, fout + f'_itr{itr}{fcomment}.nii.gz')

    if store_img:
        log.info('saving image to: %s', fpet)
        nimpa.array2nii(im[::-1, ::-1, :], B, fpet, descrip=descrip)

    im_smo = None
    fsmo = None
    if fwhm > 0:
        im_smo = ndi.gaussian_filter(im,
                                     fwhm2sig(fwhm, voxsize=Cnt['SZ_VOXY'] * 10),
                                     mode='mirror')

        if store_img:
            fsmo = fpet.split('.nii.gz')[0] + '_smo-' + str(fwhm).replace(
                '.', '-') + 'mm.nii.gz'
            log.info('saving smoothed image to: ' + fsmo)
            descrip = descrip.replace(';fwhm=0', f';fwhm={fwhm}')
            nimpa.array2nii(im_smo[::-1, ::-1, :], B, fsmo, descrip=descrip)

    # returning:
    # (0) E7 image [can be smoothed];
    # (1) file name of saved E7 image
    # (2) [optional] scatter sino
    # (3) [optional] single slice rebinned scatter
    # (4) [optional] mask for scatter scaling based on attenuation data
    # (5) [optional] random sino
    # if ret_sinos and recmod>=3:
    #     recout = namedtuple('recout', 'im, fpet, ssn, sssr, amsk, rsn')
    #     recout.im   = im
    #     recout.fpet = fout
    #     recout.ssn  = ssn
    #     recout.sssr = sssr
    #     recout.amsk = amsk
    #     recout.rsn  = rsino
    # else:
    #     recout = namedtuple('recout', 'im, fpet')
    #     recout.im   = im
    #     recout.fpet = fout

    if ret_sinos and recmod >= 3 and itr > 1:
        RecOut = namedtuple(
            'RecOut', 'im, fpet, imsmo, fsmo, affine, ssn, sssr, amsk, rsn')
        recout = RecOut(im, fpet, im_smo, fsmo, B, ssn, sct['ssrb'],
                        sct['mask'], rsino)
    else:
        RecOut = namedtuple('RecOut', 'im, fpet, imsmo, fsmo, affine')
        recout = RecOut(im, fpet, im_smo, fsmo, B)

    return recout
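The optional Gaussian post-smoothing above calls an fwhm2sig helper that is not shown in this example. A minimal sketch of the standard FWHM-to-sigma conversion it presumably performs (the function name, signature and voxel size are assumptions for illustration only):

import numpy as np

def fwhm2sig(fwhm, voxsize=1.0):
    # convert a Gaussian FWHM given in mm to a sigma expressed in voxels,
    # using the standard relation FWHM = 2*sqrt(2*ln(2)) * sigma
    return (fwhm / voxsize) / (2 * np.sqrt(2 * np.log(2)))

# e.g., a 4 mm FWHM on ~2.09 mm voxels corresponds to roughly 0.81 voxels of sigma
print(fwhm2sig(4.0, voxsize=2.09))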
Example #5
0
def osemone(datain,
            mumaps,
            hst,
            scanner_params,
            recmod=3,
            itr=4,
            fwhm=0.,
            mask_radius=29.,
            sctsino=np.array([]),
            outpath='',
            store_img=False,
            frmno='',
            fcomment='',
            store_itr=[],
            emmskS=False,
            ret_sinos=False,
            attnsino=None,
            randsino=None,
            normcomp=None):

    #---------- sort out OUTPUT ------------
    #-output file name for the reconstructed image, initially assume n/a
    fout = 'n/a'
    if store_img or store_itr:
        if outpath == '':
            opth = os.path.join(datain['corepath'], 'reconstructed')
        else:
            opth = outpath
        mmraux.create_dir(opth)

    if ret_sinos:
        return_ssrb = True
        return_mask = True
    else:
        return_ssrb = False
        return_mask = False

    #----------

    # Get particular scanner parameters: Constants, transaxial and axial LUTs
    Cnt = scanner_params['Cnt']
    txLUT = scanner_params['txLUT']
    axLUT = scanner_params['axLUT']

    import time
    from niftypet import nipet
    # from niftypet.nipet.sct import mmrsct
    # from niftypet.nipet.prj import mmrhist

    if Cnt['VERBOSE']: print('i> reconstruction in mode', recmod)

    # get object and hardware mu-maps
    muh, muo = mumaps

    # get the GPU version of the image dims
    mus = mmrimg.convert2dev(muo + muh, Cnt)

    if Cnt['SPN'] == 1:
        snno = Cnt['NSN1']
    elif Cnt['SPN'] == 11:
        snno = Cnt['NSN11']

    # remove gaps from the prompt sino
    psng = mmraux.remgaps(hst['psino'], txLUT, Cnt)

    #=========================================================================
    # GET NORM
    #-------------------------------------------------------------------------
    if normcomp is None:
        ncmp, _ = mmrnorm.get_components(datain, Cnt)
    else:
        ncmp = normcomp
        print('w> using user-defined normalisation components')
    nsng = mmrnorm.get_sinog(datain, hst, axLUT, txLUT, Cnt, normcomp=ncmp)
    #=========================================================================

    #=========================================================================
    # ATTENUATION FACTORS FOR COMBINED OBJECT AND BED MU-MAP
    #-------------------------------------------------------------------------
    #> combine attenuation and norm together depending on reconstruction mode
    if recmod == 0:
        asng = np.ones(psng.shape, dtype=np.float32)
    else:
        #> check if the attenuation sino is given as an array
        if isinstance(attnsino, np.ndarray) \
                and attnsino.shape==(Cnt['NSN11'], Cnt['NSANGLES'], Cnt['NSBINS']):
            asng = mmraux.remgaps(attnsino, txLUT, Cnt)
            print('i> using provided attenuation factor sinogram')
        elif isinstance(attnsino, np.ndarray) \
                and attnsino.shape==(Cnt['Naw'], Cnt['NSN11']):
            asng = attnsino
            print('i> using provided attenuation factor sinogram')
        else:
            asng = np.zeros(psng.shape, dtype=np.float32)
            petprj.fprj(asng, mus, txLUT, axLUT,
                        np.array([-1], dtype=np.int32), Cnt, 1)
    #> combine attenuation and normalisation
    ansng = asng * nsng
    #=========================================================================

    #=========================================================================
    # Randoms
    #-------------------------------------------------------------------------
    if isinstance(randsino, np.ndarray):
        rsino = randsino
        rsng = mmraux.remgaps(randsino, txLUT, Cnt)
    else:
        rsino, snglmap = nipet.randoms(hst, scanner_params)
        rsng = mmraux.remgaps(rsino, txLUT, Cnt)
    #=========================================================================

    #=========================================================================
    # SCAT
    #-------------------------------------------------------------------------
    if recmod == 2:
        if sctsino.size > 0:
            ssng = mmraux.remgaps(sctsino, txLUT, Cnt)
        elif sctsino.size == 0 and os.path.isfile(datain['em_crr']):
            emd = nimpa.getnii(datain['em_crr'])
            ssn = nipet.vsm(datain,
                            mumaps,
                            emd['im'],
                            hst,
                            rsino,
                            scanner_params,
                            prcnt_scl=0.1,
                            emmsk=False)
            ssng = mmraux.remgaps(ssn, txLUT, Cnt)
        else:
            print("e> no emission image available for scatter estimation! "
                  "Check if it's present or the path is correct.")
            sys.exit()
    else:
        ssng = np.zeros(rsng.shape, dtype=rsng.dtype)
    #=========================================================================

    if Cnt['VERBOSE']:
        print('\n>------ OSEM (', itr, ') -------\n')
    #------------------------------------
    Sn = 14  # number of subsets
    #-get one subset to get number of projection bins in a subset
    Sprj, s = get_subsets14(0, scanner_params)
    Nprj = len(Sprj)
    #-init subset array and sensitivity image for a given subset
    sinoTIdx = np.zeros((Sn, Nprj + 1), dtype=np.int32)
    #-init sensitivity images for each subset
    imgsens = np.zeros((Sn, Cnt['SZ_IMY'], Cnt['SZ_IMX'], Cnt['SZ_IMZ']),
                       dtype=np.float32)
    for n in range(Sn):
        sinoTIdx[n, 0] = Nprj  # first element: number of projections in this subset
        sinoTIdx[n, 1:], s = get_subsets14(n, scanner_params)
        # sensitivity image
        petprj.bprj(imgsens[n, :, :, :], ansng[sinoTIdx[n, 1:], :], txLUT,
                    axLUT, sinoTIdx[n, 1:], Cnt)
    #-------------------------------------

    #-mask for reconstructed image.  anything outside it is set to zero
    msk = mmrimg.get_cylinder(
        Cnt, rad=mask_radius, xo=0, yo=0, unival=1, gpu_dim=True) > 0.9

    #-init image
    img = np.ones((Cnt['SZ_IMY'], Cnt['SZ_IMX'], Cnt['SZ_IMZ']),
                  dtype=np.float32)

    #-decay correction
    lmbd = np.log(2) / resources.riLUT[Cnt['ISOTOPE']]['thalf']
    if Cnt['DCYCRR'] and 't0' in hst and 'dur' in hst:
        dcycrr = np.exp(lmbd * hst['t0']) * lmbd * hst['dur'] / (
            1 - np.exp(-lmbd * hst['dur']))
        # apply quantitative correction to the image
        qf = ncmp['qf'] / resources.riLUT[Cnt['ISOTOPE']]['BF'] / float(
            hst['dur'])
        qf_loc = ncmp['qf_loc']
    elif not Cnt['DCYCRR'] and 't0' in hst and 'dur' in hst:
        dcycrr = 1.
        # apply quantitative correction to the image
        qf = ncmp['qf'] / resources.riLUT[Cnt['ISOTOPE']]['BF'] / float(
            hst['dur'])
        qf_loc = ncmp['qf_loc']
    else:
        dcycrr = 1.
        qf = 1.
        qf_loc = 1.

    #-affine matrix for the reconstructed images
    B = mmrimg.image_affine(datain, Cnt)

    #-time it
    stime = time.time()

    # import pdb; pdb.set_trace()

    #=========================================================================
    # OSEM RECONSTRUCTION
    #-------------------------------------------------------------------------
    for k in trange(itr, disable=not Cnt['VERBOSE'], desc="OSEM"):
        petprj.osem(img, msk, psng, rsng, ssng, nsng, asng, imgsens, txLUT,
                    axLUT, sinoTIdx, Cnt)
        if np.nansum(img) < 0.1:
            print('---------------------------------------------------------------------')
            print('w> it seems there is not enough true data to render a reasonable image.')
            print('---------------------------------------------------------------------')
            #img[:]=0
            itr = k
            break
        if recmod >= 3 and k < itr - 1 and itr > 1:  # or (itr==1)
            sct_time = time.time()

            sct = nipet.vsm(datain,
                            mumaps,
                            mmrimg.convert2e7(img, Cnt),
                            hst,
                            rsino,
                            scanner_params,
                            emmsk=emmskS,
                            return_ssrb=return_ssrb,
                            return_mask=return_mask)

            if isinstance(sct, dict):
                ssn = sct['sino']
            else:
                ssn = sct

            ssng = mmraux.remgaps(ssn, txLUT, Cnt)

            if Cnt['VERBOSE']:
                print('i> scatter time:', (time.time() - sct_time))

        # save images during reconstruction if requested
        if store_itr and k in store_itr:
            im = mmrimg.convert2e7(img * (dcycrr * qf * qf_loc), Cnt)
            fout =  os.path.join(opth, os.path.basename(datain['lm_bf'])[:8] \
                + frmno +'_t'+str(hst['t0'])+'-'+str(hst['t1'])+'sec' \
                +'_itr'+str(k)+fcomment+'_inrecon.nii.gz')
            nimpa.array2nii(im[::-1, ::-1, :], B, fout)

    if Cnt['VERBOSE']: print('i> recon time:', (time.time() - stime))
    #=========================================================================

    if Cnt['VERBOSE']:
        print('i> applying decay correction of', dcycrr)
        print('i> applying quantification factor', qf,
              'to the whole image for the frame duration of:', hst['dur'])

    img *= dcycrr * qf * qf_loc  #additional factor for making it quantitative in absolute terms (derived from measurements)

    #---- save images -----
    #-first convert to standard mMR image size
    im = mmrimg.convert2e7(img, Cnt)

    #-description text to NIfTI
    #-attenuation number: if only bed present then it is 0.5
    attnum = (1 * (np.sum(muh) > 0.5) + 1 * (np.sum(muo) > 0.5)) / 2.
    descrip =   'alg=osem'+ \
                ';sub=14'+ \
                ';att='+str(attnum*(recmod>0))+ \
                ';sct='+str(1*(recmod>1))+ \
                ';spn='+str(Cnt['SPN'])+ \
                ';itr='+str(itr) +\
                ';fwhm='+str(fwhm) +\
                ';t0='+str(hst['t0']) +\
                ';t1='+str(hst['t1']) +\
                ';dur='+str(hst['dur']) +\
                ';qf='+str(qf)

    if fwhm > 0:
        im = ndi.gaussian_filter(im,
                                 fwhm2sig(fwhm, Cnt),
                                 mode='mirror')
    if store_img:
        fout =  os.path.join(opth, os.path.basename(datain['lm_bf'])[:8] \
                + frmno +'_t'+str(hst['t0'])+'-'+str(hst['t1'])+'sec' \
                +'_itr'+str(itr)+fcomment+'.nii.gz')
        if Cnt['VERBOSE']: print('i> saving image to:', fout)
        nimpa.array2nii(im[::-1, ::-1, :], B, fout, descrip=descrip)

    # returning:
    # (0) E7 image [can be smoothed];
    # (1) file name of saved E7 image
    # (2) [optional] scatter sino
    # (3) [optional] single slice rebinned scatter
    # (4) [optional] mask for scatter scaling based on attenuation data
    # (5) [optional] random sino
    # if ret_sinos and recmod>=3:
    #     recout = namedtuple('recout', 'im, fpet, ssn, sssr, amsk, rsn')
    #     recout.im   = im
    #     recout.fpet = fout
    #     recout.ssn  = ssn
    #     recout.sssr = sssr
    #     recout.amsk = amsk
    #     recout.rsn  = rsino
    # else:
    #     recout = namedtuple('recout', 'im, fpet')
    #     recout.im   = im
    #     recout.fpet = fout
    if ret_sinos and recmod >= 3 and itr > 1:
        RecOut = namedtuple('RecOut', 'im, fpet, affine, ssn, sssr, amsk, rsn')
        recout = RecOut(im, fout, B, ssn, sct['ssrb'], sct['mask'], rsino)
    else:
        RecOut = namedtuple('RecOut', 'im, fpet, affine')
        recout = RecOut(im, fout, B)

    return recout
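Both reconstruction examples above compute the same decay-correction factor before scaling the image. A small self-contained sketch of that formula (the F-18 half-life value below is an assumption used only for illustration):

import numpy as np

def decay_correction_factor(t0, dur, thalf):
    # decay-correct a frame starting at t0 [s] and lasting dur [s] for an
    # isotope with half-life thalf [s]: exp(lmbd*t0) decays back to the
    # reference time, and lmbd*dur / (1 - exp(-lmbd*dur)) accounts for the
    # average decay over the frame duration
    lmbd = np.log(2) / thalf
    return np.exp(lmbd * t0) * lmbd * dur / (1 - np.exp(-lmbd * dur))

# e.g., a 10-minute frame starting 30 minutes into an F-18 scan (~6586 s half-life)
print(decay_correction_factor(t0=1800, dur=600, thalf=6586.2))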
Example #6
0
def align_mumap(
    datain,
    scanner_params=None,
    outpath='',
    reg_tool='niftyreg',
    use_stored=False,
    hst=None,
    t0=0,
    t1=0,
    itr=2,
    faff='',
    fpet='',
    fcomment='',
    store=False,
    store_npy=False,
    petopt='ac',
    musrc='ute',         # another option is pct for mu-map source
    ute_name='UTE2',
    del_auxilary=True,
    verbose=False,
):
    '''
    Align a pCT or MR-derived mu-map to a PET image reconstructed to chosen
    specifications (e.g., with/without attenuation and scatter corrections)

    use_stored only works if hst or t0/t1 is given, but not when faff is.
    '''
    if scanner_params is None:
        scanner_params = {}

    # > output folder
    if outpath == '':
        opth = os.path.join(datain['corepath'], 'mumap-obj')
    else:
        opth = os.path.join(outpath, 'mumap-obj')

    # > create the folder, if not existent
    nimpa.create_dir(opth)

    # > get the timing of PET if affine not given
    if faff == '' and hst is not None and isinstance(hst, dict) and 't0' in hst:
        t0 = hst['t0']
        t1 = hst['t1']

    # > file name for the output mu-map
    fnm = 'mumap-' + musrc.upper()

    # > output dictionary
    mu_dct = {}

    # ---------------------------------------------------------------------------
    # > use the stored mu-map if requested
    if use_stored:
        fmu_stored = fnm + '-aligned-to_t'\
                     + str(t0)+'-'+str(t1)+'_'+petopt.upper()\
                     + fcomment
        fmupath = os.path.join(opth, fmu_stored + '.nii.gz')

        if os.path.isfile(fmupath):
            mudct_stored = nimpa.getnii(fmupath, output='all')
            # > create output dictionary
            mu_dct['im'] = mudct_stored['im']
            mu_dct['affine'] = mudct_stored['affine']
            # mu_dct['faff'] = faff
            return mu_dct
    # ---------------------------------------------------------------------------

    # > tmp folder for not aligned mu-maps
    tmpdir = os.path.join(opth, 'tmp')
    nimpa.create_dir(tmpdir)

    # > three ways of passing scanner constants <Cnt> are here decoded
    if 'Cnt' in scanner_params:
        Cnt = scanner_params['Cnt']
    elif 'SO_IMZ' in scanner_params:
        Cnt = scanner_params
    else:
        Cnt = rs.get_mmr_constants()

    # > if the affine is not provided, histogram the LM data for recon and registration
    if not os.path.isfile(faff):
        from niftypet.nipet.prj import mmrrec

        # -histogram the list data if needed
        if hst is None:
            from niftypet.nipet import mmrhist
            if 'txLUT' in scanner_params:
                hst = mmrhist(datain, scanner_params, t0=t0, t1=t1)
            else:
                raise ValueError('Full scanner parameters are not provided '
                                 'but are required for histogramming.')

    # ========================================================
    # -get hardware mu-map
    if 'hmumap' in datain and os.path.isfile(datain['hmumap']):
        muh = np.load(datain['hmumap'], allow_pickle=True)["hmu"]
        (log.info if verbose else log.debug)('loaded hardware mu-map from file:\n{}'.format(
            datain['hmumap']))
    elif outpath != '':
        hmupath = os.path.join(outpath, "mumap-hdw", "hmumap.npz")
        if os.path.isfile(hmupath):
            muh = np.load(hmupath, allow_pickle=True)["hmu"]
            datain["hmumap"] = hmupath
        else:
            raise IOError('Invalid path to the hardware mu-map')
    else:
        log.error('the hardware mu-map is required first.')
        raise IOError('Could not find the hardware mu-map!')
    # ========================================================
    # -check if T1w image is available
    if not {'MRT1W#', 'T1nii', 'T1bc', 'T1N4'}.intersection(datain):
        log.error('no MR T1w images found; they are required for co-registration!')
        raise IOError('T1w image could not be obtained!')
    # ========================================================

    # -if the affine is not given,
    # -it will be generated by reconstructing a PET image, with some or no corrections
    if not os.path.isfile(faff):
        # first recon pet to get the T1 aligned to it
        if petopt == 'qnt':
            # ---------------------------------------------
            # OPTION 1 (quantitative recon with all corrections using MR-based mu-map)
            # get UTE object mu-map (may not be in register with the PET data)
            mudic = obj_mumap(datain, Cnt, outpath=tmpdir, del_auxilary=del_auxilary)
            muo = mudic['im']
            # reconstruct PET image with UTE mu-map to which co-register T1w
            recout = mmrrec.osemone(datain, [muh, muo], hst, scanner_params, recmod=3, itr=itr,
                                    fwhm=0., fcomment=fcomment + '_QNT-UTE',
                                    outpath=os.path.join(outpath, 'PET',
                                                         'positioning'), store_img=True)
        elif petopt == 'nac':
            # ---------------------------------------------
            # OPTION 2 (recon without any corrections for scatter and attenuation)
            # reconstruct PET image with UTE mu-map to which co-register T1w
            muo = np.zeros(muh.shape, dtype=muh.dtype)
            recout = mmrrec.osemone(datain, [muh, muo], hst, scanner_params, recmod=1, itr=itr,
                                    fwhm=0., fcomment=fcomment + '_NAC',
                                    outpath=os.path.join(outpath, 'PET',
                                                         'positioning'), store_img=True)
        elif petopt == 'ac':
            # ---------------------------------------------
            # OPTION 3 (recon with attenuation correction only but no scatter)
            # reconstruct PET image with UTE mu-map to which co-register T1w
            mudic = obj_mumap(datain, Cnt, outpath=tmpdir, del_auxilary=del_auxilary)
            muo = mudic['im']

            recout = mmrrec.osemone(datain, [muh, muo], hst, scanner_params, recmod=1, itr=itr,
                                    fwhm=0., fcomment=fcomment + '_AC-UTE',
                                    outpath=os.path.join(outpath, 'PET',
                                                         'positioning'), store_img=True)

        fpet = recout.fpet
        mu_dct['fpet'] = fpet

        # ------------------------------
        if musrc == 'ute' and ute_name in datain and os.path.exists(datain[ute_name]):
            # change to NIfTI if the UTE sequence is in DICOM files (folder)
            if os.path.isdir(datain[ute_name]):
                fnew = os.path.basename(datain[ute_name])
                run([Cnt['DCM2NIIX'], '-f', fnew, datain[ute_name]])
                fute = glob.glob(os.path.join(datain[ute_name], fnew + '*nii*'))[0]
            elif os.path.isfile(datain[ute_name]):
                fute = datain[ute_name]

            # get the affine transformation
            if reg_tool == 'spm':
                regdct = nimpa.coreg_spm(fpet, fute,
                                         outpath=os.path.join(outpath, 'PET', 'positioning'))
            elif reg_tool == 'niftyreg':
                regdct = nimpa.affine_niftyreg(
                    fpet,
                    fute,
                    outpath=os.path.join(outpath, 'PET', 'positioning'),
                    executable=Cnt['REGPATH'],
                    omp=multiprocessing.cpu_count() / 2,                 # fcomment=fcomment,
                    rigOnly=True,
                    affDirect=False,
                    maxit=5,
                    speed=True,
                    pi=50,
                    pv=50,
                    smof=0,
                    smor=0,
                    rmsk=True,
                    fmsk=True,
                    rfwhm=15.,                                           # millimetres
                    rthrsh=0.05,
                    ffwhm=15.,                                           # millimetres
                    fthrsh=0.05,
                    verbose=verbose)
            else:
                raise ValueError('unknown registration tool requested')

            faff_mrpet = regdct['faff']

        elif musrc == 'pct':

            ft1w = nimpa.pick_t1w(datain)

            if reg_tool == 'spm':
                regdct = nimpa.coreg_spm(fpet, ft1w,
                                         outpath=os.path.join(outpath, 'PET', 'positioning'))
            elif reg_tool == 'niftyreg':
                regdct = nimpa.affine_niftyreg(
                    fpet,
                    ft1w,
                    outpath=os.path.join(outpath, 'PET', 'positioning'),
                    executable=Cnt['REGPATH'],
                    omp=multiprocessing.cpu_count() / 2,
                    rigOnly=True,
                    affDirect=False,
                    maxit=5,
                    speed=True,
                    pi=50,
                    pv=50,
                    smof=0,
                    smor=0,
                    rmsk=True,
                    fmsk=True,
                    rfwhm=15.,                                           # millimetres
                    rthrsh=0.05,
                    ffwhm=15.,                                           # millimetres
                    fthrsh=0.05,
                    verbose=verbose)
            else:
                raise ValueError('unknown registration tool requested')

            faff_mrpet = regdct['faff']

        else:
            raise IOError('Floating MR image not provided or is invalid.')

    else:
        faff_mrpet = faff
        regdct = {}
        if not os.path.isfile(fpet):
            raise IOError('e> the reference PET should be supplied with the affine.')

    # > output file name for the aligned mu-maps
    if musrc == 'pct':

        # > convert to mu-values before resampling to avoid artefacts with negative values
        nii = nib.load(datain['pCT'])
        img = nii.get_fdata(dtype=np.float32)
        img_mu = hu2mu(img)
        nii_mu = nib.Nifti1Image(img_mu, nii.affine)
        fflo = os.path.join(tmpdir, 'pct2mu-not-aligned.nii.gz')
        nib.save(nii_mu, fflo)

        freg = os.path.join(opth, 'pct2mu-aligned-' + fcomment + '.nii.gz')

    elif musrc == 'ute':
        freg = os.path.join(opth, 'UTE-res-tmp' + fcomment + '.nii.gz')
        if 'UTE' not in datain:
            fnii = 'converted-from-DICOM_'
            tstmp = nimpa.time_stamp(simple_ascii=True)
            # convert the DICOM mu-map images to nii
            if 'mumapDCM' not in datain:
                raise IOError('DICOMs with the UTE mu-map are not given.')
            run([Cnt['DCM2NIIX'], '-f', fnii + tstmp, '-o', opth, datain['mumapDCM']])
            # converted NIfTI files; pick one:
            fflo = glob.glob(os.path.join(opth, '*' + fnii + tstmp + '*.nii*'))[0]
        else:
            if os.path.isfile(datain['UTE']):
                fflo = datain['UTE']
            else:
                raise IOError('The provided NIfTI UTE path is not valid.')

    # > call the resampling routine to get the pCT/UTE in place
    if reg_tool == "spm":
        nimpa.resample_spm(fpet, fflo, faff_mrpet, fimout=freg, del_ref_uncmpr=True,
                           del_flo_uncmpr=True, del_out_uncmpr=True)
    else:
        nimpa.resample_niftyreg(fpet, fflo, faff_mrpet, fimout=freg, executable=Cnt['RESPATH'],
                                verbose=verbose)

    # -get the NIfTI of registered image
    nim = nib.load(freg)
    A = nim.affine
    imreg = nim.get_fdata(dtype=np.float32)
    imreg = imreg[:, ::-1, ::-1]
    imreg = np.transpose(imreg, (2, 1, 0))

    # -convert to mu-values; sort out the file name too.
    if musrc == 'pct':
        mu = imreg
    elif musrc == 'ute':
        mu = np.float32(imreg) / 1e4
        # -remove the converted file from DICOMs
        os.remove(fflo)
    else:
        raise NameError('Confused o_O')

    # > get rid of negatives and nans
    mu[mu < 0] = 0
    mu[np.isnan(mu)] = 0

    # > return image dictionary with the image itself and other parameters
    mu_dct['im'] = mu
    mu_dct['affine'] = A
    mu_dct['faff'] = faff_mrpet

    if store or store_npy:
        nimpa.create_dir(opth)
        if faff == '':
            fname = fnm + '-aligned-to_t'\
                    + str(t0)+'-'+str(t1)+'_'+petopt.upper()\
                    + fcomment
        else:
            fname = fnm + '-aligned-to-given-affine' + fcomment
    if store_npy:
        fnp = os.path.join(opth, fname + ".npz")
        np.savez(fnp, mu=mu, A=A)
    if store:
        # > NIfTI
        fmu = os.path.join(opth, fname + '.nii.gz')
        nimpa.array2nii(mu[::-1, ::-1, :], A, fmu)
        mu_dct['fim'] = fmu

    if del_auxilary:
        os.remove(freg)

        if musrc == 'ute' and not os.path.isfile(faff):
            os.remove(fute)
        shutil.rmtree(tmpdir)

    return mu_dct
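The alignment examples repeatedly flip and transpose arrays when moving between the NIfTI voxel order and the internal (z, y, x) layout used by the reconstruction code. A small illustrative round trip of that reordering (the array content and shape are arbitrary):

import numpy as np

# an illustrative NIfTI-ordered array (x, y, z)
im_nii = np.arange(24, dtype=np.float32).reshape(2, 3, 4)

# to the internal layout, as done for the registered mu-map above:
# flip the last two axes, then reorder to (z, y, x)
im_int = np.transpose(im_nii[:, ::-1, ::-1], (2, 1, 0))

# and back again; the (2, 1, 0) permutation is its own inverse
im_back = np.transpose(im_int, (2, 1, 0))[:, ::-1, ::-1]
assert np.array_equal(im_nii, im_back)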
Example #7
0
def hdw_mumap(datain, hparts, params, outpath='', use_stored=False, del_interm=True):
    '''Get hardware mu-map components, including bed, coils etc.'''
    # two ways of passing Cnt are here decoded
    if 'Cnt' in params:
        Cnt = params['Cnt']
    else:
        Cnt = params

    if outpath != '':
        fmudir = os.path.join(outpath, 'mumap-hdw')
    else:
        fmudir = os.path.join(datain['corepath'], 'mumap-hdw')

    nimpa.create_dir(fmudir)

    # if requested to use the stored hardware mu_map get it from the path in datain
    if use_stored and "hmumap" in datain and os.path.isfile(datain["hmumap"]):
        if datain['hmumap'].endswith(('.nii', '.nii.gz')):
            dct = nimpa.getnii(datain['hmumap'], output='all')
            hmu = dct['im']
            A = dct['affine']
            fmu = datain['hmumap']
        elif datain["hmumap"].endswith(".npz"):
            arr = np.load(datain["hmumap"], allow_pickle=True)
            hmu, A, fmu = arr["hmu"], arr["A"], arr["fmu"]
            log.info('loaded hardware mu-map from file: {}'.format(datain['hmumap']))
            fnp = datain['hmumap']
    elif outpath and os.path.isfile(os.path.join(fmudir, "hmumap.npz")):
        fnp = os.path.join(fmudir, "hmumap.npz")
        arr = np.load(fnp, allow_pickle=True)
        hmu, A, fmu = arr["hmu"], arr["A"], arr["fmu"]
        datain['hmumap'] = fnp
    # otherwise generate it from the parts through resampling the high resolution CT images
    else:
        hmupos = get_hmupos(datain, hparts, Cnt, outpath=outpath)
        # just to get the dims, get the ref image
        nimo = nib.load(hmupos[0]['niipath'])
        A = nimo.affine
        imo = nimo.get_fdata(dtype=np.float32)
        imo[:] = 0

        for i in hparts:
            fin = os.path.join(
                os.path.dirname(hmupos[0]['niipath']),
                'r' + os.path.basename(hmupos[i]['niipath']).split('.')[0] + '.nii.gz')
            nim = nib.load(fin)
            mu = nim.get_fdata(dtype=np.float32)
            mu[mu < 0] = 0

            imo += mu

        hdr = nimo.header
        hdr['cal_max'] = np.max(imo)
        hdr['cal_min'] = np.min(imo)
        fmu = os.path.join(os.path.dirname(hmupos[0]['niipath']), 'hardware_umap.nii.gz')
        hmu_nii = nib.Nifti1Image(imo, A)
        nib.save(hmu_nii, fmu)

        hmu = np.transpose(imo[:, ::-1, ::-1], (2, 1, 0))

        # save the objects to numpy arrays
        fnp = os.path.join(fmudir, "hmumap.npz")
        np.savez(fnp, hmu=hmu, A=A, fmu=fmu)
        # update the datain dictionary (assuming it is mutable)
        datain['hmumap'] = fnp

        if del_interm:
            for fname in glob.glob(os.path.join(fmudir, '_*.nii*')):
                os.remove(fname)
            for fname in glob.glob(os.path.join(fmudir, 'r_*.nii*')):
                os.remove(fname)

    # return image dictionary with the image itself and some other stats
    hmu_dct = {'im': hmu, 'fim': fmu, 'affine': A}
    if 'fnp' in locals():
        hmu_dct['fnp'] = fnp

    return hmu_dct
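The hardware mu-map above is cached to an hmumap.npz archive and re-loaded by the other examples (e.g., in align_mumap). A minimal sketch of that round trip (the path and the array shape are illustrative assumptions):

import numpy as np

hmu = np.zeros((127, 344, 344), dtype=np.float32)   # hardware mu-map (illustrative shape)
A = np.eye(4)                                        # affine matrix
np.savez('hmumap.npz', hmu=hmu, A=A, fmu='hardware_umap.nii.gz')

arr = np.load('hmumap.npz', allow_pickle=True)       # mirrors the loading calls above
hmu2, A2, fmu2 = arr['hmu'], arr['A'], str(arr['fmu'])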
Example #8
0
def obtain_image(img, Cnt=[], imtype=''):
    '''
    Obtain the image (hardware or object mu-map) from file,
    numpy array, dictionary or empty list (assuming blank then).
    The image has to have the dimensions of the PET image used as in Cnt['SO_IM[X-Z]'].
    '''
    from os import path
    log = logging.getLogger(__name__)

    # establishing what and if the image object has been provided
    # all findings go to the output dictionary
    output = {}
    if isinstance(img, dict):
        if Cnt and img['im'].shape != (Cnt['SO_IMZ'], Cnt['SO_IMY'],
                                       Cnt['SO_IMX']):
            log.error(
                'provided ' + imtype +
                ' via the dictionary has inconsistent dimensions compared to Cnt.'
            )
            raise ValueError('Wrong dimensions of the mu-map')
        else:
            output['im'] = img['im']
            output['exists'] = True
            output['fim'] = img['fim']
            if 'faff' in img: output['faff'] = img['faff']
            if 'fmuref' in img: output['fmuref'] = img['fmuref']
            if 'affine' in img: output['affine'] = img['affine']
            log.debug('using ' + imtype + ' from dictionary')

    elif isinstance(img, (np.ndarray, np.generic)):
        if Cnt and img.shape != (Cnt['SO_IMZ'], Cnt['SO_IMY'], Cnt['SO_IMX']):
            log.error(
                'provided ' + imtype +
                ' via the numpy array has inconsistent dimensions compared to Cnt.'
            )
            raise ValueError('Wrong dimensions of the mu-map')
        else:
            output['im'] = img
            output['exists'] = True
            output['fim'] = ''
            log.debug('using hardware mu-map from numpy array.')

    elif isinstance(img, str):
        if path.isfile(img):
            from niftypet import nimpa
            imdct = nimpa.getnii(img, output='all')
            output['im'] = imdct['im']
            output['affine'] = imdct['affine']
            if Cnt and output['im'].shape != (Cnt['SO_IMZ'], Cnt['SO_IMY'],
                                              Cnt['SO_IMX']):
                log.error(
                    'provided ' + imtype +
                    ' via file has inconsistent dimensions compared to Cnt.')
                raise ValueError('Wrong dimensions of the mu-map')
            else:
                output['exists'] = True
                output['fim'] = img
                log.debug('using ' + imtype + ' from NIfTI file.')
        else:
            log.error('provided ' + imtype + ' path is invalid.')
            return None
    elif isinstance(img, list):
        output['im'] = np.zeros((Cnt['SO_IMZ'], Cnt['SO_IMY'], Cnt['SO_IMX']),
                                dtype=np.float32)
        log.info(imtype + ' has not been provided -> using blank.')
        output['fim'] = ''
        output['exists'] = False
    #------------------------------------------------------------------------
    return output
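A minimal usage sketch of the obtain_image function above, with illustrative Cnt dimensions (the values are assumptions for illustration, not scanner constants taken from the library):

import logging
import numpy as np

Cnt = {'SO_IMZ': 127, 'SO_IMY': 344, 'SO_IMX': 344}

# an empty list yields a blank mu-map of the requested dimensions
blank = obtain_image([], Cnt=Cnt, imtype='object mu-map')
assert not blank['exists'] and blank['im'].shape == (127, 344, 344)

# a numpy array is accepted as long as it matches the dimensions in Cnt
mu = np.zeros((Cnt['SO_IMZ'], Cnt['SO_IMY'], Cnt['SO_IMX']), dtype=np.float32)
from_arr = obtain_image(mu, Cnt=Cnt, imtype='object mu-map')
assert from_arr['exists'] and from_arr['fim'] == ''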