Example no. 1
def getscan(
    sbjix,
    expt,
    xc,
    scan_types='',
    scan_ids='',
    cookie='',
    outpath='',
    fcomment='',
    #close_session=True,
):

    if not cookie:
        sessionID = nipet.xnat.post_xnat(xc['url'] + '/data/JSESSIONID',
                                         '',
                                         usrpwd=xc['usrpwd'])
        cookie = 'JSESSIONID=' + sessionID

    #> output dictionary
    out = {}
    out['cookie'] = cookie

    if outpath == '':
        if os.path.isdir(xc['opth']):
            opth = xc['opth']
        else:
            if platform.system() in ['Linux', 'Darwin']:
                opth = os.path.join(os.path.expanduser('~'), 'xnat_scans')
            elif platform.system() == 'Windows':
                opth = os.path.join(os.getenv('LOCALAPPDATA'), 'xnat_scans')
            else:
                raise IOError(
                    'e> unknown system and no output folder provided!')
    else:
        opth = outpath

    scans = get_xnatList(xc['sbjs'] + '/' + sbjix + '/experiments/' +
                         expt['ID'] + '/scans',
                         cookie=cookie)

    all_scan_types = [(s['type'], s['quality'], s['ID']) for s in scans]
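    # > illustrative (hypothetical) content: a list of (scan type, quality, ID) tuples,
    # > e.g. [('T1w_MPRAGE', 'usable', '2'), ('UTE2', 'usable', '5'), ...]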

    picked_scans = []
    if scan_types:
        for st in scan_types:
            picked_scans.extend([s for s in all_scan_types if st in s[0]])

    elif scan_ids:
        for si in scan_ids:
            picked_scans.extend([s for s in all_scan_types if si == s[2]])
    else:
        raise ValueError('e> unspecified scans to download!')

    for scn in picked_scans:

        stype = str(scn[0])
        quality = str(scn[1])
        sid = str(scn[2])

        s_type_id = sid + '_' + stype

        entries = get_xnatList(xc['sbjs'] + '/' + sbjix + '/experiments/' +
                               expt['ID'] + '/scans/' + sid + '/resources',
                               cookie=cookie)

        for e in entries:

            if e['format'] in ['DICOM', 'NIFTI']:

                out[s_type_id] = []

                files = get_xnatList(
                    xc['sbjs'] + '/' + sbjix + '/experiments/' + expt['ID']
                    + '/scans/' + sid + '/resources/' + e['format'] + '/files',
                    cookie=cookie)

                #> scan path
                spth = os.path.join(opth, s_type_id + '__' + quality)
                nimpa.create_dir(spth)

                #> download all files
                for i in range(len(files)):

                    fname = 'scan-' + s_type_id + '__' + quality + fcomment \
                            + '.' + files[i]['Name'].split('.', 1)[-1]

                    status = get_xnatFile(xc['url'] + files[i]['URI'],
                                          os.path.join(spth, fname),
                                          cookie=cookie)

                    if status < 0:
                        print 'e> no scan data for', s_type_id
                    else:
                        out[s_type_id].append(os.path.join(spth, fname))

                if len(files) < 1:
                    print 'e> no scan data for', s_type_id
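
# --- usage sketch (not from the source) ---
# A minimal, hypothetical call to getscan(); the XNAT server URL, credentials,
# subject/experiment identifiers and folders below are illustrative placeholders.
xc = {
    'url': 'https://xnat.example.org',                                   # hypothetical server
    'usrpwd': 'user:password',                                           # hypothetical credentials
    'sbjs': 'https://xnat.example.org/data/archive/projects/PRJ/subjects',
    'opth': '/data/xnat_scans',                                          # default output folder
}
expt = {'ID': 'EXPT01'}                                                  # hypothetical experiment
out = getscan('SUBJ01', expt, xc, scan_types=['T1'], outpath='/data/downloads')
# out now maps '<scanID>_<scanType>' keys to lists of downloaded file paths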
Example no. 2
def mmrchain(
    datain,  # all input data in a dictionary
    scanner_params,  # all scanner parameters in one dictionary
    # containing constants, transaxial and axial
    # LUTs.
    outpath='',  # output path for results
    frames=['fluid', [0, 0]],  # definition of time frames.
    mu_h=[],  # hardware mu-map.
    mu_o=[],  # object mu-map.
    tAffine=[],  # affine transformations for the mu-map for
    # each time frame separately.
    itr=4,  # number of OSEM iterations
    fwhm=0.,  # Gaussian Smoothing FWHM
    recmod=-1,  # reconstruction mode: -1: undefined, chosen
    # automatically. 3: attenuation and scatter
    # correction, 1: attenuation correction
    # only, 0: no correction (randoms only).
    histo=[],  # input histogram (from list-mode data);
    # if not given, it will be performed.
    trim=False,
    trim_scale=2,
    trim_interp=1,  # interpolation for upsampling used in PVC
    trim_memlim=True,  # reduced use of memory for machines
    # with limited memory (slow though)
    pvcroi=[],  # ROI used for PVC.  If undefined no PVC
    # is performed.
    pvcreg_tool='niftyreg',  # the registration tool used in PVC
    store_rois=False,  # stores the image of PVC ROIs
    # as defined in pvcroi.
    psfkernel=[],
    pvcitr=5,
    fcomment='',  # text comment used in the file name of
    # generated image files
    ret_sinos=False,  # return prompt, scatter and randoms
    # sinograms for each reconstruction
    store_img=True,
    store_img_intrmd=False,
    store_itr=[],  # store any reconstruction iteration in
    # the list.  ignored if the list is empty.
    del_img_intrmd=False):
    log = logging.getLogger(__name__)

    # decompose all the scanner parameters and constants
    Cnt = scanner_params['Cnt']
    txLUT = scanner_params['txLUT']
    axLUT = scanner_params['axLUT']

    # -------------------------------------------------------------------------
    # FRAMES
    # check for the provided dynamic frames
    if isinstance(frames, list):
        # Can be given in three ways:
        # * a 1D list (duration of each frame is listed)
        # * a more concise 2D list--repetition and duration lists in
        #   each entry.  Must start with the 'def' entry.
        # * a 2D list with fluid timings: must start with the string
        #   'fluid' or 'timings'.  a 2D list with consecutive lists
        #   describing start and end of the time frame, [t0, t1];
        #   The number of time frames for this option is unlimited,
        #   provided the t0 and t1 are within the acquisition times.
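        # A small illustrative sketch (hypothetical values) of the three accepted formats:
        #   frames = [15, 15, 30, 30]                  # 1D: four frame durations [s]
        #   frames = ['def', [2, 15], [2, 30]]         # 2D: 2x15 s frames followed by 2x30 s frames
        #   frames = ['timings', [0, 15], [15, 45]]    # 2D: explicit [t0, t1] timings [s]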

        # 2D starting with entry 'fluid' or 'timings'
        if  isinstance(frames[0], basestring) and (frames[0]=='fluid' or frames[0]=='timings') \
            and all([isinstance(t,list) and len(t)==2 for t in frames[1:]]):
            t_frms = frames[1:]

        # if 2D definitions, starting with entry 'def':
        elif isinstance(frames[0], basestring) and frames[0]=='def' \
            and all([isinstance(t,list) and len(t)==2 for t in frames[1:]]):
            # get total time and list of all time frames
            dfrms = dynamic_timings(frames)
            t_frms = dfrms[1:]

        # if 1D:
        elif all([isinstance(t, (int, np.integer)) for t in frames]):
            # get total time and list of all time frames
            dfrms = dynamic_timings(frames)
            t_frms = dfrms[1:]

        else:
            log.error(
                'osemdyn: frame definitions are not given in the correct list format: 1D [15,15,30,30,...] or 2D list [[2,15], [2,30], ...]'
            )
    else:
        log.error(
            'osemdyn: provided dynamic frame definitions are neither a Python list nor a numpy array.'
        )
        raise TypeError('Wrong data type for dynamic frames')
    # number of dynamic time frames
    nfrm = len(t_frms)
    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # create folders for results
    if outpath == '':
        petdir = os.path.join(datain['corepath'], 'reconstructed')
        fmudir = os.path.join(datain['corepath'], 'mumap-obj')
        pvcdir = os.path.join(datain['corepath'], 'PRCL')
    else:
        petdir = os.path.join(outpath, 'PET')
        fmudir = os.path.join(outpath, 'mumap-obj')
        pvcdir = os.path.join(outpath, 'PRCL')

    # folder for co-registered mu-maps (for motion compensation)
    fmureg = os.path.join(fmudir, 'registered')
    # folder for affine transformation MR/CT->PET
    petaff = os.path.join(petdir, 'faffine')

    # folder for reconstructed images (dynamic or static depending on number of frames).
    if nfrm > 1:
        petimg = os.path.join(petdir, 'multiple-frames')
        pvcdir = os.path.join(pvcdir, 'multiple-frames')
    elif nfrm == 1:
        petimg = os.path.join(petdir, 'single-frame')
        pvcdir = os.path.join(pvcdir, 'single-frame')
    else:
        log.error('unrecognised number of time frames!')
        raise TypeError('Unrecognised time frames!')
    # create now the folder
    nimpa.create_dir(petimg)
    # create folder
    nimpa.create_dir(petdir)
    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # MU-MAPS
    # get the mu-maps, if given;  otherwise will use blank mu-maps.
    if tAffine:
        muod = obtain_image(mu_o, imtype='object mu-map')
    else:
        muod = obtain_image(mu_o, Cnt=Cnt, imtype='object mu-map')

    # hardware mu-map
    muhd = obtain_image(mu_h, Cnt, imtype='hardware mu-map')

    # choose the mode of reconstruction based on the provided (or not) mu-maps
    if recmod == -1:
        if muod['exists'] and muhd['exists']:
            recmod = 3
        elif muod['exists'] or muhd['exists']:
            recmod = 1
            log.warning('partial mu-map:  scatter correction is switched off.')
        else:
            recmod = 0
            log.warning(
                'no mu-map provided: scatter and attenuation corrections are switched off.'
            )
    # -------------------------------------------------------------------------

    #import pdb; pdb.set_trace()

    # output dictionary
    output = {}
    output['recmod'] = recmod
    output['frames'] = t_frms
    output['#frames'] = nfrm

    # if an affine transformation is given, the baseline mu-map has to be provided as a NIfTI file or a dictionary
    if not tAffine:
        log.debug('using the provided mu-map the same way for all frames.')
    else:
        if len(tAffine) != nfrm:
            log.error(
                'the number of affine transformations in the list has to be the same as the number of dynamic frames!'
            )
            raise IndexError('Inconsistent number of frames.')
        elif not isinstance(tAffine, list):
            log.error(
                'tAffine has to be a list of either 4x4 numpy arrays of affine transformations or a list of file path strings!'
            )
            raise IndexError('Expecting a list.')
        elif not 'fim' in muod:
            log.error(
                'when tAffine is given, the object mu-map has to be provided either as a dictionary or NIfTI file!'
            )
            raise NameError('No path to object mu-map.')

        # check if all are file path strings to the existing files
        if all([isinstance(t, basestring) for t in tAffine]):
            if all([os.path.isfile(t) for t in tAffine]):
                # the internal list of affine transformations
                faff_frms = tAffine
                log.debug(
                    'using provided paths to affine transformations for each dynamic frame.'
                )
            else:
                log.error('not all provided paths are valid!')
                raise IOError('Wrong paths.')
        # check if all are numpy arrays
        elif all([isinstance(t, (np.ndarray, np.generic)) for t in tAffine]):
            # create the folder for dynamic affine transformations
            nimpa.create_dir(petaff)
            faff_frms = []
            for i in range(nfrm):
                fout = os.path.join(petaff, 'affine_frame(' + str(i) + ').txt')
                np.savetxt(fout, tAffine[i], fmt='%3.9f')
                faff_frms.append(fout)
            log.debug(
                'using provided numpy array affine transformations for each dynamic frame.'
            )
        else:
            raise StandardError(
                'Affine transformations for each dynamic frame could not be established.'
            )

        # -------------------------------------------------------------------------------------
        # get ref image for mu-map resampling
        # -------------------------------------------------------------------------------------
        if 'fmuref' in muod:
            fmuref = muod['fmuref']
            log.debug(
                'reusing the reference mu-map from the object mu-map dictionary.'
            )
        else:
            # create folder if doesn't exists
            nimpa.create_dir(fmudir)
            # ref file name
            fmuref = os.path.join(fmudir, 'muref.nii.gz')
            # ref affine
            B = image_affine(datain, Cnt, gantry_offset=False)
            # ref image (blank)
            im = np.zeros((Cnt['SO_IMZ'], Cnt['SO_IMY'], Cnt['SO_IMX']),
                          dtype=np.float32)
            # store ref image
            nimpa.array2nii(im, B, fmuref)
            log.debug('generated a reference mu-map in ' + fmuref)
        # -------------------------------------------------------------------------------------

        output['fmuref'] = fmuref
        output['faffine'] = faff_frms

    # output list of intermediate file names for mu-maps and PET images (useful for dynamic imaging)
    if tAffine: output['fmureg'] = []
    if store_img_intrmd: output['fpeti'] = []

    # dynamic images in one numpy array
    dynim = np.zeros((nfrm, Cnt['SO_IMZ'], Cnt['SO_IMY'], Cnt['SO_IMY']),
                     dtype=np.float32)
    #if asked, output only scatter+randoms sinogram for each frame
    if ret_sinos and itr > 1 and recmod > 2:
        dynmsk = np.zeros((nfrm, Cnt['NSN11'], Cnt['NSANGLES'], Cnt['NSBINS']),
                          dtype=np.float32)
        dynrsn = np.zeros((nfrm, Cnt['NSN11'], Cnt['NSANGLES'], Cnt['NSBINS']),
                          dtype=np.float32)
        dynssn = np.zeros((nfrm, Cnt['NSN11'], Cnt['NSANGLES'], Cnt['NSBINS']),
                          dtype=np.float32)
        dynpsn = np.zeros((nfrm, Cnt['NSN11'], Cnt['NSANGLES'], Cnt['NSBINS']),
                          dtype=np.float32)

    # import pdb; pdb.set_trace()

    # starting frame index with reasonable prompt data
    ifrmP = 0
    # iterate over frame index
    for ifrm in range(nfrm):
        # start time of a current (ifrm-th) dynamic frame
        t0 = int(t_frms[ifrm][0])
        # end time of a current (ifrm-th) dynamic frame
        t1 = int(t_frms[ifrm][1])
        # --------------
        # check if there is enough prompt data to do a reconstruction
        # --------------
        log.info('dynamic frame times t0, t1:%r, %r' % (t0, t1))
        if not histo:
            hst = mmrhist(datain, scanner_params, t0=t0, t1=t1)
        else:
            hst = histo
            log.info('using provided histogram')
        if np.sum(hst['dhc']) > 0.99 * np.sum(hst['phc']):
            log.warning(
                'random events constitute most of the prompt events => omitting reconstruction'
            )
            ifrmP = ifrm + 1
            continue
        # --------------------
        # transform the mu-map if given the affine transformation for each frame
        if tAffine:
            # create the folder for aligned (registered for motion compensation) mu-maps
            nimpa.create_dir(fmureg)
            # the converted NIfTI image resampled to the reference size
            fmu = os.path.join(
                fmureg, 'mumap_dyn_frm' + str(ifrm) + fcomment + '.nii.gz')
            # command for resampling
            if os.path.isfile(Cnt['RESPATH']):
                cmd = [
                    Cnt['RESPATH'], '-ref', fmuref, '-flo', muod['fim'],
                    '-trans', faff_frms[ifrm], '-res', fmu, '-pad', '0'
                ]
                if log.getEffectiveLevel() > logging.DEBUG:
                    cmd.append('-voff')
                call(cmd)
            else:
                log.error(
                    'path to the executable for resampling is incorrect!')
                raise IOError('Incorrect NiftyReg (resampling) executable.')
            # get the new mu-map from the just resampled file
            muodct = nimpa.getnii(fmu, output='all')
            muo = muodct['im']
            A = muodct['affine']
            muo[muo < 0] = 0
            output['fmureg'].append(fmu)
        else:
            muo = muod['im']
        #---------------------

        # output image file name
        if nfrm > 1:
            frmno = '_frm' + str(ifrm)
        else:
            frmno = ''

        # run OSEM reconstruction of a single time frame
        recimg = mmrrec.osemone(datain, [muhd['im'], muo],
                                hst,
                                scanner_params,
                                recmod=recmod,
                                itr=itr,
                                fwhm=fwhm,
                                outpath=petimg,
                                frmno=frmno,
                                fcomment=fcomment + '_i',
                                store_img=store_img_intrmd,
                                store_itr=store_itr,
                                ret_sinos=ret_sinos)
        # form dynamic numpy array
        dynim[ifrm, :, :, :] = recimg.im
        if ret_sinos and itr > 1 and recmod > 2:
            dynpsn[ifrm, :, :, :] = hst['psino']
            dynssn[ifrm, :, :, :] = recimg.ssn
            dynrsn[ifrm, :, :, :] = recimg.rsn
            dynmsk[ifrm, :, :, :] = recimg.amsk

        if store_img_intrmd: output['fpeti'].append(recimg.fpet)
        if nfrm == 1: output['tuple'] = recimg

    output['im'] = np.squeeze(dynim)
    if ret_sinos and itr > 1 and recmod > 2:
        output['sinos'] = {
            'psino': dynpsn,
            'ssino': dynssn,
            'rsino': dynrsn,
            'amask': dynmsk
        }

    # ----------------------------------------------------------------------
    # trim the PET image
    # images have to be stored for PVC
    if pvcroi: store_img_intrmd = True
    if trim:
        # create file name
        if 'lm_dcm' in datain:
            fnm = os.path.basename(datain['lm_dcm'])[:20]
        elif 'lm_ima' in datain:
            fnm = os.path.basename(datain['lm_ima'])[:20]
        # trim PET and upsample
        petu = nimpa.trimim(dynim,
                            affine=image_affine(datain, Cnt),
                            scale=trim_scale,
                            int_order=trim_interp,
                            outpath=petimg,
                            fname=fnm,
                            fcomment=fcomment,
                            store_img_intrmd=store_img_intrmd,
                            memlim=trim_memlim,
                            verbose=log.getEffectiveLevel() < logging.INFO)

        output.update({
            'trimmed': {
                'im': petu['im'],
                'fpet': petu['fimi'],
                'affine': petu['affine']
            }
        })
    # ----------------------------------------------------------------------

    # ----------------------------------------------------------------------
    #run PVC if requested and required input given
    if pvcroi:
        if not os.path.isfile(datain['T1lbl']):
            log.error(
                'no label image from T1 parcellations and/or ROI definitions!')
            raise StandardError('No ROIs')
        else:
            # get the PSF kernel for PVC
            if not psfkernel:
                psfkernel = nimpa.psf_measured(scanner='mmr', scale=trim_scale)
            else:
                if isinstance(
                        psfkernel,
                    (np.ndarray, np.generic)) and psfkernel.shape != (3, 17):
                    log.error(
                        'the PSF kernel has to be a numpy array with the shape (3, 17)!'
                    )
                    raise IndexError('PSF: wrong shape or not a matrix')

        #> file names for NIfTI images of PVC ROIs and PVC corrected PET
        froi = []
        fpvc = []

        #> perform PVC for each time frame
        dynpvc = np.zeros(petu['im'].shape, dtype=np.float32)
        for i in range(ifrmP, nfrm):
            # transform the parcellations (ROIs) if given the affine transformation for each frame
            if not tAffine:
                log.warning(
                    'affine transformations are not provided: one will be generated for the time frame.'
                )
                faffpvc = ''
                #raise StandardError('No affine transformation')
            else:
                faffpvc = faff_frms[i]
            # choose the file name of each individual PVC image
            if nfrm > 1:
                fcomment_pvc = '_frm' + str(i) + fcomment
            else:
                fcomment_pvc = fcomment
            #============================
            # perform PVC
            petpvc_dic = nimpa.pvc_iyang(petu['fimi'][i],
                                         datain,
                                         Cnt,
                                         pvcroi,
                                         psfkernel,
                                         tool=pvcreg_tool,
                                         itr=pvcitr,
                                         faff=faffpvc,
                                         fcomment=fcomment_pvc,
                                         outpath=pvcdir,
                                         store_rois=store_rois,
                                         store_img=store_img_intrmd)
            #============================
            if nfrm > 1:
                dynpvc[i, :, :, :] = petpvc_dic['im']
            else:
                dynpvc = petpvc_dic['im']

            fpvc.append(petpvc_dic['fpet'])

            if store_rois: froi.append(petpvc_dic['froi'])

        #> update output dictionary
        output.update({'impvc': dynpvc})
        if store_img_intrmd: output.update({'fpvc': fpvc})
        if store_rois: output.update({'froi': froi})
    # ----------------------------------------------------------------------

    if store_img:
        # description for saving NIFTI image
        # attenuation number: if only bed present then it is 0.5
        attnum = (1 * muhd['exists'] + 1 * muod['exists']) / 2.
        descrip =    'alg=osem'                     \
                    +';att='+str(attnum*(recmod>0)) \
                    +';sct='+str(1*(recmod>1))      \
                    +';spn='+str(Cnt['SPN'])        \
                    +';sub=14'                      \
                    +';itr='+str(itr)               \
                    +';fwhm='+str(fwhm)             \
                    +';nfrm='+str(nfrm)

        # squeeze the unneeded dimensions
        dynim = np.squeeze(dynim)

        # NIfTI file name for the full PET image (single or multiple frame)

        # save the image to NIfTI file
        if nfrm == 1:
            t0 = hst['t0']
            t1 = hst['t1']
            if t1 == t0:
                t0 = 0
                t1 = hst['dur']
            fpet = os.path.join(
                    petimg,
                    os.path.basename(recimg.fpet)[:8] \
                    +'_t-'+str(t0)+'-'+str(t1)+'sec' \
                    +'_itr-'+str(itr) )
            fpeto = fpet + fcomment + '.nii.gz'
            nimpa.prc.array2nii(dynim[::-1, ::-1, :],
                                recimg.affine,
                                fpeto,
                                descrip=descrip)
        else:
            fpet = os.path.join(
                    petimg,
                    os.path.basename(recimg.fpet)[:8]\
                    +'_nfrm-'+str(nfrm)+'_itr-'+str(itr)
                )
            fpeto = fpet + fcomment + '.nii.gz'
            nimpa.prc.array2nii(dynim[:, ::-1, ::-1, :],
                                recimg.affine,
                                fpeto,
                                descrip=descrip)

        # get output file names for trimmed/PVC images
        if trim:
            # folder for trimmed and dynamic
            pettrim = os.path.join(petimg, 'trimmed')
            # make folder
            nimpa.create_dir(pettrim)
            # trimming scale added to the NIfTI description
            descrip_trim = descrip + ';trim_scale=' + str(trim_scale)
            # file name for saving the trimmed image
            fpetu = os.path.join(
                pettrim,
                os.path.basename(fpet) + '_trimmed-upsampled-scale-' +
                str(trim_scale))
            # in case of PVC
            if pvcroi:
                # iterative Yang (iY) added to the NIfTI description
                descrip_pvc = descrip_trim + ';pvc=iY'
                # file name for saving the PVC NIfTI image
                fpvc = fpetu + '_PVC' + fcomment + '.nii.gz'
                output['trimmed']['fpvc'] = fpvc

            # update the trimmed image file name
            fpetu += fcomment + '.nii.gz'
            # store the file name in the output dictionary
            output['trimmed']['fpet'] = fpetu

        output['fpet'] = fpeto

        # save images
        if nfrm == 1:
            if trim:
                nimpa.prc.array2nii(petu['im'][::-1, ::-1, :],
                                    petu['affine'],
                                    fpetu,
                                    descrip=descrip_trim)
            if pvcroi:
                nimpa.prc.array2nii(dynpvc[::-1, ::-1, :],
                                    petu['affine'],
                                    fpvc,
                                    descrip=descrip_pvc)
        elif nfrm > 1:
            if trim:
                nimpa.prc.array2nii(petu['im'][:, ::-1, ::-1, :],
                                    petu['affine'],
                                    fpetu,
                                    descrip=descrip_trim)
            if pvcroi:
                nimpa.prc.array2nii(dynpvc[:, ::-1, ::-1, :],
                                    petu['affine'],
                                    fpvc,
                                    descrip=descrip_pvc)

    if del_img_intrmd:
        if pvcroi:
            for fi in fpvc:
                os.remove(fi)
        if trim:
            for fi in petu['fimi']:
                os.remove(fi)

    return output
Example no. 3
def mmrchain(
    datain,                 # all input data in a dictionary
    scanner_params,         # all scanner parameters in one dictionary
                            # containing constants, transaxial and axial
                            # LUTs.
    outpath=None,           # output path for results
    fout=None,              # full file name (any folders and extensions are disregarded)
    frames=None,            # definition of time frames, default: ['fluid', [0, 0]]
    mu_h=None,              # hardware mu-map.
    mu_o=None,              # object mu-map.
    tAffine=None,           # affine transformations for the mu-map for
                            # each time frame separately.
    itr=4,                  # number of OSEM iterations
    fwhm=0.,                # Gaussian Post-Smoothing FWHM
    psf=None,               # Resolution Modelling
    recmod=-1,              # reconstruction mode: -1: undefined, chosen
                            # automatically. 3: attenuation and scatter
                            # correction, 1: attenuation correction
                            # only, 0: no correction (randoms only).
    histo=None,             # input histogram (from list-mode data);
                            # if not given, it will be performed.
    decay_ref_time=None,    # decay corrects relative to the reference
                            # time provided; otherwise corrects to the scan
                            # start time.
    trim=False,
    trim_scale=2,
    trim_interp=0,          # interpolation for upsampling used in PVC
    trim_memlim=True,       # reduced use of memory for machines
                            # with limited memory (slow though)
    pvcroi=None,            # ROI used for PVC.  If undefined no PVC
                            # is performed.
    pvcreg_tool='niftyreg', # the registration tool used in PVC
    store_rois=False,       # stores the image of PVC ROIs
                            # as defined in pvcroi.
    pvcpsf=None,
    pvcitr=5,
    fcomment='',            # text comment used in the file name of
                            # generated image files
    ret_sinos=False,        # return prompt, scatter and randoms
                            # sinograms for each reconstruction
    ret_histo=False,        # return histogram (LM processing output) for
                            # each image frame
    store_img=True,
    store_img_intrmd=False,
    store_itr=None,         # store any reconstruction iteration in
                            # the list.  ignored if the list is empty.
    del_img_intrmd=False,
):
    if frames is None:
        frames = ['fluid', [0, 0]]
    if mu_h is None:
        mu_h = []
    if mu_o is None:
        mu_o = []
    if pvcroi is None:
        pvcroi = []
    if pvcpsf is None:
        pvcpsf = []
    if store_itr is None:
        store_itr = []

    # decompose all the scanner parameters and constants
    Cnt = scanner_params['Cnt']

    # -------------------------------------------------------------------------
    # HISTOGRAM PRECEDES FRAMES
    if histo is not None and 'psino' in histo:
        frames = ['fluid', [histo['t0'], histo['t1']]]
    else:
        histo = None
        log.warning(
            'the given histogram does not contain a prompt sinogram--will generate a histogram.')
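    # > note (sketch, inferred from the usage further below): a valid histogram dictionary,
    # > e.g. hst = mmrhist(datain, scanner_params, t0=0, t1=600), carries at least the
    # > 'psino' (prompt sinogram), 't0', 't1', 'dur', 'phc' and 'dhc' entries used here.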

    # FRAMES
    # check for the provided dynamic frames
    if isinstance(frames, list):
        # Can be given in three ways:
        # * a 1D list (duration of each frame is listed)
        # * a more concise 2D list--repetition and duration lists in
        #   each entry.  Must start with the 'def' entry.
        # * a 2D list with fluid timings: must start with the string
        #   'fluid' or 'timings'.  a 2D list with consecutive lists
        #   describing start and end of the time frame, [t0, t1];
        #   The number of time frames for this option is unlimited,
        #   provided the t0 and t1 are within the acquisition times.

        # 2D starting with entry 'fluid' or 'timings'
        if (isinstance(frames[0], str) and frames[0] in ('fluid', 'timings')
                and all(isinstance(t, list) and len(t) == 2 for t in frames[1:])):
            t_frms = frames[1:]
        # if 2D definitions, starting with entry 'def':
        elif (isinstance(frames[0], str) and frames[0] == 'def'
              and all(isinstance(t, list) and len(t) == 2 for t in frames[1:])):
            # get total time and list of all time frames
            dfrms = dynamic_timings(frames)
            t_frms = dfrms[1:]

        # if 1D:
        elif all(isinstance(t, Integral) for t in frames):
            # get total time and list of all time frames
            dfrms = dynamic_timings(frames)
            t_frms = dfrms[1:]

        else:
            log.error('osemdyn: frame definitions are not given in the correct list format:'
                      ' 1D [15,15,30,30,...] or 2D list [[2,15], [2,30], ...]')
    else:
        log.error(
            'provided dynamic frames definitions are incorrect (should be a list of definitions).')
        raise TypeError('Wrong data type for dynamic frames')
    # number of dynamic time frames
    nfrm = len(t_frms)
    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # create folders for results
    if outpath is None:
        petdir = os.path.join(datain['corepath'], 'reconstructed')
        fmudir = os.path.join(datain['corepath'], 'mumap-obj')
        pvcdir = os.path.join(datain['corepath'], 'PRCL')
    else:
        petdir = os.path.join(outpath, 'PET')
        fmudir = os.path.join(outpath, 'mumap-obj')
        pvcdir = os.path.join(outpath, 'PRCL')

    if fout is not None:
        # > get rid of folders
        fout = os.path.basename(fout)
        # > get rid of extension
        fout = fout.split('.')[0]

    # folder for co-registered mu-maps (for motion compensation)
    fmureg = os.path.join(fmudir, 'registered')
    # folder for affine transformation MR/CT->PET
    petaff = os.path.join(petdir, 'faffine')

    # folder for reconstructed images (dynamic or static depending on number of frames).
    if nfrm > 1:
        petimg = os.path.join(petdir, 'multiple-frames')
        pvcdir = os.path.join(pvcdir, 'multiple-frames')
    elif nfrm == 1:
        petimg = os.path.join(petdir, 'single-frame')
        pvcdir = os.path.join(pvcdir, 'single-frame')
    else:
        raise TypeError('Unrecognised/confusing time frames!')
    # create now the folder
    nimpa.create_dir(petimg)
    # create folder
    nimpa.create_dir(petdir)
    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # MU-MAPS
    # get the mu-maps, if given;  otherwise will use blank mu-maps.
    if tAffine is not None:
        muod = obtain_image(mu_o, imtype='object mu-map')
    else:
        muod = obtain_image(mu_o, Cnt=Cnt, imtype='object mu-map')

    # hardware mu-map
    muhd = obtain_image(mu_h, Cnt, imtype='hardware mu-map')

    # choose the mode of reconstruction based on the provided (or not) mu-maps
    if muod['exists'] and muhd['exists'] and recmod == -1:
        recmod = 3
    elif (muod['exists'] or muhd['exists']) and recmod == -1:
        recmod = 1
        log.warning('partial mu-map:  scatter correction is switched off.')
    else:
        if recmod == -1:
            recmod = 0
            log.warning(
                'no mu-map provided: scatter and attenuation corrections are switched off.')
    # -------------------------------------------------------------------------

    # import pdb; pdb.set_trace()

    # output dictionary
    output = {}
    output['recmod'] = recmod
    output['frames'] = t_frms
    output['#frames'] = nfrm

    # if affine transformation is given
    # the baseline mu-map in NIfTI file or dictionary has to be given
    if tAffine is None:
        log.info('using the provided mu-map the same way for all frames.')
    else:
        if len(tAffine) != nfrm:
            raise ValueError("the number of affine transformations in the list"
                             " has to be the same as the number of dynamic frames")
        elif not isinstance(tAffine, list):
            raise ValueError("tAffine has to be a list of either 4x4 numpy arrays"
                             " of affine transformations or a list of file path strings")
        elif 'fim' not in muod:
            raise NameError("when tAffine is given, the object mu-map has to be"
                            " provided either as a dictionary or NIfTI file")

        # check if all are file path strings to the existing files
        if all(isinstance(t, str) for t in tAffine):
            if all(os.path.isfile(t) for t in tAffine):
                # the internal list of affine transformations
                faff_frms = tAffine
                log.info('using provided paths to affine transformations for each dynamic frame.')
            else:
                raise IOError('not all provided paths are valid!')
        # check if all are numpy arrays
        elif all(isinstance(t, (np.ndarray, np.generic)) for t in tAffine):
            # create the folder for dynamic affine transformations
            nimpa.create_dir(petaff)
            faff_frms = []
            for i in range(nfrm):
                fout_ = os.path.join(petaff, 'affine_frame(' + str(i) + ').txt')
                np.savetxt(fout_, tAffine[i], fmt='%3.9f')
                faff_frms.append(fout_)
            log.info('using provided numpy array affine transformations for each dynamic frame.')
        else:
            raise ValueError(
                'Affine transformations for each dynamic frame could not be established.')

        # -------------------------------------------------------------------------------------
        # get ref image for mu-map resampling
        # -------------------------------------------------------------------------------------
        if 'fmuref' in muod:
            fmuref = muod['fmuref']
            log.info('reusing the reference mu-map from the object mu-map dictionary.')
        else:
            # create folder if doesn't exists
            nimpa.create_dir(fmudir)
            # ref file name
            fmuref = os.path.join(fmudir, 'muref.nii.gz')
            # ref affine
            B = image_affine(datain, Cnt, gantry_offset=False)
            # ref image (blank)
            im = np.zeros((Cnt['SO_IMZ'], Cnt['SO_IMY'], Cnt['SO_IMX']), dtype=np.float32)
            # store ref image
            nimpa.array2nii(im, B, fmuref)
            log.info('generated a reference mu-map in:\n{}'.format(fmuref))
        # -------------------------------------------------------------------------------------

        output['fmuref'] = fmuref
        output['faffine'] = faff_frms

    # output list of intermediate file names for mu-maps and PET images
    # (useful for dynamic imaging)
    if tAffine is not None: output['fmureg'] = []

    if store_img_intrmd:
        output['fpeti'] = []
        if fwhm > 0:
            output['fsmoi'] = []

    # > number of 3D sinograms
    if Cnt['SPN'] == 1:
        snno = Cnt['NSN1']
    elif Cnt['SPN'] == 11:
        snno = Cnt['NSN11']
    else:
        raise ValueError('unrecognised span: {}'.format(Cnt['SPN']))
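    # > for reference (typical Siemens mMR constants): span-1 gives NSN1 = 4084 sinograms
    # > and span-11 gives NSN11 = 837 sinograms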

    # dynamic images in one numpy array
    dynim = np.zeros((nfrm, Cnt['SO_IMZ'], Cnt['SO_IMY'], Cnt['SO_IMY']), dtype=np.float32)
    # if asked, output only scatter+randoms sinogram for each frame
    if ret_sinos and itr > 1 and recmod > 2:
        dynmsk = np.zeros((nfrm, Cnt['NSEG0'], Cnt['NSANGLES'], Cnt['NSBINS']), dtype=np.float32)
        dynrsn = np.zeros((nfrm, snno, Cnt['NSANGLES'], Cnt['NSBINS']), dtype=np.float32)
        dynssn = np.zeros((nfrm, snno, Cnt['NSANGLES'], Cnt['NSBINS']), dtype=np.float32)
        dynpsn = np.zeros((nfrm, snno, Cnt['NSANGLES'], Cnt['NSBINS']), dtype=np.float32)

    # > returning dictionary of histograms if requested
    if ret_histo:
        hsts = {}

    # import pdb; pdb.set_trace()

    # starting frame index with reasonable prompt data
    ifrmP = 0
    # iterate over frame index
    for ifrm in range(nfrm):
        # start time of a current (ifrm-th) dynamic frame
        t0 = int(t_frms[ifrm][0])
        # end time of a current (ifrm-th) dynamic frame
        t1 = int(t_frms[ifrm][1])
        # --------------
        # check if there is enough prompt data to do a reconstruction
        # --------------
        log.info('dynamic frame times t0={}, t1={}:'.format(t0, t1))
        if histo is None:
            hst = mmrhist(datain, scanner_params, t0=t0, t1=t1)
        else:
            hst = histo
            log.info(
                dedent('''\
                ------------------------------------------------------
                using provided histogram
                ------------------------------------------------------'''))

        if ret_histo:
            hsts[str(t0) + '-' + str(t1)] = hst

        if np.sum(hst['dhc']) > 0.99 * np.sum(hst['phc']):
            log.warning(
                dedent('''\
                ===========================================================================
                amount of randoms is the greater part of prompts => omitting reconstruction
                ==========================================================================='''))
            ifrmP = ifrm + 1
            continue
        # --------------------
        # transform the mu-map if given the affine transformation for each frame
        if tAffine is not None:
            # create the folder for aligned (registered for motion compensation) mu-maps
            nimpa.create_dir(fmureg)
            # the converted NIfTI image resampled to the reference size
            fmu = os.path.join(fmureg, 'mumap_dyn_frm' + str(ifrm) + fcomment + '.nii.gz')
            # command for resampling
            if os.path.isfile(Cnt['RESPATH']):
                cmd = [
                    Cnt['RESPATH'], '-ref', fmuref, '-flo', muod['fim'], '-trans', faff_frms[ifrm],
                    '-res', fmu, '-pad', '0']
                if log.getEffectiveLevel() > logging.INFO:
                    cmd.append('-voff')
                call(cmd)
            else:
                raise IOError('Incorrect path to NiftyReg (resampling) executable.')
            # get the new mu-map from the just resampled file
            muodct = nimpa.getnii(fmu, output='all')
            muo = muodct['im']
            muo[muo < 0] = 0
            output['fmureg'].append(fmu)
        else:
            muo = muod['im']
        # ---------------------

        # output image file name
        if nfrm > 1:
            frmno = '_frm' + str(ifrm)
        else:
            frmno = ''

        # run OSEM reconstruction of a single time frame
        recimg = mmrrec.osemone(datain, [muhd['im'], muo], hst, scanner_params,
                                decay_ref_time=decay_ref_time, recmod=recmod, itr=itr, fwhm=fwhm,
                                psf=psf, outpath=petimg, frmno=frmno, fcomment=fcomment + '_i',
                                store_img=store_img_intrmd, store_itr=store_itr, fout=fout,
                                ret_sinos=ret_sinos)

        # form dynamic Numpy array
        if fwhm > 0:
            dynim[ifrm, :, :, :] = recimg.imsmo
        else:
            dynim[ifrm, :, :, :] = recimg.im

        if ret_sinos and itr > 1 and recmod > 2:
            dynpsn[ifrm, :, :, :] = np.squeeze(hst['psino'])
            dynssn[ifrm, :, :, :] = np.squeeze(recimg.ssn)
            dynrsn[ifrm, :, :, :] = np.squeeze(recimg.rsn)
            dynmsk[ifrm, :, :, :] = np.squeeze(recimg.amsk)

        if store_img_intrmd:
            output['fpeti'].append(recimg.fpet)
            if fwhm > 0:
                output['fsmoi'].append(recimg.fsmo)

        if nfrm == 1: output['tuple'] = recimg

    output['im'] = np.squeeze(dynim)

    if ret_sinos and itr > 1 and recmod > 2:
        output['sinos'] = {
            'psino': np.squeeze(dynpsn), 'ssino': np.squeeze(dynssn), 'rsino': np.squeeze(dynrsn),
            'amask': np.squeeze(dynmsk)}

    if ret_histo:
        output['hst'] = hsts

    # ----------------------------------------------------------------------
    # trim the PET image
    # images have to be stored for PVC
    if pvcroi: store_img_intrmd = True
    if trim:
        # create file name
        if 'lm_dcm' in datain:
            fnm = os.path.basename(datain['lm_dcm'])[:20]
        elif 'lm_ima' in datain:
            fnm = os.path.basename(datain['lm_ima'])[:20]
        # trim PET and upsample
        petu = nimpa.imtrimup(dynim, affine=image_affine(datain, Cnt), scale=trim_scale,
                              int_order=trim_interp, outpath=petimg, fname=fnm, fcomment=fcomment,
                              store_img_intrmd=store_img_intrmd, memlim=trim_memlim,
                              verbose=log.getEffectiveLevel())

        output.update({
            'trimmed': {'im': petu['im'], 'fpet': petu['fimi'], 'affine': petu['affine']}})
    # ----------------------------------------------------------------------

    # ----------------------------------------------------------------------
    # run PVC if requested and required input given
    if pvcroi:
        if not os.path.isfile(datain['T1lbl']):
            raise Exception('No label image and/or ROI definitions found!')
        else:
            # get the PSF kernel for PVC
            if not pvcpsf:
                pvcpsf = nimpa.psf_measured(scanner='mmr', scale=trim_scale)
            else:
                if (
                    isinstance(pvcpsf, (np.ndarray, np.generic)) and
                    pvcpsf.shape != (3, 2 * Cnt['RSZ_PSF_KRNL'] + 1)
                ):  # yapf: disable
                    raise ValueError(
                        'the PSF kernel has to be a numpy array with the shape ({},{})'.format(
                            3, 2 * Cnt['RSZ_PSF_KRNL'] + 1))

        # > file names for NIfTI images of PVC ROIs and PVC corrected PET
        froi = []
        fpvc = []

        # > perform PVC for each time frame
        dynpvc = np.zeros(petu['im'].shape, dtype=np.float32)
        for i in range(ifrmP, nfrm):
            # transform the parcellations (ROIs) if given the affine transformation for each frame
            if tAffine is None:
                log.warning(
                    'affine transformations are not provided: one will be generated for the time frame.')
                faffpvc = None
                # raise StandardError('No affine transformation')
            else:
                faffpvc = faff_frms[i]

            # choose the file name of each individual PVC image
            if nfrm > 1:
                fcomment_pvc = '_frm' + str(i) + fcomment
            else:
                fcomment_pvc = fcomment
            # ===========================
            # perform PVC
            petpvc_dic = nimpa.pvc_iyang(petu['fimi'][i], datain, Cnt, pvcroi, pvcpsf,
                                         tool=pvcreg_tool, itr=pvcitr, faff=faffpvc,
                                         fcomment=fcomment_pvc, outpath=pvcdir,
                                         store_rois=store_rois, store_img=store_img_intrmd)
            # ===========================
            if nfrm > 1:
                dynpvc[i, :, :, :] = petpvc_dic['im']
            else:
                dynpvc = petpvc_dic['im']
            fpvc.append(petpvc_dic['fpet'])

            if store_rois: froi.append(petpvc_dic['froi'])

        # > update output dictionary
        output.update({'impvc': dynpvc})
        output['fprc'] = petpvc_dic['fprc']
        output['imprc'] = petpvc_dic['imprc']

        if store_img_intrmd: output.update({'fpvc': fpvc})
        if store_rois: output.update({'froi': froi})
    # ----------------------------------------------------------------------

    if store_img:
        # description for saving NIFTI image
        # attenuation number: if only bed present then it is 0.5
        attnum = (1 * muhd['exists'] + 1 * muod['exists']) / 2.
        descrip = (f"alg=osem"
                   f";att={attnum*(recmod>0)}"
                   f";sct={1*(recmod>1)}"
                   f";spn={Cnt['SPN']}"
                   f";sub=14"
                   f";itr={itr}"
                   f";fwhm={fwhm}"
                   f";psf={psf}"
                   f";nfrm={nfrm}")

        # squeeze the unneeded dimensions
        dynim = np.squeeze(dynim)

        # NIfTI file name for the full PET image (single or multiple frame)

        # save the image to NIfTI file
        if nfrm == 1:
            t0 = hst['t0']
            t1 = hst['t1']
            if t1 == t0:
                t0 = 0
                t1 = hst['dur']
            # > --- file naming and saving ---
            if fout is None:
                fpet = os.path.join(
                    petimg,
                    os.path.basename(recimg.fpet)[:8] + f'_t-{t0}-{t1}sec_itr-{itr}')
                fpeto = f"{fpet}{fcomment}.nii.gz"
            else:
                fpeto = os.path.join(petimg, os.path.basename(fout) + '.nii.gz')

            nimpa.prc.array2nii(dynim[::-1, ::-1, :], recimg.affine, fpeto, descrip=descrip)
            # > --- ---
        else:
            if fout is None:
                fpet = os.path.join(petimg,
                                    os.path.basename(recimg.fpet)[:8] + f'_nfrm-{nfrm}_itr-{itr}')
                fpeto = f"{fpet}{fcomment}.nii.gz"
            else:
                fpeto = os.path.join(petimg, os.path.basename(fout) + f'_nfrm-{nfrm}.nii.gz')

            nimpa.prc.array2nii(dynim[:, ::-1, ::-1, :], recimg.affine, fpeto, descrip=descrip)

        output['fpet'] = fpeto

        # get output file names for trimmed/PVC images
        if trim:
            # folder for trimmed and dynamic
            pettrim = os.path.join(petimg, 'trimmed')
            # make folder
            nimpa.create_dir(pettrim)
            # trimming scale added to the NIfTI description
            descrip_trim = f'{descrip};trim_scale={trim_scale}'
            # file name for saving the trimmed image
            if fout is None:
                fpetu = os.path.join(
                    pettrim,
                    os.path.basename(fpet) + f'_trimmed-upsampled-scale-{trim_scale}')
            else:
                fpetu = os.path.join(
                    pettrim,
                    os.path.basename(fout) + f'_trimmed-upsampled-scale-{trim_scale}')
            # in case of PVC
            if pvcroi:
                # iterative Yang (iY) added to the NIfTI description
                descrip_pvc = f'{descrip_trim};pvc=iY'
                # file name for saving the PVC NIfTI image
                fpvc = f"{fpetu}_PVC{fcomment}.nii.gz"
                output['trimmed']['fpvc'] = fpvc

            # update the trimmed image file name
            fpetu += f'{fcomment}.nii.gz'
            # store the file name in the output dictionary
            output['trimmed']['fpet'] = fpetu

        # save images
        if nfrm == 1:
            if trim:
                nimpa.prc.array2nii(petu['im'][::-1, ::-1, :], petu['affine'], fpetu,
                                    descrip=descrip_trim)
            if pvcroi:
                nimpa.prc.array2nii(dynpvc[::-1, ::-1, :], petu['affine'], fpvc,
                                    descrip=descrip_pvc)
        elif nfrm > 1:
            if trim:
                nimpa.prc.array2nii(petu['im'][:, ::-1, ::-1, :], petu['affine'], fpetu,
                                    descrip=descrip_trim)
            if pvcroi:
                nimpa.prc.array2nii(dynpvc[:, ::-1, ::-1, :], petu['affine'], fpvc,
                                    descrip=descrip_pvc)

    if del_img_intrmd:
        if pvcroi:
            for fi in fpvc:
                os.remove(fi)
        if trim:
            for fi in petu['fimi']:
                os.remove(fi)

    return output
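
# --- usage sketch (not from the source) ---
# A hedged, hypothetical single-frame reconstruction call. The raw-data folder and
# output path are placeholders, and the parameter/classification helpers are assumed
# to be the standard NiftyPET entry points.
from niftypet import nipet

mMRpars = nipet.get_mmrparams()                                   # scanner constants and LUTs
datain = nipet.classify_input('/data/raw_pet_study', mMRpars)     # hypothetical raw-data folder

recon = mmrchain(
    datain, mMRpars,
    frames=['timings', [0, 600]],        # one 10-minute frame from scan start
    itr=4, fwhm=2.5,                     # 4 OSEM iterations, 2.5 mm post-smoothing
    outpath='/data/recon_output',        # hypothetical output folder
    store_img=True)
print(recon['fpet'])                     # path of the saved NIfTI image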
Example no. 4
def align_mumap(
    datain,
    scanner_params=None,
    outpath='',
    reg_tool='niftyreg',
    use_stored=False,
    hst=None,
    t0=0,
    t1=0,
    itr=2,
    faff='',
    fpet='',
    fcomment='',
    store=False,
    store_npy=False,
    petopt='ac',
    musrc='ute',         # another option is pct for mu-map source
    ute_name='UTE2',
    del_auxilary=True,
    verbose=False,
):
    '''
    Align a pCT- or MR-derived mu-map to a PET image reconstructed to chosen
    specifications (e.g., with/without attenuation and scatter corrections).

    use_stored only works if hst or t0/t1 are given, but not when faff is given.
    '''
    if scanner_params is None:
        scanner_params = {}

    # > output folder
    if outpath == '':
        opth = os.path.join(datain['corepath'], 'mumap-obj')
    else:
        opth = os.path.join(outpath, 'mumap-obj')

    # > create the folder, if not existent
    nimpa.create_dir(opth)

    # > get the timing of PET if affine not given
    if faff == '' and hst is not None and isinstance(hst, dict) and 't0' in hst:
        t0 = hst['t0']
        t1 = hst['t1']

    # > file name for the output mu-map
    fnm = 'mumap-' + musrc.upper()

    # > output dictionary
    mu_dct = {}

    # ---------------------------------------------------------------------------
    # > used stored if requested
    if use_stored:
        fmu_stored = fnm + '-aligned-to_t'\
                     + str(t0)+'-'+str(t1)+'_'+petopt.upper()\
                     + fcomment
        fmupath = os.path.join(opth, fmu_stored + '.nii.gz')

        if os.path.isfile(fmupath):
            mudct_stored = nimpa.getnii(fmupath, output='all')
            # > create output dictionary
            mu_dct['im'] = mudct_stored['im']
            mu_dct['affine'] = mudct_stored['affine']
            # pu_dct['faff'] = faff
            return mu_dct
    # ---------------------------------------------------------------------------

    # > tmp folder for not aligned mu-maps
    tmpdir = os.path.join(opth, 'tmp')
    nimpa.create_dir(tmpdir)

    # > three ways of passing scanner constants <Cnt> are here decoded
    if 'Cnt' in scanner_params:
        Cnt = scanner_params['Cnt']
    elif 'SO_IMZ' in scanner_params:
        Cnt = scanner_params
    else:
        Cnt = rs.get_mmr_constants()
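    # > illustrative note: scanner_params is typically the full parameter dictionary
    # > (e.g. nipet.get_mmrparams(), holding 'Cnt', 'txLUT' and 'axLUT'); passing only the
    # > constants dictionary (with 'SO_IMZ' etc.) or nothing at all is also accepted here.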

    # > if affine not provided histogram the LM data for recon and registration
    if not os.path.isfile(faff):
        from niftypet.nipet.prj import mmrrec

        # -histogram the list data if needed
        if hst is None:
            from niftypet.nipet import mmrhist
            if 'txLUT' in scanner_params:
                hst = mmrhist(datain, scanner_params, t0=t0, t1=t1)
            else:
                raise ValueError('Full scanner parameters are not provided'
                                 ' but are required for histogramming.')

    # ========================================================
    # -get hardware mu-map
    if 'hmumap' in datain and os.path.isfile(datain['hmumap']):
        muh = np.load(datain['hmumap'], allow_pickle=True)["hmu"]
        (log.info if verbose else log.debug)('loaded hardware mu-map from file:\n{}'.format(
            datain['hmumap']))
    elif outpath != '':
        hmupath = os.path.join(outpath, "mumap-hdw", "hmumap.npz")
        if os.path.isfile(hmupath):
            muh = np.load(hmupath, allow_pickle=True)["hmu"]
            datain["hmumap"] = hmupath
        else:
            raise IOError('Invalid path to the hardware mu-map')
    else:
        log.error('the hardware mu-map is required first.')
        raise IOError('Could not find the hardware mu-map!')
    # ========================================================
    # -check if T1w image is available
    if not {'MRT1W#', 'T1nii', 'T1bc', 'T1N4'}.intersection(datain):
        log.error('no MR T1w image found, which is required for co-registration!')
        raise IOError('T1w image could not be obtained!')
    # ========================================================

    # -if the affine is not given,
    # -it will be generated by reconstructing PET image, with some or no corrections
    if not os.path.isfile(faff):
        # first recon pet to get the T1 aligned to it
        if petopt == 'qnt':
            # ---------------------------------------------
            # OPTION 1 (quantitative recon with all corrections using MR-based mu-map)
            # get UTE object mu-map (may not be in register with the PET data)
            mudic = obj_mumap(datain, Cnt, outpath=tmpdir, del_auxilary=del_auxilary)
            muo = mudic['im']
            # reconstruct PET image with UTE mu-map to which co-register T1w
            recout = mmrrec.osemone(datain, [muh, muo], hst, scanner_params, recmod=3, itr=itr,
                                    fwhm=0., fcomment=fcomment + '_QNT-UTE',
                                    outpath=os.path.join(outpath, 'PET',
                                                         'positioning'), store_img=True)
        elif petopt == 'nac':
            # ---------------------------------------------
            # OPTION 2 (recon without any corrections for scatter and attenuation)
            # reconstruct PET image with UTE mu-map to which co-register T1w
            muo = np.zeros(muh.shape, dtype=muh.dtype)
            recout = mmrrec.osemone(datain, [muh, muo], hst, scanner_params, recmod=1, itr=itr,
                                    fwhm=0., fcomment=fcomment + '_NAC',
                                    outpath=os.path.join(outpath, 'PET',
                                                         'positioning'), store_img=True)
        elif petopt == 'ac':
            # ---------------------------------------------
            # OPTION 3 (recon with attenuation correction only but no scatter)
            # reconstruct PET image with UTE mu-map to which co-register T1w
            mudic = obj_mumap(datain, Cnt, outpath=tmpdir, del_auxilary=del_auxilary)
            muo = mudic['im']

            recout = mmrrec.osemone(datain, [muh, muo], hst, scanner_params, recmod=1, itr=itr,
                                    fwhm=0., fcomment=fcomment + '_AC-UTE',
                                    outpath=os.path.join(outpath, 'PET',
                                                         'positioning'), store_img=True)

        fpet = recout.fpet
        mu_dct['fpet'] = fpet

        # ------------------------------
        if musrc == 'ute' and ute_name in datain and os.path.exists(datain[ute_name]):
            # change to NIfTI if the UTE sequence is in DICOM files (folder)
            if os.path.isdir(datain[ute_name]):
                fnew = os.path.basename(datain[ute_name])
                run([Cnt['DCM2NIIX'], '-f', fnew, datain[ute_name]])
                fute = glob.glob(os.path.join(datain[ute_name], fnew + '*nii*'))[0]
            elif os.path.isfile(datain[ute_name]):
                fute = datain[ute_name]

            # get the affine transformation
            if reg_tool == 'spm':
                regdct = nimpa.coreg_spm(fpet, fute,
                                         outpath=os.path.join(outpath, 'PET', 'positioning'))
            elif reg_tool == 'niftyreg':
                regdct = nimpa.affine_niftyreg(
                    fpet,
                    fute,
                    outpath=os.path.join(outpath, 'PET', 'positioning'),
                    executable=Cnt['REGPATH'],
                    omp=multiprocessing.cpu_count() // 2,                # fcomment=fcomment,
                    rigOnly=True,
                    affDirect=False,
                    maxit=5,
                    speed=True,
                    pi=50,
                    pv=50,
                    smof=0,
                    smor=0,
                    rmsk=True,
                    fmsk=True,
                    rfwhm=15.,                                           # millimetres
                    rthrsh=0.05,
                    ffwhm=15.,                                           # millimetres
                    fthrsh=0.05,
                    verbose=verbose)
            else:
                raise ValueError('unknown registration tool requested')

            faff_mrpet = regdct['faff']

        elif musrc == 'pct':

            ft1w = nimpa.pick_t1w(datain)

            if reg_tool == 'spm':
                regdct = nimpa.coreg_spm(fpet, ft1w,
                                         outpath=os.path.join(outpath, 'PET', 'positioning'))
            elif reg_tool == 'niftyreg':
                regdct = nimpa.affine_niftyreg(
                    fpet,
                    ft1w,
                    outpath=os.path.join(outpath, 'PET', 'positioning'),
                    executable=Cnt['REGPATH'],
                    omp=multiprocessing.cpu_count() // 2,
                    rigOnly=True,
                    affDirect=False,
                    maxit=5,
                    speed=True,
                    pi=50,
                    pv=50,
                    smof=0,
                    smor=0,
                    rmsk=True,
                    fmsk=True,
                    rfwhm=15.,                                           # millimetres
                    rthrsh=0.05,
                    ffwhm=15.,                                           # millimetres
                    fthrsh=0.05,
                    verbose=verbose)
            else:
                raise ValueError('unknown registration tool requested')

            faff_mrpet = regdct['faff']

        else:
            raise IOError('Floating MR image not provided or is invalid.')

    else:
        faff_mrpet = faff
        regdct = {}
        if not os.path.isfile(fpet):
            raise IOError('e> the reference PET should be supplied with the affine.')

    # > output file name for the aligned mu-maps
    if musrc == 'pct':

        # > convert to mu-values before resampling to avoid artefacts with negative values
        nii = nib.load(datain['pCT'])
        img = nii.get_fdata(dtype=np.float32)
        img_mu = hu2mu(img)
        nii_mu = nib.Nifti1Image(img_mu, nii.affine)
        fflo = os.path.join(tmpdir, 'pct2mu-not-aligned.nii.gz')
        nib.save(nii_mu, fflo)

        freg = os.path.join(opth, 'pct2mu-aligned-' + fcomment + '.nii.gz')

    elif musrc == 'ute':
        freg = os.path.join(opth, 'UTE-res-tmp' + fcomment + '.nii.gz')
        if 'UTE' not in datain:
            fnii = 'converted-from-DICOM_'
            tstmp = nimpa.time_stamp(simple_ascii=True)
            # convert the DICOM mu-map images to nii
            if 'mumapDCM' not in datain:
                raise IOError('DICOM files with the UTE mu-map are not given.')
            run([Cnt['DCM2NIIX'], '-f', fnii + tstmp, '-o', opth, datain['mumapDCM']])
            # > files from the converted DICOM mu-map; pick one:
            fflo = glob.glob(os.path.join(opth, '*' + fnii + tstmp + '*.nii*'))[0]
        else:
            if os.path.isfile(datain['UTE']):
                fflo = datain['UTE']
            else:
                raise IOError('The provided NIfTI UTE path is not valid.')

    # > call the resampling routine to get the pCT/UTE in place
    if reg_tool == "spm":
        nimpa.resample_spm(fpet, fflo, faff_mrpet, fimout=freg, del_ref_uncmpr=True,
                           del_flo_uncmpr=True, del_out_uncmpr=True)
    else:
        nimpa.resample_niftyreg(fpet, fflo, faff_mrpet, fimout=freg, executable=Cnt['RESPATH'],
                                verbose=verbose)

    # -get the NIfTI of registered image
    nim = nib.load(freg)
    A = nim.affine
    imreg = nim.get_fdata(dtype=np.float32)
    imreg = imreg[:, ::-1, ::-1]
    imreg = np.transpose(imreg, (2, 1, 0))

    # -convert to mu-values; sort out the file name too.
    if musrc == 'pct':
        mu = imreg
    elif musrc == 'ute':
        mu = np.float32(imreg) / 1e4
        # -remove the converted file from DICOMs
        os.remove(fflo)
    else:
        raise NameError('Confused o_O')

    # > get rid of negatives and nans
    mu[mu < 0] = 0
    mu[np.isnan(mu)] = 0

    # > return image dictionary with the image itself and other parameters
    mu_dct['im'] = mu
    mu_dct['affine'] = A
    mu_dct['faff'] = faff_mrpet

    if store or store_npy:
        nimpa.create_dir(opth)
        if faff == '':
            fname = fnm + '-aligned-to_t'\
                    + str(t0)+'-'+str(t1)+'_'+petopt.upper()\
                    + fcomment
        else:
            fname = fnm + '-aligned-to-given-affine' + fcomment
    if store_npy:
        fnp = os.path.join(opth, fname + ".npz")
        np.savez(fnp, mu=mu, A=A)
    if store:
        # > NIfTI
        fmu = os.path.join(opth, fname + '.nii.gz')
        nimpa.array2nii(mu[::-1, ::-1, :], A, fmu)
        mu_dct['fim'] = fmu

    if del_auxilary:
        os.remove(freg)

        if musrc == 'ute' and not os.path.isfile(faff):
            os.remove(fute)
        shutil.rmtree(tmpdir)

    return mu_dct
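
A minimal consumer sketch (not part of the original example), assuming mu_dct is the dictionary returned by the function above; it only touches the keys the code sets ('im', 'affine', 'faff' and, when stored, 'fim').

import numpy as np

def summarise_mumap(mu_dct):
    '''Print basic information about an aligned mu-map dictionary.'''
    mu = mu_dct['im']                      # aligned object mu-map (NumPy array)
    print('mu-map shape:', mu.shape)
    print('non-zero voxels:', int(np.count_nonzero(mu)))
    print('affine:\n', mu_dct['affine'])   # 4x4 affine of the registered image
    print('affine file:', mu_dct['faff'])  # affine used (given or derived) for the alignment
    if 'fim' in mu_dct:                    # present only when the NIfTI was stored
        print('aligned mu-map written to:', mu_dct['fim'])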
Esempio n. 5
0
def obj_mumap(
    datain,
    params=None,
    outpath='',
    comment='',
    store=False,
    store_npy=False,
    gantry_offset=True,
    del_auxilary=True,
):
    '''Get the object mu-map from DICOM images'''
    if params is None:
        params = {}

    # three ways of passing the scanner constants <Cnt> are decoded here
    if 'Cnt' in params:
        Cnt = params['Cnt']
    elif 'SO_IMZ' in params:
        Cnt = params
    else:
        Cnt = rs.get_mmr_constants()

    # output folder
    if outpath == '':
        fmudir = os.path.join(datain['corepath'], 'mumap-obj')
    else:
        fmudir = os.path.join(outpath, 'mumap-obj')
    nimpa.create_dir(fmudir)

    # > ref file name
    fmuref = os.path.join(fmudir, 'muref.nii.gz')

    # > ref affine
    B = image_affine(datain, Cnt, gantry_offset=gantry_offset)

    # > ref image (blank)
    im = np.zeros((Cnt['SO_IMZ'], Cnt['SO_IMY'], Cnt['SO_IMX']), dtype=np.float32)

    # > store ref image
    nimpa.array2nii(im, B, fmuref)

    # check if the object DICOM files for the MR-based mu-map exist
    if 'mumapDCM' not in datain or not os.path.isdir(datain['mumapDCM']):
        log.error('DICOM folder for the mu-map does not exist.')
        return None

    fnii = 'converted-from-object-DICOM_'
    tstmp = nimpa.time_stamp(simple_ascii=True)

    # find residual(s) from previous runs and delete them
    resdcm = glob.glob(os.path.join(fmudir, '*' + fnii + '*.nii*'))
    for d in resdcm:
        os.remove(d)

    # convert the DICOM mu-map images to nii
    run([Cnt['DCM2NIIX'], '-f', fnii + tstmp, '-o', fmudir, datain['mumapDCM']])
    # files from the converted DICOM mu-map; pick one:
    fmunii = glob.glob(os.path.join(fmudir, '*' + fnii + tstmp + '*.nii*'))[0]
    # fmunii = glob.glob( os.path.join(datain['mumapDCM'], '*converted*.nii*') )
    # fmunii = fmunii[0]

    # resample the converted NIfTI image to the reference size
    fmu = os.path.join(fmudir, comment + 'mumap_tmp.nii.gz')
    if os.path.isfile(Cnt['RESPATH']):
        cmd = [Cnt['RESPATH'], '-ref', fmuref, '-flo', fmunii, '-res', fmu, '-pad', '0']
        if log.getEffectiveLevel() > logging.INFO:
            cmd.append('-voff')
        run(cmd)
    else:
        log.error('path to resampling executable is incorrect!')
        raise IOError('Path to executable is incorrect!')

    nim = nib.load(fmu)
    # get the affine transform
    A = nim.get_sform()
    mu = nim.get_fdata(dtype=np.float32)
    mu = np.transpose(mu[:, ::-1, ::-1], (2, 1, 0))
    # convert to mu-values
    mu = np.float32(mu) / 1e4
    mu[mu < 0] = 0

    # > return image dictionary with the image itself and some other stats
    mu_dct = {'im': mu, 'affine': A}
    if not del_auxilary:
        mu_dct['fmuref'] = fmuref

    # > store the mu-map if requested
    if store_npy:
        # to numpy array
        fnp = os.path.join(fmudir, "mumap-from-DICOM.npz")
        np.savez(fnp, mu=mu, A=A)

    if store:
        # with this file name
        fmumap = os.path.join(fmudir, 'mumap-from-DICOM_no-alignment' + comment + '.nii.gz')
        nimpa.array2nii(mu[::-1, ::-1, :], A, fmumap)
        mu_dct['fim'] = fmumap

    if del_auxilary:
        os.remove(fmuref)
        os.remove(fmunii)
        os.remove(fmu)

        if [f for f in os.listdir(fmudir)
                if not f.startswith('.') and not f.endswith('.json')] == []:
            shutil.rmtree(fmudir)

    return mu_dct
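
A short usage sketch (not part of the original example) for obj_mumap; the data folder, the output path and the NiftyPET helper calls used to build datain and the parameter dictionary are assumptions for illustration.

from niftypet import nipet              # assumed import layout

mMRpars = nipet.get_mmrparams()         # assumed helper returning scanner constants/LUTs
datain = nipet.classify_input('/data/raw/subject01', mMRpars)   # assumed raw-data classifier

mu_o = obj_mumap(datain, params=mMRpars, outpath='/data/output',
                 store=True, del_auxilary=False)
print(mu_o['im'].shape)                 # object mu-map array
print(mu_o['fim'])                      # NIfTI written because store=True
print(mu_o['fmuref'])                   # reference image kept because del_auxilary=False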
Esempio n. 6
0
def hdw_mumap(datain, hparts, params, outpath='', use_stored=False, del_interm=True):
    '''Get hardware mu-map components, including bed, coils etc.'''
    # two ways of passing the scanner constants <Cnt> are decoded here
    if 'Cnt' in params:
        Cnt = params['Cnt']
    else:
        Cnt = params

    if outpath != '':
        fmudir = os.path.join(outpath, 'mumap-hdw')
    else:
        fmudir = os.path.join(datain['corepath'], 'mumap-hdw')

    nimpa.create_dir(fmudir)

    # if requested to use the stored hardware mu-map, get it from the path in datain
    if use_stored and "hmumap" in datain and os.path.isfile(datain["hmumap"]):
        if datain['hmumap'].endswith(('.nii', '.nii.gz')):
            dct = nimpa.getnii(datain['hmumap'], output='all')
            hmu = dct['im']
            A = dct['affine']
            fmu = datain['hmumap']
        elif datain["hmumap"].endswith(".npz"):
            arr = np.load(datain["hmumap"], allow_pickle=True)
            hmu, A, fmu = arr["hmu"], arr["A"], arr["fmu"]
            log.info('loaded hardware mu-map from file: {}'.format(datain['hmumap']))
            fnp = datain['hmumap']
    elif outpath and os.path.isfile(os.path.join(fmudir, "hmumap.npz")):
        fnp = os.path.join(fmudir, "hmumap.npz")
        arr = np.load(fnp, allow_pickle=True)
        hmu, A, fmu = arr["hmu"], arr["A"], arr["fmu"]
        datain['hmumap'] = fnp
    # otherwise generate it from the parts by resampling the high-resolution CT images
    else:
        hmupos = get_hmupos(datain, hparts, Cnt, outpath=outpath)
        # just to get the dims, get the ref image
        nimo = nib.load(hmupos[0]['niipath'])
        A = nimo.affine
        imo = nimo.get_fdata(dtype=np.float32)
        imo[:] = 0

        for i in hparts:
            fin = os.path.join(
                os.path.dirname(hmupos[0]['niipath']),
                'r' + os.path.basename(hmupos[i]['niipath']).split('.')[0] + '.nii.gz')
            nim = nib.load(fin)
            mu = nim.get_fdata(dtype=np.float32)
            mu[mu < 0] = 0

            imo += mu

        hdr = nimo.header
        hdr['cal_max'] = np.max(imo)
        hdr['cal_min'] = np.min(imo)
        fmu = os.path.join(os.path.dirname(hmupos[0]['niipath']), 'hardware_umap.nii.gz')
        hmu_nii = nib.Nifti1Image(imo, A)
        nib.save(hmu_nii, fmu)

        hmu = np.transpose(imo[:, ::-1, ::-1], (2, 1, 0))

        # save the objects to numpy arrays
        fnp = os.path.join(fmudir, "hmumap.npz")
        np.savez(fnp, hmu=hmu, A=A, fmu=fmu)
        # update the datain dictionary (assuming it is mutable)
        datain['hmumap'] = fnp

        if del_interm:
            for fname in glob.glob(os.path.join(fmudir, '_*.nii*')):
                os.remove(fname)
            for fname in glob.glob(os.path.join(fmudir, 'r_*.nii*')):
                os.remove(fname)

    # return image dictionary with the image itself and some other stats
    hmu_dct = {'im': hmu, 'fim': fmu, 'affine': A}
    if 'fnp' in locals():
        hmu_dct['fnp'] = fnp

    return hmu_dct
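
A short usage sketch (not part of the original example) for hdw_mumap; the hardware-part indices in hparts and the paths are assumptions for illustration, and datain/mMRpars are assumed to come from the same NiftyPET helpers as in the previous sketch.

mu_h = hdw_mumap(datain, hparts=[1, 2, 4], params=mMRpars,
                 outpath='/data/output', use_stored=True)
print(mu_h['im'].shape)                 # combined hardware mu-map array
print(mu_h['fim'])                      # NIfTI with the combined hardware mu-map
print(mu_h['affine'])                   # affine of the reference image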
Esempio n. 7
0
def hist(
        datain,
        txLUT,
        axLUT,
        Cnt,
        t0=0,
        t1=0,
        cmass_sig=5,
        frms=None,  # np.array([0], dtype=np.uint16),
        use_stored=False,
        store=False,
        outpath=''):
    '''
    Process list mode data with histogramming and optional bootstrapping:
    Cnt['BTP'] = 0: no bootstrapping [default];
    Cnt['BTP'] = 1: non-parametric bootstrapping;
    Cnt['BTP'] = 2: parametric bootstrapping (using Poisson distribution with mean = 1)
    '''
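    # Illustrative note (not part of the original code): to enable parametric
    # bootstrapping one would set Cnt['BTP'] = 2 in the constants dictionary
    # before calling this function; Cnt['BTP'] = 0 keeps plain histogramming.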

    if Cnt['SPN'] == 1: nsinos = Cnt['NSN1']
    elif Cnt['SPN'] == 11: nsinos = Cnt['NSN11']
    elif Cnt['SPN'] == 0: nsinos = Cnt['NSEG0']
    else: raise ValueError('unrecognised sinogram span: {}'.format(Cnt['SPN']))

    log.debug('histogramming with span {}.'.format(Cnt['SPN']))

    if (use_stored is True and 'sinos' in datain and os.path.basename(
            datain['sinos']) == f"sinos_s{Cnt['SPN']}_frm-{t0}-{t1}.npz"):
        hstout = dict(np.load(datain['sinos'], allow_pickle=True))
        nitag = len(hstout['phc'])
        log.debug(
            'acquisition duration by integrating time tags is {} sec.'.format(
                nitag))

    elif os.path.isfile(datain['lm_bf']):
        # gather info about the LM time tags
        nele, ttags, tpos = mmr_lmproc.lminfo(datain['lm_bf'])
        nitag = int((ttags[1] - ttags[0] + 999) / 1000)
        log.debug(
            'acquisition duration by integrating time tags is {} sec.'.format(
                nitag))

        # adjust frame time if outside the limit
        if t1 > nitag: t1 = nitag
        # check if the time point is allowed
        if t0 >= nitag:
            raise ValueError(
                'e> the time frame definition is not allowed! (outside acquisition time)'
            )

        # ---------------------------------------
        # preallocate all the output arrays
        VTIME = 2
        MXNITAG = 5400  # limit to 1hr and 30mins
        if (nitag > MXNITAG):
            tn = int(MXNITAG / (1 << VTIME))
        else:
            tn = int((nitag + (1 << VTIME) - 1) / (1 << VTIME))

        pvs = np.zeros((tn, Cnt['NSEG0'], Cnt['NSBINS']), dtype=np.uint32)
        phc = np.zeros((nitag), dtype=np.uint32)
        dhc = np.zeros((nitag), dtype=np.uint32)
        mss = np.zeros((nitag), dtype=np.float32)

        bck = np.zeros((2, nitag, Cnt['NBCKT']), dtype=np.uint32)
        fan = np.zeros((Cnt['NRNG'], Cnt['NCRS']), dtype=np.uint32)

        # > prompt and delayed sinograms
        psino = np.zeros((nsinos, Cnt['NSANGLES'], Cnt['NSBINS']),
                         dtype=np.uint16)
        dsino = np.zeros((nsinos, Cnt['NSANGLES'], Cnt['NSBINS']),
                         dtype=np.uint16)

        # > single slice rebinned prompts
        ssr = np.zeros((Cnt['NSEG0'], Cnt['NSANGLES'], Cnt['NSBINS']),
                       dtype=np.uint32)

        hstout = {
            'phc': phc,
            'dhc': dhc,
            'mss': mss,
            'pvs': pvs,
            'bck': bck,
            'fan': fan,
            'psn': psino,
            'dsn': dsino,
            'ssr': ssr
        }
        # ---------------------------------------

        # do the histogramming and processing
        mmr_lmproc.hist(hstout, datain['lm_bf'], t0, t1, txLUT, axLUT, Cnt)

        if store:
            if outpath == '':
                fsino = os.path.dirname(datain['lm_bf'])
            else:
                fsino = os.path.join(outpath, 'sino')
                nimpa.create_dir(fsino)
            # complete the path with the file name
            fsino = os.path.join(fsino,
                                 f"sinos_s{Cnt['SPN']}_frm-{t0}-{t1}.npz")
            # store to the above path
            np.savez(fsino, **hstout)

    else:
        log.error('input list-mode data is not defined.')
        return

    # short (interval) projection views
    # (each uint32 packs the sagittal view in the upper bits and the coronal view in the lowest byte)
    pvs_sgtl = np.right_shift(hstout['pvs'], 8).astype(np.float32)
    pvs_crnl = np.bitwise_and(hstout['pvs'], 255).astype(np.float32)

    cmass = Cnt['SO_VXZ'] * ndi.gaussian_filter(hstout['mss'], cmass_sig, mode='mirror')
    log.debug(
        'centre of mass of axial radiodistribution (filtered with Gaussian of SD ={}):  COMPLETED.'
        .format(cmass_sig))

    # ========================= BUCKET SINGLES =========================
    # > number of single rates reported for the given second
    # > the two most significant bits encode the number of reports
    nsr = (hstout['bck'][1, :, :] >> 30)

    # > average in a second period
    hstout['bck'][0, nsr > 0] = hstout['bck'][0, nsr > 0] / nsr[nsr > 0]

    # > time indices when single rates were given
    tmsk = np.sum(nsr, axis=1) > 0
    single_rate = np.copy(hstout['bck'][0, tmsk, :])

    # > time
    t = np.arange(nitag)
    t = t[tmsk]

    # > get the average bucket singles:
    buckets = np.int32(np.sum(single_rate, axis=0) / single_rate.shape[0])
    log.debug('dynamic and static buckets single rates:  COMPLETED.')
    # ==================================================================

    # account for the fact that t0 == t1 means the full dataset is processed
    if t0 == t1: t1 = t0 + nitag

    return {
        't0': t0,
        't1': t1,
        'dur': t1 - t0,           # duration
        'phc': hstout['phc'],     # prompts head curve
        'dhc': hstout['dhc'],     # delayeds head curve
        'cmass': cmass,           # centre of mass of the radiodistribution in axial direction
        'pvs_sgtl': pvs_sgtl,     # sagittal projection views in short intervals
        'pvs_crnl': pvs_crnl,     # coronal projection views in short intervals
        'fansums': hstout['fan'], # fan sums of delayeds for variance reduction of randoms
        'sngl_rate': single_rate, # bucket singles over time
        'tsngl': t,               # time points of singles measurements in list-mode data
        'buckets': buckets,       # average bucket singles
        'psino': hstout['psn'].astype(np.uint16), # prompt sinogram
        'dsino': hstout['dsn'].astype(np.uint16), # delayeds sinogram
        'pssr': hstout['ssr']     # single-slice rebinned sinogram of prompts
    }  # yapf: disable
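
A short usage sketch (not part of the original example) for hist; the output path is a placeholder and txLUT/axLUT/Cnt are assumed to be taken from the same scanner-parameter dictionary (mMRpars) used in the earlier sketches.

hst = hist(datain, mMRpars['txLUT'], mMRpars['axLUT'], mMRpars['Cnt'],
           t0=0, t1=0, store=True, outpath='/data/output')
print('duration [s]:', hst['dur'])
print('prompt sinogram shape:', hst['psino'].shape)
print('total prompts:', int(hst['phc'].sum()))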