Code Example #1
File: gluAifRidge.py  Project: tblazey/nagini
    if args.fwhmSeg is not None:
        voxSize = pet.header.get_zooms()
        sigmas = np.divide(args.fwhmSeg[0] / np.sqrt(8.0 * np.log(2.0)),
                           voxSize[0:3])
        cbvData = filt.gaussian_filter(cbvData, sigmas).reshape(cbvData.shape)
        cbvData[segData == 0] = 0.0

#Get mask where inputs are non-zero
maskData = np.where(
    np.logical_and(
        roiData != 0,
        np.logical_and(np.logical_and(brainData != 0, cbfData != 0),
                       cbvData != 0)), 1, 0)

#Flatten the PET images and then mask
petMasked = nagini.reshape4d(petData)[maskData.flatten() > 0, :]
roiMasked = roiData[maskData > 0].flatten()

#Prep CBF and CBV data
if args.fwhm is None:
    #Do not smooth, just mask
    cbfMasked = cbfData[maskData > 0].flatten()
    cbvMasked = cbvData[maskData > 0].flatten()
else:
    #Prepare for smoothing
    voxSize = pet.header.get_zooms()
    sigmas = np.divide(args.fwhm[0] / np.sqrt(8.0 * np.log(2.0)), voxSize[0:3])

    #Smooth and mask data
    cbfMasked = filt.gaussian_filter(cbfData, sigmas)[maskData > 0].flatten()
    cbvMasked = filt.gaussian_filter(cbvData, sigmas)[maskData > 0].flatten()
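
The smoothing branches above turn a kernel width given as a FWHM in millimetres into the per-axis sigmas (in voxel units) that scipy.ndimage.gaussian_filter expects, using FWHM = sqrt(8 ln 2) * sigma. A minimal, self-contained sketch of just that conversion (the 6 mm kernel and 2 mm voxels are made-up values, not taken from the script):

import numpy as np
from scipy import ndimage as filt

def fwhm_to_sigma(fwhm_mm, vox_mm):
    #sigma = FWHM / sqrt(8 * ln 2); dividing by the voxel size gives voxel units
    return np.asarray(fwhm_mm, dtype=float) / np.sqrt(8.0 * np.log(2.0)) / np.asarray(vox_mm, dtype=float)

sigmas = fwhm_to_sigma(6.0, [2.0, 2.0, 2.0])   #roughly 1.27 voxels on each axis
smoothed = filt.gaussian_filter(np.random.rand(32, 32, 32), sigmas)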
Code Example #2
File: ecatToWell.py  Project: tblazey/nagini
#!/usr/bin/python

#Parse arguments
import argparse, sys
argParse = argparse.ArgumentParser(description='Convert ECAT PET image to decay corrected well counts/mL/sec:')
argParse.add_argument('pet',help='Nifti ECAT count image',nargs=1,type=str)
argParse.add_argument('info',help='Yi Su style info file',nargs=1,type=str)
argParse.add_argument('pie',help='Pie calibration factor',nargs=1,type=float)
argParse.add_argument('out',help='Root for output files',nargs=1,type=str)
args = argParse.parse_args()

#Load in libraries we will need
import numpy as np, nibabel as nib, nagini

#Load in PET image
pet = nib.load(args.pet[0])
petData = nagini.reshape4d(pet.get_data())

#Load in info file
info = nagini.loadInfo(args.info[0])

#Loop through frames
for fIdx in range(petData.shape[1]):

	#Convert to decay corrected well counts
	petData[:,fIdx] *= 60.0 * args.pie[0] / info[fIdx,2] * info[fIdx,3]

#Write out result
well = nib.Nifti1Image(petData.reshape(pet.shape),pet.affine,header=pet.header)
well.to_filename(args.out[0]+'.nii.gz')

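The loop above scales every frame by 60 * pie / frame-duration * decay-factor. The same operation can be written without the explicit loop. This is only a sketch; it assumes (as the indexing above suggests, but does not prove) that info[:, 2] holds frame durations in seconds and info[:, 3] holds per-frame decay-correction factors from the Yi Su style info file:

#Vectorized form of the per-frame scaling (column meanings are assumptions)
scale = 60.0 * args.pie[0] / info[:, 2] * info[:, 3]   #one factor per frame
petData *= scale[np.newaxis, :]                        #broadcast across voxels
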
Code Example #3
File: cbfAif.py  Project: tblazey/nagini
    #Load in mask data
    brainData = brain.get_data()

else:
    #Make a fake mask
    brainData = np.ones(pet.shape[0:3])

#Make flat mask
if args.ps is not None:
    flatMask = np.logical_and(brainData.flatten() > 0, cbfData.flatten() > 0)
    cbfMasked = cbfData.flatten()[flatMask]
else:
    flatMask = brainData.flatten() > 0

#Flatten the PET images and then mask
petMasked = nagini.reshape4d(petData)[flatMask, :]

#Get pet start and end times. Assume start of first recorded frame is injection (has an offset which is subtracted out)
startTime = info[:, 0] - info[0, 0]
midTime = info[:, 1] - info[0, 0]
endTime = info[:, 2] + startTime

#Get aif time variable. Assume clock starts at injection.
aifTime = aif[:, 0]

#Extract AIF counts
aifC = aif[:, 1]

#Logic for preparing blood sucker curves
if args.dcv != 1:
Code Example #4
File: splineSuvr.py  Project: tblazey/nagini
import numpy as np, nibabel as nib, matplotlib.pyplot as plt, nagini

#Load in pet and mask headers
pet = nib.load(args.pet[0])
mask = nib.load(args.mask[0])

#Load in info file
info = nagini.loadInfo(args.info[0])

#Make sure mask and PET dimensions match
if pet.shape[0:3] != mask.shape[0:3]:
    print 'Error: Images do not have the same dimensions'
    sys.exit()

#Get image data
petData = nagini.reshape4d(pet.get_data())
maskData = mask.get_data().flatten()

#Mask pet data
petMasked = petData[maskData == 1, :]

#Get PET time vector
petTime = info[:, 1]

#Calculate whole-brain timecourse
petMean = np.mean(petMasked, axis=0)

#Get spline knots and basis
petKnots = nagini.knotLoc(petTime, args.nKnots[0])
petKnots[0] = 10
petBasis = nagini.rSplineBasis(petTime, petKnots)
Code Example #5
pet = nagini.loadHeader(args.pet[0])
brain = nagini.loadHeader(args.brain[0])
roi = nagini.loadHeader(args.roi[0])

#Check to make sure dimensions match
if pet.shape[0:3] != brain.shape[0:3] or pet.shape[3] != info.shape[0] or brain.shape[0:3] != roi.shape[0:3]:
	print 'ERROR: Data dimensions do not match. Please check...'
	sys.exit()

#Get the image data
petData = pet.get_data()
brainData = brain.get_data()
roiData = roi.get_data()

#Flatten the PET images and then mask
petFlat = nagini.reshape4d(petData)
petMasked = petFlat[brainData.flatten()>0,:]

#Get pet into rois
petRoi = nagini.roiAvg(petFlat,roiData.flatten())

#If cbv image is given, load it up
if args.cbv is not None:

	#Load in CBV image
	cbv = nagini.loadHeader(args.cbv[0])
	if cbv.shape[0:3] != pet.shape[0:3]:
		print 'ERROR: CBV image does not match PET resolution...'
		sys.exit()
	cbvData = cbv.get_data()
Code Example #6
File: gluAif.py  Project: tblazey/nagini
	#Remask CBV image from ROI averages
	cbvData = nagini.roiBack(cbvAvgs,segData.flatten()).reshape(cbvData.shape)

	#Smooth
	if args.fwhmSeg is not None:
			voxSize = pet.header.get_zooms()
			sigmas = np.divide(args.fwhmSeg[0]/np.sqrt(8.0*np.log(2.0)),voxSize[0:3])
			cbvData = filt.gaussian_filter(cbvData,sigmas).reshape(cbvData.shape)
			cbvData[segData==0] = 0.0

#Get mask where inputs are non-zero
maskData = np.where(np.logical_and(np.logical_and(brainData!=0,cbfData!=0),cbvData!=0),1,0)

#Flatten the PET images and then mask
petMasked = nagini.reshape4d(petData)[maskData.flatten()>0,:]

#Prep CBF and CBV data
if args.fwhm is None:
	#Do not smooth,just mask
	cbfMasked = cbfData[maskData>0].flatten()
	cbvMasked = cbvData[maskData>0].flatten()
else:
	#Prepare for smoothing
	voxSize = pet.header.get_zooms()[0:3]
	sigmas = np.divide(args.fwhm[0]/np.sqrt(8.0*np.log(2.0)),voxSize[0:3])

	#Smooth and mask data
	cbfMasked = filt.gaussian_filter(cbfData,sigmas)[maskData>0].flatten()
	cbvMasked = filt.gaussian_filter(cbvData,sigmas)[maskData>0].flatten()
Code Example #7
File: oxyIdaif.py  Project: tblazey/nagini
if pet.shape[0:3] != brain.shape[0:3] or pet.shape[0:3] != cbf.shape[0:3] or \
   pet.shape[0:3] != lmbda.shape[0:3] or pet.shape[0:3] != cbv.shape[0:3] or \
   pet.shape[3] != idaif.shape[0] or pet.shape[3] != info.shape[0]:
    print 'ERROR: Data dimensions do not match. Please check...'
    sys.exit()

#Get the image data
petData = pet.get_data()
cbfData = cbf.get_data()
lmbdaData = lmbda.get_data()
cbvData = cbv.get_data()
brainData = brain.get_data()

#Flatten the PET images and then mask. Also convert parametric images back to original PET units.
brainMask = brainData.flatten()
petMasked = nagini.reshape4d(petData)[brainMask > 0, :]
cbfMasked = cbfData.flatten()[brainMask > 0] / 6000.0 * args.d
lmbdaMasked = lmbdaData.flatten()[brainMask > 0] * args.d
cbvMasked = cbvData.flatten()[brainMask > 0] / 100 * args.d

#Limit pet range
timeMask = np.logical_and(petTime >= petRange[0], petTime <= petRange[1])
petTime = petTime[timeMask]
idaif = idaif[timeMask]
petMasked = petMasked[:, timeMask]

#Interpolate the aif to minimum sampling time
minTime = np.min(np.diff(petTime))
interpTime = np.arange(petTime[0], np.ceil(petTime[-1] + minTime), minTime)
nTime = interpTime.shape[0]
aifLinear = interp.interp1d(petTime,
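
The interp1d call is cut off in this listing. Code example #9 below, from the same project, shows the complete pattern: the arterial input function is linearly interpolated onto the uniform time grid built above. A sketch of that step using the names already defined here (treat it as a guess at the truncated line, not the verbatim source):

aifLinear = interp.interp1d(petTime, idaif, kind="linear",
                            fill_value="extrapolate")
aifInterp = aifLinear(interpTime)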
Code Example #8
    maskData = np.ones((seg.shape[0], seg.shape[1], seg.shape[2]))

#Load in image data
petData = pet.get_data()
segData = seg.get_data()

#Remove 4th dimension of segmentation
if len(segData.shape) == 4:
    segData = segData[:, :, :, 0]

#Make a flattened version of the segmentation for use later
segFlat = segData.flatten()

#Reshape PET data as necessary
if len(petData.shape) == 4:
    petData = nagini.reshape4d(petData)
    nPet = petData.shape[1]
else:
    petData = petData.reshape((pet.shape[0] * pet.shape[1] * pet.shape[2], 1))
    nPet = 1

#Get ROI list and number of ROIs
roiList = np.unique(segData)
if args.noZero == 1:
    roiList = roiList[roiList != 0]
nRoi = roiList.shape[0]

#Make weight matrices
if args.weight is not None:

    #Load w matrix
Code Example #9
File: oxyIdaifLin.py  Project: tblazey/nagini
if pet.shape[0:3] != brain.shape[0:3] or pet.shape[0:3] != cbf.shape[0:3] or \
   pet.shape[0:3] != cbv.shape[0:3] or pet.shape[3] != idaif.shape[0] or \
   pet.shape[3] != info.shape[0]:
	print 'ERROR: Data dimensions do not match. Please check...'
	sys.exit()

#Get the image data
petData = pet.get_data()
cbfData = cbf.get_data()
cbvData = cbv.get_data()
brainData = brain.get_data()

#Flatten the PET images and then mask. Also convert parametric images back to original PET units.
brainData = np.logical_and(np.logical_and(cbfData!=0,cbvData!=0),brainData!=0)
brainMask = brainData.flatten()
petMasked = nagini.reshape4d(petData)[brainMask,:]
cbfMasked = cbfData.flatten()[brainMask] / 6000 * args.d
cbvMasked = cbvData.flatten()[brainMask] / 100 * args.d

#Interpolate the aif to minimum sampling time. Make sure we get the last time even if we have to go over range.
minTime = np.min(np.diff(petTime[petTime<petRange[1]]))
interpTime = np.arange(petRange[0],np.ceil(petRange[1]+minTime),minTime)
nTime = interpTime.shape[0]
aifLinear = interp.interp1d(petTime,idaif,kind="linear",fill_value="extrapolate")
aifInterp = aifLinear(interpTime)

#Get input functions for H2O and oxygen separately
aifDelay = aifLinear(interpTime-args.delay); aifDelay[aifDelay<0] = 0
aifWater = args.decay*np.convolve(aifDelay,np.exp(-args.decay*interpTime))[0:nTime]*minTime
aifOxy = aifInterp - aifWater
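
The aifWater line above approximates the continuous convolution of the delayed input curve with args.decay * exp(-args.decay * t) by a discrete np.convolve, truncated to the first nTime samples and multiplied by the sampling interval. A self-contained toy example of the same pattern (every value below is made up):

import numpy as np

dt = 0.5                                              #sampling interval (s)
t = np.arange(0.0, 60.0, dt)
inCurve = np.interp(t, [0.0, 10.0, 60.0], [0.0, 1.0, 0.2])   #toy input curve
lam = 0.05                                            #toy rate constant (1/s)
#Discrete approximation of integral inCurve(tau)*lam*exp(-lam*(t-tau)) dtau
outCurve = lam * np.convolve(inCurve, np.exp(-lam * t))[:t.size] * dt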
Code Example #10
File: splineSuvr.py  Project: tblazey/nagini
#Load libraries
import numpy as np, nibabel as nib, matplotlib.pyplot as plt, nagini

#Load in pet and mask headers
pet = nib.load(args.pet[0])
mask = nib.load(args.mask[0])

#Load in info file
info = nagini.loadInfo(args.info[0])

#Make sure mask and PET dimensions match
if pet.shape[0:3] != mask.shape[0:3]:
	print 'Error: Images do not have the same dimensions'
	sys.exit()

#Get image data
petData = nagini.reshape4d(pet.get_data())
maskData = mask.get_data().flatten()

#Mask pet data
petMasked = petData[maskData==1,:]

#Get PET time vector
petTime = info[:,1]

#Calculate whole-brain timecourse
petMean = np.mean(petMasked,axis=0)

#Get spline knots and basis
petKnots = nagini.knotLoc(petTime,args.nKnots[0]); petKnots[0] = 10
petBasis = nagini.rSplineBasis(petTime,petKnots)
Code Example #11
File: projectRois.py  Project: tblazey/nagini
args = argParse.parse_args()

#Load needed libraries
import numpy as np, nibabel as nib, nagini, sys

#Load images headers
val = nagini.loadHeader(args.val[0])
roi = nagini.loadHeader(args.roi[0])

#Load image data
valData = val.get_data()
roiData = roi.get_data()

#Reshape image data as necessary
if len(valData.shape) == 4:
	valData = nagini.reshape4d(valData)
else:
	valData = valData.reshape((val.shape[0],1))
roiData = roiData.flatten()

#Sample into ROIs
projData = nagini.roiBack(valData,roiData)

#Save projected ROI image
proj = nib.Nifti1Image(projData.reshape((roi.shape[0],roi.shape[1],roi.shape[2],projData.shape[-1])),roi.affine,header=roi.header)
proj.to_filename('%s_roiProj.nii.gz'%(args.out[0]))

Code Example #12
File: sampleRois.py  Project: tblazey/nagini
#Load image headers
img = nagini.loadHeader(args.img[0])
roi = nagini.loadHeader(args.roi[0])

#Check to make sure images have same dimensions
if img.shape[0:3] != roi.shape[0:3]:
	print 'ERROR: Images do not have same dimensions. Exiting...'
	sys.exit()

#Load image data
imgData = img.get_data()
roiData = roi.get_data()

#Reshape image data as necessary
if len(imgData.shape) == 4:
	imgData = nagini.reshape4d(imgData)
else:
	imgData = imgData.reshape((img.shape[0]*img.shape[1]*img.shape[2],1))
roiData = roiData.flatten()

#Sample into ROIs
avgData = nagini.roiAvg(imgData,roiData,stat=args.stat)

#Save ROI averages in the format the user wants.
if args.nii == 1:
	avg = nib.Nifti1Image(avgData.reshape((avgData.shape[0],1,1,avgData.shape[1])),np.identity(4))
	avg.to_filename('%s_roi_%s.nii.gz'%(args.out[0],args.stat))
else:
	nagini.writeText('%s_roi_%s.txt'%(args.out[0],args.stat),avgData)
	
Code Example #13
argParse.add_argument('out', help='Root for output file', nargs=1)
args = argParse.parse_args()

#Load needed libraries
import numpy as np, nibabel as nib, nagini, sys

#Load images headers
val = nagini.loadHeader(args.val[0])
roi = nagini.loadHeader(args.roi[0])

#Load image data
valData = val.get_data()
roiData = roi.get_data()

#Reshape image data as necessary
if len(valData.shape) == 4:
    valData = nagini.reshape4d(valData)
else:
    valData = valData.reshape((val.shape[0], 1))
roiData = roiData.flatten()

#Sample into ROIs
projData = nagini.roiBack(valData, roiData)

#Save projected ROI image
proj = nib.Nifti1Image(projData.reshape(
    (roi.shape[0], roi.shape[1], roi.shape[2], projData.shape[-1])),
                       roi.affine,
                       header=roi.header)
proj.to_filename('%s_roiProj.nii.gz' % (args.out[0]))
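
nagini.roiBack itself is not shown in any of these snippets. As a rough, hypothetical stand-in, projecting one value per ROI back onto every voxel that carries that label could look like the sketch below; the function name, argument order, and the skipping of label 0 are assumptions, not the library's documented behaviour:

import numpy as np

def roi_back(roiVals, labels, skip_zero=True):
    #roiVals: (nRoi, nCols) values per ROI; labels: flattened label per voxel
    rois = np.unique(labels)
    if skip_zero:
        rois = rois[rois != 0]
    out = np.zeros((labels.size, roiVals.shape[-1]))
    for idx, r in enumerate(rois):
        out[labels == r, :] = roiVals[idx, :]
    return out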
Code Example #14
#Load image headers
img = nagini.loadHeader(args.img[0])
roi = nagini.loadHeader(args.roi[0])

#Check to make sure images have same dimensions
if img.shape[0:3] != roi.shape[0:3]:
    print 'ERROR: Images do not have same dimensions. Exiting...'
    sys.exit()

#Load image data
imgData = img.get_data()
roiData = roi.get_data()

#Reshape image data as necessary
if len(imgData.shape) == 4:
    imgData = nagini.reshape4d(imgData)
else:
    imgData = imgData.reshape((img.shape[0] * img.shape[1] * img.shape[2], 1))
roiData = roiData.flatten()

#Sample into ROIs
avgData = nagini.roiAvg(imgData, roiData, stat=args.stat)

#Save ROI averages in the format the user wants.
if args.nii == 1:
    avg = nib.Nifti1Image(
        avgData.reshape((avgData.shape[0], 1, 1, avgData.shape[1])),
        np.identity(4))
    avg.to_filename('%s_roi_%s.nii.gz' % (args.out[0], args.stat))
else:
    nagini.writeText('%s_roi_%s.txt' % (args.out[0], args.stat), avgData)
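
nagini.roiAvg is likewise only called, never defined, in these listings. A hypothetical stand-in for the mean statistic, averaging the rows of a voxel-by-frame matrix within each ROI label, might look like this (the name and the zero-label handling are assumptions):

import numpy as np

def roi_mean(data2d, labels, skip_zero=True):
    #data2d: (nVoxels, nFrames) matrix; labels: flattened ROI label per voxel
    rois = np.unique(labels)
    if skip_zero:
        rois = rois[rois != 0]
    #One row of frame-wise means per ROI
    return np.vstack([data2d[labels == r, :].mean(axis=0) for r in rois])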
Code Example #15
File: pvcCalc.py  Project: tblazey/nagini
	maskData = np.ones((seg.shape[0],seg.shape[1],seg.shape[2]))

#Load in image data
petData = pet.get_data()
segData = seg.get_data()

#Remove 4th dimension of segmentation 
if len(segData.shape) == 4:
	segData = segData[:,:,:,0]

#Make a flattened version of the segmentation for use later
segFlat = segData.flatten()

#Reshape PET data as necessary
if len(petData.shape) == 4:
	petData = nagini.reshape4d(petData)
	nPet = petData.shape[1]
else:
	petData = petData.reshape((pet.shape[0]*pet.shape[1]*pet.shape[2],1))
	nPet = 1

#Get ROI list and number of ROIs
roiList = np.unique(segData)
if args.noZero == 1:
	roiList = roiList[roiList!=0]
nRoi = roiList.shape[0]

#Make weight matrices
if args.weight is not None:

	#Load w matrix