def correlationCoefficient(x, y, mask=None):
    """
        calcualate the correlation coefficient of two numpys
        """
    if x.shape != y.shape:
        apDisplay.printError(
            "images are not the same shape in correlation calc")
    if mask is not None:
        if x.shape != mask.shape:
            apDisplay.printError(
                "mask is not the same shape as images in correlation calc")
        tot = mask.sum()
        if tot == 0:
            return 0.0
        x = imagenorm.normStdevMask(x, mask)
        y = imagenorm.normStdevMask(y, mask)
    else:
        tot = float(x.shape[0] * x.shape[1])
        x = imagenorm.normStdev(x)
        y = imagenorm.normStdev(y)
    z = x * y
    if mask is not None:
        z = z * mask
    sm = z.sum()
    return sm / tot
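For reference, here is a minimal numpy-only sketch of what the function computes, under the assumption that imagenorm.normStdev rescales an image to zero mean and unit standard deviation; with that assumption the result is the Pearson correlation coefficient.

import numpy

def _norm_stdev(a):
    # assumed behavior of imagenorm.normStdev: zero mean, unit standard deviation
    return (a - a.mean()) / a.std()

x = numpy.random.random((64, 64))
y = 0.5 * x + 0.5 * numpy.random.random((64, 64))
r = (_norm_stdev(x) * _norm_stdev(y)).mean()
# r should agree with numpy.corrcoef(x.ravel(), y.ravel())[0, 1]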
def msd(x, y, mask=None):
    """
    Calculate the mean squared deviation between two normalized numpy arrays.
    """
    if mask is not None:
        tot = float(mask.sum())
        if tot == 0:
            return 1.0e13
        x = imagenorm.normStdevMask(x, mask)
        y = imagenorm.normStdevMask(y, mask)
    else:
        tot = float(x.shape[0] * x.shape[1])
        x = imagenorm.normStdev(x)
        y = imagenorm.normStdev(y)
    z = (x - y)**2
    if mask is not None:
        z = z * mask
    sm = z.sum()
    return sm / tot
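Because both inputs are standardized first, the unmasked MSD relates directly to the correlation coefficient: for zero-mean, unit-variance images, MSD = 2 * (1 - r). A quick check, assuming msd and its imagenorm dependency are importable from this module:

import numpy

a = numpy.random.random((64, 64))
b = numpy.roll(a, 5, axis=0)
print msd(a, a)   # 0.0 for identical images
print msd(a, b)   # approaches 2.0 as the images decorrelate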
def getImageFiles(imgtree, rawdir, link, copy):
    #This function should replace linkImageFiles in all calls (i.e. in tomoaligner.py and everywhere else)
    filenamelist = []
    newimgtree = []
    for imagedata in imgtree:
        #set up names
        imgpath = imagedata['session']['image path']
        presetname = imagedata['preset']['name']
        imgprefix = presetname + imagedata['filename'].split(presetname)[-1]
        imgname = imgprefix + '.mrc'
        filenamelist.append(imgprefix)
        destpath = os.path.join(rawdir, imgname)
        newimgtree.append(destpath)
        imgfullpath = os.path.join(imgpath, imagedata['filename'] + '.mrc')

        if link == "True":
            #create symlinks to files
            if os.path.islink(destpath):
                os.remove(destpath)
            if not os.path.isfile(destpath):
                os.symlink(imgfullpath, destpath)
        elif copy == "True":
            shutil.copy(imgfullpath, destpath)

            #Y-flip raw images, normalize them, and convert them to float32, because Protomo expects float32 data
            image = mrc.read(destpath)
            image = numpy.flipud(image)
            image = imagenorm.normStdev(image)
            image = numpy.float32(image)
            mrc.write(image, destpath)
        #else: just return values
    return filenamelist, newimgtree
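The prefix logic above keeps the preset name and everything that follows it, dropping any session-specific prefix from the stored filename. A small illustration with a hypothetical filename and preset name:

# hypothetical values, for illustration only
filename = '14jan01a_00012en_tomo_001'
presetname = 'tomo'
imgprefix = presetname + filename.split(presetname)[-1]
print imgprefix           # 'tomo_001'
print imgprefix + '.mrc'  # name used for the link or copy in rawdir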
def doseCompensate(seriesname, rundir, sessionname, tiltseriesnumber, frame_aligned_images, raw_path, pixelsize, dose_presets, dose_a, dose_b, dose_c):
	"""
	Images will be lowpass filtered using equation (3) from Grant & Grigorieff, 2015.
	No changes to the database are made. No backups are made.
	"""
	sessiondata = apDatabase.getSessionDataFromSessionName(sessionname)
	tiltseriesdata = apDatabase.getTiltSeriesDataFromTiltNumAndSessionId(tiltseriesnumber,sessiondata)
	tiltdata = apTomo.getImageList([tiltseriesdata])
	
	frame_tiltdata, non_frame_tiltdata = frameOrNonFrameTiltdata(tiltdata)
	tilts, ordered_imagelist, accumulated_dose_list, ordered_mrc_files, refimg = apTomo.orderImageList(frame_tiltdata, non_frame_tiltdata, frame_aligned="False")
	if frame_aligned_images == "True":  #For different image filenames
		a, ordered_imagelist, c, d, e = apTomo.orderImageList(frame_tiltdata, non_frame_tiltdata, frame_aligned=frame_aligned_images)
	newfilenames, new_ordered_imagelist = apProTomo.getImageFiles(ordered_imagelist, raw_path, link=False, copy=False)
	if (dose_presets == "Light"):
		dose_a = 0.245
		dose_b = -1.6
		dose_c = 12
	elif (dose_presets == "Moderate"):
		dose_a = 0.245
		dose_b = -1.665
		dose_c = 2.81
	elif (dose_presets == "Heavy"):
		dose_a = 0.245
		dose_b = -1.4
		dose_c = 2
	
	apDisplay.printMsg('Dose compensating all tilt images with a=%s, b=%s, and c=%s...' % (dose_a, dose_b, dose_c))
	
	for j, image in enumerate(new_ordered_imagelist):
		lowpass = float(np.real(complex(dose_a/(accumulated_dose_list[j] - dose_c))**(1/dose_b)))  #equation (3) from Grant & Grigorieff, 2015
		if lowpass < 0.0:
			lowpass = 0.0
		im = mrc.read(image)
		im = imagefilter.lowPassFilter(im, apix=pixelsize, radius=lowpass, msg=False)
		im = imagenorm.normStdev(im)
		mrc.write(im, image)
	
	#Make plots
	apProTomo2Aligner.makeDosePlots(rundir, seriesname, tilts, accumulated_dose_list, dose_a, dose_b, dose_c)
	
	apDisplay.printMsg("Dose compensation finished for tilt-series #%s!" % tiltseriesnumber)
	
	return
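For reference, a standalone sketch of the per-image lowpass value computed in the loop above (equation (3) from Grant & Grigorieff, 2015), evaluated for a few illustrative accumulated doses with the "Moderate" preset constants; negative values are clamped to zero exactly as in the loop:

import numpy as np

dose_a, dose_b, dose_c = 0.245, -1.665, 2.81   # "Moderate" preset
for accumulated_dose in (5.0, 15.0, 40.0):     # illustrative accumulated doses (e-/A^2)
    lowpass = float(np.real(complex(dose_a / (accumulated_dose - dose_c))**(1 / dose_b)))
    if lowpass < 0.0:
        lowpass = 0.0
    print "dose %.1f -> lowpass %.2f" % (accumulated_dose, lowpass)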
def createDots(angle=0.0, noiselevel=3.0, shape=(128,128), shift=None):
        a = numpy.zeros(shape, dtype=numpy.float64)
        a30 = int(min(shape)*.30) #30%
        a40 = int(2*min(shape)/5.0) #40%
        a50 = int(min(shape)/2.0) #50%
        a60 = int(3*min(shape)/5.0) #60%
        a70 = int(min(shape)*.70) #70%
        a[a30:a40,a30:a40] = 1
        a[a50:a60,a30:a40] = 1
        a[a60:a70,a40:a50] = 1
        a[a40:a50,a50:a60] = 1
        a[a30:a40,a60:a70] = 1
        a[a60:a70,a60:a70] = 1
        b = a
        b = ndimage.rotate(b, angle, reshape=False, order=1)
        if shift is not None:
                b = ndimage.shift(b, shift=shift, mode='wrap', order=1)
        bnoise = b + noiselevel*numpy.random.random(shape)
        bnoise = ndimage.median_filter(bnoise, size=2)
        bnoise = imagenorm.normStdev(bnoise)

        return bnoise
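A hypothetical round trip using the helpers above, assuming createDots, correlationCoefficient, and msd live in the same module with the imports they already use: build a clean reference, a shifted noisy copy, and score the mismatch.

ref = createDots(angle=0.0, noiselevel=0.5)
moved = createDots(angle=0.0, noiselevel=0.5, shift=(3, -2))
print correlationCoefficient(ref, moved)   # below 1.0 because of the shift and independent noise
print msd(ref, moved)                      # above 0.0 for the same reason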
	def mergeImageStackIntoBigStack(self, imgstackfile, imgdata):
		t0 = time.time()
		apDisplay.printMsg("filtering particles and adding to stack")
		# if applying a boxmask, write to a temporary file before adding to main stack
		bigimgstack = os.path.join(self.params['rundir'], self.params['single'])
		if self.params['boxmask'] is not None:
			bigimgstack = os.path.splitext(imgstackfile)[0]+"-premask.hed"
		### here is the craziness
		### step 1: read imgstackfile into memory
		imgstackmemmap = imagic.read(imgstackfile)
		### when only one particle is read, it defaults to a 2D array instead of a 3D array
		if len(imgstackmemmap.shape) < 3:
			imgstackmemmap = imgstackmemmap.reshape(1, imgstackmemmap.shape[0], imgstackmemmap.shape[1])
		if self.params['debug'] is True:
			print "imgstackmemmap.shape", imgstackmemmap.shape
		apix = self.params['apix'] #apDatabase.getPixelSize(imgdata)

		boxshape = (self.boxsize, self.boxsize)
		processedParticles = []
		for particle in imgstackmemmap:

			### step 2: filter particles
			### high / low pass filtering
			#if self.params['pixlimit']:
			#	particle = imagefilter.pixelLimitFilter(particle, self.params['pixlimit'])
			if self.params['lowpass']:
				particle = imagefilter.lowPassFilter(particle, apix=apix, radius=self.params['lowpass'])
			if self.params['highpass']:
				particle = imagefilter.highPassFilter2(particle, self.params['highpass'], apix=apix)
			### unless specified, invert the images
			if self.params['inverted'] is True:
				particle = -1.0 * particle
			if particle.shape != boxshape:
				if self.boxsize <= particle.shape[0] and self.boxsize <= particle.shape[1]:
					particle = imagefilter.frame_cut(particle, boxshape)
				else:
					apDisplay.printError("particle shape (%dx%d) is smaller than boxsize (%d)"
						%(particle.shape[0], particle.shape[1], self.boxsize))

			### step 3: normalize particles
			#self.normoptions = ('none', 'boxnorm', 'edgenorm', 'rampnorm', 'parabolic') #normalizemethod
			if self.params['normalizemethod'] == 'boxnorm':
				particle = imagenorm.normStdev(particle)
			elif self.params['normalizemethod'] == 'edgenorm':
				particle = imagenorm.edgeNorm(particle)
			elif self.params['normalizemethod'] == 'rampnorm':
				particle = imagenorm.rampNorm(particle)
			elif self.params['normalizemethod'] == 'parabolic':
				particle = imagenorm.parabolicNorm(particle)

			### step 4: decimate/bin particles if specified
			### binning is last, so we maintain most detail and do not have to deal with binned apix
			if self.params['bin'] > 1:
				particle = imagefun.bin2(particle, self.params['bin'])

			#from scipy.misc import toimage
			#toimage(particle).show()

			processedParticles.append(particle)

		### step 5: merge particle list with larger stack
		apImagicFile.appendParticleListToStackFile(processedParticles, bigimgstack,
			msg=self.params['debug'])

		#remove original image stack from memory
		del imgstackmemmap
		del processedParticles

		t0 = time.time()
		# if applying boxmask, now mask the particles & append to stack
		if self.params['boxmask'] is not None:
			# normalize particles before boxing, since zeros in mask
			# can affect subsequent processing if not properly normalized
			apEMAN.executeEmanCmd("proc2d %s %s edgenorm inplace"%(bigimgstack,bigimgstack),showcmd=False)
			imgstack = apImagicFile.readImagic(bigimgstack, msg=False)
			maskstack = apImagicFile.readImagic(self.params['boxmaskf'],msg=False)
			for i in range(len(imgstack['images'])):
				imgstack['images'][i]*=maskstack['images'][i]
			maskedpartstack = os.path.splitext(imgstackfile)[0]+"-aftermask.hed"
			apImagicFile.writeImagic(imgstack['images'], maskedpartstack)
			bigimgstack = os.path.join(self.params['rundir'], self.params['single'])
			apEMAN.executeEmanCmd("proc2d %s %s flip"%(maskedpartstack,bigimgstack))

		### count particles
		bigcount = apFile.numImagesInStack(bigimgstack, self.boxsize/self.params['bin'])
		imgcount = apFile.numImagesInStack(imgstackfile, self.boxsize)

		### append to particle log file
		partlogfile = os.path.join(self.params['rundir'], self.timestamp+"-particles.info")
		f = open(partlogfile, 'a')
		for i in range(imgcount):
			partnum = self.particleNumber + i + 1
			line = str(partnum)+'\t'+os.path.join(imgdata['session']['image path'], imgdata['filename']+".mrc")
			f.write(line+"\n")
		f.close()

		self.mergestacktimes.append(time.time()-t0)

		return bigcount
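As a design note, the step-3 normalization chain above (and the identical one in start() below) could be written as a table lookup instead of an if/elif ladder; a minimal sketch, assuming the same imagenorm helpers:

NORMALIZERS = {
    'boxnorm':   imagenorm.normStdev,
    'edgenorm':  imagenorm.edgeNorm,
    'rampnorm':  imagenorm.rampNorm,
    'parabolic': imagenorm.parabolicNorm,
}

def normalizeParticle(particle, method):
    # 'none' or any unrecognized method leaves the particle untouched
    func = NORMALIZERS.get(method)
    return func(particle) if func else particle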
	def start(self):

		### Works
		# read from MRC image/stack
		# read from HED/IMG stack
		# write to MRC image
		# write to MRC stack		
		# write to HED/IMG stack
		# append to MRC stack
		# append to HED/IMG stack
		# filter images
		# implement binning
		# implement normalization

		### needs more testing
		# write pixelsize to new MRC file
		# get apix from MRC header
		# implement proc2d --average
		# implement proc2d --list feature

		### TODO
		# read from SPIDER stack
		# read from SPIDER image
		# read from EMAN2/HDF stack		
		# write to SPIDER image
		# write to SPIDER stack
		# write to EMAN2/HDF stack
		# get apix from HED/IMG header
		# implement proc2d --rotavg
		# implement proc2d --clip

		# determine numParticles to add
		if self.params['last'] is None:
			self.params['last'] = self.inputNumParticles - 1 #stacks start at 0
		elif self.params['last'] > self.inputNumParticles:
			apDisplay.printWarning("Last particle requested (%d) is larger than available particles (%d)"
				%(self.params['last'], self.inputNumParticles))
			self.params['last'] = self.inputNumParticles - 1 #stacks start at 0
		addNumParticles = self.params['last'] - self.params['first'] + 1

		### prepare for an existing file
		existNumParticles = self.outFileStatus()
		self.totalParticles = existNumParticles + addNumParticles

		indata = self.readFileData(self.params['infile'])

		# It is more efficient to process a batch of particles and write them to disk
		# together than to write each particle to disk individually (see the
		# buffered-append sketch after this method).
		# Particles are read through a memory map (numpy.memmap), so we can pretend to
		# read them all into memory at once.
		particlesPerCycle = self.getParticlesPerCycle(self.params['infile'])

		if self.params['average'] is True:
			summedParticle = numpy.zeros((self.boxsize, self.boxsize))

		processedParticles = []
		if self.params['list']:
			partlist = self.readKeepList()
		else:
			partlist = range(self.params['first'], self.params['first']+addNumParticles)
		count = 0
		for partnum in partlist:
			count += 1
			particle = indata[partnum]
			if self.params['debug'] is True:
				print "---------"
				print "Particle Number: %d of %d"%(partnum, addNumParticles)
			if self.params['pixlimit']:
				self.message("pixlimit: %s"%(self.params['pixlimit']))
				particle = imagefilter.pixelLimitFilter(particle, self.params['pixlimit'])
			if self.params['lowpass']:
				self.message("lowpass: %s"%(self.params['lowpass']))
				particle = imagefilter.lowPassFilter(particle, apix=self.params['apix'], radius=self.params['lowpass'])
			if self.params['highpass']:
				self.message("highpass: %s"%(self.params['highpass']))
				particle = imagefilter.highPassFilter2(particle, self.params['highpass'], apix=self.params['apix'])
			### unless specified, invert the images
			if self.params['inverted'] is True:
				self.message("inverted: %s"%(self.params['inverted']))
				particle = -1.0 * particle
			### clipping
			"""
			if particle.shape != boxshape:
				if boxsize <= particle.shape[0] and boxsize <= particle.shape[1]:
					particle = imagefilter.frame_cut(particle, boxshape)
				else:
					apDisplay.printError("particle shape (%dx%d) is smaller than boxsize (%d)"
						%(particle.shape[0], particle.shape[1], boxsize))
			"""

			### step 3: normalize particles
			#self.normoptions = ('none', 'boxnorm', 'edgenorm', 'rampnorm', 'parabolic') #normalizemethod
			self.message("normalize method: %s"%(self.params['normalizemethod']))
			if self.params['normalizemethod'] == 'boxnorm':
				particle = imagenorm.normStdev(particle)
			elif self.params['normalizemethod'] == 'edgenorm':
				particle = imagenorm.edgeNorm(particle)
			elif self.params['normalizemethod'] == 'rampnorm':
				particle = imagenorm.rampNorm(particle)
			elif self.params['normalizemethod'] == 'parabolic':
				particle = imagenorm.parabolicNorm(particle)

			### step 4: decimate/bin particles if specified
			### binning is last, so we maintain most detail and do not have to deal with binned apix
			if self.params['bin'] > 1:
				particle = imagefun.bin2(particle, self.params['bin'])

			### working above this line
			if self.params['average'] is True:
				summedParticle += particle
			else:
				processedParticles.append(particle)

			if len(processedParticles) == particlesPerCycle:
				### step 5: merge particle list with larger stack
				self.appendParticleListToStackFile(processedParticles, self.params['outfile'])
				sys.stderr.write("%d of %d"%(count, len(partlist)))
				processedParticles = []
		if len(processedParticles) > 0:
			self.appendParticleListToStackFile(processedParticles, self.params['outfile'])
		if self.params['average'] is True:
			avgParticle = summedParticle / count
			self.appendParticleListToStackFile([avgParticle,], self.params['outfile'])
		print "Wrote %d particles to file "%(self.particlesWritten)