def __init__(self):
    """Command-line entry point: split input stacks into smaller stacks
    or individual images, according to the ``-n`` grouping option.

    Side effects: parses ``sys.argv`` and writes the converted images.
    """
    parser = argparse.ArgumentParser(
        description="Extract images from a stack and write them into "
                    "smaller stacks or even individual images.")
    add = parser.add_argument  # shortcut
    add('--files', default='', help='Pattern to match input files.')
    add('--ext', default='mrc', help='Define extension of output files.')
    add('-n', default=1, help='Group output images in stacks of this size.')
    self.args = parser.parse_args()
    # Number of images per output stack (1 => one file per image)
    self.n = int(self.args.n)

    filePaths = glob.glob(self.args.files)
    filePaths.sort()
    self.imgh = ImageHandler()

    for fileN in filePaths:
        # Per-file counters used by getOutputLoc to group images
        self.outIndex = 1
        self.groupCount = 1
        n = self.getNumOfElements(fileN)
        # Force .mrc files to be read as stacks (':mrcs' suffix)
        if fileN.endswith('.mrc'):
            fileN += ':mrcs'
        filePrefix = os.path.splitext(fileN)[0]
        # Convert every image (1-based indices) to its output location
        for i in range(1, n + 1):
            outputLoc = self.getOutputLoc(filePrefix, i)
            self.imgh.convert((i, fileN), outputLoc)
def main():
    """Split every stack matched by ``--files`` into individual
    micrograph files named ``<stack>_<i>.<ext>``.

    Side effects: parses ``sys.argv`` and writes one image file per
    stack element.
    """
    parser = argparse.ArgumentParser(description="make individual micrographs from a stack")
    add = parser.add_argument  # shortcut
    # FIX: corrected the 'microgrpahs' typo in the user-facing help text
    add('--files', default='', help='list of files to split on individual micrographs')
    add('--ext', default='mrc', help='define extension of individual micrographs')
    args = parser.parse_args()

    # Create the handler before the closure below ever runs
    imgh = ImageHandler()

    def getNumOfElements(fileN):
        # A stack reports its size either as z (volume slices) or n (images)
        _, _, z, n = imgh.getDimensions(fileN)
        return z if z > 1 else n

    filePaths = glob.glob(args.files)
    filePaths.sort()

    for fileN in filePaths:
        n = getNumOfElements(fileN)
        for i in range(1, n + 1):
            outputFn = "%s_%03d.%s" % (os.path.splitext(fileN)[0], i, args.ext)
            imgh.convert((i, fileN), outputFn)
def scipion_split_particle_stacks(inputStar, inputStack, output, filename_prefix, deleteStack):
    """ Read a STAR file with particles and write them as individual images.

    If a stack of images (inputStack) is given, use those images instead of
    the ones referenced by the STAR file. Also write a new STAR file in
    `output` pointing to the extracted images.

    This function requires that the script is run within the Scipion
    Python environment (it imports pyworkflow at call time).

    :param inputStar: path to the input STAR file with particles
    :param inputStack: optional image stack; falsy to use STAR image paths
    :param output: output directory for images and the new STAR file
    :param filename_prefix: prefix for output image / STAR file names
    :param deleteStack: if true (and inputStack given), remove the stack
                        after extraction
    """
    import pyworkflow.utils as pwutils
    from pyworkflow.em import ImageHandler

    ih = ImageHandler()
    md = MetaData(inputStar)
    # Keep a record of where each image originally came from
    md.addLabels('rlnOriginalName')

    # Initialize progress bar
    progressbar = ProgressBar(width=60, total=len(md))

    for i, particle in enumerate(md, start=1):
        outputImageName = '%s/%s_%06d.mrc' % (output, filename_prefix, i)
        if inputStack:
            # Extract the i-th slice of the stack (1-based index)
            ih.convert((i, inputStack), outputImageName)
            particle.rlnOriginalName = '%s/%06d@%s' % (output, i, inputStack)
        else:
            ih.convert(particle.rlnImageName, outputImageName)
            particle.rlnOriginalName = particle.rlnImageName
        # Re-point the particle to the newly written image
        particle.rlnImageName = outputImageName
        progressbar.notify()

    print("\n")
    md.write("%s/%s.star" % (output, filename_prefix))

    if inputStack and deleteStack:
        pwutils.cleanPath(inputStack)
def generateMicImage(self, input_file, output_file=None):
    """Render a micrograph file as a PNG image.

    :param input_file: path of the micrograph to read
    :param output_file: destination PNG; defaults to the input path
                        with a '.png' extension
    """
    target = output_file or os.path.splitext(input_file)[0] + '.png'
    image = ImageHandler().createImage()
    image.read(input_file)
    pil_image = getPILImage(image)
    # Make sure the parent directory of the target exists
    pwutils.makeFilePath(target)
    pil_image.save(target, "PNG")
def sharpeningAndMonoResStep(self):
    """Iterate sharpening + local-resolution (MonoRes) estimation until
    convergence: stop when the sharpening lambda stabilizes (delta <= 0.2)
    or the resolution range collapses (max - min < 0.75 A).

    Side effects: runs sharpening/MonoRes jobs, updates ``self.iteration``
    and keeps ``sharpenedMap_last.mrc`` in the extra path.
    """
    import shutil  # local import: used only for the map snapshot below

    last_Niters = -1
    last_lambda_sharpening = 1e38
    nextIter = True
    while nextIter is True:
        self.iteration = self.iteration + 1
        print('\n====================\n'
              'Iteration %s' % (self.iteration))
        self.sharpenStep(self.iteration)
        mtd = md.MetaData()
        mtd.read(self._getFileName('METADATA_PARAMS_SHARPENING'))

        lambda_sharpening = mtd.getValue(MDL_COST, 1)
        Niters = mtd.getValue(MDL_ITER, 1)

        # Converged: lambda barely changed since the previous iteration
        if abs(lambda_sharpening - last_lambda_sharpening) <= 0.2:
            nextIter = False
            break
        last_Niters = Niters
        last_lambda_sharpening = lambda_sharpening

        self.MonoResStep(self.iteration)

        imageFile = self._getFileName('OUTPUT_RESOLUTION_FILE')
        img = ImageHandler().read(imageFile)
        imgData = img.getData()
        max_res = np.amax(imgData)
        min_res = 2 * self.inputVolume.get().getSamplingRate()

        # Converged: resolution spread collapsed below threshold
        if max_res - min_res < 0.75:
            nextIter = False
            break

        # FIX: copy the file with Python (was `os.system('cp ...')`,
        # which fails on paths containing spaces) — addresses the TODO.
        # NOTE(review): placed at the end of the loop body as in the
        # original statement order — confirm against upstream protocol.
        shutil.copy(
            self._getExtraPath('sharpenedMap_' + str(self.iteration) + '.mrc'),
            self._getExtraPath('sharpenedMap_last.mrc'))

    resFile = self.resolutionVolume.get().getFileName()
    pathres = dirname(resFile)
    if not exists(os.path.join(pathres, 'mask_data.xmd')):
        print(
            '\n====================\n'
            ' WARNING---This is not the ideal case because resolution map has been imported.'
            ' The ideal case is to calculate it previously'
            ' in the same project using MonoRes.'
            '\n====================\n')
def _sendFrames(cls, delay=0, port=5000):
    """Test helper: write per-frame .mrc files for each test movie and
    stream their paths to the protocol's socket server, then wait until
    the server acknowledges the last file.

    :param delay: seconds to sleep between movies / server polls
    :param port: TCP port of the protocol's socket server
    """
    # start with a delay so the protocol has already launched the
    # socket server when we send the first file
    time.sleep(10)
    # Create a test folder path
    pattern = cls.ds.getFile('ribo/Falcon*mrcs')
    files = glob(pattern)
    nFiles = len(files)
    nMovies = MOVS
    ih = ImageHandler()

    clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = ''
    clientSocket.connect((host, port))

    for i in range(nMovies):
        # Loop over the number of input movies if we want more for testing
        f = files[i % nFiles]
        _, _, _, nFrames = ih.getDimensions(f)
        paths = ""
        print("Writing frame stack for movie %d..." % (i + 1))
        for j in range(1, nFrames + 1):
            outputFramePath = cls.proj.getTmpPath('movie%06d_%03d.mrc'
                                                  % (i + 1, j))
            print("%d : %s" % (j, outputFramePath))
            ih.convert((j, f), outputFramePath)
            # One absolute path per line; this is the wire format
            paths += os.path.abspath(outputFramePath) + '\n'
        time.sleep(delay)
        try:
            print("Sending movie stack %d" % (i + 1))
            clientSocket.sendall(paths)
        except socket.error as err:
            # Send failed
            print('Failed to send file: %s' % paths)
            print(err)
            print('Trying to reconnect...')
            # Drop the broken connection and open a fresh one
            clientSocket.shutdown(socket.SHUT_WR)
            clientSocket.close()
            time.sleep(2)
            clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            clientSocket.connect((host, port))

    # Everything sent, lets wait until server has read all files
    serverConnected = True
    lastFile = paths.strip().split('\n')[-1]
    print('Waiting for server socket to finish...')
    print('Last file sent: %s' % lastFile)
    while serverConnected:
        reply = clientSocket.recv(4096)
        if reply:
            # Server echoes processed paths; seeing the last one means done
            if lastFile in reply:
                serverConnected = False
            time.sleep(delay)
        else:
            # Empty read: server closed the connection
            serverConnected = False

    print('Finished! Closing client socket')
    clientSocket.shutdown(socket.SHUT_WR)
    clientSocket.close()
def createChimeraScript(self):
    """Write a Chimera .cmd script that opens the input volume plus the
    local-resolution volume and colors the former by resolution, with a
    matching color-key legend.
    """
    fnRoot = "extra/"
    scriptFile = self.protocol._getPath('Chimera_resolution.cmd')
    fhCmd = open(scriptFile, 'w')
    imageFile = self.protocol._getExtraPath(OUTPUT_RESOLUTION_FILE_CHIMERA)
    img = ImageHandler().read(imageFile)
    imgData = img.getData()
    # Round resolution bounds to 2 decimals for the color scale
    min_Res = round(np.amin(imgData) * 100) / 100
    max_Res = round(np.amax(imgData) * 100) / 100

    numberOfColors = 21
    colors_labels = self.numberOfColors(min_Res, max_Res, numberOfColors)
    colorList = self.colorMapToColorList(colors_labels, self.getColorMap())

    if self.protocol.halfVolumes.get() is True:
        #fhCmd.write("open %s\n" % (fnRoot+FN_MEAN_VOL))
        #Perhaps to check the use of mean volume is useful
        fnbase = removeExt(self.protocol.inputVolume.get().getFileName())
        ext = getExt(self.protocol.inputVolume.get().getFileName())
        # NOTE(review): ext[0:4] truncates the extension to 4 chars
        # (e.g. '.mrcs' -> '.mrc') — presumably intentional; confirm.
        fninput = abspath(fnbase + ext[0:4])
        fhCmd.write("open %s\n" % fninput)
    else:
        fnbase = removeExt(self.protocol.inputVolumes.get().getFileName())
        ext = getExt(self.protocol.inputVolumes.get().getFileName())
        fninput = abspath(fnbase + ext[0:4])
        fhCmd.write("open %s\n" % fninput)
    fhCmd.write("open %s\n" % (fnRoot + OUTPUT_RESOLUTION_FILE_CHIMERA))

    if self.protocol.halfVolumes.get() is True:
        smprt = self.protocol.inputVolume.get().getSamplingRate()
    else:
        smprt = self.protocol.inputVolumes.get().getSamplingRate()
    fhCmd.write("volume #0 voxelSize %s\n" % (str(smprt)))
    fhCmd.write("volume #1 voxelSize %s\n" % (str(smprt)))
    fhCmd.write("vol #1 hide\n")

    # Build 'value,color:' pairs for the scolor command
    scolorStr = '%s,%s:' * numberOfColors
    scolorStr = scolorStr[:-1]  # drop trailing ':'
    line = ("scolor #0 volume #1 perPixel false cmap "
            + scolorStr + "\n") % colorList
    fhCmd.write(line)

    # Build the color-key legend: label only every 4th entry,
    # use '" "' as an empty label for the rest
    scolorStr = '%s %s ' * numberOfColors
    str_colors = ()
    for idx, elem in enumerate(colorList):
        if (idx % 2 == 0):
            if ((idx % 8) == 0):
                str_colors += str(elem),
            else:
                str_colors += '" "',
        else:
            str_colors += elem,
    line = ("colorkey 0.01,0.05 0.02,0.95 " + scolorStr + "\n") % str_colors
    fhCmd.write(line)
    fhCmd.close()
def _sendFrames(cls, delay=0, port=5000):
    """Test helper: write per-frame .mrc files for each test movie and
    stream their paths to the protocol's socket server, then wait until
    the server acknowledges the last file.

    :param delay: seconds to sleep between movies / server polls
    :param port: TCP port of the protocol's socket server
    """
    # start with a delay so the protocol has already launched the
    # socket server when we send the first file
    time.sleep(10)
    # Create a test folder path
    pattern = cls.ds.getFile('ribo/Falcon*mrcs')
    files = glob(pattern)
    nFiles = len(files)
    nMovies = MOVS
    ih = ImageHandler()

    clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = ''
    clientSocket.connect((host, port))

    for i in range(nMovies):
        # Loop over the number of input movies if we want more for testing
        f = files[i % nFiles]
        _, _, _, nFrames = ih.getDimensions(f)
        paths = ""
        print("Writing frame stack for movie %d..." % (i + 1))
        for j in range(1, nFrames + 1):
            outputFramePath = cls.proj.getTmpPath('movie%06d_%03d.mrc'
                                                  % (i + 1, j))
            print("%d : %s" % (j, outputFramePath))
            ih.convert((j, f), outputFramePath)
            # One absolute path per line; this is the wire format
            paths += os.path.abspath(outputFramePath) + '\n'
        time.sleep(delay)
        try:
            print("Sending movie stack %d" % (i + 1))
            clientSocket.sendall(paths)
        except socket.error as err:
            # Send failed
            print('Failed to send file: %s' % paths)
            print(err)
            print('Trying to reconnect...')
            # Drop the broken connection and open a fresh one
            clientSocket.shutdown(socket.SHUT_WR)
            clientSocket.close()
            time.sleep(2)
            clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            clientSocket.connect((host, port))

    # Everything sent, lets wait until server has read all files
    serverConnected = True
    lastFile = paths.strip().split('\n')[-1]
    print('Waiting for server socket to finish...')
    print('Last file sent: %s' % lastFile)
    while serverConnected:
        reply = clientSocket.recv(4096)
        if reply:
            # Server echoes processed paths; seeing the last one means done
            if lastFile in reply:
                serverConnected = False
            time.sleep(delay)
        else:
            # Empty read: server closed the connection
            serverConnected = False

    print('Finished! Closing client socket')
    clientSocket.shutdown(socket.SHUT_WR)
    clientSocket.close()
def createChimeraScript(self):
    """Write a Chimera .cmd script that opens the input volume plus the
    local-resolution volume and colors the former by resolution, with a
    matching color-key legend.
    """
    fnRoot = "extra/"
    scriptFile = self.protocol._getPath('Chimera_resolution.cmd')
    fhCmd = open(scriptFile, 'w')
    imageFile = self.protocol._getExtraPath(OUTPUT_RESOLUTION_FILE_CHIMERA)
    img = ImageHandler().read(imageFile)
    imgData = img.getData()
    # Round resolution bounds to 2 decimals for the color scale
    min_Res = round(np.amin(imgData) * 100) / 100
    max_Res = round(np.amax(imgData) * 100) / 100

    numberOfColors = 21
    colors_labels = self.numberOfColors(min_Res, max_Res, numberOfColors)
    colorList = self.colorMapToColorList(colors_labels, self.getColorMap())

    if self.protocol.halfVolumes.get() is True:
        #fhCmd.write("open %s\n" % (fnRoot+FN_MEAN_VOL))
        #Perhaps to check the use of mean volume is useful
        fnbase = removeExt(self.protocol.inputVolume.get().getFileName())
        ext = getExt(self.protocol.inputVolume.get().getFileName())
        # NOTE(review): ext[0:4] truncates the extension to 4 chars
        # (e.g. '.mrcs' -> '.mrc') — presumably intentional; confirm.
        fninput = abspath(fnbase + ext[0:4])
        fhCmd.write("open %s\n" % fninput)
    else:
        fnbase = removeExt(self.protocol.inputVolumes.get().getFileName())
        ext = getExt(self.protocol.inputVolumes.get().getFileName())
        fninput = abspath(fnbase + ext[0:4])
        fhCmd.write("open %s\n" % fninput)
    fhCmd.write("open %s\n" % (fnRoot + OUTPUT_RESOLUTION_FILE_CHIMERA))

    if self.protocol.halfVolumes.get() is True:
        smprt = self.protocol.inputVolume.get().getSamplingRate()
    else:
        smprt = self.protocol.inputVolumes.get().getSamplingRate()
    fhCmd.write("volume #0 voxelSize %s\n" % (str(smprt)))
    fhCmd.write("volume #1 voxelSize %s\n" % (str(smprt)))
    fhCmd.write("vol #1 hide\n")

    # Build 'value,color:' pairs for the scolor command
    scolorStr = '%s,%s:' * numberOfColors
    scolorStr = scolorStr[:-1]  # drop trailing ':'
    line = ("scolor #0 volume #1 perPixel false cmap "
            + scolorStr + "\n") % colorList
    fhCmd.write(line)

    # Build the color-key legend: label only every 4th entry,
    # use '" "' as an empty label for the rest
    scolorStr = '%s %s ' * numberOfColors
    str_colors = ()
    for idx, elem in enumerate(colorList):
        if (idx % 2 == 0):
            if ((idx % 8) == 0):
                str_colors += str(elem),
            else:
                str_colors += '" "',
        else:
            str_colors += elem,
    line = ("colorkey 0.01,0.05 0.02,0.95 " + scolorStr + "\n") % str_colors
    fhCmd.write(line)
    fhCmd.close()
def sharpeningAndMonoResStep(self):
    """Iterate sharpening + local-resolution (MonoRes) estimation until
    convergence: stop when the sharpening lambda stabilizes (delta <= 0.2)
    or the resolution range collapses (max - min < 0.75 A).

    Side effects: runs sharpening/MonoRes jobs and keeps
    ``sharpenedMap_last.mrc`` in the extra path.
    """
    import shutil  # local import: used only for the map snapshot below

    last_Niters = -1
    last_lambda_sharpening = 1e38
    nextIter = True
    iteration = 0
    while nextIter is True:
        iteration = iteration + 1
        print('\n====================\n'
              'Iteration %s' % (iteration))
        self.sharpenStep(iteration)
        mtd = md.MetaData()
        mtd.read(self._getFileName('METADATA_PARAMS_SHARPENING'))

        lambda_sharpening = mtd.getValue(MDL_COST, 1)
        Niters = mtd.getValue(MDL_ITER, 1)

        # Converged: lambda barely changed since the previous iteration
        if abs(lambda_sharpening - last_lambda_sharpening) <= 0.2:
            nextIter = False
            break
        last_Niters = Niters
        last_lambda_sharpening = lambda_sharpening

        self.MonoResStep(iteration)

        imageFile = self._getFileName('OUTPUT_RESOLUTION_FILE')
        img = ImageHandler().read(imageFile)
        imgData = img.getData()
        max_res = np.amax(imgData)
        min_res = 2 * self.inputVolume.get().getSamplingRate()

        # Converged: resolution spread collapsed below threshold
        if max_res - min_res < 0.75:
            nextIter = False
            break

        # FIX: copy the file with Python (was `os.system('cp ...')`,
        # which fails on paths containing spaces).
        # NOTE(review): placed at the end of the loop body as in the
        # original statement order — confirm against upstream protocol.
        shutil.copy(
            self._getExtraPath('sharpenedMap_' + str(iteration) + '.mrc'),
            self._getExtraPath('sharpenedMap_last.mrc'))

    resFile = self.resolutionVolume.get().getFileName()
    pathres = dirname(resFile)
    if not exists(os.path.join(pathres, 'mask_data.xmd')):
        print('\n====================\n'
              ' WARNING---This is not the ideal case because resolution map has been imported.'
              ' The ideal case is to calculate it previously'
              ' in the same project using MonoRes.'
              '\n====================\n')
class Main():
    """Command-line helper: split input image stacks into smaller stacks
    (of ``-n`` images each) or into individual images (``-n 1``)."""

    def getNumOfElements(self, fileN):
        """Return how many images ``fileN`` contains; a stack reports its
        size either as z (volume slices) or as n (number of images)."""
        _, _, z, n = self.imgh.getDimensions(fileN)
        if z > 1:
            return z
        else:
            return n

    def getOutputLoc(self, filePrefix, i):
        """Return the output location (index, filename) for input image i.

        With n == 1 each image goes to its own file; otherwise images are
        grouped into stacks of n, tracked via outIndex/groupCount.
        """
        if self.n == 1:
            index = 1
            fn = "%s_%03d.%s" % (filePrefix, i, self.args.ext)
        else:
            index = self.outIndex
            fn = "%s_%03d.%s" % (filePrefix, self.groupCount, self.args.ext)
            self.outIndex += 1
            # Current group is full: start the next output stack
            if self.outIndex > self.n:
                self.outIndex = 1
                self.groupCount += 1
        return (index, fn)

    def __init__(self):
        parser = argparse.ArgumentParser(
            description="Extract images from a stack and write them into "
                        "smaller stacks or even individual images.")
        add = parser.add_argument  # shortcut
        add('--files', default='', help='Pattern to match input files.')
        add('--ext', default='mrc', help='Define extension of output files.')
        add('-n', default=1, help='Group output images in stacks of this size.')
        self.args = parser.parse_args()
        self.n = int(self.args.n)

        filePaths = glob.glob(self.args.files)
        filePaths.sort()
        self.imgh = ImageHandler()

        for fileN in filePaths:
            self.outIndex = 1
            self.groupCount = 1
            n = self.getNumOfElements(fileN)
            # Force .mrc files to be read as stacks
            if fileN.endswith('.mrc'):
                fileN += ':mrcs'
            filePrefix = os.path.splitext(fileN)[0]
            # BUG FIX: was `range(1, 2)`, which converted only the first
            # image of every stack; iterate over all n elements.
            for i in range(1, n + 1):
                outputLoc = self.getOutputLoc(filePrefix, i)
                self.imgh.convert((i, fileN), outputLoc)
class Main():
    """Command-line helper: split input image stacks into smaller stacks
    (of ``-n`` images each) or into individual images (``-n 1``)."""

    def getNumOfElements(self, fileN):
        """Return how many images ``fileN`` contains; a stack reports its
        size either as z (volume slices) or as n (number of images)."""
        _, _, z, n = self.imgh.getDimensions(fileN)
        if z > 1:
            return z
        else:
            return n

    def getOutputLoc(self, filePrefix, i):
        """Return the output location (index, filename) for input image i.

        With n == 1 each image goes to its own file; otherwise images are
        grouped into stacks of n, tracked via outIndex/groupCount.
        """
        if self.n == 1:
            index = 1
            fn = "%s_%03d.%s" % (filePrefix, i, self.args.ext)
        else:
            index = self.outIndex
            fn = "%s_%03d.%s" % (filePrefix, self.groupCount, self.args.ext)
            self.outIndex += 1
            # Current group is full: start the next output stack
            if self.outIndex > self.n:
                self.outIndex = 1
                self.groupCount += 1
        return (index, fn)

    def __init__(self):
        parser = argparse.ArgumentParser(
            description="Extract images from a stack and write them into "
                        "smaller stacks or even individual images.")
        add = parser.add_argument  # shortcut
        add('--files', default='', help='Pattern to match input files.')
        add('--ext', default='mrc', help='Define extension of output files.')
        add('-n', default=1, help='Group output images in stacks of this size.')
        self.args = parser.parse_args()
        self.n = int(self.args.n)

        filePaths = glob.glob(self.args.files)
        filePaths.sort()
        self.imgh = ImageHandler()

        for fileN in filePaths:
            # Per-file counters used by getOutputLoc to group images
            self.outIndex = 1
            self.groupCount = 1
            n = self.getNumOfElements(fileN)
            # Force .mrc files to be read as stacks (':mrcs' suffix)
            if fileN.endswith('.mrc'):
                fileN += ':mrcs'
            filePrefix = os.path.splitext(fileN)[0]
            # Convert every image (1-based indices) to its output location
            for i in range(1, n+1):
                outputLoc = self.getOutputLoc(filePrefix, i)
                self.imgh.convert((i, fileN), outputLoc)
class ImageGenerator:
    """Renders project images as PNG thumbnails (a big and/or a small
    one) inside ``images_path``. NOTE: Python 2 code (print statement).
    """

    def __init__(self, project_path, images_path,
                 bigThumb=None, smallThumb=None):
        # Root of the project; input files are resolved relative to it
        self.project_path = project_path
        # Directory where generated PNGs are written
        self.images_path = images_path
        self.ih = ImageHandler()
        # Reusable image buffer for successive reads
        self.img = self.ih.createImage()
        # Truthy to write the full-size PNG
        self.bigThumb = bigThumb
        # Pixel size of the small thumbnail (written as '<name>t.png')
        self.smallThumb = smallThumb

    def generate_image(self, input_file, outputName=None):
        """Generate PNG(s) for ``input_file`` and return the big PNG path.

        Skips the work entirely if the big PNG already exists.
        """
        output_root = join(self.images_path, basename(outputName))
        output_file = output_root + '.png'

        print "Generating image: ", output_file

        if not exists(output_file):
            from PIL import Image
            self.img.read(join(self.project_path, input_file))
            pimg = getPILImage(self.img)
            pwutils.makeFilePath(output_file)
            if self.bigThumb:
                pimg.save(output_file, "PNG")
            # Thumbnail mutates pimg in place, so do it after the big save
            if self.smallThumb:
                pimg.thumbnail((self.smallThumb, self.smallThumb),
                               Image.ANTIALIAS)
                pimg.save(output_root + 't.png', "PNG")

        return output_file
def convertInputStep(self):
    """Convert the two half-volumes (and the optional mask) to the
    formats/filenames expected by blocres (.map files).

    Sets ``self.fnvol1``, ``self.fnvol2`` and ``self.fnmask``.
    """
    # blocres works with .map
    vol1Fn = self.inputVolume.get().getFileName()
    vol2Fn = self.inputVolume2.get().getFileName()
    self.fnvol1 = self._getFileName("half1")
    self.fnvol2 = self._getFileName("half2")
    ih = ImageHandler()
    ih.convert(vol1Fn, self.fnvol1)
    ih.convert(vol2Fn, self.fnvol2)

    maskFn = self.mask.get().getFileName()
    if maskFn != '':
        self.fnmask = self._getFileName("maskvol")
        ih.convert(maskFn, self.fnmask)
    else:
        # BUG FIX: was `self.maks.get().getFileName()` (typo), which
        # raised AttributeError whenever no mask file was provided.
        self.fnmask = maskFn
def __init__(self, project_path, images_path, bigThumb=None, smallThumb=None):
    """Keep the paths and thumbnail options, and prepare a reusable
    image handler/buffer for subsequent reads.

    :param project_path: root directory used to resolve input files
    :param images_path: directory where generated images are written
    :param bigThumb: truthy to produce the full-size image
    :param smallThumb: pixel size of the small thumbnail, if any
    """
    # Plain configuration first
    self.project_path = project_path
    self.images_path = images_path
    self.bigThumb = bigThumb
    self.smallThumb = smallThumb
    # Image machinery: one handler and one reusable image buffer
    self.ih = ImageHandler()
    self.img = self.ih.createImage()
def checkBackgroundStep(self):
    """If the resolution map has no (near-)zero background, zero out the
    voxels at the top of its value range via xmipp_transform_threshold."""
    resolutionPath = self.resolutionVolume.get().getFileName()
    volumeData = ImageHandler().read(resolutionPath).getData()
    max_value = np.amax(volumeData)
    min_value = np.amin(volumeData)

    if min_value <= 0.01:
        return  # background already present, nothing to do

    argParts = [
        ' -i %s' % self.resFn,
        ' -o %s' % self.resFn,
        ' --select above %f' % (max_value - 1),
        ' --substitute value 0',
    ]
    self.runJob('xmipp_transform_threshold', ''.join(argParts))
def _processMovie(self, movieId, movieName, movieFolder):
    """Prepare one movie (convert/link to .mrc), write a dummy shift
    file, and run the summovie alignment job on it.

    :param movieId: numeric id of the movie (used for aux file names)
    :param movieName: movie file name, relative to movieFolder
    :param movieFolder: working directory for this movie
    """
    # if not mrc convert format to mrc
    # special case is mrc but ends in mrcs
    moviePath = os.path.join(movieFolder, movieName)
    movieNameMrc = pwutils.replaceExt(movieName, "mrc")
    moviePathMrc = pwutils.replaceExt(moviePath, "mrc")
    ih = ImageHandler()

    if movieName.endswith('.mrc'):
        pass  # Do nothing
    elif movieName.endswith('.mrcs'):
        # Create a link to the mrc or mrcs file but with .mrc extension
        createLink(moviePath, moviePathMrc)
    else:
        # Convert to mrc if the movie is in other format
        ih.convert(moviePath, moviePathMrc, DT_FLOAT)

    _, _, z, n = ih.getDimensions(moviePathMrc)
    numberOfFrames = max(z, n)  # Deal with mrc ambiguity

    # Write dummy auxiliary shift file.
    # TODO: this should be done properly when we define
    # how to transfer between movies
    shiftFnName = os.path.join(movieFolder, self._getShiftFnName(movieId))
    # FIX: use a context manager so the file is closed even on error
    with open(shiftFnName, 'w') as f:
        f.write(("0 " * numberOfFrames + "\n") * 2)

    if self.alignFrameRange != -1:
        if self.alignFrameRange > numberOfFrames:
            # FIX: format arguments were swapped — the requested frame
            # count is alignFrameRange, the movie total is numberOfFrames
            raise Exception('Frame number (%d) is greater than '
                            'the total frames of the movie (%d)' %
                            (self.alignFrameRange, numberOfFrames))
        numberOfFrames = self.alignFrameRange.get()

    self._argsSummovie(movieNameMrc, movieFolder, movieId, numberOfFrames)
    try:
        self.runJob(self._program, self._args, cwd=movieFolder)
    except Exception:  # FIX: was a bare except; keep best-effort behavior
        print("ERROR: Movie %s failed\n" % movieName)

    logFile = self._getLogFile(movieId)
def _showVolumeColorSlices(self, param=None):
    """Display slices of the local-resolution volume, colored between the
    (masked) minimum and the maximum resolution values."""
    volumePath = self.protocol.resolution_Volume.getFileName()
    volumeData = ImageHandler().read(volumePath).getData()
    max_Res = np.amax(volumeData)
    # Ignore near-zero background voxels when picking the lower bound
    maskedData = np.ma.masked_where(volumeData < 0.1, volumeData, copy=True)
    min_Res = np.amin(maskedData)

    fig, im = self._plotVolumeSlices('MonoRes slices', maskedData,
                                     min_Res, max_Res, self.getColorMap(),
                                     dataAxis=self._getAxis())
    colorbarAxes = fig.add_axes([0.9, 0.1, 0.03, 0.8])
    fig.colorbar(im, cax=colorbarAxes).ax.invert_yaxis()
    return plt.show(fig)
def _createFrames(cls, delay=0):
    """Write every frame of each test movie as an individual .mrc file
    in the project's tmp folder, sleeping ``delay`` seconds per movie."""
    # Create a test folder path
    frameFiles = glob(cls.ds.getFile('ribo/Falcon*mrcs'))
    handler = ImageHandler()

    # Cycle over the available files if more movies than inputs are asked
    for movieIdx in range(MOVS):
        src = frameFiles[movieIdx % len(frameFiles)]
        _, _, _, frameCount = handler.getDimensions(src)
        for frameIdx in range(1, frameCount + 1):
            dst = cls.proj.getTmpPath('movie%06d_%03d.mrc'
                                      % (movieIdx + 1, frameIdx))
            handler.convert((frameIdx, src), dst)
        time.sleep(delay)
def _showVolumeColorSlices(self, param=None):
    """Display slices of the local-resolution volume, colored between the
    (masked) minimum and the maximum resolution values."""
    volumePath = self.protocol._getExtraPath(OUTPUT_RESOLUTION_FILE)
    volumeData = ImageHandler().read(volumePath).getData()
    max_Res = np.amax(volumeData)
    # Ignore near-zero background voxels when picking the lower bound
    maskedData = np.ma.masked_where(volumeData < 0.1, volumeData, copy=True)
    min_Res = np.amin(maskedData)

    fig, im = self._plotVolumeSlices('MonoRes slices', maskedData,
                                     min_Res, max_Res, self.getColorMap(),
                                     dataAxis=self._getAxis())
    colorbarAxes = fig.add_axes([0.9, 0.1, 0.03, 0.8])
    fig.colorbar(im, cax=colorbarAxes).ax.invert_yaxis()
    return plt.show(fig)
def MonoResStep(self, iter):
    """Run one MonoRes (xmipp_resolution_monogenic_signal) pass on the
    sharpened map of iteration ``iter``.

    On the first iteration the previously imported/computed resolution
    volume is used as reference; later iterations use the protocol's own
    output resolution file.
    """
    sampling = self.inputVolume.get().getSamplingRate()
    if (iter == 1):
        resFile = self.resolutionVolume.get().getFileName()
    else:
        resFile = self._getFileName('OUTPUT_RESOLUTION_FILE')
    pathres = dirname(resFile)

    img = ImageHandler().read(resFile)
    imgData = img.getData()
    max_res = np.amax(imgData)
    significance = 0.95

    mtd = md.MetaData()
    if exists(os.path.join(pathres, 'mask_data.xmd')):
        # Reuse the radius estimated by a previous MonoRes run
        mtd.read(os.path.join(pathres, 'mask_data.xmd'))
        radius = mtd.getValue(MDL_SCALE, 1)
    else:
        # Fallback: half of the volume dimension
        xdim, _ydim, _zdim = self.inputVolume.get().getDim()
        radius = xdim * 0.5

    params = ' --vol %s' % self._getExtraPath('sharpenedMap_'
                                              + str(iter) + '.mrc')
    params += ' --mask %s' % self._getFileName('BINARY_MASK')
    params += ' --sampling_rate %f' % sampling
    # Resolution search range: Nyquist (2 * sampling) up to the current max
    params += ' --minRes %f' % (2 * sampling)
    params += ' --maxRes %f' % max_res
    params += ' --step %f' % 0.25
    params += ' --mask_out %s' % self._getTmpPath('refined_mask.vol')
    params += ' -o %s' % self._getFileName('OUTPUT_RESOLUTION_FILE')
    params += ' --volumeRadius %f' % radius
    params += ' --exact'
    params += ' --chimera_volume %s' % self._getFileName(
        'OUTPUT_RESOLUTION_FILE_CHIMERA')
    params += ' --sym %s' % 'c1'
    params += ' --significance %f' % significance
    params += ' --md_outputdata %s' % self._getTmpPath('mask_data.xmd')
    params += ' --threads %i' % self.numberOfThreads.get()

    self.runJob('xmipp_resolution_monogenic_signal', params)
def ifNomask(self, fnVol):
    """Build a binary mask when none was provided: low-pass filter the
    volume with a Gaussian, then binarize at 5% of the maximum value.

    Sets ``self.maskFn`` to the generated mask file.
    """
    xdim, _ydim, _zdim = self.inputVolume.get().getDim()
    gaussianMap = self._getFileName(FN_GAUSSIAN_MAP)

    # Gaussian width proportional to the volume size
    blurArgs = ''.join([
        ' -i %s' % fnVol,
        ' -o %s' % gaussianMap,
        ' --fourier real_gaussian %f' % (0.02 * xdim),
    ])
    self.runJob('xmipp_transform_filter', blurArgs)

    blurredData = ImageHandler().read(gaussianMap).getData()
    threshold = np.amax(blurredData) * 0.05

    binarizeArgs = ''.join([
        ' -i %s' % gaussianMap,
        ' --select below %f' % threshold,
        ' --substitute binarize',
        ' -o %s' % self._getFileName(BINARY_MASK),
    ])
    self.runJob('xmipp_transform_threshold', binarizeArgs)

    self.maskFn = self._getFileName(BINARY_MASK)
def _validate(self):
    """Validate the import: either delegate to the import class, or make
    sure none of the input micrographs is actually a stack."""
    from pyworkflow.em.convert import ImageHandler
    ci = self.getImportClass()
    if ci is not None:
        return ci.validateMicrographs()

    errors = ProtImportMicBase._validate(self)
    for micFn, _ in self.iterFiles():
        handler = ImageHandler()
        if handler.isImageFile(micFn):
            _, _, z, n = handler.getDimensions(micFn)
            if z > 1 or n > 1:
                errors.append("The protocol not support micrographs stored in stacks. "
                              "If you want to obtain your micrographs individually, "
                              "you can run the following command:\n"
                              "scipion run scipion_directory/scripts/split_stacks.py --files *your files* --ext *extension*")
            # JMRT: only check the first image, for large dataset
            # even reading the header can take a while
            break
    return errors
def projectStep(self, start, end, samplingRate, threadNumber):
    """For each particle in this thread's subset: generate the volume
    projection at the particle's angles, optionally apply its CTF and
    shifts, subtract it from the experimental image and write the result.

    :param start: global index offset of this subset (used for output names)
    :param end: end of the subset range (not read here; kept for the caller)
    :param samplingRate: pixel size used when applying the CTF
    :param threadNumber: selects which particle subset file to process
    """
    # Project
    md = xmipp.MetaData(self._getInputParticlesSubsetFn(threadNumber))
    projection = xmipp.Image()
    projection.setDataType(xmipp.DT_DOUBLE)

    for id in md:
        rot = md.getValue(xmipp.MDL_ANGLE_ROT, id)
        tilt = md.getValue(xmipp.MDL_ANGLE_TILT, id)
        psi = md.getValue(xmipp.MDL_ANGLE_PSI, id)
        # Fourier-space projection of the reference volume at (rot, tilt, psi)
        self.fourierProjectVol.projectVolume(projection, rot, tilt, psi)

        # Apply CTF according to the selected correction mode
        if self.projType == self.CORRECT_NONE:
            pass
        elif self.projType == self.CORRECT_FULL_CTF:
            projection.applyCTF(md, samplingRate, id, False)
        elif self.projType == self.CORRECT_PHASE_FLIP:
            projection.applyCTF(md, samplingRate, id, True)
        else:
            raise Exception("ERROR: Unknown projection mode: %d" % self.projType)

        # Shift image (only apply shifts, with wrapping)
        projection.applyGeo(md, id, True, False)

        ih = ImageHandler()
        expProj = ih.read(md.getValue(xmipp.MDL_IMAGE, id))
        expProj.convert2DataType(xmipp.DT_DOUBLE)

        # Subtract from experimental and write result
        projection.resetOrigin()
        if self.normalize:
            # adjustAndSubtract returns a new image
            expProj = expProj.adjustAndSubtract(projection)
        else:
            expProj.inplaceSubtract(projection)

        expProj.write(self._getProjGalleryIndexFn(id + start - 1))
def importVolumesStep(self, pattern, samplingRate):
    """ Copy images matching the filename pattern.
    Register other parameters.

    Produces either ``outputVolume`` (single volume) or ``outputVolumes``
    (a set) depending on how many volumes were found.
    """
    self.info("Using pattern: '%s'" % pattern)

    # Create a Volume template object
    # NOTE(review): this single template is reused/appended for every
    # file — presumably volSet.append copies it; confirm in pyworkflow.
    vol = Volume()
    vol.setSamplingRate(self.samplingRate.get())
    copyOrLink = self.getCopyOrLink()
    imgh = ImageHandler()

    volSet = self._createSetOfVolumes()
    volSet.setSamplingRate(self.samplingRate.get())

    for fileName, fileId in self.iterFiles():
        dst = self._getExtraPath(basename(fileName))
        copyOrLink(fileName, dst)
        x, y, z, n = imgh.getDimensions(dst)
        # First case considers when reading mrc without volume flag
        # Second one considers single volumes (not in stack)
        if (z == 1 and n != 1) or (z != 1 and n == 1):
            vol.setObjId(fileId)
            if dst.endswith('.mrc'):
                # Mark the file so it is interpreted as a volume
                dst += ':mrc'
            vol.setLocation(dst)
            volSet.append(vol)
        else:
            # A stack of volumes: append one entry per element
            for index in range(1, n + 1):
                vol.cleanObjId()
                vol.setLocation(index, dst)
                volSet.append(vol)

    if volSet.getSize() > 1:
        self._defineOutputs(outputVolumes=volSet)
    else:
        self._defineOutputs(outputVolume=vol)
def ifNomask(self, fnVol):
    """Build a binary mask when none was provided: low-pass filter the
    volume with a Gaussian, then binarize at 5% of the maximum value.

    Sets ``self.maskFn`` to the generated mask file.
    """
    # Volume dimension comes from whichever input mode is active
    if self.halfVolumes:
        xdim, _ydim, _zdim = self.inputVolume.get().getDim()
    else:
        xdim, _ydim, _zdim = self.inputVolumes.get().getDim()

    gaussianMap = self._getFileName(FN_GAUSSIAN_MAP)

    # Gaussian width proportional to the volume size
    blurArgs = ''.join([
        ' -i %s' % fnVol,
        ' -o %s' % gaussianMap,
        ' --fourier real_gaussian %f' % (0.02 * xdim),
    ])
    self.runJob('xmipp_transform_filter', blurArgs)

    blurredData = ImageHandler().read(gaussianMap).getData()
    threshold = np.amax(blurredData) * 0.05

    binarizeArgs = ''.join([
        ' -i %s' % gaussianMap,
        ' --select below %f' % threshold,
        ' --substitute binarize',
        ' -o %s' % self._getFileName(BINARY_MASK),
    ])
    self.runJob('xmipp_transform_threshold', binarizeArgs)

    self.maskFn = self._getFileName(BINARY_MASK)
def _validate(self):
    """Validate the import: either delegate to the import class, or make
    sure none of the input micrographs is actually a stack."""
    from pyworkflow.em.convert import ImageHandler
    ci = self.getImportClass()
    if ci is not None:
        return ci.validateMicrographs()

    errors = ProtImportMicBase._validate(self)
    for micFn, _ in self.iterFiles():
        handler = ImageHandler()
        if handler.isImageFile(micFn):
            _, _, z, n = handler.getDimensions(micFn)
            if z > 1 or n > 1:
                errors.append("The protocol not support micrographs "
                              "stored in stacks. If you want to "
                              "obtain your micrographs individually, "
                              "you can run the following command:\n"
                              "scipion run scipion_directory/scripts/"
                              "split_stacks.py --files *your files* "
                              "--ext *extension*")
            # JMRT: only check the first image, for large dataset
            # even reading the header can take a while
            break
    return errors
def _processMovie(self, movieId, movieName, movieFolder):
    """Prepare one movie (convert/link to .mrc) and run the unblur
    alignment job on it.

    :param movieId: numeric id of the movie (used for aux file names)
    :param movieName: movie file name, relative to movieFolder
    :param movieFolder: working directory for this movie
    """
    # if not mrc convert format to mrc
    # special case is mrc but ends in mrcs
    moviePath = os.path.join(movieFolder, movieName)
    movieNameMrc = pwutils.replaceExt(movieName, "mrc")
    moviePathMrc = pwutils.replaceExt(moviePath, "mrc")
    ih = ImageHandler()

    if movieName.endswith('.mrc'):
        pass  # Do nothing
    elif movieName.endswith('.mrcs'):
        # Create a link to the mrc or mrcs file but with .mrc extension
        createLink(moviePath, moviePathMrc)
    else:
        # Convert to mrc if the movie is in other format
        ih.convert(moviePath, moviePathMrc, DT_FLOAT)

    _, _, z, n = ih.getDimensions(moviePathMrc)
    numberOfFrames = max(z, n)  # Deal with mrc ambiguity

    if self.alignFrameRange != -1:
        if self.alignFrameRange > numberOfFrames:
            # FIX: format arguments were swapped — the requested frame
            # count is alignFrameRange, the movie total is numberOfFrames
            raise Exception('Frame number (%d) is greater than '
                            'the total frames of the movie (%d)' %
                            (self.alignFrameRange, numberOfFrames))
        numberOfFrames = self.alignFrameRange.get()

    self._argsUnblur(movieNameMrc, movieFolder, movieId, numberOfFrames)
    try:
        self.runJob(self._program, self._args, cwd=movieFolder)
    except Exception:  # FIX: was a bare except; keep best-effort behavior
        print("ERROR: Movie %s failed\n" % movieName)

    logFile = self._getLogFile(movieId)
def __convertCorrectionImage(self, correctionImage):
    """ Will convert a gain or dark file to a compatible one and return
    the absolute path of the final file, or None when there is no
    correction image. The conversion runs only once (skipped when the
    target already exists)."""
    # Get final correction image file
    targetPath = self.getFinalCorrectionImagePath(correctionImage)

    # If correctionImage is None, targetPath will be None
    if targetPath is None:
        return None

    if not os.path.exists(targetPath):
        # Conversion never happened...
        print('converting %s to %s' % (correctionImage, targetPath))
        ImageHandler().convert(correctionImage, targetPath)

    return os.path.abspath(targetPath)
def getMinMax(self, imageFile):
    """Return (min, max) voxel values of ``imageFile``, each rounded to
    two decimal places."""
    voxels = ImageHandler().read(imageFile).getData()
    lowest = round(np.amin(voxels) * 100) / 100
    highest = round(np.amax(voxels) * 100) / 100
    return lowest, highest
def __init__(self):
    """Command-line entry point: group individual frame files (matched by
    ``--files``) into one movie stack per common prefix.

    NOTE: Python 2 code (``iteritems``, print statement).
    Side effects: parses ``sys.argv``, writes movie stacks, optionally
    deletes frame files.
    """
    parser = argparse.ArgumentParser(
        description="Create movie stacks from the individual "
                    "frame files.")
    add = parser.add_argument  # shortcut
    add('--files', default='',
        help='Pattern to match input frame files.')
    add('-n',
        help='Number of frames per movie.')
    add('--suffix',
        help='Provide suffix added to create movie file. '
             'e.g. _frames.mrcs')
    add('--delete_frames', action='store_true',
        help='Provide this option if you want to delete individual frame '
             'files after the movie stack is created. ')

    args = parser.parse_args()
    n = int(args.n)
    # Frame files look like <prefix><frameid>; frameid is the trailing digits
    frameRegex = re.compile("(?P<prefix>.+[^\d]+)(?P<frameid>\d+)")
    # Group all frames for each movie
    # Key of the dictionary will be the common prefix and the value
    # will be a list with all frames in that movie
    frameDict = {}

    filePaths = glob.glob(args.files)
    filePaths.sort()

    for fileName in filePaths:
        fnNoExt = pwutils.removeExt(fileName)
        match = frameRegex.match(fnNoExt)
        if match is None:
            raise Exception("Incorrect match of frame files pattern!")
        d = match.groupdict()
        prefix = d['prefix']
        frameid = int(d['frameid'])
        if prefix not in frameDict:
            frameDict[prefix] = []
        frameDict[prefix].append((frameid, fileName))

    suffix = args.suffix
    ih = ImageHandler()

    for k, v in frameDict.iteritems():
        if len(v) != n:
            raise Exception("Incorrect number of frames!")
        movieFn = k + suffix
        movieOut = movieFn
        if movieOut.endswith("mrc"):
            movieOut += ":mrcs"
        # NOTE(review): movieOut (with the ':mrcs' flag) is never used —
        # the convert below writes to movieFn; looks like a latent bug.
        print "Writing movie stack: ", movieFn

        # Write frames in frameid order as slices 1..n of the stack
        for i, frame in enumerate(sorted(v, key=lambda x: x[0])):
            frameFn = frame[1]  # Frame name stored previously
            ih.convert(frameFn, (i+1, movieFn))
        if args.delete_frames:
            # NOTE(review): this runs once per movie and frameFn is the
            # last frame only — presumably all frames were meant to be
            # deleted; confirm intent.
            pwutils.cleanPath(frameFn)
def createOutputStep(self, particlesId, coordsId, boxSize):
    """ Extract sub-particles from the original particle images and write
    them into a single .mrcs stack, producing a new SetOfParticles.

    Params:
        particlesId: use this parameter just to force redo of this step
            if the input particles are changed.
        coordsId: same as above, for the input coordinates.
        boxSize: output box size (pixels) of each sub-particle; note the
            actual value used is re-read from self.boxSize below.
    """
    ih = ImageHandler()
    outputStack = self._getPath('particles.mrcs')
    outputImg = ih.createImage()

    inputParticles = self._getInputParticles()
    inputCoords = self.inputCoordinates.get()
    outputSet = self._createSetOfParticles()
    outputSet.copyInfo(inputParticles)

    boxSize = self.boxSize.get()
    b2 = int(round(boxSize / 2))
    # Reusable crop buffer, refilled for every sub-particle
    center = np.zeros((boxSize, boxSize))
    i = 0
    outliers = 0
    partIdExcluded = []
    lastPartId = None

    for coord in inputCoords.iterItems(
            orderBy=['_subparticle._micId', '_micId', 'id']):
        # The original particle id is stored in the sub-particle as micId
        partId = coord._micId.get()

        # Load the particle only if it changed from the last sub-particle
        if partId != lastPartId:
            particle = inputParticles[partId]

            if particle is None:
                partIdExcluded.append(partId)
                self.info("WARNING: Missing particle with id %s from "
                          "input particles set" % partId)
            else:
                # Now load the particle image to extract the sub-particles
                img = ih.read(particle)
                x, y, _, _ = img.getDimensions()
                data = img.getData()

            lastPartId = partId

        # If particle is not in inputParticles, subparticles will not be
        # generated. Now, subtract from a subset of original particles is
        # supported.
        if partId not in partIdExcluded:
            xpos = coord.getX()
            ypos = coord.getY()

            # Check that the sub-particle will not lay out of the particle
            if (ypos - b2 < 0 or ypos + b2 > y
                    or xpos - b2 < 0 or xpos + b2 > x):
                outliers += 1
                continue

            # Crop the sub-particle data from the whole particle image
            center[:, :] = data[ypos - b2:ypos + b2, xpos - b2:xpos + b2]
            outputImg.setData(center)
            i += 1
            outputImg.write((i, outputStack))
            subpart = coord._subparticle
            subpart.setLocation((i, outputStack))  # Change path to new stack
            subpart.setObjId(None)  # Force to insert as a new item
            outputSet.append(subpart)

    if outliers:
        # Fixed: closing parenthesis was missing in the message
        self.info("WARNING: Discarded %s particles because laid out of "
                  "the particle (for a box size of %d)"
                  % (outliers, boxSize))

    self._defineOutputs(outputParticles=outputSet)
    self._defineSourceRelation(self.inputParticles, outputSet)
def testRisosome(self):
    """ Streaming integration test: import class averages and a set of
    micrographs, estimate CTF with ctffind while micrographs arrive,
    then run Relion2 auto-picking twice with different runType values.
    """
    print("Importing 2D averages (subset of 4)")
    ih = ImageHandler()
    classesFn = self.ds.getFile('import/classify2d/extra/'
                                'relion_it015_classes.mrcs')

    outputName = 'input_averages.mrcs'
    inputTmp = os.path.abspath(self.proj.getTmpPath())
    outputFn = self.proj.getTmpPath(outputName)

    # Copy selected class averages into a small stack used as references
    for i, index in enumerate([5, 16, 17, 18, 24]):
        ih.convert((index, classesFn), (i + 1, outputFn))

    protAvgs = self.newProtocol(ProtImportAverages,
                                objLabel='avgs - 5',
                                filesPath=inputTmp,
                                filesPattern=outputName,
                                samplingRate=7.08)
    self.launchProtocol(protAvgs)

    # First, import a set of micrographs
    print("Importing a set of micrographs...")
    protImport = self.newProtocol(ProtImportMicrographs,
                                  filesPath=os.path.abspath(
                                      self.proj.getTmpPath()),
                                  filesPattern="*%s" % self.ext,
                                  samplingRateMode=1,
                                  magnification=79096,
                                  scannedPixelSize=56,
                                  voltage=300,
                                  sphericalAberration=2.0,
                                  dataStreaming=True,
                                  fileTimeout=10,
                                  timeout=60)
    protImport.setObjLabel('import 20 mics (streaming)')
    # Launched without waiting: this runs in streaming mode, so we only
    # block until the first output appears
    self.proj.launchProtocol(protImport, wait=False)
    self._waitOutput(protImport, 'outputMicrographs')

    # Now estimate CTF on the micrographs with ctffind
    print("Performing CTFfind...")
    protCTF = self.newProtocol(ProtCTFFind,
                               useCtffind4=True,
                               lowRes=0.02, highRes=0.45,
                               minDefocus=1.2, maxDefocus=3,
                               runMode=1,
                               numberOfMpi=1, numberOfThreads=1)
    protCTF.inputMicrographs.set(protImport.outputMicrographs)
    protCTF.setObjLabel('CTF ctffind')
    self.proj.launchProtocol(protCTF, wait=False)
    self._waitOutput(protCTF, 'outputCTF')
    # Wait until enough CTFs exist before picking starts
    self._waitUntilMinSize(protCTF.outputCTF)

    # Select some good averages from the iterations mrcs a
    protPick = self.newProtocol(ProtRelion2Autopick,
                                objLabel='autopick refs',
                                runType=0,
                                micrographsNumber=3,
                                referencesType=0,
                                refsHaveInvertedContrast=True,
                                particleDiameter=380)
    protPick.inputMicrographs.set(protImport.outputMicrographs)
    protPick.ctfRelations.set(protCTF.outputCTF)
    protPick.inputReferences.set(protAvgs.outputAverages)
    self.launchProtocol(protPick)

    # Re-launch the same protocol with runType=1 — presumably switches
    # from parameter optimisation to picking all micrographs; confirm
    # against ProtRelion2Autopick's runType definition
    protPick.runType.set(1)
    self.launchProtocol(protPick)
def createOutputStep(self, particlesId, coordsId, boxSize):
    """ Extract sub-particles from the original particle images and write
    them into a single .mrcs stack, producing a new SetOfParticles.

    Params:
        particlesId: use this parameter just to force redo of this step
            if the input particles are changed.
        coordsId: same as above, for the input coordinates.
        boxSize: output box size (pixels) of each sub-particle; note the
            actual value used is re-read from self.boxSize below.
    """
    ih = ImageHandler()
    outputStack = self._getPath('particles.mrcs')
    outputImg = ih.createImage()

    inputParticles = self._getInputParticles()
    inputCoords = self.inputCoordinates.get()
    outputSet = self._createSetOfParticles()
    outputSet.copyInfo(inputParticles)

    boxSize = self.boxSize.get()
    b2 = int(round(boxSize / 2))
    # Reusable crop buffer, refilled for every sub-particle
    center = np.zeros((boxSize, boxSize))
    i = 0
    outliers = 0
    partIdExcluded = []
    lastPartId = None

    for coord in inputCoords.iterItems(orderBy=['_subparticle._micId',
                                                '_micId', 'id']):
        # The original particle id is stored in the sub-particle as micId
        partId = coord._micId.get()

        # Load the particle only if it changed from the last sub-particle
        if partId != lastPartId:
            particle = inputParticles[partId]

            if particle is None:
                partIdExcluded.append(partId)
                self.info("WARNING: Missing particle with id %s from "
                          "input particles set" % partId)
            else:
                # Now load the particle image to extract the sub-particles
                img = ih.read(particle)
                x, y, _, _ = img.getDimensions()
                data = img.getData()

            lastPartId = partId

        # If particle is not in inputParticles, subparticles will not be
        # generated. Now, subtract from a subset of original particles is
        # supported.
        if partId not in partIdExcluded:
            xpos = coord.getX()
            ypos = coord.getY()

            # Check that the sub-particle will not lay out of the particle
            if (ypos - b2 < 0 or ypos + b2 > y
                    or xpos - b2 < 0 or xpos + b2 > x):
                outliers += 1
                continue

            # Crop the sub-particle data from the whole particle image
            center[:, :] = data[ypos - b2:ypos + b2, xpos - b2:xpos + b2]
            outputImg.setData(center)
            i += 1
            outputImg.write((i, outputStack))
            subpart = coord._subparticle
            subpart.setLocation((i, outputStack))  # Change path to new stack
            subpart.setObjId(None)  # Force to insert as a new item
            outputSet.append(subpart)

    if outliers:
        # Fixed: closing parenthesis was missing in the message
        self.info("WARNING: Discarded %s particles because laid out of "
                  "the particle (for a box size of %d)"
                  % (outliers, boxSize))

    self._defineOutputs(outputParticles=outputSet)
    self._defineSourceRelation(self.inputParticles, outputSet)
def _processMovie(self, movie):
    """ Align one movie with unblur, then optionally compute the PSD,
    alignment plots and a micrograph thumbnail.

    If the requested frame range is a strict sub-range of the movie, the
    movie file is first converted in place to contain only those frames.
    """
    numberOfFrames = self._getNumberOfFrames(movie)
    # FIXME: Figure out how to properly write shifts for unblur
    # self._writeMovieAlignment(movie, numberOfFrames)
    a0, aN = self._getRange(movie, 'align')
    _, lstFrame, _ = movie.getFramesRange()
    movieBaseName = pwutils.removeExt(movie.getFileName())
    aveMicFn = movieBaseName + '_uncorrected_avg.mrc'

    if a0 > 1 or aN < lstFrame:
        from pyworkflow.em import ImageHandler
        ih = ImageHandler()
        movieInputFn = movie.getFileName()

        if movieInputFn.endswith("mrc"):
            movieInputFn += ":mrcs"

        movieConverted = pwutils.removeExt(movieInputFn) + "_tmp.mrcs"
        ih.convertStack(movieInputFn, movieConverted, a0, aN)
        # Here, only temporal movie file (or link) stored in
        # tmp/movie_?????? is removed before move the converted file. It
        # is necessary 'cause if it is overwritten you may lost your
        # original data.
        os.remove(movie.getFileName())
        pwutils.moveFile(movieConverted, movie.getFileName())

    movieSet = self.inputMovies.get()
    self._createLink(movie)
    # Renamed from 'range': do not shadow the builtin
    frameRange = aN - a0 + 1
    self._argsUnblur(movie, frameRange)

    try:
        self.runJob(self._program, self._args)

        outMicFn = self._getExtraPath(self._getOutputMicName(movie))

        if not os.path.exists(outMicFn):
            # if only DW mic is saved
            outMicFn = self._getExtraPath(self._getOutputMicWtName(movie))

        if self.doComputePSD:
            # Compute uncorrected avg mic
            roi = [0, 0, 0, 0]
            fakeShiftsFn = self.writeZeroShifts(movie)
            self.averageMovie(movie, fakeShiftsFn, aveMicFn,
                              binFactor=1, roi=roi, dark=None,
                              gain=movieSet.getGain())
            self.computePSDs(movie, aveMicFn, outMicFn,
                             outputFnCorrected=self._getPsdJpeg(movie))

        self._saveAlignmentPlots(movie)

        if self._doComputeMicThumbnail():
            self.computeThumbnail(
                outMicFn, outputFn=self._getOutputMicThumbnail(movie))
    except Exception as e:
        # Narrowed from a bare 'except:', which would also swallow
        # KeyboardInterrupt/SystemExit; report the reason as well
        print("ERROR: Movie %s failed: %s\n" % (movie.getFileName(), e))
def testRisosome(self):
    """ Streaming integration test: import class averages and a set of
    micrographs, estimate CTF with ctffind while micrographs arrive,
    then run Relion2 auto-picking twice with different runType values.

    Note: print statements converted to the print() function for
    Python 3 compatibility, matching the sibling version of this test.
    """
    print("Importing 2D averages (subset of 4)")
    ih = ImageHandler()
    classesFn = self.ds.getFile('import/classify2d/extra/'
                                'relion_it015_classes.mrcs')

    outputName = 'input_averages.mrcs'
    inputTmp = os.path.abspath(self.proj.getTmpPath())
    outputFn = self.proj.getTmpPath(outputName)

    # Copy selected class averages into a small stack used as references
    for i, index in enumerate([5, 16, 17, 18, 24]):
        ih.convert((index, classesFn), (i + 1, outputFn))

    protAvgs = self.newProtocol(ProtImportAverages,
                                objLabel='avgs - 5',
                                filesPath=inputTmp,
                                filesPattern=outputName,
                                samplingRate=7.08)
    self.launchProtocol(protAvgs)

    # First, import a set of micrographs
    print("Importing a set of micrographs...")
    protImport = self.newProtocol(ProtImportMicrographs,
                                  filesPath=os.path.abspath(
                                      self.proj.getTmpPath()),
                                  filesPattern="*%s" % self.ext,
                                  samplingRateMode=1,
                                  magnification=79096,
                                  scannedPixelSize=56,
                                  voltage=300,
                                  sphericalAberration=2.0,
                                  dataStreaming=True,
                                  fileTimeout=10,
                                  timeout=60)
    protImport.setObjLabel('import 20 mics (streaming)')
    # Launched without waiting: streaming mode, block only until the
    # first output appears
    self.proj.launchProtocol(protImport, wait=False)
    self._waitOutput(protImport, 'outputMicrographs')

    # Now estimate CTF on the micrographs with ctffind
    print("Performing CTFfind...")
    protCTF = self.newProtocol(ProtCTFFind,
                               useCtffind4=True,
                               lowRes=0.02, highRes=0.45,
                               minDefocus=1.2, maxDefocus=3,
                               runMode=1,
                               numberOfMpi=1, numberOfThreads=1)
    protCTF.inputMicrographs.set(protImport.outputMicrographs)
    protCTF.setObjLabel('CTF ctffind')
    self.proj.launchProtocol(protCTF, wait=False)
    self._waitOutput(protCTF, 'outputCTF')
    # Wait until enough CTFs exist before picking starts
    self._waitUntilMinSize(protCTF.outputCTF)

    # Select some good averages from the iterations mrcs a
    protPick = self.newProtocol(ProtRelion2Autopick,
                                objLabel='autopick refs',
                                runType=0,
                                micrographsNumber=3,
                                referencesType=0,
                                refsHaveInvertedContrast=True,
                                particleDiameter=380)
    protPick.inputMicrographs.set(protImport.outputMicrographs)
    protPick.ctfRelations.set(protCTF.outputCTF)
    protPick.inputReferences.set(protAvgs.outputAverages)
    self.launchProtocol(protPick)

    # Re-launch the same protocol with runType=1 — presumably switches
    # from parameter optimisation to picking all micrographs; confirm
    # against ProtRelion2Autopick's runType definition
    protPick.runType.set(1)
    self.launchProtocol(protPick)