def decimate(input_files,output_file,bin_factor): input_files = glob.glob(input_files) print "processing %d files"%len(input_files) img = ndimage_file.read_image(input_files[0]) nsize = img.shape[0]/2 for input_file in input_files: print 'processing ', input_file output_file = spider_utility.spider_filename(output_file, input_file) for i, img in enumerate(ndimage_file.iter_images(input_file)): img = ndimage_interpolate.downsample(img, bin_factor) #img = ndimage_interpolate.interpolate_ft(img, (nsize,nsize)) ndimage_file.write_image(output_file, img, i)
'''
import sys
#sys.path.append('/home/robertl/tmp/arachnid-0.0.1/')
from arachnid.core.app import tracing
from arachnid.core.metadata import format
from arachnid.core.image import ndimage_file
from arachnid.core.image import reconstruct
import numpy,logging

if __name__ == "__main__":

    image_file = sys.argv[1]    # input particle image stack
    align_file = sys.argv[2]    # alignment parameter file
    output = sys.argv[3]        # output volume filename
    thread_count=16             # worker processes/threads for reconstruction

    tracing.configure_logging()
    #Note: this code assuming you are reconstructing a dala stack or a translated stack

    # Read an alignment file
    align,header = format.read_alignment(align_file, ndarray=True)
    # NOTE(review): informational message logged at ERROR level -- presumably to
    # guarantee visibility regardless of logging configuration; confirm intent
    logging.error("Reconstructing %d particles"%len(align))
    assert(header[0]=='id')
    # First column holds the particle ids used to select images from the stack
    index = align[:, 0].astype(numpy.int)
    # Column 1 (psi) is stored in radians; convert to degrees
    # NOTE(review): assumes reconstruct_bp3f_mp expects degrees -- confirm
    align[:, 1] = numpy.rad2deg(align[:, 1])
    iter_single_images = ndimage_file.iter_images(image_file, index)
    # Use the first image to determine the (square) window size
    image_size = ndimage_file.read_image(image_file).shape[0]
    vol = reconstruct.reconstruct_bp3f_mp(iter_single_images, image_size, align, thread_count=thread_count)
    ndimage_file.write_image(output, vol)
.. literalinclude:: ../../arachnid/snippets/image/shift.py :language: python :lines: 17- :linenos: ''' import sys from arachnid.core.metadata import format from arachnid.core.metadata import spider_utility from arachnid.core.metadata import relion_utility from arachnid.core.image import ndimage_file from arachnid.core.image import ndimage_utility if __name__ == '__main__': # Parameters input_file = sys.argv[1] output_file = sys.argv[2] mult=1.0 # Read an alignment file align = format.read(input_file, numeric=True) for i in xrange(len(align)): filename, id = relion_utility.relion_file(align[i].rlnImageName) img = ndimage_file.read_image(filename, id-1) img = ndimage_utility.fourier_shift(img, align[i].rlnOriginX, align[i].rlnOriginY) ndimage_file.write_image(spider_utility.spider_filename(output_file, filename), img, id-1)
.. literalinclude:: ../../arachnid/snippets/image/rotate_translate_images.py :language: python :lines: 16- :linenos: ''' from arachnid.core.metadata import format from arachnid.core.metadata import spider_utility from arachnid.core.image import ndimage_file from arachnid.core.image import rotate import itertools import sys if __name__ == '__main__': # Parameters align_file = sys.argv[1] image_file = sys.argv[2] output_file = sys.argv[3] # Read an alignment file align, header = format.read_alignment(align_file, ndarray=True) align[:, 16]-=1 iter_single_images = ndimage_file.iter_images(image_file, align[:, 15:17]) iter_single_images = itertools.imap(rotate.rotate_image, iter_single_images, align) for i, img in enumerate(iter_single_images): ndimage_file.write_image(spider_utility.spider_filename(output_file, int(align[i, 4])), img)
.. sourcecode:: sh $ python unstack.py .. note:: You must have Arachnid installed to run this script .. literalinclude:: ../../arachnid/snippets/image/unstack.py :language: python :lines: 20- :linenos: ''' from arachnid.core.metadata import spider_utility from arachnid.core.image import ndimage_file if __name__ == '__main__': # Parameters input_stack = "stack_001.dat" output_image = 'image_001_00000.dat' # Script for i, img in enumerate(ndimage_file.iter_images(input_stack)): output_image = spider_utility.spider_filename(output_image, i+1) ndimage_file.write_image(output_image, img)
if __name__ == "__main__":

    tracing.configure_logging()

    image_file = sys.argv[1]    # phase_flip_dala_stack.spi
    align_file = sys.argv[2]    # align.spi
    param_file = sys.argv[3]
    output = sys.argv[4]        # raw_vol.spi
    # Optional decimation factor; defaults to 1.0 (no decimation)
    bin_factor = float( sys.argv[5]) if len(sys.argv) > 5 else 1.0
    thread_count = 32

    extra = spider_params.read(param_file)
    # Rescale SPIDER parameters (pixel size, window, ...) for the decimation factor
    extra.update(spider_params.update_params(bin_factor, **extra))
    print "Loaded param file"
    extra.update(thread_count=thread_count)

    align, header = format.read_alignment(align_file, ndarray=True)
    # NOTE(review): informational message logged at ERROR level -- confirm intent
    logging.error("Reconstructing %d particles" % len(align))
    # Columns 15-16 select the (stack id, particle index) pairs to read
    # NOTE(review): column layout assumed from usage -- confirm against format
    selection = align[:, 15:17]
    # Convert translations to pixels using the pixel size from the param file
    # NOTE(review): presumably columns 6-7 are x/y shifts in angstroms -- confirm
    align[:, 6:8] /= extra['apix']
    iter_single_images = ndimage_file.iter_images(image_file, selection)
    # Use the first image to determine the (square) window size
    image_size = ndimage_file.read_image(image_file).shape[0]
    # process_image is defined elsewhere in this file (not visible in this chunk)
    vol = reconstruct.reconstruct_bp3f_mp(iter_single_images, image_size, align, process_image=process_image, **extra)
    if vol is not None:
        ndimage_file.write_image(output, vol)
# NOTE(review): fragment of a reconstruction script -- param_file, bin_factor,
# align_file, image_file, output and thread_count are assigned earlier in the
# file (not visible in this chunk).
extra = spider_params.read(param_file)
# Rescale SPIDER parameters (pixel size, window, ...) for the decimation factor
extra.update(spider_params.update_params(bin_factor, **extra))
print "Loaded param file"
extra.update(thread_count=thread_count)

align, header = format.read_alignment(align_file, ndarray=True)
# NOTE(review): informational message logged at ERROR level -- confirm intent
logging.error("Reconstructing %d particles" % len(align))
# Two alignment layouts are handled:
#  - wide (> 17 columns): columns 15-16 hold (stack id, 1-based particle index)
#  - narrow: column 4 holds the 1-based particle index
if align.shape[1] > 17:
    selection = align[:, 15:17]
    selection[:, 1] -= 1    # particle index to 0-based
    #align[:, 6:8] /= extra['apix']
else:
    selection = align[:, 4].astype(numpy.int) - 1
# Use the first image to determine the (square) window size
image_size = ndimage_file.read_image(image_file).shape[0]
# Split the data into even/odd halves (half-map style reconstruction)
even = numpy.arange(0, len(selection), 2, dtype=numpy.int)
odd = numpy.arange(1, len(selection), 2, dtype=numpy.int)
iter_single_images1 = ndimage_file.iter_images(image_file, selection[even])
iter_single_images2 = ndimage_file.iter_images(image_file, selection[odd])
align1 = align[even]
align2 = align[odd]
# NOTE(review): return appears to be (full, half1, half2) volumes or None --
# inferred from the writes below; confirm against reconstruct3_bp3f_mp
vol = reconstruct.reconstruct3_bp3f_mp(image_size, iter_single_images1, iter_single_images2, align1, align2, **extra)
if vol is not None:
    ndimage_file.write_image(output, vol[0])
    ndimage_file.write_image(format_utility.add_prefix(output, 'h1_'), vol[1])
    ndimage_file.write_image(format_utility.add_prefix(output, 'h2_'), vol[2])
import glob if __name__ == '__main__': # Parameters input_files = sys.argv[1] output_file = sys.argv[2] params_file = sys.argv[3] bin_factor = int(sys.argv[4]) input_files = glob.glob(input_files) print "processing %d files"%len(input_files) radius = spider_params.read(params_file)['pixel_diameter']/2 img = ndimage_file.read_image(input_files[0]) mask = ndimage_utility.model_disk(radius, img.shape)*-1+1 mask = ndimage_interpolate.downsample(mask, bin_factor) for input_file in input_files: print 'processing ', input_file output_file = spider_utility.spider_filename(output_file, input_file) for i, img in enumerate(ndimage_file.iter_images(input_file)): img = ndimage_interpolate.downsample(img, bin_factor) img=ndimage_utility.normalize_standard(img, mask) ndimage_file.write_image(output_file, img, i)
def bin(align_param_file, ref_ang_file, pref_image_in, pref_image_out, pref_sel, pref_sel_all,thres):
    '''Group particles into angular bins by nearest reference projection
    direction, mirror lower-hemisphere particles, adjust psi per bin, and
    write per-bin selection files (plus one file listing the kept bins).
    '''
    # NOTE(review): `bin` shadows the builtin of the same name and is also
    # reused as a loop variable below -- left unchanged to keep the interface.
    # read in the alignment parameters and the reference angles
    # 1st column is psi, 2nd is theta, and 3rd is phi
    align = spider.parse(align_param_file)
    #align,header = format.read_alignment(align_param_file, ndarray=True)
    print("Reconstructing %d particles"%len(align))
    #assert(header[0]=='id')
    # read in reference angles
    refang = spider.parse(ref_ang_file)
    # particle ids (1st column) used to pull images from the input stack
    index = align[:, 0].astype(np.int)
    #refang, header = format.read_alignment(ref_ang_file, ndarray=True)
    #assert(header[0]=='id')
    # from degree to radian from column 1
    align[:,1:4] = np.deg2rad(align[:,1:4])
    refang[:,1:4] = np.deg2rad(refang[:,1:4])
    # read in pref of images
    iter_single_images = ndimage_file.iter_images(pref_image_in, index)
    # form unit directional vectors for every reference angle
    rphi = mcol(refang[:,3])
    rtheta = mcol(refang[:,2])
    unit_v = get_unitv(rphi,rtheta)
    # 2-array to track indeces of particles in the same angle bin
    # Max number of particles in the same angle bin
    MAX = 5000
    # NOTE(review): rebinds `index` -- the particle-id array above is no
    # longer reachable past this point
    index = np.zeros((refang.shape[0],MAX))
    # array to track the number of particles in each bin
    quant = np.zeros((refang.shape[0]))
    # binning: loop through particles
    for i, img in enumerate(iter_single_images):
        # direction of one particle
        phi = align[i,3]
        theta = align[i,2]
        uv = get_unitv(phi,theta)
        #print i
        #img = ndimage_file.read_image(img)
        # mirror particles pointing into the lower hemisphere
        # NOTE(review): theta compared in radians against pi -- confirm range
        if theta > math.pi:
            img = get_mirror(img)
        ndimage_file.write_image(pref_image_out, img, i)
        # multiply with all ref ang and store the largest
        ip = np.dot(unit_v,uv.T)
        # store the largest in the right bin
        bin = ip.argmax()
        # NOTE(review): quant has float dtype (np.zeros default), so
        # quant[bin] is a float index here -- only valid on older numpy; verify
        index[bin,quant[bin]] = align[i,0]
        quant[bin] += 1
        #print index
        # adjust the psi angle relative to the matched reference direction
        rpsi = refang[bin,1]
        rtheta = refang[bin,2]
        rphi = refang[bin,3]
        psi = adjust_psi(rpsi,rtheta,rphi,theta,phi)
        align[i,1] = psi
    # loop through the bins and keep only those with more than 'thres' particles
    S = []  # will hold the selected bin numbers
    count = 0
    for j in range(refang.shape[0]):
        # number of particles stored in bin j (zeros mark unused slots)
        sz = len(np.nonzero(index[j,:])[0])
        if sz > thres:
            table = index[j,0:sz]
            #print table
            filename = pref_sel + '{:05d}'.format(j)
            spider.write(filename,table)
            S.append(j)
    #print S
    spider.write(pref_sel_all,S)
.. literalinclude:: ../../arachnid/snippets/image/rotate_translate_images.py :language: python :lines: 16- :linenos: ''' from arachnid.core.metadata import format from arachnid.core.metadata import spider_utility from arachnid.core.image import ndimage_file from arachnid.core.image import rotate import itertools import sys if __name__ == '__main__': # Parameters align_file = sys.argv[1] image_file = sys.argv[2] output_file = sys.argv[3] # Read an alignment file align, header = format.read_alignment(align_file, ndarray=True) align[:, 16] -= 1 iter_single_images = ndimage_file.iter_images(image_file, align[:, 15:17]) iter_single_images = itertools.imap(rotate.rotate_image, iter_single_images, align) for i, img in enumerate(iter_single_images): ndimage_file.write_image( spider_utility.spider_filename(output_file, int(align[i, 4])), img)
To run: .. sourcecode:: sh $ python unstack.py .. note:: You must have Arachnid installed to run this script .. literalinclude:: ../../arachnid/snippets/image/unstack.py :language: python :lines: 20- :linenos: ''' from arachnid.core.metadata import spider_utility from arachnid.core.image import ndimage_file if __name__ == '__main__': # Parameters input_stack = "stack_001.dat" output_image = 'image_001_00000.dat' # Script for i, img in enumerate(ndimage_file.iter_images(input_stack)): output_image = spider_utility.spider_filename(output_image, i + 1) ndimage_file.write_image(output_image, img)