def main():
    """Convert single-image xrm files into single-image hdf5 files.

    Parses the BL09 TXM txt script given on the command line, creates the
    image database from it, and runs the xrm -> hdf5 conversion, optionally
    updating the DB with the resulting hdf5 records.
    """

    def str2bool(v):
        # argparse 'bool' type: any casing of yes/true/t/1 -> True,
        # everything else -> False.
        return v.lower() in ("yes", "true", "t", "1")

    description = 'Convert single image xrm files into single image hdf5 ' \
                  'files'
    parser = argparse.ArgumentParser(description=description,
                                     formatter_class=RawTextHelpFormatter)
    parser.register('type', 'bool', str2bool)

    parser.add_argument('txm_txt_script', metavar='txm_txt_script',
                        type=str, help='TXM txt script used to create the '
                                       'xrm files')
    # String defaults are run through the registered 'bool' type by argparse.
    parser.add_argument('-s', '--subfolders', type='bool',
                        default='False',
                        help='- If True: Use subfolders for indexing\n'
                             '- If False: Use general folder for indexing\n'
                             '(default: False)')
    # BUGFIX: help text claimed the default is -1, but the actual default
    # is -2 (joblib convention: all CPUs but one). Help now matches reality.
    parser.add_argument('-c', '--cores', type=int,
                        default=-2,
                        help='Number of cores used for the format conversion\n'
                             '(default: -2, all available CPUs except one)')
    parser.add_argument('-u', '--update_db', type='bool',
                        default='True',
                        help='Update DB with hdf5 records\n'
                             '(default: True)')
    args = parser.parse_args()

    db_filename = get_db_path(args.txm_txt_script)
    create_db(args.txm_txt_script)
    multiple_xrm_2_hdf5(db_filename,
                        subfolders=args.subfolders,
                        cores=args.cores,
                        update_db=args.update_db)
def main():
    """Pre-process an energyscan acquisition.

    - Convert from xrm to hdf5 individual image hdf5 files
    - Copy raw hdf5 to new files for processing
    - Crop borders
    - Normalize
    - Create stacks by date, sample, energy and jj position, with
      multiple angles in each stack
    """

    def str2bool(v):
        # argparse 'bool' type: any casing of yes/true/t/1 -> True
        return v.lower() in ("yes", "true", "t", "1")

    description = "energyscan: many repetition images at different energies." \
                  "Only one FF used for each energy"
    parser = argparse.ArgumentParser(description=description,
                                     formatter_class=RawTextHelpFormatter)
    parser.register('type', 'bool', str2bool)

    parser.add_argument('txm_txt_script', type=str,
                        help=('TXM txt script containing the commands used ' +
                              'to perform the image acquisition by the ' +
                              'BL09 TXM microscope'))
    parser.add_argument('--crop', type='bool',
                        default='True',
                        help='- If True: Crop images\n'
                             '- If False: Do not crop images\n'
                             '(default: True)')
    parser.add_argument('--table_for_stack', type=str,
                        default='hdf5_averages',
                        help=("DB table of image files to create the stacks" +
                              "(default: hdf5_averages)"))
    parser.add_argument("--db", type=str2bool, nargs='?',
                        const=True, default=True,
                        help='- If True: Create database\n'
                             '- If False: Do not create db\n'
                             '(default: True)')
    parser.add_argument('--e', type=float, nargs="*", default=[],
                        help='Energy to pre-process data')
    parser.add_argument('--stack', type='bool', nargs='?',
                        const=True, default=True,
                        help='- If True: Calculate stack\n'
                             '- If False: Do not calculate stack\n'
                             '(default: True)')
    args = parser.parse_args()

    print("\nWorkflow for energyscan experiments:\n" +
          "xrm -> hdf5 -> crop -> normalize -> align for same energy, "
          " and variable repetition ->"
          " average all images with same energy ->"
          " make normalized stacks")
    start_time = time.time()

    # Align and average by repetition
    variable = "repetition"
    db_filename = get_db_path(args.txm_txt_script)
    query = Query()
    if args.db:
        create_db(args.txm_txt_script)
    # NOTE: with default=[] this is always true; kept for safety in case
    # the default is ever changed to None.
    if args.e is not None:
        if len(args.e) == 0:
            # No specific energy given: pre-process everything.
            partial_preprocesing_escan(db_filename, variable,
                                       crop=args.crop)
            # Average multiple hdf5 files:
            # working with many single images files
            average_image_groups(db_filename, variable=variable, jj=False)
        else:
            # Restrict the pre-processing to the first requested energy.
            query_impl = (query.energy == args.e[0])
            partial_preprocesing_escan(db_filename, variable,
                                       crop=args.crop,
                                       query=query_impl)
            # Average multiple hdf5 files:
            # working with many single images files
            average_image_group_by_energy(db_filename, variable=variable,
                                          energy=args.e[0])
    if args.stack:
        # Build up hdf5 stacks from individual images.
        # Stacks of variable energy: spectroscopy stacks (energyscan)
        many_images_to_h5_stack(db_filename,
                                table_name=args.table_for_stack,
                                type_struct="normalized_spectroscopy",
                                suffix="_specnorm")

    # BUGFIX: user-facing message previously misspelled "spectrocopy".
    print("spectroscopy preprocessing took %d seconds\n" %
          (time.time() - start_time))
def main():
    """Extended depth of field pre-processing.

    - Convert from xrm to hdf5 individual image hdf5 files
    - Copy raw hdf5 to new files for processing
    - Crop borders
    - Normalize
    - Create stacks by date, sample, energy and zpz, with multiple
      angles in each stack
    """

    def str2bool(v):
        # Registered as the 'bool' argparse type below.
        return v.lower() in ("yes", "true", "t", "1")

    parser = argparse.ArgumentParser(
        description="xtend: eXTEnded Depth of field",
        formatter_class=RawTextHelpFormatter)
    parser.register('type', 'bool', str2bool)

    parser.add_argument(
        'txm_txt_script', type=str,
        help='TXM txt script containing the commands used to perform the '
             'image acquisition by the BL09 TXM microscope')
    parser.add_argument(
        '--crop', type='bool', default='True',
        help='- If True: Crop images\n'
             '- If False: Do not crop images\n'
             '(default: True)')
    parser.add_argument(
        '--table_for_stack', type=str, default='hdf5_averages',
        help='DB table of image files to create the stacks'
             '(default: hdf5_averages)')
    parser.add_argument(
        '-z', '--stacks_zp', type='bool', default='True',
        help='Create individual ZP stacks\n(default: True)')
    args = parser.parse_args()

    print("\nWorkflow with Extended Depth of Field:\n"
          "xrm -> hdf5 -> crop -> normalize -> align for same angle and"
          " variable zpz -> average all images with same angle ->"
          " make normalized stacks")
    t0 = time.time()

    db_path = get_db_path(args.txm_txt_script)
    create_db(args.txm_txt_script)

    # Convert every xrm acquisition into its own single-image hdf5 file.
    multiple_xrm_2_hdf5(db_path)

    # Duplicate the raw hdf5 files so processing never touches the originals.
    copy2proc_multiple(db_path)

    # Optional border crop on each individual image file.
    if args.crop:
        crop_images(db_path)

    # Flat-field normalization of every single-image file.
    normalize_images(db_path)

    # Optionally emit one stack per zone-plate position.
    if args.stacks_zp:
        many_images_to_h5_stack(db_path,
                                table_name="hdf5_proc",
                                type_struct="normalized",
                                suffix="_stack")

    # Register the single-image files against each other.
    align_images(db_path, align_method='cv2.TM_SQDIFF_NORMED')

    # Collapse each aligned group into one averaged image.
    average_image_groups(db_path)

    # Assemble the averaged images into multifocus hdf5 stacks.
    many_images_to_h5_stack(db_path,
                            table_name=args.table_for_stack,
                            type_struct="normalized_multifocus",
                            suffix="_FS")

    print("xtendof took %d seconds\n" % (time.time() - t0))
def main():
    """Pre-process one DB record of a biological-sample acquisition.

    - Convert from xrm to hdf5 individual image hdf5 files
    - Copy raw hdf5 to new files for processing
    - Crop borders
    - Normalize
    - Create stacks by date, sample, energy and zpz, with multiple
      angles in each stack
    """

    def str2bool(v):
        # argparse 'bool' type: any casing of yes/true/t/1 -> True
        return v.lower() in ("yes", "true", "t", "1")

    description = "pre-processing for biological samples"
    parser = argparse.ArgumentParser(description=description,
                                     formatter_class=RawTextHelpFormatter)
    parser.register('type', 'bool', str2bool)

    parser.add_argument('txm_txt_script', type=str,
                        help=('TXM txt script containing the commands used ' +
                              'to perform the image acquisition by the ' +
                              'BL09 TXM microscope'))
    parser.add_argument('--crop', type='bool',
                        default='True',
                        help='- If True: Crop images\n'
                             '- If False: Do not crop images\n'
                             '(default: True)')
    parser.add_argument('--table_for_stack', type=str,
                        default='hdf5_averages',
                        help=("DB table of image files to create the stacks" +
                              "(default: hdf5_averages)"))
    parser.add_argument('-z', '--stacks_zp', type='bool',
                        default='True',
                        help="Create individual ZP stacks\n"
                             "(default: True)")
    # NOTE(review): parsed but never used in this entry point — presumably
    # honored by partial_preprocesing or kept for CLI compatibility; verify.
    parser.add_argument('-m', '--hdf_to_mrc', type='bool',
                        default='True',
                        help="Convert FS hdf5 to mrc")
    parser.add_argument('--db', type=str2bool, nargs='?',
                        const=True, default=False,
                        help='- If True: Create database\n'
                             '- If False: Do not create db\n'
                             '(default: False)')
    # BUGFIX: was type=float, but the value is used as a TinyDB doc_id,
    # which is an integer; a non-whole float would make db.get() return
    # None and crash below. int accepts every valid input float did.
    parser.add_argument('--id', type=int,
                        help='- ID of the record in DB\n')
    args = parser.parse_args()

    print("\nWorkflow with Extended Depth of Field:\n" +
          "xrm -> hdf5 -> crop -> normalize -> align for same angle and" +
          " variable zpz -> average all images with same angle ->" +
          " make normalized stacks")
    start_time = time.time()

    # Align and average by repetition
    # variable = "sample"
    db_filename = get_db_path(args.txm_txt_script)
    query = Query()
    if args.db:
        create_db(args.txm_txt_script)
    if args.id:
        # Fetch the record and pre-process only the images matching its
        # (date, sample, energy) triple.
        db = get_db(args.txm_txt_script, use_existing_db=True)
        dbsample = db.get(doc_id=args.id)
        date = dbsample['date']
        energy = dbsample['energy']
        sample = dbsample['sample']
        query_impl = ((query.date == date) &
                      (query.sample == sample) &
                      (query.energy == energy))
        partial_preprocesing(db_filename, args.crop,
                             query=query_impl,
                             date=date, sample=sample, energy=energy,
                             stacks_zp=args.stacks_zp,
                             table_name=args.table_for_stack)

    print("Execution took %d seconds\n" % (time.time() - start_time))
def main():
    """Full pre-processing pipeline for biological samples.

    - Convert from xrm to hdf5 individual image hdf5 files
    - Copy raw hdf5 to new files for processing
    - Crop borders
    - Normalize
    - Create stacks by date, sample, energy and zpz, with multiple
      angles in each stack
    - Optionally convert the resulting FS stacks from hdf5 to mrc
    """

    def str2bool(v):
        # argparse 'bool' type: any casing of yes/true/t/1 -> True
        return v.lower() in ("yes", "true", "t", "1")

    description = "pre-processing for biological samples"
    parser = argparse.ArgumentParser(description=description,
                                     formatter_class=RawTextHelpFormatter)
    parser.register('type', 'bool', str2bool)

    parser.add_argument('txm_txt_script', type=str,
                        help=('TXM txt script containing the commands used ' +
                              'to perform the image acquisition by the ' +
                              'BL09 TXM microscope'))
    parser.add_argument('--crop', type='bool',
                        default='True',
                        help='- If True: Crop images\n'
                             '- If False: Do not crop images\n'
                             '(default: True)')
    parser.add_argument('--table_for_stack', type=str,
                        default='hdf5_averages',
                        help=("DB table of image files to create the stacks" +
                              "(default: hdf5_averages)"))
    parser.add_argument('-z', '--stacks_zp', type='bool',
                        default='True',
                        help="Create individual ZP stacks\n"
                             "(default: True)")
    parser.add_argument('-m', '--hdf_to_mrc', type='bool',
                        default='True',
                        help="Convert FS hdf5 to mrc")
    args = parser.parse_args()

    print("\nWorkflow with Extended Depth of Field:\n" +
          "xrm -> hdf5 -> crop -> normalize -> align for same angle and" +
          " variable zpz -> average all images with same angle ->" +
          " make normalized stacks")
    start_time = time.time()

    db_filename = get_db_path(args.txm_txt_script)
    create_db(args.txm_txt_script)

    # Multiple xrm 2 hdf5 files: working with many single images files
    multiple_xrm_2_hdf5(db_filename)

    # Copy of multiple hdf5 raw data files to files for processing
    copy2proc_multiple(db_filename)

    # Multiple files hdf5 images crop: working with single images files
    if args.crop:
        crop_images(db_filename)

    # Normalize multiple hdf5 files: working with many single images files
    normalize_images(db_filename)

    if args.stacks_zp:
        many_images_to_h5_stack(db_filename,
                                table_name="hdf5_proc",
                                type_struct="normalized",
                                suffix="_stack")

    # Align multiple hdf5 files: working with many single images files
    align_images(db_filename, align_method='cv2.TM_SQDIFF_NORMED')

    # Average multiple hdf5 files: working with many single images files
    average_image_groups(db_filename)

    # Build up hdf5 stacks from individual images
    many_images_to_h5_stack(db_filename,
                            table_name=args.table_for_stack,
                            type_struct="normalized_multifocus",
                            suffix="_FS")

    # Convert FS stacks from hdf5 to mrc.
    # (This and the divider below were bare string statements in the
    # original; converted to real comments.)
    if args.hdf_to_mrc:
        db = TinyDB(db_filename)
        stack_table = db.table("hdf5_stacks")
        print("Converting FS stacks from hdf5 to mrc")
        # Loop-invariant command prefix hoisted out of the loop.
        image_convert = "scipion xmipp_image_convert"
        for record in stack_table.all():
            fn_hdf5 = record["filename"]
            raw_fname, _ = os.path.splitext(fn_hdf5)
            mrc_file = raw_fname + '.mrc'
            tree_hdf5 = 'TomoNormalized/TomoNormalized@' + fn_hdf5
            command = image_convert + " -i " + tree_hdf5 + " -o " + mrc_file
            print(command)
            print("converting: " + mrc_file)
            # NOTE(review): shell command built by string concatenation;
            # filenames with spaces/shell metacharacters would break this.
            # Consider subprocess.run([...], shell=False).
            os.system(command)
            time.sleep(2)
    # --------------------------------

    print("Execution took %d seconds\n" % (time.time() - start_time))
def main():
    """Pre-process a magnetism acquisition.

    - Convert from xrm to hdf5 individual image hdf5 files
    - Copy raw hdf5 to new files for processing
    - Crop borders
    - Normalize
    - Create stacks by date, sample, energy and jj position, with
      multiple angles in each stack
    """

    def str2bool(v):
        # argparse 'bool' type: any casing of yes/true/t/1 -> True
        return v.lower() in ("yes", "true", "t", "1")

    description = "magnetism: many repetition images at different angles." \
                  " Normally using 2 different polarizations by setting the" \
                  " JJ positions to change to circular left and right" \
                  " polarizations"
    parser = argparse.ArgumentParser(description=description,
                                     formatter_class=RawTextHelpFormatter)
    parser.register('type', 'bool', str2bool)

    parser.add_argument('txm_txt_script', type=str,
                        help=('TXM txt script containing the commands used ' +
                              'to perform the image acquisition by the ' +
                              'BL09 TXM microscope'))
    parser.add_argument('--crop', type='bool',
                        default='True',
                        help='- If True: Crop images\n'
                             '- If False: Do not crop images\n'
                             '(default: True)')
    parser.add_argument('--table_for_stack', type=str,
                        default='hdf5_averages',
                        help=("DB table of image files to create the stacks" +
                              "(default: hdf5_averages)"))
    parser.add_argument("--db", type=str2bool, nargs='?',
                        const=True, default=False,
                        help='- If True: Create database\n'
                             '- If False: Do not create db\n'
                             '(default: False)')
    parser.add_argument("--ff", type=str2bool, nargs='?',
                        const=True, default=False,
                        help='- If True: Pre-process FF images\n'
                             '- If False: Do not pre-process FF images\n'
                             '(default: False)')
    parser.add_argument('--th', type=float, nargs="*",
                        help=('Angle theta to pre-process data' +
                              ' referred to this angle'))
    parser.add_argument('--stack', type='bool', nargs='?',
                        const=True, default=False,
                        help='- If True: Calculate stack\n'
                             '- If False: Do not calculate stack\n'
                             '(default: False)')
    args = parser.parse_args()

    print("\nWorkflow for magnetism experiments:\n" +
          "xrm -> hdf5 -> crop -> normalize -> align for same angle, same"
          " jj position and variable repetition ->"
          " average all images with same angle and"
          " same jj position ->" +
          " make normalized stacks")
    start_time = time.time()

    # Align and average by repetition
    variable = "repetition"
    db_filename = get_db_path(args.txm_txt_script)
    query = Query()
    if args.db:
        create_db(args.txm_txt_script)
    if args.ff:
        # Pre-process the flat-field images only.
        # (tinydb queries require '==', not 'is'.)
        partial_preprocesing(db_filename, variable, args.crop,
                             query.FF == True, is_ff=True)
    # --th omitted -> args.th is None -> skip; '--th' with no value -> [].
    if args.th is not None:
        if len(args.th) == 0:
            # No specific angle given: pre-process all non-FF images.
            partial_preprocesing(db_filename, variable, args.crop,
                                 query.FF == False)
            # Average multiple hdf5 files:
            # working with many single images files
            average_image_groups(db_filename, variable=variable)
        else:
            # Restrict the pre-processing to the first requested angle.
            partial_preprocesing(db_filename, variable, args.crop,
                                 query.angle == args.th[0])
            # Average multiple hdf5 files:
            # working with many single images files
            average_image_group_by_angle(db_filename, variable=variable,
                                         angle=args.th[0])
    if args.stack:
        # Build up hdf5 stacks from individual images
        # Stack of variable angle. Each of the images has been done by
        # averaging many repetitions of the image at the same energy, jj,
        # angle... The number of repetitions by each of the images in this
        # stack files could be variable.
        many_images_to_h5_stack(
            db_filename, table_name=args.table_for_stack,
            type_struct="normalized_magnetism_many_repetitions",
            suffix="_FS")

    # BUGFIX: a stray unterminated '"""' followed this print in the
    # original source (SyntaxError); removed.
    print("magnetism preprocessing took %d seconds\n" %
          (time.time() - start_time))