def run_sinogram_generation(args):
    """Make the sinograms with arguments provided by *args*.

    If *args.pass_size* is set, the sinograms are generated in several passes
    of *args.y_step* * *args.pass_size* rows each and appended to the output
    file, otherwise everything is done in one pass.
    """
    if not args.height:
        args.height = determine_shape(args, args.projections)[1] - args.y

    step = args.y_step * args.pass_size if args.pass_size else args.height
    # list() is required: on Python 3 a range object cannot be concatenated
    # with a list (the original `range(...) + [...]` raises TypeError).
    starts = list(range(args.y, args.y + args.height, step)) + [args.y + args.height]

    def generate_partial(append=False):
        """Build and run the pipeline for one vertical slab of the input."""
        pm = Ufo.PluginManager()
        graph = Ufo.TaskGraph()
        sched = Ufo.Scheduler()

        writer = pm.get_task('write')
        writer.props.filename = args.output
        # Append to the output for every pass after the first one.
        writer.props.append = append

        sinos = create_sinogram_pipeline(args, graph)
        graph.connect_nodes(sinos, writer)
        sched.run(graph)

    for i in range(len(starts) - 1):
        args.y = starts[i]
        args.height = starts[i + 1] - starts[i]
        generate_partial(append=i != 0)
def lamino(params):
    """Laminographic reconstruction utilizing all GPUs."""
    LOG.info('Z parameter: {}'.format(params.z_parameter))
    if not params.overall_angle:
        params.overall_angle = 360.
        LOG.info('Overall angle not specified, using 360 deg')
    if not params.angle:
        if params.dry_run:
            if not params.number:
                raise ValueError('--number must be specified with --dry-run')
            num_files = params.number
        else:
            num_files = len(get_filenames(params.projections))
            if not num_files:
                raise RuntimeError("No files found in `{}'".format(params.projections))
        params.angle = params.overall_angle / num_files * params.step
        LOG.info('Angle not specified, calculating from ' +
                 '{} projections and step {}: {} deg'.format(num_files, params.step,
                                                             params.angle))
    if not (params.width and params.height):
        proj_width, proj_height = determine_shape(params, params.projections)
        if not proj_width:
            raise RuntimeError("Could not determine width from the input")
    if not params.number:
        params.number = int(np.round(np.abs(params.overall_angle / params.angle)))
    if not params.width:
        params.width = proj_width
    if not params.height:
        # Usable height starts at the vertical offset *params.y*.
        params.height = proj_height - params.y
    if params.dry_run:
        LOG.info('Dummy data W x H x N: {} x {} x {}'.format(params.width, params.height,
                                                             params.number))

    params.projection_filter_scale = np.sin(np.deg2rad(params.lamino_angle))

    # For now we need to make a workaround for the memory leak, which means we need to execute
    # the passes in separate processes to clean up the low level code. For that we also need to
    # call the region-splitting in a separate function.
    # TODO: Simplify after the memory leak fix!
    queue = Queue()
    proc = Process(target=_create_runs, args=(params, queue,))
    proc.start()
    proc.join()
    x_region, y_region, regions, num_gpus = queue.get()

    # Process *num_gpus* z regions at a time, one subprocess per batch.
    for i in range(0, len(regions), num_gpus):
        z_subregion = regions[i:min(i + num_gpus, len(regions))]
        LOG.info('Computing slices {}..{}'.format(z_subregion[0][0], z_subregion[-1][1]))
        # Use integer division: the batch index passed to _run must be an int
        # (plain `/` yields a float on Python 3).
        proc = Process(target=_run,
                       args=(params, x_region, y_region, z_subregion, i // num_gpus))
        proc.start()
        proc.join()
def create_preprocessing_pipeline(args, graph, source=None, processing_node=None):
    """Create and connect preprocessing tasks in *graph* and return the last one.

    The pipeline consists of (in this order, each part optional based on
    *args*): input reading or flat field correction, absorptivity computation,
    transposition, projection filtering and phase retrieval. If *source* is
    given it is used as the input task instead of creating a reader.
    """
    if not (args.width and args.height):
        width, height = determine_shape(args, args.projections)
        if not width:
            raise RuntimeError("Could not determine width from the input")
        if not args.width:
            args.width = width
        if not args.height:
            # Usable height starts at the vertical offset *args.y*.
            args.height = height - args.y

    LOG.debug('Image width x height: %d x %d', args.width, args.height)

    if source:
        current = source
    elif args.darks and args.flats:
        current = create_flat_correct_pipeline(args, graph, processing_node=processing_node)
    else:
        current = get_task('read')
        set_node_props(current, args)
        if not args.projections:
            raise RuntimeError('--projections not set')
        setup_read_task(current, args.projections, args)

    if args.absorptivity:
        absorptivity = get_task('calculate', processing_node=processing_node)
        absorptivity.props.expression = '-log(v)'
        graph.connect_nodes(current, absorptivity)
        current = absorptivity

    if args.transpose_input:
        transpose = get_task('transpose')
        graph.connect_nodes(current, transpose)
        current = transpose
        # Transposition swaps the image dimensions.
        args.width, args.height = args.height, args.width

    if args.projection_filter != 'none':
        pf_first, pf_last = create_projection_filtering_pipeline(
            args, graph, processing_node=processing_node)
        graph.connect_nodes(current, pf_first)
        current = pf_last

    if args.energy is not None and args.propagation_distance is not None:
        pr_first, pr_last = create_phase_retrieval_pipeline(
            args, graph, processing_node=processing_node)
        graph.connect_nodes(current, pr_first)
        current = pr_last

    return current
def lamino(params):
    """Laminographic reconstruction utilizing all GPUs."""
    LOG.info('Z parameter: {}'.format(params.z_parameter))
    if not params.overall_angle:
        params.overall_angle = 360.
        LOG.info('Overall angle not specified, using 360 deg')
    if not params.angle:
        if params.dry_run:
            if not params.number:
                raise ValueError('--number must be specified with --dry-run')
            num_files = params.number
        else:
            num_files = len(get_filenames(params.projections))
            if not num_files:
                raise RuntimeError("No files found in `{}'".format(params.projections))
        params.angle = params.overall_angle / num_files * params.step
        LOG.info('Angle not specified, calculating from ' +
                 '{} projections and step {}: {} deg'.format(num_files, params.step,
                                                             params.angle))
    if not (params.width and params.height):
        proj_width, proj_height = determine_shape(params, params.projections)
        if not proj_width:
            raise RuntimeError("Could not determine width from the input")
    if not params.number:
        params.number = int(np.round(np.abs(params.overall_angle / params.angle)))
    if not params.width:
        params.width = proj_width
    if not params.height:
        # Usable height starts at the vertical offset *params.y*.
        params.height = proj_height - params.y
    if params.dry_run:
        LOG.info('Dummy data W x H x N: {} x {} x {}'.format(params.width, params.height,
                                                             params.number))

    # For now we need to make a workaround for the memory leak, which means we need to execute
    # the passes in separate processes to clean up the low level code. For that we also need to
    # call the region-splitting in a separate function.
    # TODO: Simplify after the memory leak fix!
    queue = Queue()
    proc = Process(target=_create_runs, args=(params, queue,))
    proc.start()
    proc.join()
    x_region, y_region, regions, num_gpus = queue.get()

    # Process *num_gpus* z regions at a time, one subprocess per batch.
    for i in range(0, len(regions), num_gpus):
        z_subregion = regions[i:min(i + num_gpus, len(regions))]
        LOG.info('Computing slices {}..{}'.format(z_subregion[0][0], z_subregion[-1][1]))
        # Use integer division: the batch index passed to _run must be an int
        # (plain `/` yields a float on Python 3).
        proc = Process(target=_run,
                       args=(params, x_region, y_region, z_subregion, i // num_gpus))
        proc.start()
        proc.join()
def prepare_angular_arguments(params):
    """Fill in angular arguments which were not specified on the command line.

    Derives *params.overall_angle* (default 360 deg), *params.angle* (from the
    number of projections and *params.step*) and *params.number* (from the
    overall angle and the angular step), and stores the detected input shape
    via :func:`determine_shape`.
    """
    if not params.overall_angle:
        params.overall_angle = 360.
        LOG.info('Overall angle not specified, using 360 deg')
    if not params.angle:
        if params.dry_run:
            if not params.number:
                raise ValueError('--number must be specified with --dry-run')
            num_files = params.number
        else:
            num_files = len(get_filenames(params.projections))
            if not num_files:
                raise RuntimeError("No files found in `{}'".format(params.projections))
        params.angle = params.overall_angle / num_files * params.step
        LOG.info('Angle not specified, calculating from ' +
                 '{} projections and step {}: {} deg'.format(num_files, params.step,
                                                             params.angle))
    determine_shape(params, params.projections, store=True)
    if not params.number:
        params.number = int(np.round(np.abs(params.overall_angle / params.angle)))
    if params.dry_run:
        LOG.info('Dummy data W x H x N: {} x {} x {}'.format(params.width, params.height,
                                                             params.number))
def get_projection_reader(params):
    """Create a reader task for the projections.

    Return the configured reader together with the determined projection
    width and height.
    """
    task = get_file_reader(params)
    setup_read_task(task, params.projections, params)
    frame_width, frame_height = determine_shape(params, params.projections)

    return task, frame_width, frame_height
def create_preprocessing_pipeline(args, graph, source=None, processing_node=None,
                                  cone_beam_weight=True, make_reader=True):
    """If *make_reader* is True, create a read task if *source* is None and no dark and flat
    fields are given.

    Connects (in this order, each part optional based on *args*): input
    reading or flat field correction, absorptivity computation, transposition,
    cone beam weighting, phase retrieval and projection filtering. Returns the
    last task of the pipeline (may be the first created task if nothing was
    connected before it, or None if no task was created at all).
    """
    import numpy as np
    if not (args.width and args.height):
        width, height = determine_shape(args, args.projections)
        if not width:
            raise RuntimeError("Could not determine width from the input")
        if not args.width:
            args.width = width
        if not args.height:
            # Usable height starts at the vertical offset *args.y*.
            args.height = height - args.y
    LOG.debug('Image width x height: %d x %d', args.width, args.height)
    # *current* tracks the tail of the pipeline; it stays None when no source
    # task exists yet (source is None, no flat correction, make_reader False).
    current = None
    if source:
        current = source
    elif args.darks and args.flats:
        current = create_flat_correct_pipeline(args, graph, processing_node=processing_node)
    else:
        if make_reader:
            current = get_task('read')
            set_node_props(current, args)
            if not args.projections:
                raise RuntimeError('--projections not set')
            setup_read_task(current, args.projections, args)
    if args.absorptivity:
        absorptivity = get_task('calculate', processing_node=processing_node)
        # Guard against non-positive intensities before taking the logarithm.
        absorptivity.props.expression = 'v <= 0 ? 0.0f : -log(v)'
        if current:
            graph.connect_nodes(current, absorptivity)
        current = absorptivity
    if args.transpose_input:
        transpose = get_task('transpose')
        if current:
            graph.connect_nodes(current, transpose)
        current = transpose
        # Transposition swaps the image dimensions.
        tmp = args.width
        args.width = args.height
        args.height = tmp
    # Cone beam weighting applies only when the source is at a finite
    # distance (source_position_y not all infinite).
    if cone_beam_weight and not np.all(np.isinf(args.source_position_y)):
        # Cone beam projection weight
        LOG.debug('Enabling cone beam weighting')
        weight = get_task('cone-beam-projection-weight', processing_node=processing_node)
        weight.props.source_distance = (-np.array(args.source_position_y)).tolist()
        weight.props.detector_distance = args.detector_position_y
        # Default centers: middle of the image, shifted by half a pixel for
        # odd dimensions.
        weight.props.center_position_x = args.center_position_x or [args.width / 2. + (args.width % 2) * 0.5]
        weight.props.center_position_z = args.center_position_z or [args.height / 2. + (args.height % 2) * 0.5]
        weight.props.axis_angle_x = args.axis_angle_x
        if current:
            graph.connect_nodes(current, weight)
        current = weight
    # Phase retrieval requires both energy and propagation distance.
    if args.energy is not None and args.propagation_distance is not None:
        pr_first, pr_last = create_phase_retrieval_pipeline(args, graph,
                                                            processing_node=processing_node)
        if current:
            graph.connect_nodes(current, pr_first)
        current = pr_last
    if args.projection_filter != 'none':
        pf_first, pf_last = create_projection_filtering_pipeline(args, graph,
                                                                 processing_node=processing_node)
        if current:
            graph.connect_nodes(current, pf_first)
        current = pf_last

    return current
def create_preprocessing_pipeline(args, graph, source=None, processing_node=None,
                                  cone_beam_weight=True, make_reader=True):
    """If *make_reader* is True, create a read task if *source* is None and no dark and flat
    fields are given.

    Connects (in this order, each part optional based on *args*): input
    reading or flat field correction, absorptivity computation, transposition,
    cone beam weighting, phase retrieval and projection filtering. Returns the
    last task of the pipeline (may be the first created task if nothing was
    connected before it, or None if no task was created at all).
    """
    import numpy as np
    if not (args.width and args.height):
        width, height = determine_shape(args, args.projections)
        if not width:
            raise RuntimeError("Could not determine width from the input")
        if not args.width:
            args.width = width
        if not args.height:
            # Usable height starts at the vertical offset *args.y*.
            args.height = height - args.y
    LOG.debug('Image width x height: %d x %d', args.width, args.height)
    # *current* tracks the tail of the pipeline; it stays None when no source
    # task exists yet (source is None, no flat correction, make_reader False).
    current = None
    if source:
        current = source
    elif args.darks and args.flats:
        current = create_flat_correct_pipeline(args, graph, processing_node=processing_node)
    else:
        if make_reader:
            current = get_task('read')
            set_node_props(current, args)
            if not args.projections:
                raise RuntimeError('--projections not set')
            setup_read_task(current, args.projections, args)
    if args.absorptivity:
        absorptivity = get_task('calculate', processing_node=processing_node)
        # Guard against non-positive intensities before taking the logarithm.
        absorptivity.props.expression = 'v <= 0 ? 0.0f : -log(v)'
        if current:
            graph.connect_nodes(current, absorptivity)
        current = absorptivity
    if args.transpose_input:
        transpose = get_task('transpose')
        if current:
            graph.connect_nodes(current, transpose)
        current = transpose
        # Transposition swaps the image dimensions.
        tmp = args.width
        args.width = args.height
        args.height = tmp
    # Cone beam weighting applies only when the source is at a finite
    # distance (source_position_y not all infinite).
    if cone_beam_weight and not np.all(np.isinf(args.source_position_y)):
        # Cone beam projection weight
        LOG.debug('Enabling cone beam weighting')
        weight = get_task('cone-beam-projection-weight', processing_node=processing_node)
        weight.props.source_distance = (-np.array(args.source_position_y)).tolist()
        weight.props.detector_distance = args.detector_position_y
        # Default centers: middle of the image, shifted by half a pixel for
        # odd dimensions.
        weight.props.center_position_x = args.center_position_x or [
            args.width / 2. + (args.width % 2) * 0.5
        ]
        weight.props.center_position_z = args.center_position_z or [
            args.height / 2. + (args.height % 2) * 0.5
        ]
        weight.props.axis_angle_x = args.axis_angle_x
        if current:
            graph.connect_nodes(current, weight)
        current = weight
    # Phase retrieval requires both energy and propagation distance.
    if args.energy is not None and args.propagation_distance is not None:
        pr_first, pr_last = create_phase_retrieval_pipeline(
            args, graph, processing_node=processing_node)
        if current:
            graph.connect_nodes(current, pr_first)
        current = pr_last
    if args.projection_filter != 'none':
        pf_first, pf_last = create_projection_filtering_pipeline(
            args, graph, processing_node=processing_node)
        if current:
            graph.connect_nodes(current, pf_first)
        current = pf_last

    return current