def __init__(self, file_names, open_option="r+", use_corrected=False, overwrite=False, batch=False):
    """Open the merged HDF5 file and select the image dataset to analyse.

    Parameters:
        file_names: file name(s); passed to hadd() to obtain the HDF5 path.
        open_option: mode handed to h5py.File (default "r+", read/write).
        use_corrected: prefer the corrected-images group when it exists,
            falling back to the raw group with a warning.
        overwrite: if True, delete any previously stored dataset with this
            analyser's output name so it can be recomputed.
        batch: stored flag (non-interactive mode for subclasses).

    Raises:
        IOError: if the HDF5 file does not exist on disk.
        KeyError: re-raised if the raw images group is missing
            (a diagnostic is printed first).
    """
    super(BaseHDF5Analyser, self).__init__()
    file_name = hadd(file_names)
    if not os.path.exists(file_name):
        # Fix: carry the file name inside the exception instead of
        # printing it and raising a bare class — callers that catch
        # IOError previously got no message at all.
        raise IOError("File not found: {0}".format(file_name))
    self.overwrite = overwrite
    self.batch = batch
    self.input_file = h5py.File(file_name, open_option)
    if use_corrected:
        if corrected_images_group in self.input_file:
            self.images = self.input_file[corrected_images_group]
        else:
            warnings.warn("""
            Could not find corrected images!
            Using the raw ones.""")
            self.images = self.input_file[raw_images_group]
    else:
        try:
            self.images = self.input_file[raw_images_group]
        except KeyError:
            print(raw_images_group, "not found!\n\n")
            raise
    self.output_directory = self.input_file.require_group(
        post_processing_dirname)
    self.n_images = len(self.images)
    # Fix: hoist the duplicate self.output_name() call.
    output_name = self.output_name()
    if self.overwrite and output_name in self.output_directory:
        del self.output_directory[output_name]
def __init__(self, image_files, flat_files, pixel, roi, steps, periods, extension, overwrite):
    """Load projection stacks and set up (or reload) the reconstruction datasets.

    Parameters:
        image_files: sample image file name(s); also determine the HDF5
            file (via hadd) that results are stored in.
        flat_files: flat-field image file name(s).
        pixel, roi: passed through to get_projection_stack().
        steps: phase steps per scan (stored as n_steps).
        periods: number of periods (stored as n_periods).
        extension: export file extension (also replaces "hdf5" in the
            export name).
        overwrite: if True, delete previously stored output datasets and
            recompute the reconstruction.

    Side effects: opens the HDF5 file in append mode and either reuses the
    three stored images or triggers initialize_reconstruction().
    """
    self.overwrite = overwrite
    self.image_array = get_projection_stack(image_files, pixel, roi, overwrite)
    self.flat_image = get_projection_stack(flat_files, pixel, roi, overwrite)
    open_option = "a"
    # Fix: hadd() was evaluated twice (file open + export name);
    # compute it once.
    merged_name = hadd(image_files)
    self.input_file = h5py.File(merged_name, open_option)
    output_names = self.set_names()
    self.output_directory = self.input_file.require_group(
        post_processing_dirname)
    # NOTE(review): str.replace swaps the FIRST "hdf5" occurrence anywhere
    # in the path, not necessarily the suffix — confirm no directory in
    # the path can contain "hdf5" before tightening this.
    self.export_name = merged_name.replace("hdf5", extension)
    self.n_steps = steps
    self.n_periods = periods
    # Integer division: flats are grouped in sets of n_steps.
    self.n_flats = self.flat_image.shape[0] // self.n_steps
    self.extension = extension
    # Overwrite if necessary
    if self.overwrite:
        self.exists_in_file = False
        for name in output_names:
            if name in self.output_directory:
                del self.output_directory[name]
        self.initialize_reconstruction()
    else:
        # Datasets already present in the file, keyed by name.
        images = {name: self.output_directory[name]
                  for name in output_names
                  if name in self.output_directory}
        if len(images) == 3:
            # All three images were saved, don't recalculate them.
            self.absorption_image = self.output_directory[
                self.absorption_image_name]
            self.differential_phase_image = self.output_directory[
                self.differential_phase_image_name]
            self.dark_field_image = self.output_directory[
                self.dark_field_image_name]
            self.exists_in_file = True
        else:
            self.exists_in_file = False
            self.initialize_reconstruction()
# NOTE(review): this chunk is collapsed onto one line and truncated at BOTH
# ends — it begins inside a commandline_parser.add_argument(...) call whose
# opening is not visible here, and it ends at "if args.show:" whose body is
# missing. Left byte-identical; reformat once the full context is available.
# What the visible part does: parses CLI args, opens the merged HDF5 file
# read-only, looks up each requested dataset (re-raising KeyError after a
# diagnostic print), and saves each dataset with plt.imsave under a file
# name derived from the dataset path ("/" replaced by "_", ".hdf5" suffix
# rewritten).
'--dataset', metavar='DATASET', nargs='+', default=['postprocessing/stack_pixel_509'], help='dataset(s) in the HDF5 file to be exported') commandline_parser.add_argument('--show', action='store_true', help='show each image.') if __name__ == '__main__': print_version(commandline_parser.prog) args = commandline_parser.parse_args() extension = args.format dataset_names = args.dataset file_name = hadd(args.file) input_file = h5py.File(file_name, "r") n = len(dataset_names) print('saving {0} images:'.format(n)) for i, name in enumerate(dataset_names): try: image_array = input_file[name] except KeyError: print(name, "not found!\n\n") raise output_name = "{0}.{1}".format(name, extension) without_slashes = output_name.replace("/", "_") full_output_path = file_name.replace(".hdf5", "_" + without_slashes) print(full_output_path) plt.imsave(full_output_path, image_array) if args.show:
def __init__(self, extension, file_name, *args, **kwargs):
    """Derive the export locations for converted images, then defer to the base class.

    The lowercased extension doubles as the output sub-directory name:
    images go under "<file stem>/<extension>", where the stem is the HDF5
    path (via hadd) without its suffix.
    """
    self.extension = extension.lower()
    stem, _suffix = os.path.splitext(hadd(file_name))
    self.parent_dir = stem
    self.image_dir = os.path.join(stem, self.extension)
    super(ImageConverter, self).__init__(file_name, *args, **kwargs)
# NOTE(review): this chunk is collapsed onto one line and begins INSIDE an
# if/else whose condition is not visible (there is a dangling "else:"), so it
# is left byte-identical; reformat once the opening branch is available.
# Visible behavior: the first branch plots a single mean visibility as a
# horizontal line with a legend; the else-branch plots per-image visibility
# with error bars. The y axis is then formatted as percentages, and unless
# --batch was given the plot blocks until ENTER is pressed. Finally a
# (pixels, visibility) stack is written to the HDF5 file, deleting any
# existing dataset of the same name first.
# NOTE(review): h5py.File(output_file_name) is called without an explicit
# mode — the default mode differs across h5py versions; confirm and pass
# one explicitly (a doc-only edit must not change the call).
plt.ylabel("visibility $2 a_1 / a_0$ ($\\%$)") mean_visibility = mean_visibility[0] line = plt.axhline(y=mean_visibility, color='r') plt.legend([line], [ "average visibility: {0:.2f} $\\%$".format(mean_visibility * 100) ]) else: plt.errorbar(np.arange(1, mean_visibility.shape[0] + 1), mean_visibility, yerr=std_dev_visibility, fmt='o') plt.xlim(0, mean_visibility.shape[0] + 1) plt.xlabel("image number") plt.ylabel("average visibility $2 a_1 / a_0$ ($\\%$)") axis.yaxis.set_major_formatter( FuncFormatter(lambda x, pos=0: "{0:.2%}".format(x))) plt.tight_layout() if not args.batch: plt.ion() plt.show() input("Press ENTER to quit.") """Save to hdf5 file""" output_object = np.vstack((pixels, visibility)) output_name = "postprocessing/visibility_{0}".format(args.pixel) output_file_name = hadd(args.file) output_file = h5py.File(output_file_name) if output_name in output_file: del output_file[output_name] output_file.create_dataset(output_name, data=output_object) print("Saved", os.path.join(output_file_name, output_name))