# NOTE(review): this chunk is a whitespace-mangled paste of a batch-processing
# script fragment, and it is truncated at BOTH ends: the opening of the
# `protocols = [ ... ]` list lies before this excerpt (only the closing `]` is
# visible), and the body of the final `os.walk(...)` loop lies after it.
# It therefore cannot be safely re-flowed into runnable code here.
# Visible intent: an `if False:` branch that globs '*.jpg' files from
# './Dow 30/', makes an 'analysis' output dir, and runs the protocols via
# `process.run(...)`; and an enabled `if True:` branch that instead walks all
# directories under root_dir.  Recover the original line breaks from version
# control before editing this chunk.
#Protocols.grain_size_hex(**run_args), #Protocols.grain_size(**run_args), #Protocols.thumbnails(resize=0.5, crop=0.5), ] root_dir = './' if False: # Single directory pattern = '*' data_dir = './Dow 30/' #load_args['scale'] = 1000.0/1008 # nm/pixel source_dir = os.path.join(root_dir, '', data_dir) output_dir = os.path.join(root_dir, '', data_dir, 'analysis') tools.make_dir(output_dir) infiles = glob.glob(source_dir + '/' + pattern + '.jpg') infiles.sort() print('{} infiles'.format(len(infiles))) process.run(infiles, protocols, output_dir=output_dir, force=True, load_args=load_args, run_args=run_args) if True: # Walk directories for cur_root, subdirs, files in os.walk(root_dir):
########################################
# Experiment setup
########################################
# Pick a random 32-bit run id so every synthetic run gets its own directory.
# NOTE: randrange(16**8) (exclusive upper bound) replaces the original
# randint(0, 16**8), whose inclusive maximum would format to NINE hex digits
# under '{:08x}' and break the fixed-width naming scheme.
run_id = random.randrange(16**8)
#run_id = 100
run_str = '{:08x}'.format(run_id)

#run_comment = 'adhocs'
#run_comment = 'spots'
run_comment = 'varied_sm'

run_dir = '_'.join([run_str, run_comment])
print('Experiment {:d} ({})'.format(run_id, run_dir))

#root_dir = '/home/kyager/BNL/MachineLearning/synthetic/data2/'
root_dir = 'synthetic_data/'
exp_dir = os.path.join(root_dir, run_dir)
tools.make_dir(exp_dir)

# Pre-create the analysis output sub-directories.
# 'oned' holds the 1d curve (zg).
for subdir in ['thumbnails', 'results', 'tagimgs', 'oned']:
    analysis_dir = os.path.join(root_dir, run_dir, 'analysis', subdir)
    tools.make_dir(analysis_dir)

# define_experiment is project-supplied (defined elsewhere); it derives the
# experiment configuration deterministically from run_id.
tags_experiment, calibration, mask, beam, blocking_experiment = define_experiment(
    run_id)

# Generate a bunch of images
import glob
import numpy as np
import re

from SciAnalysis import tools
from SciAnalysis.Data import *
#from SciAnalysis.XSAnalysis.Data import *
#from SciAnalysis.XSAnalysis import Protocols


# Settings
########################################
verbosity = 3
pattern = 'AJ_exp1'  # Files to consider
source_dir = '../'  # The location of the SciAnalysis outputs
output_dir = './{}/'.format(pattern)
tools.make_dir(output_dir)


# Helpers
########################################
# Matches names like '<base>_x<float>_yy<float>_..._<scan#>_saxs.xml'.
# NOTE(review): the '_yy' token (vs. '_x') looks unusual — presumably it
# matches the beamline's actual filename convention; confirm against real
# filenames before changing it.
filename_re = re.compile(
    '(.+)_x(-?\d+\.\d+)_yy(-?\d+\.\d+)_.+_(\d+)_saxs\.xml')


def parse_filename(filename, verbosity=3):
    """Extract metadata (basename, x position, ...) from a SciAnalysis
    results filename into a dict; 'filename' is always present, the other
    keys only when the name matches filename_re."""
    parsed = {'filename': filename}
    m = filename_re.match(filename)
    if m:
        parsed['basename'] = m.groups()[0]
        parsed['x'] = float(m.groups()[1])
        # NOTE(review): the source excerpt is truncated here — handling of
        # the remaining captured groups (y position, scan id) and the
        # return statement are not visible in this chunk.
def analysis(folder, step=0):
    """Run the flake-analysis pipeline on the tile images in `folder`.

    Stages are gated by `step` (lower step = run more stages):
      step <= 1  : combine tile images into maps
      step <= 5  : per-tile flake analysis / thumbnails
      step <= 7  : maps of tagged (flake-annotated) images
      step <= 10 : aggregate results across tiles
    Relies on module-level names: tools, multi_replace, glob, os,
    Protocols, Multiple, PARALLELLIZE.
    """
    # Files to analyze
    ########################################
    source_dir = './' + folder + '/'
    output_dir = source_dir + 'analysis/'
    tools.make_dir(output_dir)

    # Directory names with '[' or ']' will confuse "glob"...
    # (escape them so glob treats them literally)
    source_dir_e = multi_replace(source_dir, {'[': '[[]', ']': '[]]'})
    output_dir_e = multi_replace(output_dir, {'[': '[[]', ']': '[]]'})

    # Background
    #infiles = [ 'x003_y010', 'x004_y010' ]
    #infiles = [ '{}tile_{}.tif'.format(source_dir, infile) for infile in infiles ]

    pattern = 'tile*'
    #pattern = 'tile_x001_y005'
    #pattern = 'tile_x006_y010'
    #pattern = 'tile_x001_y00*'
    #pattern = 'tile_x00*_y00*'
    infiles = glob.glob(os.path.join(source_dir_e, pattern + '.tif'))
    #infiles = glob.glob(os.path.join(source_dir_e, pattern+'.jpg'))
    infiles.sort()

    # Candidate (min, max) display-contrast windows.
    # Bright
    image_contrast1 = (0, 1)
    image_contrast2 = (0.2, 0.8)
    image_contrast3 = (0.3, 0.7)
    image_contrast4 = (0.32, 0.6)
    image_contrast5 = (0.33, 0.55)
    image_contrast6 = (0.35, 0.48)
    # Dark
    image_contrastA = (0.45, 0.7)
    image_contrastB = (0.5, 0.6)
    # Using
    image_contrast = image_contrastA

    # Calibration: scale bar of known physical size measured in pixels.
    scale_bar_pix, scale_bar_um = 284, 50.0
    pixel_size_um = scale_bar_um / scale_bar_pix  # 0.176 um/pixel

    process = Protocols.ProcessorImRGB()

    run_args = {
        'verbosity': 3,
        'num_jobs': 5,  # Parallel processing
    }
    load_args = {
        'defer_load': False,
        'scale': pixel_size_um  # um/pixel
    }

    if step <= 1:
        # Combine images into maps
        ########################################
        load_args['defer_load'] = True
        protocols = [
            Multiple.tile_img(image_contrast=image_contrast, overlap=0.0),
            Multiple.tile_svg(subdir='../../', subdir_ext='.tif', overlap=0.0),
            Multiple.average_image(basename='tile', file_extension='.tiff'),
        ]
        process.run_multiple_all(basename='tile', infiles=infiles,
                                 protocols=protocols, output_dir=output_dir,
                                 load_args=load_args, run_args=run_args,
                                 force=False)

    if step <= 5:
        # Analyze flakes
        ########################################
        load_args['defer_load'] = False
        protocols = [
            Protocols.thumbnails_contrast(
                resize=0.5,
                image_contrast=image_contrast,
            ),
            #Protocols.find_flakes(image_contrast=image_contrast, background='./background.tif', size_threshold=50, overlays=4),
            #Protocols.flake_images(image_contrast=image_contrast3, image_contrast2=image_contrastB),
        ]
        print('Processing {} infiles...'.format(len(infiles)))
        # NOTE(review): the serial branch forces recomputation (force=True)
        # while the parallel branch does not (force=False) — confirm this
        # asymmetry is intentional.
        if PARALLELLIZE:
            process.run_parallel(infiles, protocols, output_dir=output_dir,
                                 load_args=load_args, run_args=run_args,
                                 force=False)
        else:
            process.run(infiles, protocols, output_dir=output_dir,
                        load_args=load_args, run_args=run_args, force=True)

    if step <= 7:
        # Maps of tagged images
        ########################################
        infiles = glob.glob(output_dir_e + 'find_flakes/tile_x???_y???.png')
        load_args['defer_load'] = True
        protocols = [
            Multiple.tile_img(image_contrast=image_contrast1, overlap=0.0),
            Multiple.tile_svg(subdir='../find_flakes/', subdir_ext='.png',
                              overlap=0.0),
        ]
        process.run_multiple_all(basename='tile-flakes', infiles=infiles,
                                 protocols=protocols, output_dir=output_dir,
                                 load_args=load_args, run_args=run_args,
                                 force=False)

    if step <= 10:
        # Combine results
        ########################################
        infiles = glob.glob(output_dir_e + 'find_flakes/tile_x???_y???.pkl')
        load_args['defer_load'] = True
        protocols = [
            Multiple.histogram(min_area_pixels=80, interact=False),
        ]
        process.run_multiple_all(basename='aggregate', infiles=infiles,
                                 protocols=protocols, output_dir=output_dir,
                                 load_args=load_args, run_args=run_args,
                                 force=False)
def run(self, data, output_dir, **run_args):
    """Analyze one real-space image: FFT, 1D circular average, q0 peak fit,
    Kratky-style plot, orientation analysis, and Fourier filtering.

    Output detail is gated by run_args['verbosity']; run_args['blur'] is
    required for the FFT plots; run_args['q0']/['dq'] are filled in here if
    absent (NOTE: run_args is mutated in place).  Returns the accumulated
    results dict.
    """
    output_dir = os.path.join(output_dir, data.name)
    tools.make_dir(output_dir)

    results = {}

    if run_args['verbosity'] >= 5:
        # Save the raw image and a gamma-displayed copy for debugging.
        im = PIL.Image.fromarray(np.uint8(data.data))
        outfile = self.get_outfile('original', output_dir, ext='.png', ir=True)
        im.save(outfile)
        data.set_z_display([None, None, 'gamma', 1.0])
        outfile = self.get_outfile('initial', output_dir, ext='.jpg', ir=True)
        data.plot_image(save=outfile, ztrim=[0, 0], cmap=mpl.cm.bone)

    # FFT
    data_fft = data.fft()
    results.update(data_fft.stats())

    # FFT plots
    if run_args['verbosity'] >= 1:
        data_fft.set_z_display([None, None, 'gamma', 0.3])
        outfile = self.get_outfile('fft', output_dir, ext='.png', ir=True)
        data_fft.plot(save=outfile, ztrim=[0.3, 0.001],
                      blur=run_args['blur'])
    if run_args['verbosity'] >= 2:
        data_fft.set_z_display([None, None, 'gamma', 0.3])
        outfile = self.get_outfile('fft_zoom', output_dir, ext='.png',
                                   ir=True)
        x_axis, y_axis = data_fft.xy_axes()
        # Zoom window: 2*q0 when a q0 guess was supplied, else 25% of range.
        if 'q0' in run_args:
            q_max = run_args['q0'] * 2.0
        else:
            q_max = np.max(x_axis) * 0.25
        data_fft.plot(save=outfile, ztrim=[0.3, 0.00004],
                      plot_range=[-q_max, +q_max, -q_max, +q_max],
                      blur=run_args['blur'], plot_buffers=[0, 0, 0, 0])
    if run_args['verbosity'] >= 4:
        # Extra-blurred zoom (q_max is set above; >=4 implies >=2 ran).
        outfile = self.get_outfile('fft_zoom_blur', output_dir, ext='.png',
                                   ir=True)
        data_fft.plot(
            save=outfile,
            ztrim=[0.5, 0.001],
            plot_range=[-q_max, +q_max, -q_max, +q_max],
            blur=1.0,
        )
    if run_args['verbosity'] >= 4:
        data_fft.set_z_display([None, None, 'linear', 0.3])
        outfile = self.get_outfile('fft', output_dir, ext='.png', ir=True)
        x_axis, y_axis = data_fft.xy_axes()
        q_max = np.max(x_axis) * 0.5
        data_fft.plot_components(
            save=outfile, plot_range=[-q_max, +q_max, -q_max, +q_max],
            blur=run_args['blur'])

    # 1D curve
    line = data_fft.circular_average()
    line.x_rlabel = '$q \, (\mathrm{nm}^{-1})$'
    outfile = self.get_outfile('fft_1d', output_dir, ext='.dat', ir=False)
    line.save_data(outfile)

    if 'q0' not in run_args:
        # Try to find a peak, in order to guess q0
        q0 = self.find_q0(line, output_dir, **run_args)
        run_args['q0'] = q0
        run_args['dq'] = q0 * 0.6

    # Peak fit near q0
    new_results, lines = self.analyze_q0(line, output_dir, **run_args)
    results.update(new_results)
    #results['q0']['value'] = 2*np.pi/36

    if run_args['verbosity'] >= 2:
        lines.x_label = 'q'
        lines.x_rlabel = '$q \, (\mathrm{nm}^{-1})$'
        lines.y_label = 'I'
        lines.y_rlabel = r'$\langle I \rangle \, (\mathrm{counts/pixel})$'
        outfile = self.get_outfile('fft_1d', output_dir, ext='.png', ir=True)
        plot_range = [
            0,
            np.max(line.x) * 0.75,
            np.min(line.y[1:-1]),
            np.max(line.y[1:-1])
        ]
        lines.plot(save=outfile, ylog=True, plot_range=plot_range)

    if run_args['verbosity'] >= 3:

        # Local DataLine subclass whose _plot() overlays q0 and its
        # expected higher-order positions (sqrt(3), 2, sqrt(7) — hexagonal
        # ordering) as vertical guide lines; expects a .q0 attribute.
        class DataLine_current(DataLine):
            def _plot(self, save=None, show=False,
                      plot_range=[None, None, None, None],
                      plot_buffers=[0.2, 0.05, 0.2, 0.05], error=False,
                      error_band=False, xlog=False, ylog=False, xticks=None,
                      yticks=None, dashes=None, **kwargs):
                # DataLine._plot()
                plot_args = self.plot_args.copy()
                plot_args.update(kwargs)
                self.process_plot_args(**plot_args)

                self.fig = plt.figure(figsize=(7, 7), facecolor='white')
                left_buf, right_buf, bottom_buf, top_buf = plot_buffers
                fig_width = 1.0 - right_buf - left_buf
                fig_height = 1.0 - top_buf - bottom_buf
                self.ax = self.fig.add_axes(
                    [left_buf, bottom_buf, fig_width, fig_height])

                p_args = dict([(i, plot_args[i])
                               for i in self.plot_valid_keys
                               if i in plot_args])
                self._plot_main(error=error, error_band=error_band,
                                dashes=dashes, **p_args)

                plt.xlabel(self.x_rlabel)
                plt.ylabel(self.y_rlabel)

                if xlog:
                    plt.semilogx()
                if ylog:
                    plt.semilogy()
                if xticks is not None:
                    self.ax.set_xticks(xticks)
                if yticks is not None:
                    self.ax.set_yticks(yticks)

                # Axis scaling
                xi, xf, yi, yf = self.ax.axis()
                if plot_range[0] != None:
                    xi = plot_range[0]
                if plot_range[1] != None:
                    xf = plot_range[1]
                if plot_range[2] != None:
                    yi = plot_range[2]
                if plot_range[3] != None:
                    yf = plot_range[3]
                self.ax.axis([xi, xf, yi, yf])

                self._plot_extra(**plot_args)

                # Repeat spacing label (d = 2*pi/q0, in nm).
                s = '${:.1f} \, \mathrm{{nm}}$'.format(2. * np.pi / (self.q0))
                self.ax.text(xi, yf, s, size=50, color='b',
                             verticalalignment='top',
                             horizontalalignment='left')
                self.ax.axvline(self.q0, color='b', linewidth=4)
                self.ax.text(self.q0, yf, '$1$', size=18, color='b',
                             verticalalignment='top',
                             horizontalalignment='left')
                self.ax.axvline(np.sqrt(3.0) * self.q0, color='b',
                                linewidth=2, dashes=[5, 5])
                self.ax.text(np.sqrt(3.0) * self.q0, yf, '$\sqrt{3}$',
                             size=18, color='b', verticalalignment='top',
                             horizontalalignment='left')
                self.ax.axvline(2.0 * self.q0, color='b', linewidth=2)
                self.ax.text(2 * self.q0, yf, '$2$', size=18, color='b',
                             verticalalignment='top',
                             horizontalalignment='left')
                self.ax.axvline(np.sqrt(7.0) * self.q0, color='b',
                                linewidth=2, dashes=[5, 5])
                self.ax.text(np.sqrt(7.0) * self.q0, yf, '$\sqrt{7}$',
                             size=18, color='b', verticalalignment='top',
                             horizontalalignment='left')

                if save:
                    if 'dpi' in plot_args:
                        plt.savefig(save, dpi=plot_args['dpi'])
                    else:
                        plt.savefig(save)
                if show:
                    self._plot_interact()
                    plt.show()
                plt.close(self.fig.number)

        # I*q^2 vs. q (Kratky plot style)
        line_Iq2 = DataLine_current()
        line_Iq2.x = line.x
        line_Iq2.y = line.y * np.square(line_Iq2.x)
        line_Iq2.y_label = 'Iq^2'
        line_Iq2.y_rlabel = r'$q^2 \langle I \rangle$'
        line_Iq2.plot_args['linestyle'] = '-'
        line_Iq2.plot_args['linewidth'] = 4.0
        line_Iq2.plot_args['marker'] = None
        #line_Iq2.q0 = run_args['q0']
        line_Iq2.q0 = results['q0']['value']

        if 'q0' in run_args:
            q_max = np.sqrt(2.0) * run_args['q0'] * 2.0
        else:
            q_max = np.max(line_Iq2.x) * 0.5
        # NOTE(review): the next line unconditionally overwrites the q_max
        # chosen above with a hard-coded value (based on d = 36 nm) — the
        # preceding if/else is dead code as written; confirm intent.
        q_max = np.sqrt(2.0) * (2 * np.pi / 36) * 2.0
        line_Iq2.trim(0, q_max)

        outfile = self.get_outfile('fft_1d_Iq2', output_dir, ext='.png',
                                   ir=True)
        plot_range = [0, np.max(line_Iq2.x), 0, None]
        line_Iq2.plot(save=outfile, plot_range=plot_range,
                      plot_buffers=[0, 0, 0, 0])

    if run_args['verbosity'] >= 3:
        # Orientation analysis at q0
        q0 = results['q0']['value']
        q_spread = results['sigma_q0']['value'] * 3.0
        new_results = self.orientation_q0(data_fft, q0, q_spread, output_dir,
                                          **run_args)
        results.update(new_results)

    if run_args['verbosity'] >= 3:
        # Fourier-filtered image at q0
        # (q0 is bound by the preceding verbosity>=3 block)
        q_spread = results['sigma_q0']['value'] * 5.0
        self.fourier_filter(data, q0, q_spread, output_dir, **run_args)

    return results