Example #1
def process_line(args):
    """Wrapper function for line class, used in parallel processing."""

    signal_file, params, n_pixels = args
    signal_array = load.signal(signal_file)

    line_inst = line.Line(signal_array, params, n_pixels)
    tfp, shift, _ = line_inst.analyze()

    return tfp, shift
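
A minimal usage sketch (the .cfg and .ibw file names are hypothetical; load.configuration returning (n_pixels, parameters) follows Example #4 below):

# Hypothetical file names; 'load' and 'line' are the ffta helpers used above.
n_pixels, parameters = load.configuration('scan.cfg')
tfp, shift = process_line(('line_0001.ibw', parameters, n_pixels))
# tfp and shift each hold one value per pixel of the analyzed line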
Example #2
File: polling.py Project: rajgiriUW/ffta
    def on_created(self, event):
        '''
        When a new file is detected, processes the line and then saves the
        tfp and shift data. The processed instantaneous frequencies are not
        saved, as this is a live imaging method.
        '''
        if not event.is_directory:
            self.loaded = True
            time.sleep(self.wait_per_line)

            path = event.src_path.split('\\')

            signal = pixel_utils.load.signal(event.src_path)
            this_line = line.Line(signal, self.parameters, self.n_pixels)
            self.tfp, self.shift, _ = this_line.analyze()
            print('Analyzed', path[-1], 'tFP avg =', np.mean(self.tfp),
                  's; shift =', np.mean(self.shift), 'Hz')
            self.loaded = False
            self.lines_loaded += 1
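
For context, a sketch of how a handler like this is typically attached to a directory with the watchdog library. The handler class name and constructor here are assumptions, inferred from the attributes the method reads (self.wait_per_line, self.parameters, self.n_pixels):

import time
from watchdog.observers import Observer

# LineHandler is a hypothetical name for the class defining on_created above.
handler = LineHandler(parameters, n_pixels)
observer = Observer()
observer.schedule(handler, path='C:\\data\\live_scan', recursive=False)
observer.start()
try:
    while True:
        time.sleep(1)  # keep the main thread alive while lines arrive
except KeyboardInterrupt:
    observer.stop()
observer.join()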
Example #3
def process_line(args):
    """
    Wrapper function for line class, used in parallel processing.
    
    :param args:
    :type args:
    
    :returns: tuple (tfp, shift)
        WHERE
        [type] tfp is...
        [type] shift is...
    """

    signal_file, params, n_pixels = args
    signal_array = load.signal(signal_file)

    line_inst = line.Line(signal_array, params, n_pixels)
    tfp, shift, _ = line_inst.analyze()

    return tfp, shift
Example #4
def main(argv=None):
    """Main function of the executable file."""
    logging.basicConfig(filename='error.log', level=logging.INFO)

    # Get the CPU count to display in help.
    cpu_count = multiprocessing.cpu_count()

    if argv is None:
        argv = sys.argv[1:]

    # Parse arguments from the command line, and print out help.
    parser = ap.ArgumentParser(description='Analysis software for FF-trEFM')
    parser.add_argument('path', nargs='?', default=os.getcwd(),
                        help='path to directory')
    parser.add_argument('-p', help='parallel computing option; should be '
                        'followed by the number of CPUs.', type=int,
                        choices=range(2, cpu_count + 1))
    parser.add_argument('-v', action='version',
                        version='FFtr-EFM 2.0 Release Candidate')
    args = parser.parse_args(argv)


    # Scan the path for .ibw and .cfg files.
    path = args.path
    filelist = os.listdir(path)

    data_files = [os.path.join(path, name)
                  for name in filelist if name.endswith('.ibw')]

    config_file = [os.path.join(path, name)
                   for name in filelist if name.endswith('.cfg')][0]

    # Load parameters from .cfg file.
    n_pixels, parameters = load.configuration(config_file)

    print('Recombination: ', parameters['recombination'])

    if 'phase_fitting' in parameters:
        print('Phase fitting: ', parameters['phase_fitting'])

    print('ROI: ', parameters['roi'])

    if not args.p:

        # Initialize arrays.
        tfp = np.zeros((len(data_files), n_pixels))
        shift = np.zeros((len(data_files), n_pixels))

        # Initialize plotting.
        plt.ion()

        fig = plt.figure(figsize=(12, 6), tight_layout=True)
        grid = gs.GridSpec(1, 2)
        tfp_ax = plt.subplot(grid[0, 0])
        shift_ax = plt.subplot(grid[0, 1])

        plt.setp(tfp_ax.get_xticklabels(), visible=False)
        plt.setp(tfp_ax.get_yticklabels(), visible=False)
        plt.setp(shift_ax.get_xticklabels(), visible=False)
        plt.setp(shift_ax.get_yticklabels(), visible=False)

        tfp_ax.set_title('tFP Image')
        shift_ax.set_title('Shift Image')

        kwargs = {'origin': 'lower', 'aspect': 'equal'}

        tfp_image = tfp_ax.imshow(tfp * 1e6, cmap='inferno', **kwargs)
        shift_image = shift_ax.imshow(shift, cmap='cubehelix', **kwargs)
        text = plt.figtext(0.4, 0.1, '')
        plt.show()

        # Load every file in the file list one by one.
        for i, data_file in enumerate(data_files):

            signal_array = load.signal(data_file)
            line_inst = line.Line(signal_array, parameters, n_pixels)
            tfp[i, :], shift[i, :], _ = line_inst.analyze()

            tfp_image = tfp_ax.imshow(tfp * 1e6, cmap='inferno', **kwargs)
            shift_image = shift_ax.imshow(shift, cmap='cubehelix', **kwargs)

            tfp_sc = tfp[tfp.nonzero()] * 1e6
            tfp_image.set_clim(vmin=tfp_sc.min(), vmax=tfp_sc.max())

            shift_sc = shift[shift.nonzero()]
            shift_image.set_clim(vmin=shift_sc.min(), vmax=shift_sc.max())

            tfpmean = 1e6 * tfp[i, :].mean()
            tfpstd = 1e6 * tfp[i, :].std()

            string = ("Line {0:.0f}, average tFP (us) ="
                      " {1:.2f} +/- {2:.2f}".format(i + 1, tfpmean, tfpstd))

            text.remove()
            text = plt.figtext(0.35, 0.1, string)

            plt.draw()
            plt.pause(0.0001)

            del line_inst  # Delete the instance to open up memory.

    else:

        print('Starting parallel processing, using {0:1d} '
              'CPUs.'.format(args.p))
        start_time = time.time()  # Keep when it's started.

        # Create a pool of workers.
        pool = multiprocessing.Pool(processes=args.p)

        # Create the iterable and map onto the function.
        n_files = len(data_files)
        iterable = zip(data_files, [parameters] * n_files,
                       [n_pixels] * n_files)
        result = pool.map(process_line, iterable)

        # Do not forget to close spawned processes.
        pool.close()
        pool.join()

        # Unzip the result.
        tfp_list, shift_list = zip(*result)

        # Initialize arrays.
        tfp = np.zeros((n_files, n_pixels))
        shift = np.zeros((n_files, n_pixels))

        # Convert list of arrays to 2D array.
        for i in range(n_files):
            tfp[i, :] = tfp_list[i]
            shift[i, :] = shift_list[i]

        elapsed_time = time.time() - start_time

        print('It took {0:.1f} seconds.'.format(elapsed_time))

    # Filter bad pixels
    tfp_fixed, _ = badpixels.fix_array(tfp, threshold=2)
    tfp_fixed = np.array(tfp_fixed)

    # Save csv files.
    os.chdir(path)
    np.savetxt('tfp.csv', np.fliplr(tfp).T, delimiter=',')
    np.savetxt('shift.csv', np.fliplr(shift).T, delimiter=',')
    np.savetxt('tfp_fixed.csv', np.fliplr(tfp_fixed).T, delimiter=',')

    return
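
Because main() accepts an argv list, the parallel path can also be exercised directly from Python (the directory path below is hypothetical):

# Equivalent to running the script with "C:\data\scan_01 -p 4" on the command line.
main(['C:\\data\\scan_01', '-p', '4'])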
Example #5
def load_FF(data_files, parm_dict, h5_path, verbose=False, loadverbose=True,
            average=True, mirror=True):
    """
    Generates the HDF5 file given path to data_files and parameters dictionary

    Creates a Datagroup FFtrEFM_Group with a single dataset in chunks

    :param data_files: List of the \*.ibw files to be invidually scanned. This is generated
        by load_folder above
    :type data_files: list
        
    :param parm_dict: Scan parameters to be saved as attributes. This is generated
        by load_folder above, or you can pass this explicitly.
    :type parm_dict: dict
        
    :param h5_path:
    :type h5_path : string
        
    :param verbose: Display outputs of each function or not
    :type verbose: bool, optional
        
    :param loadverbose: Whether to print any simple "loading Line X" statements for feedback
    :type loadverbose: bool, optional
        
    :param average: Whether to average each pixel before saving to H5. This saves both time and space
    :type average: bool, optional
        
    :param mirror: Mirrors the data when saving. This parameter is to match the FFtrEFM data
        with the associate topography as FFtrEFM is acquired during a retrace while
        topo is saved during a forward trace
    :type mirror: bool, optional
        
    :returns: The filename path to the H5 file created
    :rtype: str
        
    """

    # Prepare data for writing to HDF
    num_rows = parm_dict['num_rows']
    num_cols = parm_dict['num_cols']
    pnts_per_avg = parm_dict['pnts_per_avg']
    name = 'FF_Raw'

    if average:
        parm_dict['pnts_per_pixel'] = 1
        parm_dict['pnts_per_line'] = num_cols
        name = 'FF_Avg'

    pnts_per_pixel = parm_dict['pnts_per_pixel']
    pnts_per_line = parm_dict['pnts_per_line']

    dt = 1 / parm_dict['sampling_rate']
    def_vec = np.arange(0, parm_dict['total_time'], dt)
    if def_vec.shape[0] != parm_dict['pnts_per_avg']:
        def_vec = def_vec[:-1]
        # warnings.warn('Time-per-point calculation error')

    # To do: Fix the labels/attributes on the relevant data sets
    try:
        hdf = h5py.File(h5_path, 'r+')
    except OSError:
        print('Creating HDF5 file...')
        hdf = h5py.File(h5_path, 'w')

    try:
        ff_group = hdf.create_group('FF_Group')
    except ValueError:
        print('Group already exists, creating new one')
        ff_group = usid.hdf_utils.create_indexed_group(hdf['/'], 'FF_Group')

    # Set up the position vectors for the data
    pos_desc = [Dimension('X', 'm', np.linspace(0, parm_dict['FastScanSize'], num_cols * pnts_per_pixel)),
                Dimension('Y', 'm', np.linspace(0, parm_dict['SlowScanSize'], num_rows))]

    spec_desc = [Dimension('Time', 's', np.linspace(0, parm_dict['total_time'], pnts_per_avg))]

    for p in parm_dict:
        ff_group.attrs[p] = parm_dict[p]
    ff_group.attrs['pnts_per_line'] = num_cols

    h5_ff = usid.hdf_utils.write_main_dataset(ff_group,  # parent HDF5 group
                                              (num_rows * num_cols * pnts_per_pixel, pnts_per_avg),
                                              # shape of Main dataset
                                              name,  # Name of main dataset
                                              'Deflection',  # Physical quantity contained in Main dataset
                                              'V',  # Units for the physical quantity
                                              pos_desc,  # Position dimensions
                                              spec_desc,  # Spectroscopic dimensions
                                              dtype=np.float32,  # data type / precision
                                              compression='gzip',
                                              main_dset_attrs=parm_dict)

    pnts_per_line = parm_dict['pnts_per_line']

    # Cycles through the remaining files. This takes a while (~few minutes)
    for num, k in enumerate(data_files):

        if loadverbose:
            fname = k.replace('/', '\\')
            print('####', fname.split('\\')[-1], '####')
            fname = str(num).rjust(4, '0')

        line_file = load.signal(k)

        if average:
            _ll = line.Line(line_file, parm_dict, n_pixels=num_cols, pycroscopy=False)
            _ll = _ll.pixel_wise_avg().T
        else:
            _ll = line_file.transpose()

        f = hdf.file[h5_ff.name]

        if mirror:
            f[pnts_per_line * num:pnts_per_line * (num + 1), :] = np.flipud(_ll[:, :])
        else:
            f[pnts_per_line * num:pnts_per_line * (num + 1), :] = _ll[:, :]

    if verbose:
        usid.hdf_utils.print_tree(hdf.file, rel_paths=True)

    hdf.flush()

    return h5_ff
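
A usage sketch, assuming the load_folder helper mentioned in the docstring returns the file list, the parameter dictionary, and the target H5 path (its exact signature is not shown here, and the scan path is hypothetical):

# Assumed return signature for load_folder.
data_files, parm_dict, h5_path = load_folder('C:\\data\\scan_01')
h5_ff = load_FF(data_files, parm_dict, h5_path, average=True, mirror=True)
print(h5_ff.shape)  # (num_rows * num_cols * pnts_per_pixel, pnts_per_avg)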
Example #6
File: load_hdf.py Project: lindat18/ffta
def load_pixel_averaged_FF(data_files, parm_dict, h5_path,
						   verbose=False, loadverbose=True, mirror=False):
	"""
	Creates a new group FF_Avg where the raw FF files are averaged together

	This function does not process the Raw data and is more useful when the resulting 
	Raw data matrix is very large (causing memory errors)
	
	This is more useful as pixel-wise averages are more relevant in FF-processing

	This Dataset is (n_pixels*n_rows, n_pnts_per_avg)

	Parameters
	----------
	h5_file : h5py File
		H5 File to be examined. File typically set as h5_file = hdf.file
		hdf = px.ioHDF5(h5_path), h5_path = path to disk

	verbose : bool, optional
		Display outputs of each function or not

	loadverbose : Boolean (optional)
		Whether to print any simple "loading Line X" statements for feedback

	Returns
	-------
	h5_avg : Dataset
		The new averaged Dataset

	"""

	hdf = px.io.HDFwriter(h5_path)

	try:
		ff_avg_group = hdf.file.create_group('FF_Group')
	except ValueError:
		ff_avg_group = usid.hdf_utils.create_indexed_group(hdf.file['/'], 'FF_Group')

	try:
		ff_avg_group = hdf.file[ff_avg_group.name].create_group('FF_Avg')
	except ValueError:
		ff_avg_group = usid.hdf_utils.create_indexed_group(ff_avg_group, 'FF_Avg')

	num_rows = parm_dict['num_rows']
	num_cols = parm_dict['num_cols']
	pnts_per_avg = parm_dict['pnts_per_avg']
	pnts_per_line = parm_dict['pnts_per_line']
	pnts_per_pixel = parm_dict['pnts_per_pixel']
	parm_dict['pnts_per_pixel'] = 1  # only 1 average per pixel now
	parm_dict['pnts_per_line'] = num_cols  # equivalent now with averaged data
	n_pix = int(pnts_per_line / pnts_per_pixel)
	dt = 1 / parm_dict['sampling_rate']

	# Set up the position vectors for the data
	pos_desc = [Dimension('X', 'm', np.linspace(0, parm_dict['FastScanSize'], num_cols)),
				Dimension('Y', 'm', np.linspace(0, parm_dict['SlowScanSize'], num_rows))]
	ds_pos_ind, ds_pos_val = build_ind_val_dsets(pos_desc, is_spectral=False)

	spec_desc = [Dimension('Time', 's', np.linspace(0, parm_dict['total_time'], pnts_per_avg))]
	ds_spec_inds, ds_spec_vals = build_ind_val_dsets(spec_desc, is_spectral=True)

	for p in parm_dict:
		ff_avg_group.attrs[p] = parm_dict[p]
	ff_avg_group.attrs['pnts_per_line'] = num_cols  # to change number of pnts in a line
	ff_avg_group.attrs['pnts_per_pixel'] = 1  # to change number of pnts in a pixel

	h5_avg = usid.hdf_utils.write_main_dataset(ff_avg_group,  # parent HDF5 group
											   (num_rows * num_cols, pnts_per_avg),  # shape of Main dataset
											   'FF_Avg',  # Name of main dataset
											   'Deflection',  # Physical quantity contained in Main dataset
											   'V',  # Units for the physical quantity
											   pos_desc,  # Position dimensions
											   spec_desc,  # Spectroscopic dimensions
											   dtype=np.float32,  # data type / precision
											   compression='gzip',
											   main_dset_attrs=parm_dict)

	# Generates a line from each data file, averages, then saves the data

	for n, k in enumerate(data_files):

		if loadverbose:
			fname = k.replace('/', '\\')
			print('####', fname.split('\\')[-1], '####')
			fname = str(n).rjust(4, '0')

		line_file = load.signal(k)

		_ll = line.Line(line_file, parm_dict, n_pixels=n_pix, pycroscopy=False)
		_ll = _ll.pixel_wise_avg().T

		if mirror:
			h5_avg[n * num_cols:(n + 1) * num_cols, :] = np.flipud(_ll[:, :])
		else:
			h5_avg[n * num_cols:(n + 1) * num_cols, :] = _ll[:, :]

	if verbose:
		usid.hdf_utils.print_tree(hdf.file, rel_paths=True)
		h5_avg = usid.hdf_utils.find_dataset(hdf.file, 'FF_Avg')[0]

		print('H5_avg of size:', h5_avg.shape)

	hdf.flush()

	return h5_avg
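
Since the averaged dataset is flattened to (num_rows * num_cols, pnts_per_avg) with the fast scan axis varying first, a single pixel's averaged trace can be recovered by index arithmetic (a sketch; the pixel coordinates are hypothetical):

import numpy as np

row, col = 10, 32  # hypothetical pixel coordinates
num_cols = parm_dict['num_cols']
trace = h5_avg[row * num_cols + col, :]  # averaged deflection vs. time

# Or view the whole dataset as a (rows, cols, time) cube:
cube = np.reshape(h5_avg[()], (parm_dict['num_rows'], num_cols, -1))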