Example #1
def analyze_pixel(ibw_file, param_file):
    '''
    Analyzes a single pixel
    
    Parameters
    ----------
    ibw_file : str
        path to \*.ibw file
    param_file : str
        path to parameters.cfg file
        
    Returns
    -------
    tfp : float
        The time to first peak (in seconds) of the analyzed pixel
    '''
    signal_array = signal(ibw_file)
    n_pixels, params = configuration(param_file)
    pixel = Pixel(signal_array, params=params)

    pixel.analyze()
    pixel.plot()
    plt.xlabel('Time Step')
    plt.ylabel('Freq Shift (Hz)')

    print('tFP is', pixel.tfp, 's')

    return pixel.tfp
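
A minimal usage sketch follows; the import paths and file names are assumptions for illustration, since the snippet itself omits its imports.

import matplotlib.pyplot as plt
from ffta.load import signal, configuration  # assumed import paths
from ffta.pixel import Pixel                 # assumed import path

tfp = analyze_pixel('line_0000.ibw', 'parameters.cfg')  # hypothetical file names
print('tFP = {0:.2e} s'.format(tfp))
plt.show()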
Example #2
def process_line(args):
    """Wrapper function for line class, used in parallel processing."""

    signal_file, params, n_pixels = args
    signal_array = load.signal(signal_file)

    line_inst = line.Line(signal_array, params, n_pixels)
    tfp, shift, _ = line_inst.analyze()

    return tfp, shift
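
process_line packs its inputs into a single tuple precisely so it can be mapped over a multiprocessing pool, as Example #4 does. A sketch, with hypothetical file names; load and line are ffta modules assumed to be imported as in the source file:

import multiprocessing

n_pixels, params = load.configuration('parameters.cfg')  # as in Example #4
files = ['line_0000.ibw', 'line_0001.ibw']               # hypothetical file names

iterable = [(f, params, n_pixels) for f in files]
# On Windows this should run under an `if __name__ == '__main__':` guard
with multiprocessing.Pool(processes=2) as pool:
    results = pool.map(process_line, iterable)

tfp_list, shift_list = zip(*results)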
Example #3
File: load_hdf.py Project: lindat18/ffta
def createHDF5_file(signal, parm_dict, h5_path='', ds_name='FF_Raw'):
	"""
	Generates the HDF5 file given path to a specific file and a parameters dictionary

	Parameters
	----------
	h5_path : string
		Path to desired h5 file.

	signal : str, ndarray
		Path to the data file to be converted or a workspace array

	parm_dict : dict
		Scan parameters

	Returns
	-------
	h5_path: str
		The filename path to the H5 file create

	"""

	sg = signal

	if isinstance(signal, str):
		sg = load.signal(signal)

	if not h5_path:  # if not passed, auto-generate the name
		fname = signal.replace('/', '\\')
		h5_path = fname[:-4] + '.h5'
	else:
		fname = h5_path

	hdf = px.ioHDF5(h5_path)
	usid.hdf_utils.print_tree(hdf.file)

	ff_group = px.MicroDataGroup('FF_Group', parent='/')
	root_group = px.MicroDataGroup('/')

	#    fname = fname.split('\\')[-1][:-4]
	sg = px.MicroDataset(ds_name, data=sg, dtype=np.float32, parent=ff_group)

	if 'pnts_per_pixel' not in parm_dict.keys():
		parm_dict['pnts_per_avg'] = signal.shape[1]
		parm_dict['pnts_per_pixel'] = 1
		parm_dict['pnts_per_line'] = parm_dict['num_cols']

	ff_group.addChildren([sg])
	ff_group.attrs = parm_dict

	# Get reference for writing the data
	h5_refs = hdf.writeData(ff_group, print_log=True)

	hdf.flush()

	return h5_path
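
A sketch of calling createHDF5_file with an in-memory array instead of a file path; the array shape and the single parameter shown are illustrative only:

import numpy as np

# Illustrative: 16 pixels x 16000 points; signal.shape[1] feeds pnts_per_avg above
fake_signal = np.random.randn(16, 16000).astype(np.float32)
parm_dict = {'num_cols': 16}  # minimal illustrative scan parameters

h5_path = createHDF5_file(fake_signal, parm_dict, h5_path='ff_test.h5')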
Example #4
def main(argv=None):
    """Main function of the executable file."""
    logging.basicConfig(filename='error.log', level=logging.INFO)

    # Get the CPU count to display in help.
    cpu_count = multiprocessing.cpu_count()

    if argv is None:
        argv = sys.argv[1:]

    # Parse arguments from the command line, and print out help.
    parser = ap.ArgumentParser(description='Analysis software for FF-trEFM')
    parser.add_argument('path', nargs='?', default=os.getcwd(),
                        help='path to directory')
    parser.add_argument('-p', help='parallel computing option should be '
                        'followed by the number of CPUs.', type=int,
                        choices=range(2, cpu_count + 1))
    parser.add_argument('-v', action='version',
                        version='FFtr-EFM 2.0 Release Candidate')
    args = parser.parse_args(argv)


    # Scan the path for .ibw and .cfg files.
    path = args.path
    filelist = os.listdir(path)

    data_files = [os.path.join(path, name)
                  for name in filelist if name[-3:] == 'ibw']

    config_file = [os.path.join(path, name)
                   for name in filelist if name[-3:] == 'cfg'][0]

    # Load parameters from .cfg file.
    n_pixels, parameters = load.configuration(config_file)

    print('Recombination: ', parameters['recombination'])

    if 'phase_fitting' in parameters:
        print('Phase fitting: ', parameters['phase_fitting'])

    print('ROI: ', parameters['roi'])

    if not args.p:

        # Initialize arrays.
        tfp = np.zeros((len(data_files), n_pixels))
        shift = np.zeros((len(data_files), n_pixels))

        # Initialize plotting.
        plt.ion()

        fig = plt.figure(figsize=(12, 6), tight_layout=True)
        grid = gs.GridSpec(1, 2)
        tfp_ax = plt.subplot(grid[0, 0])
        shift_ax = plt.subplot(grid[0, 1])

        plt.setp(tfp_ax.get_xticklabels(), visible=False)
        plt.setp(tfp_ax.get_yticklabels(), visible=False)
        plt.setp(shift_ax.get_xticklabels(), visible=False)
        plt.setp(shift_ax.get_yticklabels(), visible=False)

        tfp_ax.set_title('tFP Image')
        shift_ax.set_title('Shift Image')

        kwargs = {'origin': 'lower', 'aspect': 'equal'}

        tfp_image = tfp_ax.imshow(tfp * 1e6, cmap='afmhot', **kwargs)
        shift_image = shift_ax.imshow(shift, cmap='cubehelix', **kwargs)
        text = plt.figtext(0.4, 0.1, '')
        plt.show()

        # Load every file in the file list one by one.
        for i, data_file in enumerate(data_files):

            signal_array = load.signal(data_file)
            line_inst = line.Line(signal_array, parameters, n_pixels)
            tfp[i, :], shift[i, :], _ = line_inst.analyze()
#            line_inst = line.Line(signal_array, parameters, n_pixels,fitphase=True)
#            tfpphase[i, :], _, _ = line_inst.analyze()

            tfp_image = tfp_ax.imshow(tfp * 1e6, cmap='inferno', **kwargs)
            shift_image = shift_ax.imshow(shift, cmap='cubehelix', **kwargs)

            tfp_sc = tfp[tfp.nonzero()] * 1e6
            tfp_image.set_clim(vmin=tfp_sc.min(), vmax=tfp_sc.max())

            shift_sc = shift[shift.nonzero()]
            shift_image.set_clim(vmin=shift_sc.min(), vmax=shift_sc.max())

            tfpmean = 1e6 * tfp[i, :].mean()
            tfpstd = 1e6 * tfp[i, :].std()

            string = ("Line {0:.0f}, average tFP (us) ="
                      " {1:.2f} +/- {2:.2f}".format(i + 1, tfpmean, tfpstd))

            text.remove()
            text = plt.figtext(0.35, 0.1, string)

            plt.draw()
            plt.pause(0.0001)

            del line_inst  # Delete the instance to open up memory.

    else:

        print('Starting parallel processing, using {0:1d} '
              'CPUs.'.format(args.p))
        start_time = time.time()  # Keep when it's started.

        # Create a pool of workers.
        pool = multiprocessing.Pool(processes=args.p)

        # Create the iterable and map onto the function.
        n_files = len(data_files)
        iterable = zip(data_files, [parameters] * n_files,
                       [n_pixels] * n_files)
        result = pool.map(process_line, iterable)

        # Do not forget to close spawned processes.
        pool.close()
        pool.join()

        # Unzip the result.
        tfp_list, shift_list = zip(*result)

        # Initialize arrays.
        tfp = np.zeros((n_files, n_pixels))
        shift = np.zeros((n_files, n_pixels))

        # Convert list of arrays to 2D array.
        for i in range(n_files):

            tfp[i, :] = tfp_list[i]
            shift[i, :] = shift_list[i]

        elapsed_time = time.time() - start_time

        print('It took {0:.1f} seconds.'.format(elapsed_time))

    # Filter bad pixels
    tfp_fixed, _ = badpixels.fix_array(tfp, threshold=2)
    tfp_fixed = np.array(tfp_fixed)

    # Save csv files.
    os.chdir(path)
    np.savetxt('tfp.csv', np.fliplr(tfp).T, delimiter=',')
    np.savetxt('shift.csv', np.fliplr(shift).T, delimiter=',')
    np.savetxt('tfp_fixed.csv', np.fliplr(tfp_fixed).T, delimiter=',')

    return
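
Because main parses its own argument list, it can be driven from a shell or programmatically. A sketch with a hypothetical data directory:

# Directory must hold the FF-trEFM .ibw line files plus a single .cfg file
main(['C:/data/ff_scan'])             # serial, with live plotting
main(['C:/data/ff_scan', '-p', '4'])  # parallel on 4 CPUs (assuming >= 4 cores)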
Example #5
def load_FF(data_files, parm_dict, h5_path, verbose=False, loadverbose=True,
            average=True, mirror=True):
    """
    Generates the HDF5 file given path to data_files and parameters dictionary

    Creates a Datagroup FFtrEFM_Group with a single dataset in chunks

    :param data_files: List of the \*.ibw files to be individually scanned. This is generated
        by load_folder above
    :type data_files: list
        
    :param parm_dict: Scan parameters to be saved as attributes. This is generated
        by load_folder above, or you can pass this explicitly.
    :type parm_dict: dict
        
    :param h5_path: Path to the H5 file to create or open
    :type h5_path: str
        
    :param verbose: Display outputs of each function or not
    :type verbose: bool, optional
        
    :param loadverbose: Whether to print any simple "loading Line X" statements for feedback
    :type loadverbose: bool, optional
        
    :param average: Whether to average each pixel before saving to H5. This saves both time and space
    :type average: bool, optional
        
    :param mirror: Mirrors the data when saving. This parameter is to match the FFtrEFM data
        with the associated topography, as FFtrEFM is acquired during a retrace while
        the topography is saved during a forward trace
    :type mirror: bool, optional
        
    :returns: The filename path to the H5 file created
    :rtype: str
        
    """

    # Prepare data for writing to HDF
    num_rows = parm_dict['num_rows']
    num_cols = parm_dict['num_cols']
    pnts_per_avg = parm_dict['pnts_per_avg']
    name = 'FF_Raw'

    if average:
        parm_dict['pnts_per_pixel'] = 1
        parm_dict['pnts_per_line'] = num_cols
        name = 'FF_Avg'

    pnts_per_pixel = parm_dict['pnts_per_pixel']
    pnts_per_line = parm_dict['pnts_per_line']

    dt = 1 / parm_dict['sampling_rate']
    def_vec = np.arange(0, parm_dict['total_time'], dt)
    if def_vec.shape[0] != parm_dict['pnts_per_avg']:
        def_vec = def_vec[:-1]
        # warnings.warn('Time-per-point calculation error')

    # To do: Fix the labels/attributes on the relevant data sets
    try:
        hdf = h5py.File(h5_path, 'r+')
    except OSError:  # file does not exist yet
        print('Creating HDF5 file...')
        hdf = h5py.File(h5_path, 'w')

    try:
        ff_group = hdf.create_group('FF_Group')
    except ValueError:  # group already exists
        print('Group already exists, creating a new indexed one')
        ff_group = usid.hdf_utils.create_indexed_group(hdf['/'], 'FF_Group')

    # Set up the position vectors for the data
    pos_desc = [Dimension('X', 'm', np.linspace(0, parm_dict['FastScanSize'], num_cols * pnts_per_pixel)),
                Dimension('Y', 'm', np.linspace(0, parm_dict['SlowScanSize'], num_rows))]

    spec_desc = [Dimension('Time', 's', np.linspace(0, parm_dict['total_time'], pnts_per_avg))]

    for p in parm_dict:
        ff_group.attrs[p] = parm_dict[p]
    ff_group.attrs['pnts_per_line'] = num_cols

    h5_ff = usid.hdf_utils.write_main_dataset(ff_group,  # parent HDF5 group
                                              (num_rows * num_cols * pnts_per_pixel, pnts_per_avg),
                                              # shape of Main dataset
                                              name,  # Name of main dataset
                                              'Deflection',  # Physical quantity contained in Main dataset
                                              'V',  # Units for the physical quantity
                                              pos_desc,  # Position dimensions
                                              spec_desc,  # Spectroscopic dimensions
                                              dtype=np.float32,  # data type / precision
                                              compression='gzip',
                                              main_dset_attrs=parm_dict)

    pnts_per_line = parm_dict['pnts_per_line']

    # Cycles through the remaining files. This takes a while (~few minutes)
    for num, k in enumerate(data_files):

        if loadverbose:
            fname = k.replace('/', '\\')
            print('####', fname.split('\\')[-1], '####')
            fname = str(num).rjust(4, '0')

        line_file = load.signal(k)

        if average:
            _ll = line.Line(line_file, parm_dict, n_pixels=num_cols, pycroscopy=False)
            _ll = _ll.pixel_wise_avg().T
        else:
            _ll = line_file.transpose()

        f = hdf.file[h5_ff.name]

        if mirror:
            f[pnts_per_line * num:pnts_per_line * (num + 1), :] = np.flipud(_ll[:, :])
        else:
            f[pnts_per_line * num:pnts_per_line * (num + 1), :] = _ll[:, :]

    if verbose:
        usid.hdf_utils.print_tree(hdf.file, rel_paths=True)

    hdf.flush()

    return h5_ff
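
load_FF is meant to consume the output of load_folder (Example #6), per its docstring. A sketch of that chain, with a hypothetical folder path:

h5_path, data_files, parm_dict = load_folder(folder_path='C:/data/ff_scan')

h5_ff = load_FF(data_files, parm_dict, h5_path,
                average=True,  # pixel-wise average: smaller file, writes 'FF_Avg'
                mirror=True)   # flip lines to line up with forward-trace topography
print(h5_ff.shape)  # (num_rows * num_cols * pnts_per_pixel, pnts_per_avg)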
Example #6
def load_folder(folder_path='', xy_scansize=[0, 0], file_name='FF_H5',
                textload=False, verbose=False):
    """
    Sets up loading the HDF5 files. Parses the data file list and creates the .H5 file path

    :param folder_path: Path to folder you want to process
    :type folder_path: string
        
    :param xy_scansize: Width by Height in meters (e.g. [8e-6, 4e-6]), if not in parameters file
    :type xy_scansize: 2-float array
        
    :param file_name: Desired file name, otherwise is auto-generated
    :type file_name: str
        
    :param textload: If there is a folder of .txt files instead of .ibw (older files, some synthetic data)
    :type textload: bool, optional
       
    :param verbose: Whether to output the datasets being processed
    :type verbose: bool, optional
        

    :returns: tuple (h5_path, data_files, parm_dict)
        WHERE
        str h5_path is the filename path to the H5 file created
        List data_files is the list of \*.ibw files in the folder to be processed
        dict parm_dict is the dictionary of relevant scan parameters
    """

    if any(xy_scansize) and len(xy_scansize) != 2:
        raise Exception('XY Scan Size must be either empty (in .cfg) or length-2')

    if not any(folder_path):
        folder_path = sidpy.io.interface_utils.openfile_dialog(caption='Select Config File in FF-trEFM folder',
                                                               file_types='Config Files (*.cfg)')
        folder_path = '/'.join(folder_path.split('/')[:-1])

    print(folder_path, 'folder path')
    filelist = sorted(os.listdir(folder_path))

    if not textload:
        data_files = [os.path.join(folder_path, name)
                      for name in filelist if (name[-3:] == 'ibw' and 'FFtrEFM' in name)]
    else:
        data_files = [os.path.join(folder_path, name)
                      for name in filelist if name[-3:] == 'txt']

    if not data_files:
        raise OSError('No data files found! Are these text files?')

    config_file = [os.path.join(folder_path, name)
                   for name in filelist if name[-3:] == 'cfg'][0]

    n_pixels, parm_dict = load.configuration(config_file)
    parm_dict['num_rows'] = len(data_files)
    parm_dict['num_cols'] = n_pixels

    # Add dimensions if not in the config file
    if 'FastScanSize' not in parm_dict.keys():
        if not any(xy_scansize):
            raise Exception('Need XY Scan Size! Save "Width" and "Height" in Config or pass xy_scansize')

        [width, height] = xy_scansize
        if width > 1e-3:  # if entering as microns
            width = width * 1e-6
            height = height * 1e-6

        parm_dict['FastScanSize'] = width
        parm_dict['SlowScanSize'] = height

    # sometimes width/height are stored in the config file instead
    if 'width' in parm_dict.keys():
        parm_dict['FastScanSize'] = parm_dict['width']
        parm_dict['SlowScanSize'] = parm_dict['height']

    # Check ratio is correct
    ratio = np.round(parm_dict['FastScanSize'] * 1e6, 4) / np.round(parm_dict['SlowScanSize'] * 1e6, 4)
    if n_pixels / len(data_files) != ratio:
        print(ratio)
        print(parm_dict['FastScanSize'], parm_dict['SlowScanSize'],
              n_pixels / len(data_files),
              len(data_files))
        raise Exception('X-Y Dimensions do not match filelist. Add manually to config file. Check n-pixels.')

    # add associated dimension info
    #
    # e.g. if a 16000 point signal with 2000 averages and 10 pixels 
    #   (10MHz sampling of a 1.6 ms long signal=16000, 200 averages per pixel)
    # parm_dict['pnts_per_pixel'] = 200 (# signals at each pixel)
    #           ['pnts_per_avg'] = 16000 (# pnts per signal, called an "average")
    #           ['pnts_per_line'] = 2000 (# signals in each line)

    if 'pnts_per_pixel' not in parm_dict.keys():
        print('Loading first signal')
        # Uses first data set to determine parameters
        line_file = load.signal(data_files[0])
        parm_dict['pnts_per_avg'] = int(line_file.shape[0])

        try:
            # for 1 average per pixel, the signal is 1D and this will fail
            parm_dict['pnts_per_pixel'] = int(line_file.shape[1] / parm_dict['num_cols'])
            parm_dict['pnts_per_line'] = int(line_file.shape[1])
        except IndexError:
            parm_dict['pnts_per_pixel'] = 1
            parm_dict['pnts_per_line'] = 1

    folder_path = folder_path.replace('/', '\\')
    if not os.path.exists(file_name):
        h5_path = os.path.join(folder_path, file_name) + '.h5'
    else:
        h5_path = file_name

    return h5_path, data_files, parm_dict
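
A usage sketch; per the docstring, xy_scansize is only needed when the .cfg file lacks the scan dimensions, and the folder path here is hypothetical:

h5_path, data_files, parm_dict = load_folder(folder_path='C:/data/ff_scan',
                                             xy_scansize=[8e-6, 4e-6])

print(len(data_files), 'line files found')
print(parm_dict['pnts_per_avg'], 'points per averaged signal')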
Example #7
File: load_hdf.py Project: lindat18/ffta
def load_pixel_averaged_FF(data_files, parm_dict, h5_path,
						   verbose=False, loadverbose=True, mirror=False):
	"""
	Creates a new group FF_Avg where the raw FF files are averaged together

	This function does not process the Raw data and is more useful when the resulting 
	Raw data matrix is very large (causing memory errors)
	
	This is more useful as pixel-wise averages are more relevant in FF-processing

	This Dataset is (n_pixels*n_rows, n_pnts_per_avg)

	Parameters
	----------
	h5_file : h5py File
		H5 File to be examined. File typically set as h5_file = hdf.file
		hdf = px.ioHDF5(h5_path), h5_path = path to disk

	verbose : bool, optional
		Display outputs of each function or not

	loadverbose : Boolean (optional)
		Whether to print any simple "loading Line X" statements for feedback

	Returns
	-------
	h5_avg : Dataset
		The new averaged Dataset

	"""

	hdf = px.io.HDFwriter(h5_path)

	try:
		ff_avg_group = hdf.file.create_group('FF_Group')
	except ValueError:  # group already exists
		ff_avg_group = usid.hdf_utils.create_indexed_group(hdf.file['/'], 'FF_Group')

	try:
		ff_avg_group = hdf.file[ff_avg_group.name].create_group('FF_Avg')
	except ValueError:  # dataset group already exists
		ff_avg_group = usid.hdf_utils.create_indexed_group(ff_avg_group, 'FF_Avg')

	num_rows = parm_dict['num_rows']
	num_cols = parm_dict['num_cols']
	pnts_per_avg = parm_dict['pnts_per_avg']
	pnts_per_line = parm_dict['pnts_per_line']
	pnts_per_pixel = parm_dict['pnts_per_pixel']
	parm_dict['pnts_per_pixel'] = 1  # only 1 average per pixel now
	parm_dict['pnts_per_line'] = num_cols  # equivalent now with averaged data
	n_pix = int(pnts_per_line / pnts_per_pixel)
	dt = 1 / parm_dict['sampling_rate']

	# Set up the position vectors for the data
	pos_desc = [Dimension('X', 'm', np.linspace(0, parm_dict['FastScanSize'], num_cols)),
				Dimension('Y', 'm', np.linspace(0, parm_dict['SlowScanSize'], num_rows))]
	ds_pos_ind, ds_pos_val = build_ind_val_dsets(pos_desc, is_spectral=False)

	spec_desc = [Dimension('Time', 's', np.linspace(0, parm_dict['total_time'], pnts_per_avg))]
	ds_spec_inds, ds_spec_vals = build_ind_val_dsets(spec_desc, is_spectral=True)

	for p in parm_dict:
		ff_avg_group.attrs[p] = parm_dict[p]
	ff_avg_group.attrs['pnts_per_line'] = num_cols  # to change number of pnts in a line
	ff_avg_group.attrs['pnts_per_pixel'] = 1  # to change number of pnts in a pixel

	h5_avg = usid.hdf_utils.write_main_dataset(ff_avg_group,  # parent HDF5 group
											   (num_rows * num_cols, pnts_per_avg),  # shape of Main dataset
											   'FF_Avg',  # Name of main dataset
											   'Deflection',  # Physical quantity contained in Main dataset
											   'V',  # Units for the physical quantity
											   pos_desc,  # Position dimensions
											   spec_desc,  # Spectroscopic dimensions
											   dtype=np.float32,  # data type / precision
											   compression='gzip',
											   main_dset_attrs=parm_dict)

	# Generates a line from each data file, averages, then saves the data

	for n, k in enumerate(data_files):

		if loadverbose:
			fname = k.replace('/', '\\')
			print('####', fname.split('\\')[-1], '####')
			fname = str(n).rjust(4, '0')

		line_file = load.signal(k)

		_ll = line.Line(line_file, parm_dict, n_pixels=n_pix, pycroscopy=False)
		_ll = _ll.pixel_wise_avg().T

		if mirror:
			h5_avg[n * num_cols:(n + 1) * num_cols, :] = np.flipud(_ll[:, :])
		else:
			h5_avg[n * num_cols:(n + 1) * num_cols, :] = _ll[:, :]

	if verbose:
		usid.hdf_utils.print_tree(hdf.file, rel_paths=True)
		h5_avg = usid.hdf_utils.find_dataset(hdf.file, 'FF_Avg')[0]

		print('H5_avg of size:', h5_avg.shape)

	hdf.flush()

	return h5_avg
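
As with load_FF, the inputs typically come straight from load_folder (Example #6). A sketch with a hypothetical folder path:

h5_path, data_files, parm_dict = load_folder(folder_path='C:/data/ff_scan')

h5_avg = load_pixel_averaged_FF(data_files, parm_dict, h5_path, mirror=True)
print('Averaged dataset shape:', h5_avg.shape)  # (num_rows * num_cols, pnts_per_avg)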