def open_sgm_xas(sgm_file, scan_num): print "Opening scan", str(scan_num) print "in", sgm_file f = spec.open(sgm_file) scan = f[str(scan_num)] energy_array = scan['Energy'] scaler_array = [[], [], []] scaler_array[0] = scan['TEY'] scaler_array[1] = scan['I0'] scaler_array[2] = scan['Diode'] mcadata = scan['@A1'] print "Parsing MCAs" mca_array = [[], [], [], []] for i in range(0, len(energy_array)): mca_array[0].append(mcadata[i * 4]) mca_array[1].append(mcadata[i * 4 + 1]) mca_array[2].append(mcadata[i * 4 + 2]) mca_array[3].append(mcadata[i * 4 + 3]) print "Done!" return energy_array, mca_array, scaler_array
def open_sgm_map(sgm_file, scan_num): print "Opening scan", str(scan_num) print "in", sgm_file f = spec.open(sgm_file) scan = f[str(scan_num)] hex_x = scan['Hex_XP'] hex_y = scan['Hex_YP'] scaler_array = [[], [], []] scaler_array[0] = scan['TEY'] scaler_array[1] = scan['I0'] scaler_array[2] = scan['Diode'] print "Parsing MCAs" mcadata = scan['@A1'] mca_array = [[], [], [], []] for i in range(0, len(hex_x)): mca_array[0].append(mcadata[i * 4]) mca_array[1].append(mcadata[i * 4 + 1]) mca_array[2].append(mcadata[i * 4 + 2]) mca_array[3].append(mcadata[i * 4 + 3]) print "Done!" return hex_x, hex_y, mca_array, scaler_array, scan_num
def open_sgm_map(sgm_file, scan_num): print "Opening scan", str(scan_num) print "in", sgm_file f = spec.open(sgm_file) scan=f[str(scan_num)] hex_x = scan['Hex_XP'] hex_y = scan['Hex_YP'] scaler_array = [[],[],[]] scaler_array[0] = scan['TEY'] scaler_array[1] = scan['I0'] scaler_array[2] = scan['Diode'] print "Parsing MCAs" mcadata = scan['@A1'] mca_array = [[],[],[],[]] for i in range(0,len(hex_x)): mca_array[0].append(mcadata[i*4]) mca_array[1].append(mcadata[i*4 + 1]) mca_array[2].append(mcadata[i*4 + 2]) mca_array[3].append(mcadata[i*4 + 3]) print "Done!" return hex_x, hex_y, mca_array, scaler_array, scan_num
def open_sgm_xas(sgm_file, scan_num): print "Opening scan", str(scan_num) print "in", sgm_file f = spec.open(sgm_file) scan=f[str(scan_num)] energy_array = scan['Energy'] scaler_array = [[],[],[]] scaler_array[0] = scan['TEY'] scaler_array[1] = scan['I0'] scaler_array[2] = scan['Diode'] mcadata = scan['@A1'] print "Parsing MCAs" mca_array = [[],[],[],[]] for i in range(0,len(energy_array)): mca_array[0].append(mcadata[i*4]) mca_array[1].append(mcadata[i*4 + 1]) mca_array[2].append(mcadata[i*4 + 2]) mca_array[3].append(mcadata[i*4 + 3]) print "Done!" return energy_array, mca_array, scaler_array
def convert_to_phynx( spec_filename, h5_filename=None, force=False, report=False ): """convert a spec data file to phynx and return the phynx file object""" if report: print 'Converting spec file %s to phynx'% spec_filename if h5_filename is None: h5_filename = spec_filename + '.h5' if os.path.exists(h5_filename) and force==False: raise IOError( '%s already exists! Use "force" flag to overwrite'%h5_filename ) if report: print 'making file %s'% h5_filename h5_file = open(h5_filename, 'w') spec_file = spec.open(spec_filename) for scan in spec_file.values(): if len(scan.values()[0]): convert_scan(scan, h5_file, spec_filename, report=report) if report: print 'phynx %s complete'% h5_file return h5_file
def setUp(self):
    """Open the spec file named by ``self.file_name`` for this test case.

    # lock=True: asks the spec module for a locked file handle --
    # presumably for safe concurrent access during tests; confirm
    # against the spec module's API.
    """
    self.f = spec.open(self.file_name, lock=True)
def open_spec_data_file(file_directory):
    """Open the spec data file at *file_directory* and return the file object."""
    return spec.open(file_directory)
def convert_scan(scan, h5file, spec_filename, report=False): # access a bunch of metadata before creating an hdf5 group # if specfile raises an error because the scan is empty, # we will skip it and move on to the next if report: print 'converting scan #%s'% scan.name scan_info = get_scan_metadata(scan.attrs['command'].split()) labels = [label.lower() for label in scan.keys()] # We need to update time metadata if it was a tseries: if scan_info['scan_type'] == 'tseries': scan_info['scan_shape'] = np.array([len(scan.values()[0])]) t = scan['Time'][:] scan_info['axis_info']['time']['range'] = str((t.min(), t.max())) # We need to update time metadata if it was a chess_escan: if scan_info['scan_type'] == 'chess_escan': scan_info['scan_shape'] = np.array([len(scan.values()[0])]) e = scan['Energy'][:] scan_info['axis_info']['energy']['range'] = str((e.min(), e.max())) attrs = {} attrs['acquisition_name'] = scan.name attrs['acquisition_id'] = scan.id attrs['npoints'] = len(scan.values()[0]) attrs['acquisition_command'] = scan.attrs['command'] attrs['source_file'] = scan.attrs['file_origin'] if len(scan_info['scan_shape']) < 2: if scan_info['scan_shape'] < 1: # an open-ended scan scan_info['scan_shape'] = np.array([len(scan.values()[0])]) attrs['acquisition_shape'] = str(tuple(scan_info['scan_shape'])) entry = h5file.create_group(scan.id, type='Entry', **attrs) measurement = entry.create_group('measurement', type='Measurement') positioners = measurement.create_group('positioners', type='Positioners') for motor, pos in scan.attrs['positions'].items(): try: positioners[motor] = pos except ValueError: if report: print ( """ Invalid spec motor configuration: "%s" is used to describe more than one positioner. Only the first occurance will be saved. 
Please report the problem to your beamline scientist """ % motor ) attrs = {} monitor = scan.attrs['monitor'] if monitor: attrs['monitor'] = monitor scalar_data = measurement.create_group( 'scalar_data', type='ScalarData', **attrs ) skipmode = [i for i in scan.attrs['comments'] if i.startswith('SKIPMODE')] if not skipmode: skipmode = [i for i in scan.attrs['user_comments'] if i.startswith('SKIPMODE')] if skipmode: mon, thresh = skipmode[0].split()[2:] thresh = int(thresh) skipped = scan[mon][:] < thresh kwargs = {'class':'Signal', 'counter':mon, 'threshold':thresh} masked = scalar_data.create_dataset( 'masked', dtype='uint8', data=skipped.astype('uint8'), **kwargs ) else: masked = None allmotors = scan.attrs['positions'].keys() for key, val in scan.items(): if key.startswith('@') or key in scalar_data: continue val = val[:] if (key in allmotors) \ or (key.lower() in ('energy', 'time', 'h', 'k', 'l', 'q')): kwargs = {'class':'Axis'} kwargs.update( scan_info['axis_info'].get(key.lower(), {}) ) dset = scalar_data.create_dataset( key, data=val, dtype='float32', **kwargs ) elif key.lower() == 'epoch': kwargs = {'class':'Axis'} dset = scalar_data.create_dataset( key, data=val+scan.attrs['epoch_offset'], dtype='float64', **kwargs ) else: kwargs = {'class':'Signal'} dset = scalar_data.create_dataset( key, data=val, dtype='float32', **kwargs ) # the last column should always be the primary counter scalar_data[scan.attrs['labels'][-1]].attrs['signal'] = 1 # and dont forget to include the index kwargs = {'class':'Axis'} dset = scalar_data.create_dataset( 'i', data=np.arange(len(dset)), dtype='i', **kwargs ) # process mca device files: if [i for i in scan.keys() if i.startswith('@')]: process_mca(scan, measurement, masked=masked) # we need to integrate external data files after processing the scan # in the main file, since we may reference some of that data dir, spec_filename = os.path.split(spec_filename) if not dir: dir = os.getcwd() for f in sorted(os.listdir(dir)): if ( 
f.startswith(spec_filename+'.scan%s.'%scan.name) and f.endswith('.mca') ): f = os.path.join(dir, f) if report: print 'integrating %s'%f process_mca( spec.open(f)[scan.id], measurement, masked=masked ) elif ( f.startswith(spec_filename+'_scan%03d_'%(int(scan.name))) and f.endswith('.tiff') ): from praxes.io.tifffile import TIFFfile f = os.path.join(dir, f) try: d = TIFFfile(f).asarray() except ValueError: continue r, c = d.shape if 'area_detector' not in measurement: measurement.create_group('area_detector', type='AreaDetector') ad = measurement['area_detector'] if 'counts' not in ad: ad.create_dataset( 'counts', shape=(len(scan.data), r, c), dtype='uint32', maxshape=(None, r, c) ) dset = ad['counts'] i = os.path.split(f)[-1].replace( spec_filename+'_scan%03d_'%(int(scan.name)), '' ) i = int(i.replace('.tiff', '')) try: dset[i] = d except: dset.resize((i+1, r, c)) dset[i] = d del d try: line = [i for i in scan.attrs['comments'] if i.startswith('subexposures')][0] n = int(line.split()[1].split('=')[1]) dset.attrs['subexposures'] = n except IndexError: pass if masked is not None and 'masked' not in ad: ad['masked'] = masked if report: print 'integrated %s' % f gc.collect() elif ( f.startswith(spec_filename+'.%s_'%scan.name) and f.endswith('.mar3450') ): f = os.path.join(dir, f) try: p = subprocess.Popen( ['marcvt', '-raw32', f], stdout=subprocess.PIPE, stderr=subprocess.PIPE ) res = p.wait() raw = p.stdout.readline().split()[-1] d = np.fromfile(raw, dtype='uint32') os.remove(raw) d /= 2 p = int(np.sqrt(len(d))) d.shape = (p, p) if 'mar345' not in measurement: measurement.create_group('mar345', type='Mar345') mar = measurememt['mar345'] if counts not in mar: mar.create_dataset( 'counts', shape=(scan.lines(), p, p), dtype='uint16' ) dset = mar['counts'] i = f.replace(spec_filename+'.%s_'%scan_number, '') i = int(i.replace('.mar3450', '')) dset[i] = d del d if masked is not None and 'masked' not in mar: mar['masked'] = masked if report: print 'integrated %s' % f 
gc.collect() except (OSError, ValueError): if report: sys.stdout.write( 'Found mar image %s but unable to convert it.\n' % f ) if report: sys.stdout.write( 'marcvt must be installed to do so.\n' )
def setUp(self):
    """Open the spec file named by ``self.file_name`` for this test case."""
    self.f = spec.open(self.file_name)