def __init__(self, x=None, y=None, dx=None, dy=None, lam=None, dlam=None, isSesans=False):
    """
    Combined plottable / loader 1D data container.

    :param x: independent axis values (defaults to an empty list)
    :param y: dependent axis values (defaults to an empty list)
    :param dx: uncertainty on x
    :param dy: uncertainty on y
    :param lam: wavelength values (SESANS)
    :param dlam: wavelength spread (SESANS)
    :param isSesans: True when the data are SESANS data
    """
    x = [] if x is None else x
    y = [] if y is None else y
    self.isSesans = isSesans
    PlotData1D.__init__(self, x, y, dx, dy, lam, dlam)
    LoadData1D.__init__(self, x, y, dx, dy, lam, dlam, isSesans)
    self.id = None
    self.list_group_id = []
    self.group_id = None
    self.is_data = True
    self.path = None
    # SESANS data are plotted on the raw ("x"/"y") axes; other data
    # leave the transforms unset for the plotter to choose.
    self.xtransform = "x" if self.isSesans else None
    self.ytransform = "y" if self.isSesans else None
    self.title = ""
    self.scale = None
def __init__(self, x, y, dx=None, dy=None, smearer=None, data=None, lam=None, dlam=None):
    """
    :param smearer: is an object of class QSmearer or SlitSmearer
       that will smear the theory data (slit smearing or resolution
       smearing) when set.

       The proper way to set the smearing object would be to
       do the following: ::

           from sas.sascalc.fit.qsmearing import smear_selection
           smearer = smear_selection(some_data)
           fitdata1d = FitData1D( x= [1,3,..,],
                                   y= [3,4,..,8],
                                   dx=None,
                                   dy=[1,2...], smearer= smearer)

    :Note: that some_data _HAS_ to be of
        class DataLoader.data_info.Data1D
        Setting it back to None will turn smearing off.

    """
    Data1D.__init__(self, x=x, y=y, dx=dx, dy=dy, lam=lam, dlam=dlam)
    self.num_points = len(x)
    self.sas_data = data
    self.smearer = smearer
    self._first_unsmeared_bin = None
    self._last_unsmeared_bin = None
    # Check error bar; if no error bar found, set it constant(=1)
    # TODO: Should provide an option for users to set it like percent,
    # constant, or dy data
    # BUGFIX: the previous test was ``dy.all() == 0`` which is True as
    # soon as ANY dy element is zero (and crashes for list input).  We
    # only fall back to unit errors when dy is missing, empty, or
    # entirely zero.
    dy = None if dy is None else np.asarray(dy)
    if dy is None or dy.size == 0 or np.all(dy == 0):
        self.dy = np.ones(len(y))
    else:
        self.dy = dy.copy()

    ## Min Q-value
    #Skip the Q=0 point, especially when y(q=0)=None at x[0].
    if min(self.x) == 0.0 and self.x[0] == 0 and \
            not np.isfinite(self.y[0]):
        self.qmin = min(self.x[self.x != 0])
    else:
        self.qmin = min(self.x)
    ## Max Q-value
    self.qmax = max(self.x)

    # Range used for input to smearing
    self._qmin_unsmeared = self.qmin
    self._qmax_unsmeared = self.qmax
    # Identify the bin range for the unsmeared and smeared spaces
    self.idx = (self.x >= self.qmin) & (self.x <= self.qmax)
    self.idx_unsmeared = (self.x >= self._qmin_unsmeared) \
        & (self.x <= self._qmax_unsmeared)
def __init__(self, x, y, dx=None, dy=None, smearer=None, data=None, lam=None, dlam=None):
    """
    :param smearer: is an object of class QSmearer or SlitSmearer
       that will smear the theory data (slit smearing or resolution
       smearing) when set.

       The proper way to set the smearing object would be to
       do the following: ::

           from sas.sascalc.data_util.qsmearing import smear_selection
           smearer = smear_selection(some_data)
           fitdata1d = FitData1D( x= [1,3,..,],
                                   y= [3,4,..,8],
                                   dx=None,
                                   dy=[1,2...], smearer= smearer)

    :Note: that some_data _HAS_ to be of
        class DataLoader.data_info.Data1D
        Setting it back to None will turn smearing off.

    """
    Data1D.__init__(self, x=x, y=y, dx=dx, dy=dy, lam=lam, dlam=dlam)
    self.num_points = len(x)
    self.sas_data = data
    self.smearer = smearer
    self._first_unsmeared_bin = None
    self._last_unsmeared_bin = None
    # Check error bar; if no error bar found, set it constant(=1)
    # TODO: Should provide an option for users to set it like percent,
    # constant, or dy data
    # BUGFIX: the previous test was ``dy.all() == 0`` which is True as
    # soon as ANY dy element is zero (and crashes for list input).  We
    # only fall back to unit errors when dy is missing, empty, or
    # entirely zero.
    dy = None if dy is None else np.asarray(dy)
    if dy is None or dy.size == 0 or np.all(dy == 0):
        self.dy = np.ones(len(y))
    else:
        self.dy = dy.copy()

    ## Min Q-value
    #Skip the Q=0 point, especially when y(q=0)=None at x[0].
    if min(self.x) == 0.0 and self.x[0] == 0 and \
            not np.isfinite(self.y[0]):
        self.qmin = min(self.x[self.x != 0])
    else:
        self.qmin = min(self.x)
    ## Max Q-value
    self.qmax = max(self.x)

    # Range used for input to smearing
    self._qmin_unsmeared = self.qmin
    self._qmax_unsmeared = self.qmax
    # Identify the bin range for the unsmeared and smeared spaces
    self.idx = (self.x >= self.qmin) & (self.x <= self.qmax)
    self.idx_unsmeared = (self.x >= self._qmin_unsmeared) \
        & (self.x <= self._qmax_unsmeared)
def test_allowed_bins(self):
    """
    Guinier.get_allowed_bins must reject only the q=0 point, regardless
    of which array carries the zero.
    """
    # The original wrapped each array in a redundant nested
    # np.asarray(np.asarray(...)); a single asarray is sufficient.
    x = np.asarray([0, 1, 2, 3])
    y = np.asarray([1, 1, 1, 1])
    dy = np.asarray([1, 1, 1, 1])
    g = invariant.Guinier()

    data = Data1D(x=x, y=y, dy=dy)
    self.assertEqual(g.get_allowed_bins(data), [False, True, True, True])

    data = Data1D(x=y, y=x, dy=dy)
    self.assertEqual(g.get_allowed_bins(data), [False, True, True, True])

    data = Data1D(x=dy, y=y, dy=x)
    self.assertEqual(g.get_allowed_bins(data), [False, True, True, True])
def __str__(self):
    """
    Return the printable representation, delegating to LoadData1D and
    appending a trailing newline.
    """
    return "%s\n" % LoadData1D.__str__(self)
def read(self, path):
    """
    Load data file

    @param path: file path
    @return: Data1D object, or None
    @raise RuntimeError: when the file can't be opened
    @raise ValueError: when the length of the data vectors are inconsistent
    """
    if not os.path.isfile(path):
        raise RuntimeError("%s is not a file" % path)

    basename = os.path.basename(path)
    root, extension = os.path.splitext(basename)
    if extension.lower() not in self.ext:
        # Unsupported extension: signal "not handled" to the caller.
        return None

    try:
        input_f = open(path, 'r')
    except Exception:
        raise RuntimeError("ascii_reader: cannot open %s" % path)
    try:
        buff = input_f.read()
    finally:
        # BUGFIX: the original never closed the file handle.
        input_f.close()

    lines = buff.split('\n')
    x = np.zeros(0)
    y = np.zeros(0)
    dy = np.zeros(0)
    output = Data1D(x, y, dy=dy)
    self.filename = output.filename = basename

    for line in lines:
        line = line.strip()
        if not line:
            # BUGFIX: skip blank lines (e.g. a trailing newline) instead
            # of crashing on float('').
            continue
        x = np.append(x, float(line))

    output.x = x
    return output
def compute(self):
    """
    Compute the 1D correlation function of the extrapolated data via a
    discrete cosine transform and report it back through ``complete``.
    """
    extrap_q = self.extrapolation.x
    extrap_iq = self.extrapolation.y
    data_q = self.data.x
    bg = self.background

    self.ready(delay=0.0)
    self.update(msg="Starting Fourier transform.")
    self.ready(delay=0.0)
    if self.isquit():
        return

    try:
        # DCT of q^2 * (I(q) - background), normalised to its maximum.
        gamma = dct((extrap_iq - bg) * extrap_q**2)
        gamma = gamma / gamma.max()
    except:
        self.update(msg="Fourier transform failed.")
        self.complete(transform=None)
        return

    if self.isquit():
        return
    self.update(msg="Fourier transform completed.")

    # Real-space axis corresponding to the DCT bins, scaled by the
    # measured q spacing.
    num_pts = len(extrap_q)
    xs = np.pi * np.arange(num_pts, dtype=np.float32) \
        / (data_q[1] - data_q[0]) / num_pts
    self.complete(transform=Data1D(xs, gamma))
def __init__(self, x=None, y=None, dy=None):
    """
    Theory-curve flavour of the 1D data object: a plottable theory
    combined with the loader data container.

    :param x: independent axis values (defaults to an empty list)
    :param y: dependent axis values (defaults to an empty list)
    :param dy: uncertainty on y
    """
    if x is None:
        x = []
    if y is None:
        y = []
    PlotTheory1D.__init__(self, x, y, dy)
    # NOTE(review): dy is passed as the THIRD positional argument here,
    # while the sibling class calls LoadData1D.__init__(self, x, y, dx, dy).
    # If LoadData1D's signature is (x, y, dx=None, dy=None, ...), dy would
    # bind to the dx slot -- confirm against LoadData1D's definition.
    LoadData1D.__init__(self, x, y, dy)
    self.id = None
    self.list_group_id = []
    self.group_id = None
    # NOTE(review): a theory curve flagged as is_data=True looks
    # suspicious -- presumably callers reset this; verify.
    self.is_data = True
    self.path = None
    self.xtransform = None
    self.ytransform = None
    self.title = ""
    self.scale = None
def __init__(self, x=None, y=None, dx=None, dy=None):
    """
    Plottable + loadable 1D data container.

    :param x: independent axis values (defaults to an empty list)
    :param y: dependent axis values (defaults to an empty list)
    :param dx: uncertainty on x
    :param dy: uncertainty on y
    """
    x = [] if x is None else x
    y = [] if y is None else y
    PlotData1D.__init__(self, x, y, dx, dy)
    LoadData1D.__init__(self, x, y, dx, dy)
    self.id = None
    self.list_group_id = []
    self.group_id = None
    self.is_data = True
    self.path = None
    self.xtransform = None
    self.ytransform = None
    self.title = ""
    self.scale = None
def setUp(self):
    """
    Generate a power law distribution. After extrapolating, we will
    verify that we obtain the scale and m parameters
    """
    self.scale = 1.5
    self.m = 3.0
    x = np.arange(0.0001, 0.1, 0.0001)

    def power_law(q):
        # I(q) = scale * q^(-m)
        return self.scale * math.pow(q, -1.0 * self.m)

    y = np.asarray([power_law(q) for q in x])
    # 10% relative errors
    dy = y * .1
    self.data = Data1D(x=x, y=y, dy=dy)
def setUp(self):
    """
    Generate a Guinier distribution. After extrapolating, we will
    verify that we obtain the scale and rg parameters
    """
    self.scale = 1.5
    self.rg = 30.0
    x = np.arange(0.0001, 0.1, 0.0001)

    def guinier(q):
        # Guinier form: I(q) = scale * exp(-(q Rg)^2 / 3)
        return self.scale * math.exp(-(q * self.rg)**2 / 3.0)

    y = np.asarray([guinier(q) for q in x])
    # 10% relative errors
    dy = y * .1
    self.data = Data1D(x=x, y=y, dy=dy)
def test_linearization(self):
    """
    Check that the linearization process filters out points that
    can't be transformed
    """
    # The original wrapped each array in a redundant nested
    # np.asarray(np.asarray(...)); a single asarray is sufficient.
    x = np.asarray([0, 1, 2, 3])
    y = np.asarray([1, 1, 1, 1])
    g = invariant.Guinier()
    data_in = Data1D(x=x, y=y)
    data_out = g.linearize_data(data_in)
    x_out, y_out, dy_out = data_out.x, data_out.y, data_out.dy
    # The q=0 point cannot be linearized, so one point is dropped.
    self.assertEqual(len(x_out), 3)
    self.assertEqual(len(y_out), 3)
    self.assertEqual(len(dy_out), 3)
def test_error_treatment(self):
    """
    A dy of None, empty, or all-zero must be replaced by unit errors
    at __init__ time.
    """
    # The original wrapped each array in a redundant nested
    # np.asarray(np.asarray(...)); a single asarray is sufficient.
    x = np.asarray([0, 1, 2, 3])
    y = np.asarray([1, 1, 1, 1])

    # These are all the values of the dy array that would cause
    # us to set all dy values to 1.0 at __init__ time.
    dy_list = [[], None, [0, 0, 0, 0]]

    for dy in dy_list:
        data = Data1D(x=x, y=y, dy=dy)
        inv = invariant.InvariantCalculator(data)
        self.assertEqual(len(inv._data.x), len(inv._data.dy))
        self.assertEqual(len(inv._data.dy), 4)
        for i in range(4):
            self.assertEqual(inv._data.dy[i], 1)
def compute_extrapolation(self):
    """
    Extrapolate and interpolate scattering data

    :return: The extrapolated data
    """
    q, iq = self._data.x, self._data.y
    params, s2 = self._fit_data(q, iq)
    # Extend the q range to 100x the measured maximum, keeping the
    # original grid spacing.
    qs = np.arange(0, q[-1] * 100, (q[1] - q[0]))
    iqs = s2(qs)
    return params, Data1D(qs, iqs)
def reset_state(self):
    """
    Reset the reader to a clean state: a fresh (empty) current dataset,
    cleared bookkeeping lists, and brand-new metadata component objects.
    """
    # Empty 1D dataset: x, y, dx, dy all zero-length.
    self.current_dataset = Data1D(np.empty(0), np.empty(0),
                                  np.empty(0), np.empty(0))
    self.datasets = []
    self.raw_data = None
    self.errors = set()
    self.logging = []
    self.output = []
    # Fresh metadata components for the next file.
    self.detector = Detector()
    self.collimation = Collimation()
    self.aperture = Aperture()
    self.process = Process()
    self.source = Source()
    self.sample = Sample()
    self.trans_spectrum = TransmissionSpectrum()
    # NOTE(review): presumably line counts used to trim header/footer
    # regions -- confirm where upper/lower are consumed.
    self.upper = 5
    self.lower = 5
def set_data(self, data, scale=1):
    """
    Prepares the data for analysis

    :param data: a DataLoader.Data1D instance (or subclass)
    :param scale: multiplicative scale factor applied to the data
    :return: new_data = data * scale - background
    """
    # NOTE(review): despite the docstring, no background is subtracted
    # here -- only the scale factor is applied.  Confirm whether the
    # background is handled elsewhere.
    if data is None:
        return
    # Only process data of the class Data1D
    if not issubclass(data.__class__, Data1D):
        raise ValueError("Data must be of the type DataLoader.Data1D")

    # Prepare the data
    new_data = Data1D(x=data.x, y=data.y)
    new_data *= scale

    # Ensure the errors are set correctly: fall back to unit errors when
    # dy is missing, mismatched in length, or identically zero
    # (all-zero <=> min == 0 and max == 0 for non-negative errors).
    # NOTE(review): data.dy is never copied into new_data above, so this
    # branch presumably always resets dy to ones unless Data1D.__mul__
    # fills it in -- verify intent.
    if new_data.dy is None or len(new_data.x) != len(new_data.dy) or \
            (min(new_data.dy) == 0 and max(new_data.dy) == 0):
        new_data.dy = np.ones(len(new_data.x))
    self._data = new_data
def load_data(filename="98929.txt"):
    """
    Load a two-column (q, I(q)) text file into a Data1D object.

    :param filename: name of the data file, resolved via ``find``
    :return: Data1D with x=q and y=I(q)
    """
    columns = np.loadtxt(find(filename), dtype=np.float64)
    return Data1D(x=columns[:, 0], y=columns[:, 1])
def read(self, path):
    """
    Load data file

    Parses a 2- to 4-column ASCII file (q, I, [dI], [dq]) using a small
    state machine: runs of consistently-formatted numeric lines become
    "candidate" data; once ``min_data_pts`` consecutive candidates are
    seen, the run is accepted as real data and a format change (footer)
    ends parsing.

    :param path: file path
    :return: Data1D object, or None
    :raise RuntimeError: when the file can't be opened
    :raise ValueError: when the length of the data vectors are
        inconsistent
    """
    if os.path.isfile(path):
        basename = os.path.basename(path)
        _, extension = os.path.splitext(basename)
        if self.allow_all or extension.lower() in self.ext:
            try:
                # Read in binary mode since GRASP frequently has no-ascii
                # characters that breaks the open operation
                input_f = open(path, 'rb')
            except:
                raise RuntimeError, "ascii_reader: cannot open %s" % path
            buff = input_f.read()
            lines = buff.splitlines()

            # Arrays for data storage
            tx = numpy.zeros(0)
            ty = numpy.zeros(0)
            tdy = numpy.zeros(0)
            tdx = numpy.zeros(0)

            # The first good line of data will define whether
            # we have 2-column or 3-column ascii
            has_error_dx = None
            has_error_dy = None

            #Initialize counters for data lines and header lines.
            is_data = False
            # More than "5" lines of data is considered as actual
            # data unless that is the only data
            min_data_pts = 5
            # To count # of current data candidate lines
            candidate_lines = 0
            # To count total # of previous data candidate lines
            candidate_lines_previous = 0
            #minimum required number of columns of data
            lentoks = 2
            for line in lines:
                toks = self.splitline(line)
                # To remember the # of columns in the current line of data
                new_lentoks = len(toks)
                try:
                    if new_lentoks == 1 and not is_data:
                        ## If only one item in list, no longer data
                        raise ValueError
                    elif new_lentoks == 0:
                        ## If the line is blank, skip and continue on
                        ## In case of breaks within data sets.
                        continue
                    elif new_lentoks != lentoks and is_data:
                        ## If a footer is found, break the loop and save the data
                        break
                    elif new_lentoks != lentoks and not is_data:
                        ## If header lines are numerical
                        candidate_lines = 0
                        candidate_lines_previous = 0

                    #Make sure that all columns are numbers.
                    for colnum in range(len(toks)):
                        # Any non-floating point values throw ValueError
                        float(toks[colnum])

                    candidate_lines += 1
                    _x = float(toks[0])
                    _y = float(toks[1])
                    _dx = None
                    _dy = None

                    #If 5 or more lines, this is considering the set data
                    if candidate_lines >= min_data_pts:
                        is_data = True

                    # If a 3rd row is present, consider it dy
                    if new_lentoks > 2:
                        _dy = float(toks[2])
                    has_error_dy = False if _dy == None else True

                    # If a 4th row is present, consider it dx
                    if new_lentoks > 3:
                        _dx = float(toks[3])
                    has_error_dx = False if _dx == None else True

                    # Delete the previously stored lines of data candidates if
                    # the list is not data
                    if candidate_lines == 1 and -1 < candidate_lines_previous < min_data_pts and \
                            is_data == False:
                        try:
                            tx = numpy.zeros(0)
                            ty = numpy.zeros(0)
                            tdy = numpy.zeros(0)
                            tdx = numpy.zeros(0)
                        except:
                            pass

                    if has_error_dy == True:
                        tdy = numpy.append(tdy, _dy)
                    if has_error_dx == True:
                        tdx = numpy.append(tdx, _dx)
                    tx = numpy.append(tx, _x)
                    ty = numpy.append(ty, _y)

                    #To remember the # of columns on the current line
                    # for the next line of data
                    lentoks = new_lentoks
                    candidate_lines_previous = candidate_lines
                except ValueError:
                    # It is data and meet non - number, then stop reading
                    if is_data == True:
                        break
                    # Not yet in a data run: reset the state machine and
                    # keep scanning for the real data block.
                    lentoks = 2
                    has_error_dx = None
                    has_error_dy = None
                    #Reset # of lines of data candidates
                    candidate_lines = 0
                except:
                    pass

            input_f.close()
            if not is_data:
                return None
            # Sanity check
            if has_error_dy == True and not len(ty) == len(tdy):
                msg = "ascii_reader: y and dy have different length"
                raise RuntimeError, msg
            if has_error_dx == True and not len(tx) == len(tdx):
                msg = "ascii_reader: y and dy have different length"
                raise RuntimeError, msg
            # If the data length is zero, consider this as
            # though we were not able to read the file.
            if len(tx) == 0:
                raise RuntimeError, "ascii_reader: could not load file"

            #Let's re-order the data to make cal.
            # curve look better some cases
            ind = numpy.lexsort((ty, tx))
            x = numpy.zeros(len(tx))
            y = numpy.zeros(len(ty))
            dy = numpy.zeros(len(tdy))
            dx = numpy.zeros(len(tdx))
            output = Data1D(x, y, dy=dy, dx=dx)
            self.filename = output.filename = basename

            # ind is a permutation sorting primarily on tx, secondarily
            # on ty; writing tx[ind[i]] into x[i] produces sorted arrays.
            for i in ind:
                x[i] = tx[ind[i]]
                y[i] = ty[ind[i]]
                if has_error_dy == True:
                    dy[i] = tdy[ind[i]]
                if has_error_dx == True:
                    dx[i] = tdx[ind[i]]
            # Zeros in dx, dy
            if has_error_dx:
                dx[dx == 0] = _ZERO
            if has_error_dy:
                dy[dy == 0] = _ZERO
            #Data
            # NOTE(review): the x != 0 filter drops any q=0 point (and
            # would drop interior zeros too) -- presumably intentional to
            # remove the unusable q=0 bin; verify.
            output.x = x[x != 0]
            output.y = y[x != 0]
            output.dy = dy[x != 0] if has_error_dy == True\
                else numpy.zeros(len(output.y))
            output.dx = dx[x != 0] if has_error_dx == True\
                else numpy.zeros(len(output.x))

            output.xaxis("\\rm{Q}", 'A^{-1}')
            output.yaxis("\\rm{Intensity}", "cm^{-1}")

            # Store loading process information
            output.meta_data['loader'] = self.type_name
            if len(output.x) < 1:
                raise RuntimeError, "%s is empty" % path
            return output
    else:
        raise RuntimeError, "%s is not a file" % path
    return None
def read(self, path):
    """
    Load data file.

    Parses an IGOR/NIST .ABS reduced data file: header lines carrying
    wavelength, detector distance, transmission, thickness and beam
    center, followed by a 6-column data table (Q, I, dI, sigmaQ, meanQ,
    ShadowFactor).

    :param path: file path
    :return: Data1D object, or None
    :raise RuntimeError: when the file can't be opened
    :raise ValueError: when the length of the data vectors are
        inconsistent
    """
    if os.path.isfile(path):
        basename = os.path.basename(path)
        root, extension = os.path.splitext(basename)
        if extension.lower() in self.ext:
            try:
                input_f = open(path, 'r')
            except:
                raise RuntimeError, "abs_reader: cannot open %s" % path
            buff = input_f.read()
            lines = buff.split('\n')
            x = np.zeros(0)
            y = np.zeros(0)
            dy = np.zeros(0)
            dx = np.zeros(0)
            output = Data1D(x, y, dy=dy, dx=dx)
            detector = Detector()
            output.detector.append(detector)
            output.filename = basename

            # Flags set when the PREVIOUS line announced the section the
            # CURRENT line belongs to.
            is_info = False
            is_center = False
            is_data_started = False

            data_conv_q = None
            data_conv_i = None

            if has_converter == True and output.x_unit != '1/A':
                data_conv_q = Converter('1/A')
                # Test it
                data_conv_q(1.0, output.x_unit)

            if has_converter == True and output.y_unit != '1/cm':
                data_conv_i = Converter('1/cm')
                # Test it
                data_conv_i(1.0, output.y_unit)

            for line in lines:
                # Information line 1
                if is_info == True:
                    is_info = False
                    line_toks = line.split()

                    # Wavelength in Angstrom
                    try:
                        value = float(line_toks[1])
                        if has_converter == True and \
                                output.source.wavelength_unit != 'A':
                            conv = Converter('A')
                            output.source.wavelength = conv(
                                value, units=output.source.wavelength_unit)
                        else:
                            output.source.wavelength = value
                    except:
                        #goes to ASC reader
                        msg = "abs_reader: cannot open %s" % path
                        raise RuntimeError, msg

                    # Distance in meters
                    try:
                        value = float(line_toks[3])
                        if has_converter == True and \
                                detector.distance_unit != 'm':
                            conv = Converter('m')
                            detector.distance = conv(
                                value, units=detector.distance_unit)
                        else:
                            detector.distance = value
                    except:
                        #goes to ASC reader
                        msg = "abs_reader: cannot open %s" % path
                        raise RuntimeError, msg

                    # Transmission
                    try:
                        output.sample.transmission = float(line_toks[4])
                    except:
                        # Transmission is not a mandatory entry
                        pass

                    # Thickness in mm
                    try:
                        value = float(line_toks[5])
                        if has_converter == True and \
                                output.sample.thickness_unit != 'cm':
                            conv = Converter('cm')
                            output.sample.thickness = conv(
                                value, units=output.sample.thickness_unit)
                        else:
                            output.sample.thickness = value
                    except:
                        # Thickness is not a mandatory entry
                        pass

                #MON CNT  LAMBDA  DET ANG  DET DIST  TRANS  THICK
                #  AVE   STEP
                if line.count("LAMBDA") > 0:
                    is_info = True

                # Find center info line
                if is_center == True:
                    is_center = False
                    line_toks = line.split()
                    # Center in bin number
                    center_x = float(line_toks[0])
                    center_y = float(line_toks[1])

                    # Bin size (fixed 5.0 mm pixels for this detector)
                    if has_converter == True and \
                            detector.pixel_size_unit != 'mm':
                        conv = Converter('mm')
                        detector.pixel_size.x = conv(
                            5.0, units=detector.pixel_size_unit)
                        detector.pixel_size.y = conv(
                            5.0, units=detector.pixel_size_unit)
                    else:
                        detector.pixel_size.x = 5.0
                        detector.pixel_size.y = 5.0

                    # Store beam center in distance units
                    # Det 640 x 640 mm
                    if has_converter == True and \
                            detector.beam_center_unit != 'mm':
                        conv = Converter('mm')
                        detector.beam_center.x = conv(
                            center_x * 5.0, units=detector.beam_center_unit)
                        detector.beam_center.y = conv(
                            center_y * 5.0, units=detector.beam_center_unit)
                    else:
                        detector.beam_center.x = center_x * 5.0
                        detector.beam_center.y = center_y * 5.0

                    # Detector type
                    try:
                        detector.name = line_toks[7]
                    except:
                        # Detector name is not a mandatory entry
                        pass

                #BCENT(X,Y)  A1(mm)  A2(mm)  A1A2DIST(m)  DL/L
                #  BSTOP(mm)  DET_TYP
                if line.count("BCENT") > 0:
                    is_center = True

                # Parse the data
                if is_data_started == True:
                    toks = line.split()
                    try:
                        _x = float(toks[0])
                        _y = float(toks[1])
                        _dy = float(toks[2])
                        _dx = float(toks[3])

                        # NOTE(review): _dx is converted with data_conv_i
                        # under the data_conv_q guard -- looks like it
                        # should be data_conv_q; if data_conv_i is None
                        # this raises and the line is silently skipped.
                        # Confirm before changing.
                        if data_conv_q is not None:
                            _x = data_conv_q(_x, units=output.x_unit)
                            _dx = data_conv_i(_dx, units=output.x_unit)

                        if data_conv_i is not None:
                            _y = data_conv_i(_y, units=output.y_unit)
                            _dy = data_conv_i(_dy, units=output.y_unit)

                        x = np.append(x, _x)
                        y = np.append(y, _y)
                        dy = np.append(dy, _dy)
                        dx = np.append(dx, _dx)
                    except:
                        # Could not read this data line. If we are here
                        # it is because we are in the data section. Just
                        # skip it.
                        pass

                #The 6 columns are | Q (1/A) | I(Q) (1/cm) | std. dev.
                # I(Q) (1/cm) | sigmaQ | meanQ | ShadowFactor|
                if line.count("The 6 columns") > 0:
                    is_data_started = True

            # Sanity check
            if not len(y) == len(dy):
                msg = "abs_reader: y and dy have different length"
                raise ValueError, msg
            # If the data length is zero, consider this as
            # though we were not able to read the file.
            if len(x) == 0:
                raise ValueError, "ascii_reader: could not load file"

            # Drop q=0 points, which are unusable.
            output.x = x[x != 0]
            output.y = y[x != 0]
            output.dy = dy[x != 0]
            output.dx = dx[x != 0]
            if data_conv_q is not None:
                output.xaxis("\\rm{Q}", output.x_unit)
            else:
                output.xaxis("\\rm{Q}", 'A^{-1}')
            if data_conv_i is not None:
                output.yaxis("\\rm{Intensity}", output.y_unit)
            else:
                output.yaxis("\\rm{Intensity}", "cm^{-1}")

            # Store loading process information
            output.meta_data['loader'] = self.type_name
            return output
    else:
        raise RuntimeError, "%s is not a file" % path
    return None
def setUp(self):
    """Build a simple 9-point dataset (y = x on 1..9) with 10% errors."""
    x = np.arange(1.0, 10.0)
    y = np.arange(1.0, 10.0)
    dy = y / 10.0
    self.data = Data1D(x=x, y=y, dy=dy)
def read(self, path):
    # print "reader triggered"
    """
    Load data file

    Parses a SESANS text file: tab-separated two-column header lines
    (parameter name / value) followed by a >5-column data table whose
    columns are z, P, dP, dz, lambda, dlambda.

    :param path: file path
    :return: SESANSData1D object, or None
    :raise RuntimeError: when the file can't be opened
    :raise ValueError: when the length of the data vectors are
        inconsistent
    """
    if os.path.isfile(path):
        basename = os.path.basename(path)
        _, extension = os.path.splitext(basename)
        if self.allow_all or extension.lower() in self.ext:
            try:
                # Read in binary mode since GRASP frequently has no-ascii
                # characters that brakes the open operation
                input_f = open(path, 'rb')
            except:
                raise RuntimeError, "sesans_reader: cannot open %s" % path
            buff = input_f.read()
            lines = buff.splitlines()

            x = np.zeros(0)
            y = np.zeros(0)
            dy = np.zeros(0)
            lam = np.zeros(0)
            dlam = np.zeros(0)
            dx = np.zeros(0)

            #temp. space to sort data
            tx = np.zeros(0)
            ty = np.zeros(0)
            tdy = np.zeros(0)
            tlam = np.zeros(0)
            tdlam = np.zeros(0)
            tdx = np.zeros(0)
            output = Data1D(x=x, y=y, lam=lam, dy=dy, dx=dx, dlam=dlam,
                            isSesans=True)
            self.filename = output.filename = basename

            paramnames = []
            paramvals = []
            # Raw string columns; element 0 of each list is the header.
            zvals = []
            dzvals = []
            lamvals = []
            dlamvals = []
            Pvals = []
            dPvals = []

            for line in lines:
                # Initial try for CSV (split on ,)
                line = line.strip()
                toks = line.split('\t')
                if len(toks) == 2:
                    # name/value header pair
                    paramnames.append(toks[0])
                    paramvals.append(toks[1])
                if len(toks) > 5:
                    # data row: z, P, dP, dz, lambda, dlambda
                    zvals.append(toks[0])
                    dzvals.append(toks[3])
                    lamvals.append(toks[4])
                    dlamvals.append(toks[5])
                    Pvals.append(toks[1])
                    dPvals.append(toks[2])
                else:
                    continue

            x = []
            y = []
            lam = []
            dx = []
            dy = []
            dlam = []
            lam_header = lamvals[0].split()
            data_conv_z = None
            default_z_unit = "A"
            data_conv_P = None
            default_p_unit = " "
            # Adjust unit for axis (L^-3)
            lam_unit = lam_header[1].replace("[", "").replace("]", "")
            if lam_unit == 'AA':
                lam_unit = 'A'
            varheader = [zvals[0], dzvals[0], lamvals[0], dlamvals[0],
                         Pvals[0], dPvals[0]]
            # Skip index 0 in every column: it holds the header text.
            valrange = range(1, len(zvals))
            for i in valrange:
                x.append(float(zvals[i]))
                y.append(float(Pvals[i]))
                lam.append(float(lamvals[i]))
                dy.append(float(dPvals[i]))
                dx.append(float(dzvals[i]))
                dlam.append(float(dlamvals[i]))

            x, y, lam, dy, dx, dlam = [
                np.asarray(v, 'double') for v in (x, y, lam, dy, dx, dlam)
            ]

            input_f.close()

            output.x, output.x_unit = self._unit_conversion(
                x, lam_unit, default_z_unit)
            output.y = y
            output.y_unit = r'\AA^{-2} cm^{-1}'  # output y_unit added
            output.dx, output.dx_unit = self._unit_conversion(
                dx, lam_unit, default_z_unit)
            output.dy = dy
            output.lam, output.lam_unit = self._unit_conversion(
                lam, lam_unit, default_z_unit)
            output.dlam, output.dlam_unit = self._unit_conversion(
                dlam, lam_unit, default_z_unit)

            output.xaxis(r"\rm{z}", output.x_unit)
            output.yaxis(
                r"\rm{ln(P)/(t \lambda^2)}", output.y_unit
            )  # Adjust label to ln P/(lam^2 t), remove lam column refs

            # Store loading process information
            output.meta_data['loader'] = self.type_name
            #output.sample.thickness = float(paramvals[6])
            output.sample.name = paramvals[1]
            output.sample.ID = paramvals[0]
            # Parse the z-acceptance unit out of its header, e.g.
            # "Q_zmax [\AA^-1]".
            zaccept_unit_split = paramnames[7].split("[")
            zaccept_unit = zaccept_unit_split[1].replace("]", "")
            if zaccept_unit.strip() == r'\AA^-1' or \
                    zaccept_unit.strip() == r'\A^-1':
                zaccept_unit = "1/A"
            output.sample.zacceptance = (float(paramvals[7]), zaccept_unit)
            output.vars = varheader

            if len(output.x) < 1:
                raise RuntimeError, "%s is empty" % path
            return output
    else:
        raise RuntimeError, "%s is not a file" % path
    return None
def test_guinier_incompatible_length(self):
    """linearize_data must assert when x, y and dy lengths disagree."""
    guinier = invariant.Guinier()
    mismatched = Data1D(x=[1], y=[1, 2], dy=None)
    self.assertRaises(AssertionError, guinier.linearize_data, mismatched)
    mismatched = Data1D(x=[1, 1], y=[1, 2], dy=[1])
    self.assertRaises(AssertionError, guinier.linearize_data, mismatched)
def compute(self):
    """
    Compute the 1D and 3D correlation functions and the interface
    distribution function (IDF) of the extrapolated data, and hand the
    three transforms back through ``complete``.
    """
    qs = self.extrapolation.x
    iqs = self.extrapolation.y
    q = self.data.x
    background = self.background

    # Real-space axis matching the DCT bins, scaled by the measured
    # q spacing.
    xs = np.pi*np.arange(len(qs),dtype=np.float32)/(q[1]-q[0])/len(qs)
    self.ready(delay=0.0)
    self.update(msg="Fourier transform in progress.")
    self.ready(delay=0.0)
    if self.check_if_cancelled():
        return
    try:
        # ----- 1D Correlation Function -----
        gamma1 = dct((iqs-background)*qs**2)
        # Q is the scattering invariant, used to normalise both gamma1
        # and the IDF below.
        Q = gamma1.max()
        gamma1 /= Q

        if self.check_if_cancelled():
            return

        # ----- 3D Correlation Function -----
        # gamma3(R) = 1/R int_{0}^{R} gamma1(x) dx
        # trapz uses the trapezium rule to calculate the integral
        mask = xs <= 200.0 # Only calculate gamma3 up to x=200 (as this is all that's plotted)
        # gamma3 = [trapz(gamma1[:n], xs[:n])/xs[n-1] for n in range(2, len(xs[mask]) + 1)]
        # gamma3.insert(0, 1.0) # Gamma_3(0) is defined as 1
        n = len(xs[mask])
        gamma3 = cumtrapz(gamma1[:n], xs[:n])/xs[1:n]
        gamma3 = np.hstack((1.0, gamma3)) # Gamma_3(0) is defined as 1

        if self.check_if_cancelled():
            return

        # ----- Interface Distribution function -----
        idf = dct(-qs**4 * (iqs-background))

        if self.check_if_cancelled():
            return

        # Manually calculate IDF(0.0), since scipy DCT tends to give us a
        # very large negative value.
        # IDF(x) = int_0^inf q^4 * I(q) * cos(q*x) * dq
        # => IDF(0) = int_0^inf q^4 * I(q) * dq
        idf[0] = trapz(-qs**4 * (iqs-background), qs)
        idf /= Q # Normalise using scattering invariant

    except Exception as e:
        import logging
        logger = logging.getLogger(__name__)
        logger.error(e)

        self.update(msg="Fourier transform failed.")
        self.complete(transforms=None)
        return
    if self.isquit():
        return
    self.update(msg="Fourier transform completed.")

    transform1 = Data1D(xs, gamma1)
    # gamma3 was only computed on the masked (<= 200) region, so its
    # x axis is truncated to match.
    transform3 = Data1D(xs[xs <= 200], gamma3)
    idf = Data1D(xs, idf)

    transforms = (transform1, transform3, idf)

    self.complete(transforms=transforms)
def compute(self):
    """
    Compute the 1D and 3D correlation functions and the interface
    distribution function (IDF) of the extrapolated data, and hand the
    three transforms back through ``complete``.

    Unlike the masked variant, gamma3 is computed over the full
    real-space axis.
    """
    qs = self.extrapolation.x
    iqs = self.extrapolation.y
    q = self.data.x
    background = self.background

    # Real-space axis matching the DCT bins, scaled by the measured
    # q spacing.
    xs = np.pi * np.arange(len(qs), dtype=np.float32) / (q[1] - q[0]) / len(qs)
    self.ready(delay=0.0)
    self.update(msg="Fourier transform in progress.")
    self.ready(delay=0.0)
    if self.check_if_cancelled():
        return
    try:
        # ----- 1D Correlation Function -----
        gamma1 = dct((iqs - background) * qs**2)
        # Q is the scattering invariant, used to normalise both gamma1
        # and the IDF below.
        Q = gamma1.max()
        gamma1 /= Q

        if self.check_if_cancelled():
            return

        # ----- 3D Correlation Function -----
        # gamma3(R) = 1/R int_{0}^{R} gamma1(x) dx
        # numerical approximation for increasing R using the trapezium rule
        # Note: SasView 4.x series limited the range to xs <= 1000.0
        gamma3 = cumtrapz(gamma1, xs) / xs[1:]
        gamma3 = np.hstack((1.0, gamma3))  # gamma3(0) is defined as 1

        if self.check_if_cancelled():
            return

        # ----- Interface Distribution function -----
        idf = dct(-qs**4 * (iqs - background))

        if self.check_if_cancelled():
            return

        # Manually calculate IDF(0.0), since scipy DCT tends to give us a
        # very large negative value.
        # IDF(x) = int_0^inf q^4 * I(q) * cos(q*x) * dq
        # => IDF(0) = int_0^inf q^4 * I(q) * dq
        idf[0] = trapz(-qs**4 * (iqs - background), qs)
        idf /= Q  # Normalise using scattering invariant

    except Exception as e:
        import logging
        logger = logging.getLogger(__name__)
        logger.error(e)

        self.update(msg="Fourier transform failed.")
        self.complete(transforms=None)
        return
    if self.isquit():
        return
    self.update(msg="Fourier transform completed.")

    transform1 = Data1D(xs, gamma1)
    transform3 = Data1D(xs, gamma3)
    idf = Data1D(xs, idf)

    transforms = (transform1, transform3, idf)

    self.complete(transforms=transforms)
def read(self, path):
    """
    Load data file

    Parses a 4-column HFIR 1D file whose columns are, in order:
    q, I(q), dI, dq.  Non-numeric lines are silently skipped.

    :param path: file path
    :return: Data1D object, or None
    :raise RuntimeError: when the file can't be opened
    :raise ValueError: when the length of the data vectors are
        inconsistent
    """
    if os.path.isfile(path):
        basename = os.path.basename(path)
        root, extension = os.path.splitext(basename)
        if extension.lower() in self.ext:
            try:
                input_f = open(path,'r')
            except:
                raise RuntimeError, "hfir1d_reader: cannot open %s" % path
            buff = input_f.read()
            lines = buff.split('\n')
            x = numpy.zeros(0)
            y = numpy.zeros(0)
            dx = numpy.zeros(0)
            dy = numpy.zeros(0)
            output = Data1D(x, y, dx=dx, dy=dy)
            self.filename = output.filename = basename

            data_conv_q = None
            data_conv_i = None

            if has_converter == True and output.x_unit != '1/A':
                data_conv_q = Converter('1/A')
                # Test it
                data_conv_q(1.0, output.x_unit)

            if has_converter == True and output.y_unit != '1/cm':
                data_conv_i = Converter('1/cm')
                # Test it
                data_conv_i(1.0, output.y_unit)

            for line in lines:
                toks = line.split()
                try:
                    # Column layout: 0=q, 1=I, 2=dI, 3=dq.
                    _x = float(toks[0])
                    _y = float(toks[1])
                    _dx = float(toks[3])
                    _dy = float(toks[2])

                    if data_conv_q is not None:
                        _x = data_conv_q(_x, units=output.x_unit)
                        _dx = data_conv_q(_dx, units=output.x_unit)

                    if data_conv_i is not None:
                        _y = data_conv_i(_y, units=output.y_unit)
                        _dy = data_conv_i(_dy, units=output.y_unit)

                    x = numpy.append(x, _x)
                    y = numpy.append(y, _y)
                    dx = numpy.append(dx, _dx)
                    dy = numpy.append(dy, _dy)
                except:
                    # Couldn't parse this line, skip it
                    pass

            # Sanity check
            if not len(y) == len(dy):
                msg = "hfir1d_reader: y and dy have different length"
                raise RuntimeError, msg
            if not len(x) == len(dx):
                msg = "hfir1d_reader: x and dx have different length"
                raise RuntimeError, msg
            # If the data length is zero, consider this as
            # though we were not able to read the file.
            if len(x) == 0:
                raise RuntimeError, "hfir1d_reader: could not load file"

            output.x = x
            output.y = y
            output.dy = dy
            output.dx = dx
            if data_conv_q is not None:
                output.xaxis("\\rm{Q}", output.x_unit)
            else:
                output.xaxis("\\rm{Q}", 'A^{-1}')
            if data_conv_i is not None:
                output.yaxis("\\rm{Intensity}", output.y_unit)
            else:
                output.yaxis("\\rm{Intensity}", "cm^{-1}")

            # Store loading process information
            output.meta_data['loader'] = self.type_name
            return output
    else:
        raise RuntimeError, "%s is not a file" % path
    return None