def __init__(self, x, y, dx=None, dy=None, smearer=None, data=None):
    """
    :param smearer: is an object of class QSmearer or SlitSmearer
        that will smear the theory data (slit smearing or resolution
        smearing) when set.

    The proper way to set the smearing object would be to
    do the following: ::

        from sas.models.qsmearing import smear_selection
        smearer = smear_selection(some_data)
        fitdata1d = FitData1D(x=[1, 3, ...], y=[3, 4, ..., 8],
                              dx=None, dy=[1, 2, ...], smearer=smearer)

    :Note: that some_data _HAS_ to be of class DataLoader.data_info.Data1D

    Setting it back to None will turn smearing off.
    """
    Data1D.__init__(self, x=x, y=y, dx=dx, dy=dy)
    self.num_points = len(x)
    self.sas_data = data
    self.smearer = smearer
    self._first_unsmeared_bin = None
    self._last_unsmeared_bin = None
    # Check error bars; if none are found, set them constant (=1).
    # The original test mixed list and array semantics (dy == [] and
    # dy.all() == 0); the check below handles None, empty, and all-zero
    # inputs uniformly.
    # TODO: Should provide an option for users to set the errors,
    # e.g. as a percentage, a constant, or from dy data
    if dy is None or len(dy) == 0 or numpy.all(numpy.asarray(dy) == 0):
        self.dy = numpy.ones(len(y))
    else:
        self.dy = numpy.asarray(dy).copy()

    ## Min Q-value
    # Skip the Q=0 point, especially when y(q=0) is None at x[0].
    if min(self.x) == 0.0 and self.x[0] == 0 and \
            not numpy.isfinite(self.y[0]):
        self.qmin = min(self.x[self.x != 0])
    else:
        self.qmin = min(self.x)
    ## Max Q-value
    self.qmax = max(self.x)

    # Range used for input to smearing
    self._qmin_unsmeared = self.qmin
    self._qmax_unsmeared = self.qmax
    # Identify the bin range for the unsmeared and smeared spaces
    self.idx = (self.x >= self.qmin) & (self.x <= self.qmax)
    self.idx_unsmeared = (self.x >= self._qmin_unsmeared) \
        & (self.x <= self._qmax_unsmeared)
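
# --- Usage sketch (not part of the class) ---
# A minimal, hedged illustration of the error-bar defaulting in
# FitData1D.__init__ above. It assumes FitData1D and its Data1D base
# class are importable from this module; all values are made up.
import numpy

sketch_x = numpy.array([0.01, 0.02, 0.03])
sketch_y = numpy.array([10.0, 8.0, 6.0])

# No dy given: the constructor falls back to constant errors of 1.
sketch_data = FitData1D(x=sketch_x, y=sketch_y, dx=None, dy=None)
assert numpy.all(sketch_data.dy == 1.0)

# Explicit dy: a *copy* is stored, so the caller's array is untouched.
sketch_data = FitData1D(x=sketch_x, y=sketch_y, dx=None, dy=sketch_y * 0.1)
assert numpy.allclose(sketch_data.dy, sketch_y * 0.1)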
def test_allowed_bins(self):
    x = numpy.asarray([0, 1, 2, 3])
    y = numpy.asarray([1, 1, 1, 1])
    dy = numpy.asarray([1, 1, 1, 1])
    g = invariant.Guinier()
    data = Data1D(x=x, y=y, dy=dy)
    self.assertEqual(g.get_allowed_bins(data), [False, True, True, True])

    data = Data1D(x=y, y=x, dy=dy)
    self.assertEqual(g.get_allowed_bins(data), [False, True, True, True])

    data = Data1D(x=dy, y=y, dy=x)
    self.assertEqual(g.get_allowed_bins(data), [False, True, True, True])
def __str__(self):
    """
    print data
    """
    _str = "%s\n" % LoadData1D.__str__(self)
    return _str
def read(self, path):
    """
    Load data file

    @param path: file path
    @return: Data1D object, or None
    @raise RuntimeError: when the file can't be opened
    @raise ValueError: when the lengths of the data vectors are inconsistent
    """
    if os.path.isfile(path):
        basename = os.path.basename(path)
        root, extension = os.path.splitext(basename)
        if extension.lower() in self.ext:
            try:
                input_f = open(path, 'r')
            except IOError:
                raise RuntimeError("ascii_reader: cannot open %s" % path)
            buff = input_f.read()
            input_f.close()
            lines = buff.split('\n')
            x = numpy.zeros(0)
            y = numpy.zeros(0)
            dy = numpy.zeros(0)
            output = Data1D(x, y, dy=dy)
            self.filename = output.filename = basename

            for line in lines:
                # Guard the parse: the original appended float(line)
                # unconditionally and crashed on blank or header lines.
                try:
                    x = numpy.append(x, float(line))
                except ValueError:
                    # Not a number; skip this line
                    pass
            output.x = x
            return output
    else:
        raise RuntimeError("%s is not a file" % path)
    return None
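
# --- Usage sketch (assumptions flagged in comments) ---
# Exercise the single-column read() above with a throw-away file.
# This assumes the surrounding reader class is named Reader, as the
# other data loaders are, and that '.txt' is in self.ext; the path
# itself is a temporary file created just for the demo.
import os
import tempfile

handle, demo_path = tempfile.mkstemp(suffix=".txt")
with os.fdopen(handle, "w") as demo_file:
    demo_file.write("0.001\n0.002\n0.003\n")

demo_reader = Reader()          # assumption: the class is named Reader
demo_data = demo_reader.read(demo_path)
print(demo_data.x)              # -> [ 0.001  0.002  0.003]
os.remove(demo_path)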
def __init__(self, x=None, y=None, dx=None, dy=None):
    """
    """
    if x is None:
        x = []
    if y is None:
        y = []
    PlotData1D.__init__(self, x, y, dx, dy)
    LoadData1D.__init__(self, x, y, dx, dy)
    self.id = None
    self.list_group_id = []
    self.group_id = None
    self.is_data = True
    self.path = None
    self.xtransform = None
    self.ytransform = None
    self.title = ""
    self.scale = None
def setUp(self):
    """
    Generate a power-law distribution. After extrapolating, we will
    verify that we obtain the scale and m parameters
    """
    self.scale = 1.5
    self.m = 3.0
    x = numpy.arange(0.0001, 0.1, 0.0001)
    y = numpy.asarray([self.scale * math.pow(q, -1.0 * self.m) for q in x])
    dy = y * .1
    self.data = Data1D(x=x, y=y, dy=dy)
def setUp(self):
    """
    Generate a Guinier distribution. After extrapolating, we will
    verify that we obtain the scale and rg parameters
    """
    self.scale = 1.5
    self.rg = 30.0
    x = numpy.arange(0.0001, 0.1, 0.0001)
    y = numpy.asarray(
        [self.scale * math.exp(-(q * self.rg)**2 / 3.0) for q in x])
    dy = y * .1
    self.data = Data1D(x=x, y=y, dy=dy)
def test_linearization(self):
    """
    Check that the linearization process filters out points
    that can't be transformed
    """
    x = numpy.asarray([0, 1, 2, 3])
    y = numpy.asarray([1, 1, 1, 1])
    g = invariant.Guinier()
    data_in = Data1D(x=x, y=y)
    data_out = g.linearize_data(data_in)
    x_out, y_out, dy_out = data_out.x, data_out.y, data_out.dy
    self.assertEqual(len(x_out), 3)
    self.assertEqual(len(y_out), 3)
    self.assertEqual(len(dy_out), 3)
def test_error_treatment(self):
    x = numpy.asarray([0, 1, 2, 3])
    y = numpy.asarray([1, 1, 1, 1])

    # These are all the values of the dy array that would cause
    # us to set all dy values to 1.0 at __init__ time.
    dy_list = [[], None, [0, 0, 0, 0]]

    for dy in dy_list:
        data = Data1D(x=x, y=y, dy=dy)
        inv = invariant.InvariantCalculator(data)
        self.assertEqual(len(inv._data.x), len(inv._data.dy))
        self.assertEqual(len(inv._data.dy), 4)
        for i in range(4):
            self.assertEqual(inv._data.dy[i], 1)
def _check_for_empty_data(self, data1d):
    """
    Creates an empty data set if no data is passed to the reader

    :param data1d: presumably a Data1D object
    """
    if data1d is None:
        self.errors = set()
        x_vals = numpy.empty(0)
        y_vals = numpy.empty(0)
        dx_vals = numpy.empty(0)
        dy_vals = numpy.empty(0)
        dxl = numpy.empty(0)
        dxw = numpy.empty(0)
        data1d = Data1D(x_vals, y_vals, dx_vals, dy_vals)
        data1d.dxl = dxl
        data1d.dxw = dxw
    return data1d
def test_cyl_times_square(self):
    """
    Fit of a cylinder model multiplied by a square-well
    structure factor
    """
    out = Loader().load("cyl_400_20.txt")
    data = Data1D(x=out.x, y=out.y, dx=out.dx, dy=out.dy)
    # Set up the composite model for the fit
    model1 = MultiplicationModel(CylinderModel(), SquareWellStructure())
    model1.setParam('background', 0.0)
    model1.setParam('sldCyl', 3e-006)
    model1.setParam('sldSolv', 0.0)
    model1.setParam('length', 420)
    model1.setParam('radius', 40)
    model1.setParam('scale_factor', 2)
    model1.setParam('volfraction', 0.04)
    model1.setParam('welldepth', 1.5)
    model1.setParam('wellwidth', 1.2)
    model = Model(model1)

    pars1 = ['length', 'radius', 'scale_factor']
    fitter = Fit('bumps')
    fitter.set_data(data, 1)
    fitter.set_model(model, 1, pars1)
    fitter.select_problem_for_fit(id=1, value=1)
    result1, = fitter.fit()

    self.assertTrue(result1)
    self.assertTrue(len(result1.pvec) >= 0)
    self.assertTrue(len(result1.stderr) >= 0)
    #print "results", list(zip(result1.pvec, result1.stderr))
    # Each fitted parameter should land within 3 sigma of its
    # expected value.
    self.assertTrue(
        math.fabs(result1.pvec[0] - 612) / 3.0 <= result1.stderr[0])
    self.assertTrue(
        math.fabs(result1.pvec[1] - 20.3) / 3.0 <= result1.stderr[1])
    self.assertTrue(
        math.fabs(result1.pvec[2] - 25) / 3.0 <= result1.stderr[2])
    self.assertTrue(result1.fitness / len(data.x) < 1.0)
def read(self, xml_file):
    """
    Validate and read in an xml_file file in the canSAS format.

    :param xml_file: A canSAS file path in proper XML format
    """
    # output - Final list of Data1D objects
    output = []
    # ns - Namespace hierarchy for current xml_file object
    ns_list = []

    # Check that the file exists
    if os.path.isfile(xml_file):
        basename = os.path.basename(xml_file)
        _, extension = os.path.splitext(basename)
        # If the file type is not allowed, return nothing
        if extension in self.ext or self.allow_all:
            # Get the file location and the default schema
            cansas_defaults = self.load_file_and_schema(xml_file)

            # Try to load the file, but raise an error if unable to.
            # Check that the file matches the XML schema
            try:
                if self.is_cansas(extension):
                    # Get each SASentry from the XML file and add it
                    # to a list.
                    entry_list = self.xmlroot.xpath(
                        '/ns:SASroot/ns:SASentry',
                        namespaces={'ns': cansas_defaults.get("ns")})
                    ns_list.append("SASentry")

                    # If there are multiple entries, modify the name
                    # so each is unique
                    increment = 0
                    # Parse each SASentry item
                    for entry in entry_list:
                        # Define a new Data1D object with zeroes for
                        # x_vals and y_vals
                        data1d = Data1D(numpy.empty(0), numpy.empty(0),
                                        numpy.empty(0), numpy.empty(0))
                        data1d.dxl = numpy.empty(0)
                        data1d.dxw = numpy.empty(0)

                        # If more than one SASentry, increment each
                        # in order
                        name = basename
                        if len(entry_list) - 1 > 0:
                            name += "_{0}".format(increment)
                            increment += 1

                        # Set the Data1D name and then parse the entry.
                        # The entry is appended to a list of entry values
                        data1d.filename = name
                        data1d.meta_data["loader"] = "CanSAS 1D"

                        # Get all preprocessing events and encoding
                        self.set_processing_instructions()
                        data1d.meta_data[PREPROCESS] = \
                            self.processing_instructions

                        # Parse the XML file
                        return_value, extras = \
                            self._parse_entry(entry, ns_list, data1d)
                        del extras[:]

                        return_value = self._final_cleanup(return_value)
                        output.append(return_value)
                else:
                    output.append("Invalid XML at: {0}".format(
                        self.find_invalid_xml()))
            except:
                # If the file does not match the schema, raise this error
                raise RuntimeError("%s cannot be read" % xml_file)
            # Return a list of parsed entries that dataloader can manage
            return output
    return None
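
# --- Usage sketch ---
# How the canSAS read() above is typically driven. The path below is
# hypothetical; read() returns None when the path is not a file, and
# otherwise a list of Data1D entries (or error strings for invalid
# XML). The class name Reader is an assumption matching the other
# data loaders.
demo_reader = Reader()
entries = demo_reader.read("sample_cansas.xml")   # hypothetical path
if entries:
    for entry in entries:
        print(entry.filename, len(entry.x))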
def read(self, path):
    """
    Load data file

    :param path: file path
    :return: Data1D object, or None
    :raise RuntimeError: when the file can't be opened
    :raise ValueError: when the lengths of the data vectors are inconsistent
    """
    if os.path.isfile(path):
        basename = os.path.basename(path)
        root, extension = os.path.splitext(basename)
        if extension.lower() in self.ext:
            try:
                input_f = open(path, 'r')
            except IOError:
                raise RuntimeError("hfir1d_reader: cannot open %s" % path)
            buff = input_f.read()
            input_f.close()
            lines = buff.split('\n')
            x = numpy.zeros(0)
            y = numpy.zeros(0)
            dx = numpy.zeros(0)
            dy = numpy.zeros(0)
            output = Data1D(x, y, dx=dx, dy=dy)
            self.filename = output.filename = basename

            data_conv_q = None
            data_conv_i = None
            if has_converter == True and output.x_unit != '1/A':
                data_conv_q = Converter('1/A')
                # Test it
                data_conv_q(1.0, output.x_unit)
            if has_converter == True and output.y_unit != '1/cm':
                data_conv_i = Converter('1/cm')
                # Test it
                data_conv_i(1.0, output.y_unit)

            for line in lines:
                toks = line.split()
                try:
                    # Columns are Q, I(Q), dI, dQ
                    _x = float(toks[0])
                    _y = float(toks[1])
                    _dy = float(toks[2])
                    _dx = float(toks[3])

                    if data_conv_q is not None:
                        _x = data_conv_q(_x, units=output.x_unit)
                        _dx = data_conv_q(_dx, units=output.x_unit)
                    if data_conv_i is not None:
                        _y = data_conv_i(_y, units=output.y_unit)
                        _dy = data_conv_i(_dy, units=output.y_unit)

                    x = numpy.append(x, _x)
                    y = numpy.append(y, _y)
                    dx = numpy.append(dx, _dx)
                    dy = numpy.append(dy, _dy)
                except:
                    # Couldn't parse this line, skip it
                    pass

            # Sanity check
            if not len(y) == len(dy):
                msg = "hfir1d_reader: y and dy have different length"
                raise RuntimeError(msg)
            if not len(x) == len(dx):
                msg = "hfir1d_reader: x and dx have different length"
                raise RuntimeError(msg)

            # If the data length is zero, consider this as
            # though we were not able to read the file.
            if len(x) == 0:
                raise RuntimeError("hfir1d_reader: could not load file")

            output.x = x
            output.y = y
            output.dy = dy
            output.dx = dx
            if data_conv_q is not None:
                output.xaxis("\\rm{Q}", output.x_unit)
            else:
                output.xaxis("\\rm{Q}", 'A^{-1}')
            if data_conv_i is not None:
                output.yaxis("\\rm{Intensity}", output.y_unit)
            else:
                output.yaxis("\\rm{Intensity}", "cm^{-1}")

            # Store loading process information
            output.meta_data['loader'] = self.type_name
            return output
    else:
        raise RuntimeError("%s is not a file" % path)
    return None
def read(self, path):
    """
    Load data file

    :param path: file path
    :return: Data1D object, or None
    :raise RuntimeError: when the file can't be opened
    :raise ValueError: when the lengths of the data vectors are inconsistent
    """
    if os.path.isfile(path):
        basename = os.path.basename(path)
        _, extension = os.path.splitext(basename)
        if self.allow_all or extension.lower() in self.ext:
            try:
                # Read in binary mode since GRASP frequently has
                # non-ascii characters that break the open operation
                input_f = open(path, 'rb')
            except IOError:
                raise RuntimeError("ascii_reader: cannot open %s" % path)
            buff = input_f.read()
            lines = buff.splitlines()

            x = numpy.zeros(0)
            y = numpy.zeros(0)
            dy = numpy.zeros(0)
            dx = numpy.zeros(0)

            # Temporary space used to sort the data
            tx = numpy.zeros(0)
            ty = numpy.zeros(0)
            tdy = numpy.zeros(0)
            tdx = numpy.zeros(0)

            output = Data1D(x, y, dy=dy, dx=dx)
            self.filename = output.filename = basename

            data_conv_q = None
            data_conv_i = None
            if has_converter == True and output.x_unit != '1/A':
                data_conv_q = Converter('1/A')
                # Test it
                data_conv_q(1.0, output.x_unit)
            if has_converter == True and output.y_unit != '1/cm':
                data_conv_i = Converter('1/cm')
                # Test it
                data_conv_i(1.0, output.y_unit)

            # The first good line of data will define whether
            # we have 2-column or 3-column ascii
            has_error_dx = None
            has_error_dy = None

            # Initialize counters for data lines and header lines.
            is_data = False  # Has more than 5 lines
            # More than "5" lines of data is considered as actual
            # data unless that is the only data
            mum_data_lines = 5
            # To count # of current data candidate lines
            i = -1
            # To count total # of previous data candidate lines
            i1 = -1
            # To count # of header lines
            j = -1
            # Helps to count # of header lines
            j1 = -1
            # Minimum required number of columns of data; ( <= 4).
            lentoks = 2
            for line in lines:
                # Initial try for CSV (split on ,)
                toks = line.split(',')
                # Now try SCSV (split on ;)
                if len(toks) < 2:
                    toks = line.split(';')
                # Now go for whitespace
                if len(toks) < 2:
                    toks = line.split()
                try:
                    # Make sure that all columns are numbers.
                    for colnum in range(len(toks)):
                        float(toks[colnum])

                    _x = float(toks[0])
                    _y = float(toks[1])

                    # Reset the header line counters
                    if j == j1:
                        j = 0
                        j1 = 0

                    if i > 1:
                        is_data = True

                    if data_conv_q is not None:
                        _x = data_conv_q(_x, units=output.x_unit)
                    if data_conv_i is not None:
                        _y = data_conv_i(_y, units=output.y_unit)

                    # If we have an extra token, check
                    # whether it can be interpreted as a
                    # third column.
                    _dy = None
                    if len(toks) > 2:
                        try:
                            _dy = float(toks[2])
                            if data_conv_i is not None:
                                _dy = data_conv_i(_dy, units=output.y_unit)
                        except:
                            # The third column is not a float, skip it.
                            pass

                    # If we haven't set the 3rd column
                    # flag, set it now.
                    if has_error_dy is None:
                        has_error_dy = False if _dy is None else True

                    # Check for dx
                    _dx = None
                    if len(toks) > 3:
                        try:
                            _dx = float(toks[3])
                            # dx is a Q resolution: convert it with the
                            # Q converter (the original used the
                            # intensity converter here by mistake).
                            if data_conv_q is not None:
                                _dx = data_conv_q(_dx, units=output.x_unit)
                        except:
                            # The 4th column is not a float, skip it.
                            pass

                    # If we haven't set the 4th column
                    # flag, set it now.
                    if has_error_dx is None:
                        has_error_dx = False if _dx is None else True

                    # After talking with PB, we decided to handle only
                    # 4 columns of data for now.
                    # Number of columns in the current line of data
                    new_lentoks = len(toks)

                    # If the previous column count is not equal to the
                    # current one, mark the previous lines as non-data
                    # and reset the dependent counters.
                    if lentoks != new_lentoks:
                        if is_data == True:
                            break
                        else:
                            i = -1
                            i1 = 0
                            j = -1
                            j1 = -1

                    # Delete the previously stored lines of data
                    # candidates if they are not data.
                    if i < 0 and -1 < i1 < mum_data_lines and \
                            is_data == False:
                        x = numpy.zeros(0)
                        y = numpy.zeros(0)
                    x = numpy.append(x, _x)
                    y = numpy.append(y, _y)

                    if has_error_dy == True:
                        # Delete the previously stored lines of
                        # data candidates if they are not data.
                        if i < 0 and -1 < i1 < mum_data_lines and \
                                is_data == False:
                            dy = numpy.zeros(0)
                        dy = numpy.append(dy, _dy)

                    if has_error_dx == True:
                        # Delete the previously stored lines of
                        # data candidates if they are not data.
                        if i < 0 and -1 < i1 < mum_data_lines and \
                                is_data == False:
                            dx = numpy.zeros(0)
                        dx = numpy.append(dx, _dx)

                    # Same for the temporary storage.
                    # Delete the previously stored lines of data
                    # candidates if they are not data.
                    if i < 0 and -1 < i1 < mum_data_lines and \
                            is_data == False:
                        tx = numpy.zeros(0)
                        ty = numpy.zeros(0)
                    tx = numpy.append(tx, _x)
                    ty = numpy.append(ty, _y)

                    if has_error_dy == True:
                        # Delete the previously stored lines of
                        # data candidates if they are not data.
                        if i < 0 and -1 < i1 < mum_data_lines and \
                                is_data == False:
                            tdy = numpy.zeros(0)
                        tdy = numpy.append(tdy, _dy)
                    if has_error_dx == True:
                        # Delete the previously stored lines of
                        # data candidates if they are not data.
                        if i < 0 and -1 < i1 < mum_data_lines and \
                                is_data == False:
                            tdx = numpy.zeros(0)
                        tdx = numpy.append(tdx, _dx)

                    # Reset i1 and flag lentoks for the next line
                    if lentoks < new_lentoks:
                        if is_data == False:
                            i1 = -1

                    # Remember the # of columns in the current line
                    # for the next line of data
                    lentoks = len(toks)

                    # Reset # of header lines and count
                    # # of data candidate lines
                    if j == 0 and j1 == 0:
                        i1 = i + 1
                    i += 1
                except:
                    # It is data and we met a non-number: stop reading
                    if is_data == True:
                        break
                    lentoks = 2
                    # Counting # of header lines
                    j += 1
                    if j == j1 + 1:
                        j1 = j
                    else:
                        j = -1
                    # Reset # of lines of data candidates
                    i = -1
                    # Couldn't parse this line, skip it
            input_f.close()

            # Sanity check
            if has_error_dy == True and not len(y) == len(dy):
                msg = "ascii_reader: y and dy have different length"
                raise RuntimeError(msg)
            if has_error_dx == True and not len(x) == len(dx):
                msg = "ascii_reader: x and dx have different length"
                raise RuntimeError(msg)

            # If the data length is zero, consider this as
            # though we were not able to read the file.
            if len(x) == 0:
                raise RuntimeError("ascii_reader: could not load file")

            # Re-order the data by Q so the calculated curve
            # looks better in some cases. Apply the permutation with
            # fancy indexing; the original element-by-element loop
            # double-indexed ind and scrambled the data.
            ind = numpy.lexsort((ty, tx))
            x = tx[ind]
            y = ty[ind]
            if has_error_dy == True:
                dy = tdy[ind]
            if has_error_dx == True:
                dx = tdx[ind]

            # Replace zeros in dx, dy
            if has_error_dx:
                dx[dx == 0] = _ZERO
            if has_error_dy:
                dy[dy == 0] = _ZERO

            # Data
            output.x = x[x != 0]
            output.y = y[x != 0]
            output.dy = dy[x != 0] if has_error_dy == True \
                else numpy.zeros(len(output.y))
            output.dx = dx[x != 0] if has_error_dx == True \
                else numpy.zeros(len(output.x))

            if data_conv_q is not None:
                output.xaxis("\\rm{Q}", output.x_unit)
            else:
                output.xaxis("\\rm{Q}", 'A^{-1}')
            if data_conv_i is not None:
                output.yaxis("\\rm{Intensity}", output.y_unit)
            else:
                output.yaxis("\\rm{Intensity}", "cm^{-1}")

            # Store loading process information
            output.meta_data['loader'] = self.type_name
            if len(output.x) < 1:
                raise RuntimeError("%s is empty" % path)
            return output
    else:
        raise RuntimeError("%s is not a file" % path)
    return None
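
# --- Runnable sketch of the lexsort reordering used above ---
# numpy.lexsort sorts by the *last* key first, so lexsort((ty, tx))
# orders points by Q (tx), breaking ties with intensity (ty). Fancy
# indexing then applies the whole permutation at once, as now done in
# the reader above.
import numpy

tx = numpy.array([0.3, 0.1, 0.2])
ty = numpy.array([3.0, 1.0, 2.0])
order = numpy.lexsort((ty, tx))
print(tx[order])    # -> [ 0.1  0.2  0.3]
print(ty[order])    # -> [ 1.  2.  3.]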
def test_guinier_incompatible_length(self):
    g = invariant.Guinier()
    data_in = Data1D(x=[1], y=[1, 2], dy=None)
    self.assertRaises(AssertionError, g.linearize_data, data_in)
    data_in = Data1D(x=[1, 1], y=[1, 2], dy=[1])
    self.assertRaises(AssertionError, g.linearize_data, data_in)
def read(self, path):
    """
    Load data file.

    :param path: file path
    :return: Data1D object, or None
    :raise RuntimeError: when the file can't be opened
    :raise ValueError: when the lengths of the data vectors are inconsistent
    """
    if os.path.isfile(path):
        basename = os.path.basename(path)
        root, extension = os.path.splitext(basename)
        if extension.lower() in self.ext:
            try:
                input_f = open(path, 'r')
            except IOError:
                raise RuntimeError("abs_reader: cannot open %s" % path)
            buff = input_f.read()
            input_f.close()
            lines = buff.split('\n')
            x = numpy.zeros(0)
            y = numpy.zeros(0)
            dy = numpy.zeros(0)
            dx = numpy.zeros(0)
            output = Data1D(x, y, dy=dy, dx=dx)
            detector = Detector()
            output.detector.append(detector)
            output.filename = basename

            is_info = False
            is_center = False
            is_data_started = False

            data_conv_q = None
            data_conv_i = None
            if has_converter == True and output.x_unit != '1/A':
                data_conv_q = Converter('1/A')
                # Test it
                data_conv_q(1.0, output.x_unit)
            if has_converter == True and output.y_unit != '1/cm':
                data_conv_i = Converter('1/cm')
                # Test it
                data_conv_i(1.0, output.y_unit)

            for line in lines:
                # Information line 1
                if is_info == True:
                    is_info = False
                    line_toks = line.split()

                    # Wavelength in Angstrom
                    try:
                        value = float(line_toks[1])
                        if has_converter == True and \
                                output.source.wavelength_unit != 'A':
                            conv = Converter('A')
                            output.source.wavelength = conv(
                                value, units=output.source.wavelength_unit)
                        else:
                            output.source.wavelength = value
                    except:
                        # Goes to the ASC reader
                        msg = "abs_reader: cannot open %s" % path
                        raise RuntimeError(msg)

                    # Distance in meters
                    try:
                        value = float(line_toks[3])
                        if has_converter == True and \
                                detector.distance_unit != 'm':
                            conv = Converter('m')
                            detector.distance = conv(
                                value, units=detector.distance_unit)
                        else:
                            detector.distance = value
                    except:
                        # Goes to the ASC reader
                        msg = "abs_reader: cannot open %s" % path
                        raise RuntimeError(msg)

                    # Transmission
                    try:
                        output.sample.transmission = float(line_toks[4])
                    except:
                        # Transmission is not a mandatory entry
                        pass

                    # Thickness in mm
                    try:
                        value = float(line_toks[5])
                        if has_converter == True and \
                                output.sample.thickness_unit != 'cm':
                            conv = Converter('cm')
                            output.sample.thickness = conv(
                                value, units=output.sample.thickness_unit)
                        else:
                            output.sample.thickness = value
                    except:
                        # Thickness is not a mandatory entry
                        pass

                # MON CNT  LAMBDA  DET ANG  DET DIST  TRANS  THICK
                #  AVE  STEP
                if line.count("LAMBDA") > 0:
                    is_info = True

                # Find center info line
                if is_center == True:
                    is_center = False
                    line_toks = line.split()
                    # Center in bin number
                    center_x = float(line_toks[0])
                    center_y = float(line_toks[1])

                    # Bin size
                    if has_converter == True and \
                            detector.pixel_size_unit != 'mm':
                        conv = Converter('mm')
                        detector.pixel_size.x = conv(
                            5.0, units=detector.pixel_size_unit)
                        detector.pixel_size.y = conv(
                            5.0, units=detector.pixel_size_unit)
                    else:
                        detector.pixel_size.x = 5.0
                        detector.pixel_size.y = 5.0

                    # Store beam center in distance units
                    # Det 640 x 640 mm
                    if has_converter == True and \
                            detector.beam_center_unit != 'mm':
                        conv = Converter('mm')
                        detector.beam_center.x = conv(
                            center_x * 5.0, units=detector.beam_center_unit)
                        detector.beam_center.y = conv(
                            center_y * 5.0, units=detector.beam_center_unit)
                    else:
                        detector.beam_center.x = center_x * 5.0
                        detector.beam_center.y = center_y * 5.0

                    # Detector type
                    try:
                        detector.name = line_toks[7]
                    except:
                        # Detector name is not a mandatory entry
                        pass

                # BCENT(X,Y)  A1(mm)  A2(mm)  A1A2DIST(m)  DL/L
                #  BSTOP(mm)  DET_TYP
                if line.count("BCENT") > 0:
                    is_center = True

                # Parse the data
                if is_data_started == True:
                    toks = line.split()
                    try:
                        _x = float(toks[0])
                        _y = float(toks[1])
                        _dy = float(toks[2])
                        _dx = float(toks[3])

                        if data_conv_q is not None:
                            _x = data_conv_q(_x, units=output.x_unit)
                            # dx is a Q resolution: convert it with the
                            # Q converter (the original used the
                            # intensity converter here by mistake).
                            _dx = data_conv_q(_dx, units=output.x_unit)
                        if data_conv_i is not None:
                            _y = data_conv_i(_y, units=output.y_unit)
                            _dy = data_conv_i(_dy, units=output.y_unit)

                        x = numpy.append(x, _x)
                        y = numpy.append(y, _y)
                        dy = numpy.append(dy, _dy)
                        dx = numpy.append(dx, _dx)
                    except:
                        # Could not read this data line. If we are here
                        # it is because we are in the data section. Just
                        # skip it.
                        pass

                # The 6 columns are | Q (1/A) | I(Q) (1/cm) | std. dev.
                # I(Q) (1/cm) | sigmaQ | meanQ | ShadowFactor|
                if line.count("The 6 columns") > 0:
                    is_data_started = True

            # Sanity check
            if not len(y) == len(dy):
                msg = "abs_reader: y and dy have different length"
                raise ValueError(msg)
            # If the data length is zero, consider this as
            # though we were not able to read the file.
            if len(x) == 0:
                raise ValueError("abs_reader: could not load file")

            output.x = x[x != 0]
            output.y = y[x != 0]
            output.dy = dy[x != 0]
            output.dx = dx[x != 0]
            if data_conv_q is not None:
                output.xaxis("\\rm{Q}", output.x_unit)
            else:
                output.xaxis("\\rm{Q}", 'A^{-1}')
            if data_conv_i is not None:
                output.yaxis("\\rm{Intensity}", output.y_unit)
            else:
                output.yaxis("\\rm{Intensity}", "cm^{-1}")

            # Store loading process information
            output.meta_data['loader'] = self.type_name
            return output
    else:
        raise RuntimeError("%s is not a file" % path)
    return None
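
# --- Sketch of the optional unit-conversion guard used above ---
# The readers only build a Converter when the data_util package is
# importable and the file's units differ from the canonical 1/A and
# 1/cm. The stand-in converter below is an assumption, purely to show
# the control flow; the real Converter comes from the data_util
# package and actually rescales between units.
def demo_converter(value, units=None):
    # Stand-in for Converter('1/A')(value, units=...); it passes the
    # value through instead of rescaling.
    return value

has_converter = True            # set False when the import fails
x_unit = '1/nm'                 # hypothetical unit read from the file
data_conv_q = None
if has_converter and x_unit != '1/A':
    data_conv_q = demo_converter

_x = 0.5
if data_conv_q is not None:
    _x = data_conv_q(_x, units=x_unit)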
def setUp(self):
    x = numpy.asarray([1., 2., 3., 4., 5., 6., 7., 8., 9.])
    y = numpy.asarray([1., 2., 3., 4., 5., 6., 7., 8., 9.])
    dy = y / 10.0
    self.data = Data1D(x=x, y=y, dy=dy)