import scipy.optimize as spo
from numpy.polynomial import Polynomial


def calculate_root(f: Polynomial, a, b, eps):
    """
    Return the root (assuming there is exactly one) of f on the interval (a, b),
    found with the secant (chord) method, together with the number of iterations.
    f is assumed to be twice differentiable on [a, b].
    """
    assert f(a) * f(b) < 0
    if f(a) > 0:
        f = -f
    # Pick the fixed endpoint c where f and f'' have the same sign,
    # and start the iteration from the opposite endpoint x.
    if f(b) * f.deriv(2)(b) > 0:
        x = a
        c = b
    elif f(a) * f.deriv(2)(a) > 0:
        x = b
        c = a
    else:
        raise ValueError("f * f'' must be positive at one of the endpoints")
    # Reference root used only to measure the actual error of the iteration.
    true_x = spo.brentq(f, a, b)
    iter_count = 0
    # MAX_ITERATION_COUNT is a module-level iteration cap (defined elsewhere).
    while abs(x - true_x) > eps and iter_count < MAX_ITERATION_COUNT:
        x -= (c - x) / (f(c) - f(x)) * f(x)
        iter_count += 1
    return x, iter_count
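
# Usage sketch (not part of the original code), assuming MAX_ITERATION_COUNT is
# defined in the same module (the value below is illustrative): find sqrt(2) as
# the root of x^2 - 2 on (1, 2).
MAX_ITERATION_COUNT = 1000  # illustrative value; the original constant is not shown

f = Polynomial([-2, 0, 1])                            # x^2 - 2, single root sqrt(2) in (1, 2)
approx_root, iterations = calculate_root(f, 1.0, 2.0, eps=1e-10)
print(approx_root, iterations)                        # root should be close to 1.41421356
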
import numpy as np
import numpy.polynomial.polynomial as pl
from numpy.polynomial import Polynomial


def lagrange(nodes):
    # f is the function being interpolated and make_l_k builds the k-th Lagrange
    # basis polynomial; both are defined elsewhere in the module.
    result = Polynomial([0])
    # np.poly builds a polynomial from its roots, but the coefficient order is the
    # reverse of the one the Polynomial constructor expects.
    w = Polynomial(np.poly(nodes)[::-1])
    deriv = w.deriv(1)
    for i in range(len(nodes)):
        # polyadd returns more than just the sum, hence the [0].
        result = pl.polyadd(result, make_l_k(i, nodes, w, deriv) * f(nodes[i]))[0]
    return result
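
# make_l_k is not shown above; a minimal sketch of what it could look like, assuming
# the standard Lagrange basis formula l_k(x) = w(x) / ((x - x_k) * w'(x_k)).
def make_l_k(k, nodes, w, deriv):
    # The division by (x - x_k) is exact because x_k is a root of w.
    return (w // Polynomial([-nodes[k], 1])) / deriv(nodes[k])
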
from numpy.polynomial import Polynomial


def calculate_root(f: Polynomial, a, b, eps):
    """
    Return the root approximation calculated with Newton's method together with
    the number of iterations needed to reach it. The root of the polynomial f is
    approximated on the interval [a, b] until the error is smaller than eps.
    """
    assert f(a) * f(b) < 0
    df = f.deriv()

    def newtons_lambda(x):
        return -1 / df(x)

    # sim is a simple-iteration helper module (not shown here); Newton's method is
    # run as a fixed-point iteration with lambda(x) = -1 / f'(x).
    return sim.calculate_root(f, newtons_lambda, a, b, eps)
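
# sim.calculate_root is not shown; a minimal sketch of a simple-iteration driver it
# might correspond to, treating Newton's method as the fixed-point iteration
# x_{k+1} = x_k + lambda(x_k) * f(x_k). The starting point and stopping rule below
# are assumptions, not the original implementation.
def simple_iteration_root(f, lam, a, b, eps, max_iterations=1000):
    x = (a + b) / 2
    for i in range(max_iterations):
        step = lam(x) * f(x)
        x += step
        if abs(step) < eps:
            return x, i + 1
    return x, max_iterations
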
import numpy as np
from numpy.polynomial import Polynomial
from sympy import Symbol, integrate


def make_a_k(nodes, p, a, b):
    w = Polynomial(np.poly(nodes)[::-1])
    result_coeffs = []
    x = Symbol("x")
    for k in range(len(nodes)):
        poly_part = np.polydiv(w.coef, np.array([-nodes[k], 1]))[0] / w.deriv()(nodes[k])
        # Multiply each term of the polynomial by the weight function by hand,
        # so that the product can be integrated afterwards.
        under_integral_list = []
        for i, coef in enumerate(poly_part):
            under_integral_list.append(coef * x**i * p(x))
        under_integral = sum(under_integral_list)
        # Integrate over [a, b].
        result_coeffs.append(integrate(under_integral, (x, a, b)))
    return result_coeffs
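
# Sanity-check sketch (not part of the original code), assuming a weight function
# p(x) = 1: on three equally spaced nodes the interpolatory quadrature coefficients
# should reduce to Simpson's rule, (b - a) / 6 * [1, 4, 1].
weights = make_a_k([1.0, 2.0, 3.0], lambda t: 1, 1, 3)
print(weights)  # expected: approximately [1/3, 4/3, 1/3]
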
import numpy.polynomial.polynomial as poly
from numpy.polynomial import Polynomial


def sturms_sequence(p: Polynomial):
    """ Returns Sturm's sequence of a given polynomial. """
    # Setting up the Sturm's sequence: the first two elements are p and p'.
    sturms_seq = []
    sturms_seq.append(p)
    sturms_seq.append(p.deriv())
    # Filling the Sturm's sequence list with the negated remainders of successive
    # divisions, until the remainder is the zero polynomial.
    f = -Polynomial(poly.polydiv(sturms_seq[-2].coef, sturms_seq[-1].coef)[1])
    while f.degree() != 0 or f.coef[0] != 0:
        sturms_seq.append(f.copy())
        f = -Polynomial(
            poly.polydiv(sturms_seq[-2].coef, sturms_seq[-1].coef)[1])
    return sturms_seq
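
# By Sturm's theorem, the number of distinct real roots of p in (a, b] is the drop
# in the number of sign changes of the sequence between a and b. A small helper
# sketch (the function names here are not from the original code):
def _sign_changes(values):
    signs = [v for v in values if v != 0]
    return sum(1 for s0, s1 in zip(signs, signs[1:]) if s0 * s1 < 0)


def count_roots_in_interval(p, a, b):
    seq = sturms_sequence(p)
    return _sign_changes([s(a) for s in seq]) - _sign_changes([s(b) for s in seq])


# e.g. count_roots_in_interval(Polynomial([-2, 0, 1]), 0, 2) should give 1 (sqrt(2)).
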
from numpy.polynomial import Polynomial


class MPolynomial:
    def __init__(self):
        self.roots = [complex(-0.5, 0), complex(0.5, 0)]
        self.update()

    def update(self):
        # Rebuild the numpy polynomial from the current list of roots.
        if len(self.roots) > 0:
            self.poly = Polynomial.fromroots(self.roots)
        else:
            self.poly = Polynomial([0])

    def add_root(self, root):
        self.roots.append(root)
        self.update()

    def remove_root(self, root_idx):
        assert root_idx >= 0 and root_idx < len(self.roots), "remove_root invalid index"
        del self.roots[root_idx]
        self.update()

    def get_roots(self):
        return self.roots

    def get_n_roots(self):
        return len(self.roots)

    def poly_as_str(self):
        # Build a nested kernel-style expression string: the sum over i of c_i * z**i,
        # with complex numbers encoded as float2 literals and the helpers
        # plus/mult/complexPower assumed to exist on the kernel side.
        prev = "(float2)(0.0f,0.0f)"
        for i, c in enumerate(self.poly.coef):
            formula = "plus(mult((float2)({}f,{}f), complexPower(z,(float2)({}.0f,0.0f))),{})".format(
                c.real, c.imag, i, prev)
            prev = formula
        return prev

    def deriv_poly_as_str(self):
        # Same expression string, built for the derivative of the polynomial.
        deriv = self.poly.deriv()
        prev = "(float2)(0.0f,0.0f)"
        for i, c in enumerate(deriv.coef):
            formula = "plus(mult((float2)({}f,{}f), complexPower(z,(float2)({}.0f,0.0f))),{})".format(
                c.real, c.imag, i, prev)
            prev = formula
        return prev
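
# Usage sketch (not part of the original code): manage roots and emit the expression
# strings for the polynomial and its derivative, e.g. for splicing into kernel source.
mp = MPolynomial()                   # starts with roots -0.5 and 0.5
mp.add_root(complex(0.0, 1.0))       # roots are now -0.5, 0.5 and i
print(mp.get_n_roots())              # 3
poly_expr = mp.poly_as_str()         # nested plus/mult/complexPower expression in z
deriv_expr = mp.deriv_poly_as_str()  # same for the derivative
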
]))
coef.reverse()
w = Polynomial(np.array(coef, dtype=np.float64))  # have to explicitly specify dtype,
                                                  # otherwise root finder doesn't work
print([w(x) for x in range(1, 21)])
print(w.roots())  # This uses eigenvalues of the companion matrix for roots

from scipy.optimize import root

print(root(w, 21.0))  # This uses optimization-based root finding

# Perturb the leading coefficient and watch the computed root near 20 drift.
for delta in (1e-8, 1e-6, 1e-4, 1e-2):
    coef[20] = 1 + delta
    w = Polynomial(np.array(coef, dtype=np.float64))
    print(root(w, 21.0))
coef[20] = 1

# Perturb the x**19 coefficient by 2**-23 (Wilkinson's classic experiment).
coef[19] = -210 - 2**(-23)
w = Polynomial(np.array(coef, dtype=np.float64))
print(root(w, 16.1))
print(root(w, 17.1))
print(w.roots())
coef[19] = -210

# Rough sensitivity estimate sum_l |a_l| * k**(l - 1) / |w'(k)| at several roots.
w = Polynomial(np.array(coef, dtype=np.float64))
wp = w.deriv()
[
    print("%g" % (sum([abs(coef[l]) * k**(l - 1) for l in range(len(coef))]) / abs(wp(k))))
    for k in (14, 16, 17, 20)
]
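
# The snippet above starts mid-statement: the elided setup builds the coefficient
# list. Given that the code evaluates w at x = 1..20 and perturbs the x**19
# coefficient -210 by 2**-23, it is almost certainly Wilkinson's polynomial
# prod_{k=1..20} (x - k). A minimal sketch of such a setup, under that assumption:
import numpy as np
from numpy.polynomial import Polynomial

# np.poly returns descending-order coefficients of prod (x - k); the snippet above
# then reverses the list so that coef[i] is the coefficient of x**i.
coef = list(np.poly(np.arange(1, 21)))
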
import logging

import numpy as np
import scipy.ndimage as ndi
import xarray as xr
from numpy.polynomial import Polynomial
from scipy.stats import norm


def measure_under_midline(
    fl: xr.DataArray,
    mid: Polynomial,
    n_points: int = 100,
    thickness: float = 0.0,
    order=1,
    norm_scale=1,
    flatten=True,
) -> np.ndarray:
    """
    Measure the intensity profile of the given image under the given midline at the
    given x-coordinates.

    Parameters
    ----------
    fl
        The fluorescence image to measure
    mid
        The midline under which to measure
    n_points
        The number of points to measure under
    thickness
        The thickness of the line to measure under
    order
        The interpolation order
    norm_scale
        The scale of the normal distribution used to weight the profile across the
        thickness of the line (only used if flatten is True)
    flatten
        If True, average across the thickness of the line and return a 1D profile;
        otherwise return the straightened 2D strip

    Notes
    -----
    Using thickness is slower, depending on the amount of thickness.

    On my machine (2GHz Intel Core i5), as of 12/4/19:

    0-thickness: 492 µs ± 16.6 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
    2-thickness: 1.99 ms ± 65.8 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
    10-thickness: 3.89 ms ± 92.1 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)

    Returns
    -------
    zs: np.ndarray
        The intensity profile of the image measured under the midline at the given
        x-coordinates.
    """
    # Make sure the image orientation matches the expected order of map_coordinates
    try:
        if thickness == 0:
            xs, ys = mid.linspace(n=n_points)
            fl = np.asarray(fl)
            return ndi.map_coordinates(fl, np.stack([xs, ys]), order=order)
        else:
            # Gets a bit wonky, but makes sense.
            # We need to get the normal lines from each point in the midline,
            # then measure under those lines.

            # First, get the coordinates of the midline
            xs, ys = mid.linspace(n=n_points)

            # Now, we get the angles of each normal vector
            der = mid.deriv()
            normal_slopes = -1 / der(xs)
            normal_thetas = np.arctan(normal_slopes)

            # We get the x and y components of the start/end of the normal vectors
            mag = thickness / 2
            x0 = np.cos(normal_thetas) * mag
            y0 = np.sin(normal_thetas) * mag
            x1 = np.cos(normal_thetas) * -mag
            y1 = np.sin(normal_thetas) * -mag

            # These are the actual coordinates of the starts/ends of the normal
            # vectors as they move from (x, y) coordinates in the midline
            xs0 = xs + x0
            xs1 = xs + x1
            ys0 = ys + y0
            ys1 = ys + y1

            # We need to measure in a consistent direction along the normal line.
            # If y0 < y1, we would be measuring in the opposite direction along the
            # line, so we need to flip the coordinates.
            for y0, y1, x0, x1, i in zip(ys0, ys1, xs0, xs1, range(len(xs0))):
                if y0 < y1:
                    tx = xs0[i]
                    xs0[i] = xs1[i]
                    xs1[i] = tx
                    ty = ys0[i]
                    ys0[i] = ys1[i]
                    ys1[i] = ty

            # One sample per pixel of thickness along each normal line
            n_line_pts = int(thickness)
            all_xs = np.linspace(xs0, xs1, n_line_pts)
            all_ys = np.linspace(ys0, ys1, n_line_pts)
            straightened = ndi.map_coordinates(fl, [all_xs, all_ys], order=order)

            if flatten:
                # Create a normal distribution centered around 0 with the given
                # scale (see scipy.stats.norm.pdf); the distribution is then tiled
                # to the same shape as the straightened pharynx, and the resulting
                # matrix is used as the weights for averaging.
                w = np.tile(
                    norm.pdf(np.linspace(-1, 1, n_line_pts), scale=norm_scale),
                    (n_points, 1),
                ).T
                profile = np.average(straightened, axis=0, weights=w)
                return profile
            else:
                return straightened
    except AttributeError:
        # This happens if the image is TL; it will then have `None` instead of
        # a midline object.
        pass
    except Exception as e:
        # Here, something actually went wrong
        logging.warning(f"measuring under midline failed with error {e}")

    return np.zeros((1, n_points))
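
# Usage sketch (not part of the original code), assuming a synthetic image and a
# straight midline fitted with numpy's Polynomial.fit; array sizes, node positions
# and the fit degree are illustrative only.
img = xr.DataArray(np.random.rand(128, 128))               # synthetic fluorescence image
mid = Polynomial.fit([10, 60, 110], [30, 64, 98], deg=1)   # straight-line midline y(x)

profile = measure_under_midline(img, mid, n_points=50)     # 0-thickness 1D profile
strip = measure_under_midline(img, mid, n_points=50, thickness=10,
                              flatten=False)               # straightened 2D strip
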