Example #1
def compute_quartic_spline_of_right_heaviside_function():
    r"""
    Get a spline approximation of the right-continuous Heaviside step function,
    enforcing that all prescribed derivatives at 0 and 1 are zero.
    """
    import numpy as np
    import matplotlib.pyplot as plt
    from scipy.interpolate import BPoly
    poly = BPoly.from_derivatives([0, 1], [[0, 0, 0, 0], [1, 0, 0, 0]],
                                  orders=[4])
    # NB: this cubic fit (orders=[3]) replaces the quartic constructed above
    poly = BPoly.from_derivatives([0, 1], [[0, 0, 0], [1, 0, 0]], orders=[3])

    def basis(x, p):
        return x[:, np.newaxis]**np.arange(p + 1)[np.newaxis, :]

    interp_nodes = (-np.cos(np.linspace(0, np.pi, 5)) + 1) / 2
    basis_mat = basis(interp_nodes, 4)
    coef = np.linalg.inv(basis_mat).dot(poly(interp_nodes))
    print(coef)
    xx = np.linspace(0, 1, 101)
    print(np.absolute(basis(xx, 4).dot(coef) - poly(xx)).max())
    # plt.plot(xx,basis(xx,4).dot(coef))
    # plt.plot(xx,poly(xx))

    eps = 0.1
    a, b = 0, eps
    xx = np.linspace(a, b, 101)
    plt.plot(xx, basis((xx - a) / (b - a), 4).dot(coef))

    def f(x):
        return 6 * (x / eps)**2 - 8 * (x / eps)**3 + 3 * (x / eps)**4

    plt.plot(xx, f(xx))
    plt.show()
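
A quick sanity check of the construction above (a sketch that is not part of the original snippet, assuming only NumPy and SciPy): with value-plus-derivative data at the two endpoints, BPoly.from_derivatives reproduces the classic smoothstep polynomials in closed form.

import numpy as np
from scipy.interpolate import BPoly

xx = np.linspace(0, 1, 101)
cubic = BPoly.from_derivatives([0, 1], [[0, 0], [1, 0]])          # f and f' pinned
quintic = BPoly.from_derivatives([0, 1], [[0, 0, 0], [1, 0, 0]])  # f, f' and f'' pinned
assert np.allclose(cubic(xx), 3 * xx**2 - 2 * xx**3)
assert np.allclose(quintic(xx), 10 * xx**3 - 15 * xx**4 + 6 * xx**5)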
Example #2
    def _create_from_control_points(self, control_points, tangents, scale):
        """
        Creates the FiberSource instance from control points, and a specified 
        mode to compute the tangents.

        Parameters
        ----------
        control_points : ndarray, shape (N, 3)
        tangents : {'incoming', 'outgoing', 'symmetric'}
        scale : float
            Multiplication factor. This is useful when the coordinates are
            given dimensionless and we want a specific size for the phantom.
        """
        # Compute time instants ts, from 0. to 1.
        # (each interval proportional to the distance between control points)
        nb_points = control_points.shape[0]
        dists = np.zeros(nb_points)
        dists[1:] = np.sqrt((np.diff(control_points, axis=0) ** 2).sum(1))
        ts = dists.cumsum()
        length = ts[-1]
        ts = ts / np.max(ts)

        # Create interpolation functions (piecewise polynomials) for x, y and z
        derivatives = np.zeros((nb_points, 3))

        # The derivatives at starting and ending points are normal
        # to the surface of a sphere.
        derivatives[0, :] = -control_points[0]
        derivatives[-1, :] = control_points[-1]
 
        # As for other derivatives, we use discrete approx
        if tangents == 'incoming':
            derivatives[1:-1, :] = (control_points[1:-1] - control_points[:-2])
        elif tangents == 'outgoing':
            derivatives[1:-1, :] = (control_points[2:] - control_points[1:-1])
        elif tangents == 'symmetric':
            derivatives[1:-1, :] = (control_points[2:] - control_points[:-2])
        else:
            raise ValueError('tangents should be one of the following: '
                             'incoming, outgoing, symmetric')
 
        derivatives = (derivatives.T / np.sqrt((derivatives ** 2).sum(1))).T \
                    * length
               
        self.x_poly = BPoly.from_derivatives(ts, 
               scale * np.vstack((control_points[:, 0], derivatives[:, 0])).T)
        self.y_poly = BPoly.from_derivatives(ts, 
               scale * np.vstack((control_points[:, 1], derivatives[:, 1])).T)
        self.z_poly = BPoly.from_derivatives(ts, 
               scale * np.vstack((control_points[:, 2], derivatives[:, 2])).T)
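
The pattern above feeds each knot a [value, tangent] pair, so BPoly.from_derivatives builds a C1 cubic Hermite curve per coordinate. A self-contained sketch of that pattern with toy data (not the FiberSource class):

import numpy as np
from scipy.interpolate import BPoly

ts = np.array([0.0, 0.4, 1.0])
vals = np.array([0.0, 1.0, 0.5])
tangents = np.array([2.0, 0.0, -1.0])
poly = BPoly.from_derivatives(ts, np.vstack((vals, tangents)).T)
assert np.allclose(poly(ts), vals)                 # hits the control values
assert np.allclose(poly.derivative()(ts), tangents)  # honours the tangents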
Example #3
    def test_orders_too_high(self):
        m, k = 5, 12
        xi, yi = self._make_random_mk(m, k)

        pp = BPoly.from_derivatives(xi, yi, orders=2*k-1)   # this is still ok
        assert_raises(ValueError, BPoly.from_derivatives,   # but this is not
                **dict(xi=xi, yi=yi, orders=2*k))
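
The limit tested here follows from counting conditions: with k derivatives known at each endpoint there are 2k conditions per interval, so the largest representable order is 2k - 1. A toy sketch reproducing the same ValueError with k = 2:

from scipy.interpolate import BPoly

xi = [0.0, 1.0]
yi = [[0.0, 1.0], [1.0, 0.0]]   # k = 2: value and first derivative at each endpoint
BPoly.from_derivatives(xi, yi, orders=3)       # order 2*k - 1 = 3 is fine
try:
    BPoly.from_derivatives(xi, yi, orders=4)   # order 2*k = 4 needs 5 conditions
except ValueError as err:
    print(err)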
Example #4
    def Generate_Random_Trace_Function(self):
        # t_pre = arange(0, self.random_length)*self.dt
        self.t_max_pre = self.random_length * self.dt

        # t_init = linspace(0, self.t_max_pre, num=self.N, endpoint=True)
        t_init = np.sort(random.uniform(self.dt, self.t_max_pre, self.N))
        t_init = np.insert(t_init, 0, 0.0)
        t_init = np.append(t_init, self.t_max_pre)

        y = 2.0 * (random.random(self.N) - 0.5)

        y = y * 0.8 * self.HalfLength / max(abs(y))
        y = np.insert(y, 0, 0.0)
        y = np.append(y, 0.0)

        # t1 = timeit.default_timer()
        # self.random_track_f = interp1d(t_init, y, kind='cubic')
        # t2 = timeit.default_timer()
        # Try algorithm setting the derivative to 0 at each point
        yder = [[y[i], 0] for i in range(len(y))]
        self.random_track_f = BPoly.from_derivatives(t_init, yder)
        # t3 = timeit.default_timer()

        # print('Time for simple method: {:.3f}ms'.format((t2-t1)*1000))
        # print('Time for complex method: {:.3f}ms'.format((t3 - t2) * 1000))

        self.new_track_generated = True
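
The `yder = [[y[i], 0] ...]` trick pins the slope to zero at every knot, which keeps the random track smooth but flat at each turning point. A minimal sketch (toy knots) verifying that behaviour:

import numpy as np
from scipy.interpolate import BPoly

t = np.array([0.0, 1.0, 2.5, 4.0])
y = np.array([0.0, 1.0, -0.5, 0.0])
pp = BPoly.from_derivatives(t, [[val, 0.0] for val in y])
assert np.allclose(pp(t), y)                  # passes through the knots
assert np.allclose(pp.derivative()(t), 0.0)   # zero slope at every knot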
Example #5
    def test_random_12(self):
        m, k = 5, 12
        xi, yi = self._make_random_mk(m, k)
        pp = BPoly.from_derivatives(xi, yi)

        for order in range(k//2):
            assert_allclose(pp(xi), [yy[order]  for yy in yi])
            pp = pp.derivative()
Example #6
    def test_random_12(self):
        m, k = 5, 12
        xi, yi = self._make_random_mk(m, k)
        pp = BPoly.from_derivatives(xi, yi)

        for order in range(k // 2):
            assert_allclose(pp(xi), [yy[order] for yy in yi])
            pp = pp.derivative()
Example #7
    def test_zeros(self):
        xi = [0, 1, 2, 3]
        yi = [[0, 0], [0], [0, 0], [0, 0]]  # NB: will have to raise the degree
        pp = BPoly.from_derivatives(xi, yi)
        assert_(pp.c.shape == (4, 3))

        ppd = pp.derivative()
        for xp in [0., 0.1, 1., 1.1, 1.9, 2., 2.5]:
            assert_allclose([pp(xp), ppd(xp)], [0., 0.])
Example #8
    def test_zeros(self):
        xi = [0, 1, 2, 3]
        yi = [[0, 0], [0], [0, 0], [0, 0]]  # NB: will have to raise the degree
        pp = BPoly.from_derivatives(xi, yi)
        assert_(pp.c.shape == (4, 3))

        ppd = pp.derivative()
        for xp in [0., 0.1, 1., 1.1, 1.9, 2., 2.5]:
            assert_allclose([pp(xp), ppd(xp)], [0., 0.])
Example #9
    def interpolate_setpoints(self):
        time, position, velocity = self.get_setpoints_unzipped()
        yi = []
        for i in range(0, len(time)):
            yi.append([position[i], velocity[i]])

        bpoly = BPoly.from_derivatives(time, yi)
        indices = np.linspace(0, self.duration, self.duration * 100)
        return [indices, bpoly(indices)]
Example #10
    def fonction(self):
        """Fonction wrapper vers la fonction de scipy BPoly.from_derivatives

        """
        pts = self.points_tries
        xl = [P.x for P in pts]
        yl = [P.y for P in pts]
        yl_cum = list(zip(yl, self._derivees()))
        return BPoly.from_derivatives(xl, yl_cum)
Example #11
    def half_amp_dur(self, waveforms):
        """
        Half amplitude duration of a spike

        Parameters
        ----------
        waveforms : ndarray
            An nSpikes x nElectrodes x nSamples array

        Returns
        -------
        had: float
            The half-amplitude duration for the channel (electrode) that has
            the strongest (highest amplitude) signal. Units are ms
        """
        from scipy import optimize

        best_chan = np.argmax(np.max(np.mean(waveforms, 0), 1))
        mn_wvs = np.mean(waveforms, 0)
        wvs = mn_wvs[best_chan, :]
        half_amp = np.max(wvs) / 2
        half_amp = np.zeros_like(wvs) + half_amp
        t = np.linspace(0, 1 / 1000., 50)
        # create functions from the data using PiecewisePolynomial
        from scipy.interpolate import BPoly
        p1 = BPoly.from_derivatives(t, wvs[:, np.newaxis])
        p2 = BPoly.from_derivatives(t, half_amp[:, np.newaxis])
        xs = np.r_[t, t]
        xs.sort()
        x_min = xs.min()
        x_max = xs.max()
        x_mid = xs[:-1] + np.diff(xs) / 2
        roots = set()
        for val in x_mid:
            root, infodict, ier, mesg = optimize.fsolve(
                lambda x: p1(x) - p2(x), val, full_output=True)
            if ier == 1 and x_min < root < x_max:
                roots.add(root[0])
        roots = list(roots)
        if len(roots) > 1:
            r = np.abs(np.diff(roots[0:2]))[0]
        else:
            r = np.nan
        return r
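
Note that `wvs[:, np.newaxis]` supplies only function values, no derivatives, so `BPoly.from_derivatives` degenerates to a piecewise-linear interpolant here and the `fsolve` loop is finding crossings of straight segments with the half-amplitude level. A small check of that degenerate case (toy data):

import numpy as np
from scipy.interpolate import BPoly

t = np.linspace(0, 1, 6)
v = np.sin(2 * np.pi * t)
p = BPoly.from_derivatives(t, v[:, np.newaxis])   # values only, no derivative data
tt = np.linspace(0, 1, 101)
assert np.allclose(p(tt), np.interp(tt, t, v))    # identical to linear interpolation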
Example #12
    def test_orders_too_high(self):
        m, k = 5, 12
        xi, yi = self._make_random_mk(m, k)

        pp = BPoly.from_derivatives(xi, yi,
                                    orders=2 * k - 1)  # this is still ok
        assert_raises(
            ValueError,
            BPoly.from_derivatives,  # but this is not
            **dict(xi=xi, yi=yi, orders=2 * k))
Example #13
    def test_orders_local(self):
        m, k = 7, 12
        xi, yi = self._make_random_mk(m, k)

        orders = [o + 1 for o in range(m)]
        for i, x in enumerate(xi[1:-1]):
            pp = BPoly.from_derivatives(xi, yi, orders=orders)
            for j in range(orders[i] // 2 + 1):
                assert_allclose(pp(x - 1e-12), pp(x + 1e-12))
                pp = pp.derivative()
            assert_(not np.allclose(pp(x - 1e-12), pp(x + 1e-12)))
Example #14
    def _construct_polynomials(self):
        polys = []
        _yd = self._autofill_yd()

        for j in range(self._y.shape[1]):

            y_with_derivatives = np.vstack((self._y[:, j], _yd[:, j])).T
            poly = BPoly.from_derivatives(self._x, y_with_derivatives)
            polys.append(poly)

        return polys
Example #15
    def test_orders_local(self):
        m, k = 7, 12
        xi, yi = self._make_random_mk(m, k)

        orders = [o + 1 for o in range(m)]
        for i, x in enumerate(xi[1:-1]):
            pp = BPoly.from_derivatives(xi, yi, orders=orders)
            for j in range(orders[i] // 2 + 1):
                assert_allclose(pp(x - 1e-12), pp(x + 1e-12))
                pp = pp.derivative()
            assert_(not np.allclose(pp(x - 1e-12), pp(x + 1e-12)))
Example #16
    def _setup_phi_interpolator(self):
        """ Setup interpolater for phi, works on scalar and arrays """

        # Generate piecewise 3th order polynomials to connect the discrete
        # values of phi obtained from from Poisson, using dphi/dr
        self._interpolator_set = True

        if (self.scale):
            phi_and_derivs = numpy.vstack([[self.phi], [self.dphidr1]]).T
        else:
            phi_and_derivs = numpy.vstack([[self.phihat], [self.dphidrhat1]]).T

        self._phi_poly = BPoly.from_derivatives(self.r, phi_and_derivs)
Example #17
    def test_orders_global(self):
        m, k = 5, 12
        xi, yi = self._make_random_mk(m, k)

        # ok, this is confusing. Local polynomials will be of the order 5
        # which means that up to the 2nd derivative will be used at each point
        order = 5
        pp = BPoly.from_derivatives(xi, yi, orders=order)

        for j in range(order//2+1):
            assert_allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12))
            pp = pp.derivative()
        assert_(not np.allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12)))

        # now repeat with `order` being even: on each interval, it uses
        # order//2 derivatives at the right-hand endpoint and
        # order//2 + 1 derivatives at the left-hand endpoint
        order = 6
        pp = BPoly.from_derivatives(xi, yi, orders=order)
        for j in range(order//2):
            assert_allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12))
            pp = pp.derivative()
        assert_(not np.allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12)))
Example #18
    def test_orders_global(self):
        m, k = 5, 12
        xi, yi = self._make_random_mk(m, k)

        # ok, this is confusing. Local polynomials will be of the order 5
        # which means that up to the 2nd derivative will be used at each point
        order = 5
        pp = BPoly.from_derivatives(xi, yi, orders=order)

        for j in range(order // 2 + 1):
            assert_allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12))
            pp = pp.derivative()
        assert_(not np.allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12)))

        # now repeat with `order` being even: on each interval, it uses
        # order//2 derivatives at the right-hand endpoint and
        # order//2 + 1 derivatives at the left-hand endpoint
        order = 6
        pp = BPoly.from_derivatives(xi, yi, orders=order)
        for j in range(order // 2):
            assert_allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12))
            pp = pp.derivative()
        assert_(not np.allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12)))
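
The two tests above encode the split rule for a global `orders=k`: roughly half of the k+1 conditions per interval come from each endpoint, so only the lower derivatives stay continuous across breakpoints. An empirical sketch (random toy data) that simply measures the jumps instead of asserting them:

import numpy as np
from scipy.interpolate import BPoly

rng = np.random.default_rng(0)
m, k = 5, 12
xi = np.sort(rng.random(m + 1))
yi = rng.random((m + 1, k))          # k derivatives known at each of the m+1 points
for order in (5, 6):
    pp = BPoly.from_derivatives(xi, yi, orders=order)
    for nu in range(4):
        jump = np.max(np.abs(pp(xi[1:-1] - 1e-12) - pp(xi[1:-1] + 1e-12)))
        print(order, nu, jump)       # tiny while continuity holds, no longer small after
        pp = pp.derivative()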
Example #19
def calc_obj_dynamics(s0, S, p, index):
    # get pose and vel traj from S
    offset = 6 * index
    pose_traj_K = [s0[offset:offset + 3]] + [
        S[offset + k * p.len_s:offset + k * p.len_s + 3] for k in range(p.K)
    ]
    vel_traj_K = [s0[offset + 3:offset + 6]] + [
        S[offset + k * p.len_s + 3:offset + k * p.len_s + 6]
        for k in range(p.K)
    ]

    # make spline functions
    spline_funcs = []
    for dim in range(3):
        x = np.linspace(0., p.T_final, p.K + 1)
        y = np.zeros((p.K + 1, 2))
        for k in range(p.K + 1):
            y[k, :] = pose_traj_K[k][dim], vel_traj_K[k][dim]
        spline_funcs += [
            BPoly.from_derivatives(x, y, orders=3, extrapolate=False)
        ]

    # use to interpolate pose
    pose_traj_T = []
    k = 0
    times = np.linspace(0., p.T_final, p.T_steps + 1)
    for t in times:
        if not t % p.delT_phase:  # this is a phase
            pose_traj_T += [pose_traj_K[k]]
            k += 1
        else:  # get from spline
            pose_traj_T += [[
                spline_funcs[0](t), spline_funcs[1](t), spline_funcs[2](t)
            ]]

    # use FD to get velocities and accels
    vel_traj_T = [np.zeros(3)]
    for t in range(1, p.T_steps + 1):
        p_tm1 = pose_traj_T[t - 1]
        p_t = pose_traj_T[t]
        vel_traj_T += [calc_deriv(p_t, p_tm1, p.delT)]

    accel_traj_T = [np.zeros(3)]
    for t in range(1, p.T_steps + 1):
        v_tm1 = vel_traj_T[t - 1]
        v_t = vel_traj_T[t]
        accel_traj_T += [calc_deriv(v_t, v_tm1, p.delT)]

    return pose_traj_T, vel_traj_T, accel_traj_T
Example #20
    def __init__(self, x, y, axis=0, extrapolate=None):
        x = _asarray_validated(x, check_finite=False, as_inexact=True)
        y = _asarray_validated(y, check_finite=False, as_inexact=True)

        axis = axis % y.ndim

        xp = x.reshape((x.shape[0],) + (1,) * (y.ndim - 1))
        yp = np.rollaxis(y, axis)

        dk = self._find_derivatives(xp, yp)
        data = np.hstack((yp[:, None, ...], dk[:, None, ...]))

        _b = BPoly.from_derivatives(x, data, orders=None)
        super(PchipInterpolator_new, self).__init__(_b.c, _b.x, extrapolate=extrapolate)
        self.axis = axis
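
Example #20 rebuilds a PCHIP-style interpolator by stacking values and estimated slopes and handing them to BPoly.from_derivatives. For any choice of slopes this construction matches SciPy's CubicHermiteSpline; a short sketch with arbitrary (non-PCHIP) slopes:

import numpy as np
from scipy.interpolate import BPoly, CubicHermiteSpline

x = np.array([0.0, 1.0, 2.0, 4.0])
y = np.array([0.0, 1.0, 0.0, 2.0])
dydx = np.array([1.0, 0.0, -1.0, 0.5])        # toy slopes, not PCHIP-estimated ones
bp = BPoly.from_derivatives(x, np.column_stack((y, dydx)))
ch = CubicHermiteSpline(x, y, dydx)
xx = np.linspace(0, 4, 201)
assert np.allclose(bp(xx), ch(xx))            # same cubic Hermite interpolant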
Example #21
    def interpolate_setpoints(self):
        if len(self.setpoints) == 1:
            self.interpolated_position = lambda time: self.setpoints[0].position
            self.interpolated_velocity = lambda time: self.setpoints[0].velocity
            return

        time, position, velocity = self.get_setpoints_unzipped()
        yi = []
        for i in range(0, len(time)):
            yi.append([position[i], velocity[i]])

        # We do a cubic spline here, just like the ros joint_trajectory_action_controller,
        # see https://wiki.ros.org/robot_mechanism_controllers/JointTrajectoryActionController
        self.interpolated_position = BPoly.from_derivatives(time, yi)
        self.interpolated_velocity = self.interpolated_position.derivative()
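
Because the velocity column is passed as the first derivative at each setpoint, calling .derivative() on the fitted position polynomial recovers exactly the commanded velocities at the setpoint times. A compact check with toy setpoints:

import numpy as np
from scipy.interpolate import BPoly

time = np.array([0.0, 1.0, 2.0])
position = np.array([0.0, 0.5, 0.5])
velocity = np.array([0.0, 1.0, 0.0])
pos_f = BPoly.from_derivatives(time, np.column_stack((position, velocity)))
vel_f = pos_f.derivative()
assert np.allclose(pos_f(time), position)
assert np.allclose(vel_f(time), velocity)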
Example #22
 def __init__(self, x, y, yp=None, method='secant'):
     if yp is None:
         yp = slopes(x, y, method=method, monotone=True)
     yyp = [z for z in zip(y, yp)]
     bpoly = BPoly.from_derivatives(x, yyp, orders=3)
     super(Pchip, self).__init__(bpoly.c, x)
Example #23
    def Generate_Random_Trace_Function(self):
        if (self.turning_points is None) or (self.turning_points == []):

            number_of_turning_points = int(
                np.floor(self.length_of_experiment *
                         self.track_relative_complexity))

            y = 2.0 * (np.random.random(number_of_turning_points) - 0.5)
            y = y * 0.5 * self.HalfLength

            if number_of_turning_points == 0:
                y = np.append(y, 0.0)
                y = np.append(y, 0.0)
            elif number_of_turning_points == 1:
                if self.start_random_target_position_at is not None:
                    y[0] = self.start_random_target_position_at
                elif self.end_random_target_position_at is not None:
                    y[0] = self.end_random_target_position_at
                else:
                    pass
                y = np.append(y, y[0])
            else:
                if self.start_random_target_position_at is not None:
                    y[0] = self.start_random_target_position_at
                if self.end_random_target_position_at is not None:
                    y[-1] = self.end_random_target_position_at

        else:
            number_of_turning_points = len(self.turning_points)
            if number_of_turning_points == 0:
                raise ValueError('You should not be here!')
            elif number_of_turning_points == 1:
                y = np.array([self.turning_points[0], self.turning_points[0]])
            else:
                y = np.array(self.turning_points)

        number_of_timesteps = np.ceil(self.length_of_experiment /
                                      self.dt_simulation)
        self.t_max_pre = number_of_timesteps * self.dt_simulation

        random_samples = number_of_turning_points - 2 if number_of_turning_points - 2 >= 0 else 0

        # t_init = linspace(0, self.t_max_pre, num=self.track_relative_complexity, endpoint=True)
        if self.turning_points_period == 'random':
            t_init = np.sort(
                np.random.uniform(self.dt_simulation,
                                  self.t_max_pre - self.dt_simulation,
                                  random_samples))
            t_init = np.insert(t_init, 0, 0.0)
            t_init = np.append(t_init, self.t_max_pre)
        elif self.turning_points_period == 'regular':
            t_init = np.linspace(0,
                                 self.t_max_pre,
                                 num=random_samples + 2,
                                 endpoint=True)
        else:
            raise NotImplementedError(
                'There is no mode corresponding to this value of turning_points_period variable'
            )

        # Try algorithm setting the derivative to 0 at each point
        if self.interpolation_type == '0-derivative-smooth':
            yder = [[y[i], 0] for i in range(len(y))]
            random_track_f = BPoly.from_derivatives(t_init, yder)
        elif self.interpolation_type == 'linear':
            random_track_f = interp1d(t_init, y, kind='linear')
        elif self.interpolation_type == 'previous':
            random_track_f = interp1d(t_init, y, kind='previous')
        else:
            raise ValueError('Unknown interpolation type.')

        # Truncate the target position so that it is not greater than 80% of the track length
        def random_track_f_truncated(time):

            target_position = random_track_f(time)
            if target_position > 0.8 * self.HalfLength:
                target_position = 0.8 * self.HalfLength
            elif target_position < -0.8 * self.HalfLength:
                target_position = -0.8 * self.HalfLength

            return target_position

        self.random_track_f = random_track_f_truncated

        self.new_track_generated = True
Example #24
    def __linescan_poly(self, parameterDict, v_max, a_max):
        """ Generate a Bernstein piecewise polynomial for a smooth one-line scanning curve,
        from the acquisition parameter settings, using piecewise spline interpolation """
        sequence_time = parameterDict['sequence_time'] * 1e6  # s --> µs
        l_scan = self.axis_length[0]  # µm
        c_scan = self.axis_centerpos[0]  # µm
        v_scan = self.axis_step_size[0] / sequence_time  # µm/µs
        dt_fix = 1e-2  # time between two fix points where the acceleration changes (infinite jerk) - µs

        # positions at fixed points
        p1 = c_scan
        p2 = p2p = p1 + l_scan / 2
        t_deacc = (v_scan + v_max) / a_max
        d_deacc = v_scan * t_deacc + 0.5 * (-a_max) * t_deacc**2
        p3 = p3p = p2 + d_deacc
        p4 = p4p = c_scan - (p3 - c_scan)
        p5 = p5p = p1 - l_scan / 2
        p6 = p1
        pos = [p1, p2, p2p, p3, p3p, p4, p4p, p5, p5p, p6]

        # time at fixed points
        t1 = 0
        t_scanline = l_scan / v_scan
        t2 = t1 + t_scanline / 2
        t2p = t2 + dt_fix
        t3 = t2 + t_deacc
        t3p = t3 + dt_fix
        t4 = t3 + abs(p4 - p3) / v_max
        t4p = t4 + dt_fix
        t_acc = t_deacc
        t5 = t4 + t_acc
        t5p = t5 + dt_fix
        t6 = t5 + t_scanline / 2
        time = [t1, t2, t2p, t3, t3p, t4, t4p, t5, t5p, t6]

        # velocity at fixed points
        v1 = v_scan
        v2 = v2p = v_scan
        v3 = v3p = v4 = v4p = -v_max
        v5 = v5p = v6 = v_scan
        vel = [v1, v2, v2p, v3, v3p, v4, v4p, v5, v5p, v6]

        # acceleration at fixed points
        a1 = a2 = 0
        a2p = a3 = -a_max
        a3p = a4 = 0
        a4p = a5 = a_max
        a5p = a6 = 0
        acc = [a1, a2, a2p, a3, a3p, a4, a4p, a5, a5p, a6]
        # if p3 is already past the center of the scan, it means that the max velocity
        # was never reached; in this case, remove two fixed points and change the values
        # to the current velocity and time in the middle of the flyback
        if p3 <= c_scan:
            t_mid = np.roots([-a_max / 2, v_scan, p2 - c_scan])[0]
            v_mid = -a_max * t_mid + v_scan
            del pos[5:7]
            del vel[5:7]
            del acc[5:7]
            del time[5:7]
            pos[3:5] = [c_scan, c_scan]
            vel[3:5] = [v_mid, v_mid]
            acc[3:5] = [-a_max, a_max]
            time[3] = time[2] + t_mid
            time[4] = time[3] + dt_fix
            time[5] = time[3] + t_mid
            time[6] = time[5] + dt_fix
            time[7] = time[5] + t_scanline / 2
        # generate Bernstein polynomial with piecewise spline interpolation with the fixed points
        # give positions, velocity, acceleration, and time of fixed points
        yder = np.array([pos, vel, acc]).T.tolist()

        bpoly = BPoly.from_derivatives(time, yder)  # bpoly time unit: µs
        # return polynomial, that can be evaluated at any timepoints you want
        # return fixed points position and time
        return bpoly, time, pos
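
The `np.array([pos, vel, acc]).T.tolist()` pattern gives BPoly.from_derivatives three conditions per fixed point, so each segment is a quintic that honours position, velocity and acceleration at its ends. A reduced sketch with arbitrary made-up values (not the scanner settings):

import numpy as np
from scipy.interpolate import BPoly

time = [0.0, 1.0, 3.0]
pos = [0.0, 2.0, 2.0]
vel = [1.0, 0.5, 0.0]
acc = [0.0, -1.0, 0.0]
yder = np.array([pos, vel, acc]).T.tolist()     # [[p, v, a], ...] per fixed point
bpoly = BPoly.from_derivatives(time, yder)
assert np.allclose(bpoly(time), pos)
assert np.allclose(bpoly.derivative()(time), vel)
assert np.allclose(bpoly.derivative(2)(time), acc)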
Example #25
    def __final_positioning(self, initpos, v_max, a_max):
        """ Generate a polynomial for a smooth final positioning scanning curve, from the acquisition parameter settings """
        v_max = -np.sign(initpos) * v_max
        a_max = -np.sign(initpos) * a_max
        dt_fix = 1e-2  # time between two fix points where the acceleration changes (infinite jerk)  # µs

        # positions at fixed points
        p1 = p1p = initpos
        t_deacc = (v_max) / a_max
        d_deacc = 0.5 * a_max * t_deacc**2
        p2 = p2p = initpos + d_deacc
        p3 = p3p = -d_deacc
        p4 = 0
        pos = [p1, p1p, p2, p2p, p3, p3p, p4]

        # time at fixed points
        t1 = 0
        t1p = dt_fix
        t2 = t_deacc
        t2p = t2 + dt_fix
        t3 = t2 + abs(abs(p3 - p2) / v_max)
        t3p = t3 + dt_fix
        t4 = t3 + t_deacc
        time = [t1, t1p, t2, t2p, t3, t3p, t4]

        # velocity at fixed points
        v1 = v1p = 0
        v2 = v2p = v3 = v3p = v_max
        v4 = 0
        vel = [v1, v1p, v2, v2p, v3, v3p, v4]

        # acceleration at fixed points
        a1 = 0
        a1p = a2 = a_max
        a2p = a3 = 0
        a3p = a4 = -a_max
        acc = [a1, a1p, a2, a2p, a3, a3p, a4]

        # if p2 is already past the center of the scan, it means that the max velocity
        # was never reached; in this case, remove two fixed points and change the values
        # to the current velocity and time in the middle of the flyback
        if abs(p2 - p1) >= abs(initpos / 2):
            t_mid = np.sqrt(abs(initpos / a_max))
            v_mid = a_max * t_mid
            del pos[4:6]
            del vel[4:6]
            del acc[4:6]
            del time[4:6]
            pos[2:4] = [initpos / 2, initpos / 2]
            vel[2:4] = [v_mid, v_mid]
            acc[2:4] = [a_max, -a_max]
            time[2] = t_mid
            time[3] = t_mid + dt_fix
            time[4] = 2 * t_mid

        # generate Bernstein polynomial with piecewise spline interpolation with the fixed points
        # give positions, velocity, acceleration, and time of fixed points
        yder = np.array([pos, vel, acc]).T.tolist()
        bpoly = BPoly.from_derivatives(time, yder)  # bpoly time unit: µs

        # get number of evaluation points
        n_eval = int(time[-1] / self.__timestep)
        # get evaluation times for one line
        t_eval = np.linspace(0, time[-1], n_eval)
        # evaluate polynomial
        poly_eval = bpoly(t_eval)
        # return evaluated polynomial at the timestep I want
        return poly_eval
Example #26
qsm1 = qc0 - 1
print('Fourier Sine transform of x^2 / [(1+x^2) x^1], with tilt q =', qsm1)
Tsm1 = FourierSine(x, q=qsm1, N=N, lowring=False)
Tsm1.check(Fsm1)
ysm1, Sm1 = Tsm1(Fsm1, extrap=extrap)
assert all(y == ysm1)

qcm2 = qsm1 - 1
print('Fourier Cosine transform of x^2 / [(1+x^2) x^2], with tilt q =', qcm2)
Tcm2 = FourierCosine(x, q=qcm2, N=N, lowring=False)
Tcm2.check(Fcm2)
ycm2, Cm2 = Tcm2(Fcm2, extrap=extrap)
assert all(y == ycm2)
Cm2_cspline = CubicSpline(* symmetrize(ycm2, Cm2, parity=0))
Cm2_hermite = BPoly.from_derivatives(* symmetrize(ycm2, Cm2, dGdy=-Sm1, parity=0))
Cm2_hermite5 = BPoly.from_derivatives(* symmetrize(ycm2, Cm2, dGdy=-Sm1, d2Gdy2=-C0, parity=0))

qsm3 = qcm2 - 1
print('Fourier Sine transform of x^2 / [(1+x^2) x^3], with tilt q =', qsm3)
Tsm3 = FourierSine(x, q=qsm3, N=N, lowring=False)
Tsm3.check(Fsm3)
ysm3, Sm3 = Tsm3(Fsm3, extrap=extrap)
assert all(y == ysm3)
Sm3_cspline = CubicSpline(* symmetrize(ysm3, Sm3, parity=1))
Sm3_hermite = BPoly.from_derivatives(* symmetrize(ysm3, Sm3, dGdy=Cm2, parity=1))
Sm3_hermite5 = BPoly.from_derivatives(* symmetrize(ysm3, Sm3, dGdy=Cm2, d2Gdy2=-Sm1, parity=1))

qcm4 = qsm3 - 1
print('Fourier Cosine transform of x^2 / [(1+x^2) x^4], with tilt q =', qcm4)
Tcm4 = FourierCosine(x, q=qcm4, N=N, lowring=False)
Example #27
def to_usgscsm(driver):
    """
    Formatter to create USGSCSM meta data from a driver.

    Parameters
    ----------
    driver : Driver
        Concrete driver for the image that meta data is being generated for.

    Returns
    -------
    dict
        The USGSCSM compatible meta data as a dictionary.
    """
    isd_data = {}

    # general information
    isd_data['image_lines'] = driver.image_lines
    isd_data['image_samples'] = driver.image_samples
    isd_data['name_platform'] = driver.platform_name
    isd_data['name_sensor'] = driver.sensor_name

    # shared exterior orientation
    body_radii = driver.target_body_radii
    isd_data['radii'] = {
        'semimajor': body_radii[0],
        'semiminor': body_radii[2],
        'unit': 'km'
    }
    positions, velocities, position_times = driver.sensor_position
    isd_data['sensor_position'] = {
        'positions': positions,
        'velocities': velocities,
        'unit': 'm'
    }

    sun_positions, sun_velocities, _ = driver.sun_position
    isd_data['sun_position'] = {
        'positions': sun_positions,
        'velocities': sun_velocities,
        'unit': 'm'
    }

    # shared isd keywords for Framer and Linescanner
    if isinstance(driver, LineScanner) or isinstance(driver, Framer):
        # exterior orientation for just Framer and LineScanner
        frame_chain = driver.frame_chain
        sensor_to_target = frame_chain.compute_rotation(
            driver.sensor_frame_id, driver.target_frame_id)
        quaternions = sensor_to_target.quats
        isd_data['sensor_orientation'] = {'quaternions': quaternions}

        # interior orientation
        isd_data['detector_sample_summing'] = driver.sample_summing
        isd_data['detector_line_summing'] = driver.line_summing
        isd_data['focal_length_model'] = {'focal_length': driver.focal_length}
        isd_data['detector_center'] = {
            'line': driver.detector_center_line,
            'sample': driver.detector_center_sample
        }
        isd_data['starting_detector_line'] = driver.detector_start_line
        isd_data['starting_detector_sample'] = driver.detector_start_sample
        isd_data['focal2pixel_lines'] = driver.focal2pixel_lines
        isd_data['focal2pixel_samples'] = driver.focal2pixel_samples
        isd_data['optical_distortion'] = driver.usgscsm_distortion_model

        # general information
        isd_data['reference_height'] = {
            "maxheight": 1000,
            "minheight": -1000,
            "unit": "m"
        }

    # shared interpolation needed for LineScanner and Radar
    if isinstance(driver, LineScanner) or isinstance(driver, Radar):
        interp_times = np.linspace(position_times[0], position_times[-1],
                                   int(driver.image_lines / 64))

        if velocities is not None:
            positions = np.asarray(positions)
            velocities = np.asarray(velocities)
            pos_x, pos_y, pos_z = np.asarray(positions).T
            vel_x, vel_y, vel_z = np.asarray(velocities).T
            x_interp = BPoly.from_derivatives(position_times,
                                              np.vstack((pos_x, vel_x)).T,
                                              extrapolate=True)
            y_interp = BPoly.from_derivatives(position_times,
                                              np.vstack((pos_y, vel_y)).T,
                                              extrapolate=True)
            z_interp = BPoly.from_derivatives(position_times,
                                              np.vstack((pos_z, vel_z)).T,
                                              extrapolate=True)
            interp_pos = np.vstack(
                (x_interp(interp_times), y_interp(interp_times),
                 z_interp(interp_times))).T
            interp_vel = np.vstack((x_interp(interp_times,
                                             nu=1), y_interp(interp_times,
                                                             nu=1),
                                    z_interp(interp_times, nu=1))).T
        else:
            position_interp = interp1d(position_times, positions)
            interp_pos = position_interp(interp_times)
            interp_vel = None
        isd_data['sensor_position'] = {
            'positions': interp_pos,
            'velocities': interp_vel,
            'unit': 'm'
        }
        if len(interp_times) > 1:
            isd_data['dt_ephemeris'] = (interp_times[-1] - interp_times[0]) / (
                len(interp_times) - 1)
        else:
            isd_data['dt_ephemeris'] = 0

        isd_data['t0_ephemeris'] = interp_times[0]

    # line scan sensor model specifics
    if isinstance(driver, LineScanner):
        isd_data['name_model'] = 'USGS_ASTRO_LINE_SCANNER_SENSOR_MODEL'
        isd_data['interpolation_method'] = 'lagrange'

        start_lines, start_times, scan_rates = driver.line_scan_rate
        center_time = driver.center_ephemeris_time
        isd_data['line_scan_rate'] = [[
            line, time, rate
        ] for line, time, rate in zip(start_lines, start_times, scan_rates)]
        isd_data['starting_ephemeris_time'] = driver.ephemeris_start_time
        isd_data['center_ephemeris_time'] = center_time

        rotation_interp = sensor_to_target.reinterpolate(interp_times)
        isd_data['sensor_orientation'] = {'quaternions': rotation_interp.quats}

        isd_data['t0_ephemeris'] = interp_times[0] - center_time

        isd_data['t0_quaternion'] = isd_data['t0_ephemeris']
        isd_data['dt_quaternion'] = isd_data['dt_ephemeris']

    # frame sensor model specifics
    if isinstance(driver, Framer):
        isd_data['name_model'] = 'USGS_ASTRO_FRAME_SENSOR_MODEL'
        isd_data['center_ephemeris_time'] = driver.center_ephemeris_time

    # radar sensor model specifics
    if isinstance(driver, Radar):
        isd_data['name_model'] = 'USGS_ASTRO_SAR_SENSOR_MODEL'
        isd_data['starting_ephemeris_time'] = driver.ephemeris_start_time
        isd_data['ending_ephemeris_time'] = driver.ephemeris_stop_time
        isd_data['wavelength'] = driver.wavelength
        isd_data['line_exposure_duration'] = driver.line_exposure_duration
        isd_data['scaled_pixel_width'] = driver.scaled_pixel_width
        isd_data['range_conversion_times'] = driver.range_conversion_times
        isd_data[
            'range_conversion_coefficients'] = driver.range_conversion_coefficients

    # check that there is a valid sensor model name
    if 'name_model' not in isd_data:
        raise Exception('No CSM sensor model name found!')

    # Return the assembled ISD dictionary
    return isd_data
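
The position/velocity block above is the core BPoly usage in this formatter: one Hermite interpolant per axis, evaluated with nu=1 to get velocities back. A stripped-down sketch of just that step, with made-up ephemeris numbers:

import numpy as np
from scipy.interpolate import BPoly

times = np.array([0.0, 1.0, 2.0])
pos_x = np.array([10.0, 12.0, 15.0])
vel_x = np.array([1.5, 2.5, 3.5])
x_interp = BPoly.from_derivatives(times, np.vstack((pos_x, vel_x)).T,
                                  extrapolate=True)
t_new = np.linspace(times[0], times[-1], 5)
print(x_interp(t_new))          # interpolated positions
print(x_interp(t_new, nu=1))    # first derivative, i.e. interpolated velocities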
Example #28
import numpy as np
import matplotlib.pyplot as plt
from numpy import exp
from scipy.interpolate import BPoly


def f(x, order):
    # order-0 branch assumed to be exp(-3*x + 3), consistent with the
    # derivative returned for order == 1
    if order == 0:
        return exp(-3 * x + 3)
    elif order == 1:
        return -3 * exp(-3 * x + 3)


def g(x, order):
    if order == 0:
        return -1 / (x**6)
    elif order == 1:
        return 6 / (x**7)


pts = [.8, 1 - .05, 1.25]
img = [(f(pts[0], 0), f(pts[0], 1)), (-.5 - .1, 0),
       (g(pts[2], 0), g(pts[2], 1))]

p = BPoly.from_derivatives(pts, img)

xx = np.linspace(0.5, 2, 100)
yy = np.zeros_like(xx)
yyf = list(map(lambda x: f(x, 0), xx))
yyg = list(map(lambda x: g(x, 0), xx))

for i, x in enumerate(xx):
    if (x < pts[0]):
        yy[i] = f(x, 0)
    elif (x > pts[0]) and (x < pts[2]):
        yy[i] = p(x)
    elif (x > pts[2]):
        yy[i] = g(x, 0)

plt.plot(xx, yy, xx, yyf, xx, yyg)
Example #29
    def _poisson(self, potonly):
        """ Solves Poisson equation """
        # y = [phi, u_j, U, K_j], where u = -M(<r)/G

        # Initialize
        self.r = numpy.array([0])
        self._y = numpy.r_[self.phi0, numpy.zeros(2 * self.nmbin + 1)]

        if (not potonly): self._y = numpy.r_[self._y, numpy.zeros(self.nmbin)]

        # ODE solving using the Runge-Kutta integrator of order 4(5) 'dopri5'
        # (Hairer, Norsett & Wanner 1993)
        max_step = self.maxr if (potonly) else self.max_step
        sol = ode(self._odes)
        sol.set_integrator('dopri5',
                           nsteps=1e6,
                           max_step=max_step,
                           atol=self.ode_atol,
                           rtol=self.ode_rtol)
        sol.set_solout(self._logcheck)
        sol.set_f_params(potonly)
        sol.set_initial_value(self._y, 0)
        sol.integrate(self.maxr)

        # Extrapolate to rt
        derivs = self._odes(self.r[-1], self._y[:, -1], self.potonly)
        self.rt = self.r[-1] - self._y[0][-1] / derivs[0]
        dr = self.rt - self.r[-1]
        ylast = [0]

        for i in range(len(derivs) - 1):
            ylast.append(self._y[1 + i, -1] + derivs[i + 1] * dr)
        self._y = numpy.c_[self._y, ylast]

        # Set the converged flag to True if successful
        if (self.rt < self.maxr) & (sol.successful()):
            self.converged = True
        else:
            self.converged = False

        # Fill arrays needed if potonly=True
        self.r = numpy.r_[self.r, self.rt]
        self.rhat = self.r * 1.0

        self.phihat = numpy.r_[self._y[0, :]]
        self.phi = self.phihat * 1.0
        self.nbound = len(self.phihat)

        self._Mjtot = -sol.y[1:1 + self.nmbin] / self.G

        self.M = sum(self._Mjtot)
        self.Mpe = -sol.y[1 + self.nmbin] / self.G
        self.fpe = self.Mpe / self.M

        # Save the derivative of the potential for the potential interpolater
        dphidr = numpy.sum(self._y[1:1 + self.nmbin, 1:],
                           axis=0) / self.r[1:]**2
        self.dphidrhat1 = numpy.r_[0, dphidr]

        self.A = 1 / (2 * pi * self.s2j)**1.5 / self.rhoint0

        self.mc = -self._y[1, :] / self.G

        # Compute radii to be able to scale in case potonly=True
        self.U = self._y[2 + self.nmbin,
                         -1] - 0.5 * self.G * self.M**2 / self.rt

        # Get the half-mass radius from cubic interpolation. Because the
        # half-mass radius can be used as a scale length, this needs to be
        # done accurately; a linear interpolation does not suffice. Because
        # the density array is not known yet at this point, we need a
        # temporary evaluation of the density in the vicinity of r_h
        ih = numpy.searchsorted(self.mc, 0.5 * self.mc[-1]) - 1
        rhotmp = numpy.zeros(2)
        for j in range(self.nmbin):
            phi = self.phihat[ih:ih + 2] / self.s2j[j]
            rhotmp += self.alpha[j] * self._rhohat(phi, self.r[ih:ih + 2], j)
        drdm = 1. / (4 * pi * self.r[ih:ih + 2]**2 * rhotmp)
        rmc_and_derivs = numpy.vstack([[self.r[ih:ih + 2]], [drdm]]).T

        self.rh = BPoly.from_derivatives(self.mc[ih:ih + 2],
                                         rmc_and_derivs)(0.5 * self.mc[-1])
        self.rv = -0.5 * self.G * self.M**2 / self.U

        # Solve (mass-less) part outside rt
        if (self.nrt > 1):
            # Continue to solve until rlast
            sol.set_solout(self._logcheck2)
            sol.set_f_params(potonly)
            sol.set_initial_value(self._y[:, -1], self.rt)
            sol.integrate(self.nrt * self.rt)

        self.rhat = self.r * 1.0
        self.phihat = numpy.r_[self.phihat, self._y[0, self.nbound:]]
        self.mc = numpy.r_[self.mc,
                           numpy.zeros(len(self._y[0, self.nbound:])) +
                           self.mc[-1]]
        self.phi = self.phihat * 1.0

        dphidr = numpy.sum(self._y[1:1 + self.nmbin, self.nbound:],
                           axis=0) / self.r[self.nbound:]**2
        self.dphidrhat1 = numpy.r_[self.dphidrhat1, dphidr]

        # Additional stuff
        if (not potonly):
            # Calculate kinetic energy
            self.K = numpy.sum(sol.y[4:5])

            # Calculate density and velocity dispersion components
            if (not self.multi):
                self.rhohat = self._rhohat(self.phihat, self.r, 0)
                self.rhohatpe = self._rhohatpe(self.phihat, self.r, 0)
                self.rho = self.rhohat * 1.0
                self.rhope = self.rhohatpe * 1.0

                self.v2 = self._get_v2(self.phihat, self.rhohat, 0)
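
The half-mass radius is obtained by locally inverting the cumulative mass profile: two samples of r and dr/dm = 1/(dm/dr) bracketing the half-mass point define a cubic r(m) that is evaluated at half the total mass. A toy analogue with an analytic profile m(r) = r**3 (not the Poisson solution):

import numpy as np
from scipy.interpolate import BPoly

r = np.array([0.7, 0.9])                  # two radii bracketing the half-mass point
m = r**3                                  # toy enclosed-mass profile m(r) = r**3
drdm = 1.0 / (3 * r**2)                   # dr/dm = 1 / (dm/dr)
r_of_m = BPoly.from_derivatives(m, np.vstack((r, drdm)).T)
m_half = 0.5
print(r_of_m(m_half), m_half**(1.0 / 3))  # cubic inverse vs the exact radius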
Example #30
    def _poisson(self, potonly):
        """ Solves Poisson equation """
        # y = [phi, u_j, U, K_j], where u = -M(<r)/G

        # Initialize
        self.r = numpy.array([0])
        self._y = numpy.r_[self.phi0, numpy.zeros(self.nmbin+1)]
        if (not potonly): self._y = numpy.r_[self._y,numpy.zeros(2*self.nmbin)]
        self._y = numpy.r_[self._y, 0]

        # ODE solving using the Runge-Kutta integrator of order 4(5) 'dopri5'
        # (Hairer, Norsett & Wanner 1993)
        max_step = self.maxr if (potonly) else self.max_step
        sol = ode(self._odes)
        sol.set_integrator('dopri5',nsteps=1e6,max_step=max_step,
                           atol=self.ode_atol,rtol=self.ode_rtol)
        sol.set_solout(self._logcheck)
        sol.set_f_params(potonly)
        sol.set_initial_value(self._y,0)
        sol.integrate(self.maxr)

        # Extrapolate to r_t:
        # phi(r) =~ a(r_t -r)
        # a = GM/r_t^2
        GM = -self.G*sum(sol.y[1:1+self.nmbin])
        p = 2*sol.y[0]*self.r[-1]/GM

        if (p<=0.5):
            rtfac = (1 - sqrt(1-2*p))/p
            self.rt = rtfac*self.r[-1] if (rtfac > 1) else 1.0000001*self.r[-1]
        else:
            self.rt = 1.000001*self.r[-1]

        # Set the converged flag to True if successful
        if (self.rt < self.maxr)&(sol.successful()):
            self.converged=True
        else:
            self.converged=False

        # Calculate the phase space volume occupied by the model
        dvol = (4./3*pi)**2*(self.rt**3-self.r[-1]**3)*0.5
        dvol *= (2*self._y[0,-1])**1.5
        self.volume = self._y[-1][-1]+dvol

        # Fill arrays needed if potonly=True
        self.r = numpy.r_[self.r, self.rt]
        self.rhat = self.r*1.0

        self.phihat = numpy.r_[self._y[0,:], 0]
        self.phi = self.phihat*1.0

        self._Mjtot = -sol.y[1:1+self.nmbin]/self.G

        self.M = sum(self._Mjtot)

        # Save the derivative of the potential for the potential interpolater
        dphidr = numpy.sum(self._y[1:1+self.nmbin,1:],axis=0)/self.r[1:-1]**2
        self.dphidrhat1 = numpy.r_[0, dphidr, -self.G*self.M/self.rt**2]

        self.A = self.alpha/(2*pi*self.s2j)**1.5/self.rhoint0

        if (not self.multi):
            self.mc = -numpy.r_[self._y[1,:], self._y[1,-1]]/self.G

        if (self.multi):
            self.mc = sum(-self._y[1:1+self.nmbin,:]/self.G)
            self.mc = numpy.r_[self.mc, self.mc[-1]]

        # Compute radii to be able to scale in case potonly=True
        self.U = self._y[1+self.nmbin,-1]  - 0.5*self.G*self.M**2/self.rt

        # Get the half-mass radius from cubic interpolation. Because the
        # half-mass radius can be used as a scale length, this needs to be
        # done accurately; a linear interpolation does not suffice. Because
        # the density array is not known yet at this point, we need a
        # temporary evaluation of the density in the vicinity of r_h
        ih = numpy.searchsorted(self.mc, 0.5*self.mc[-1])-1
        rhotmp=numpy.zeros(2)
        for j in range(self.nmbin):
            phi = self.phihat[ih:ih+2]/self.s2j[j]
            rhotmp += self.alpha[j]*self._rhohat(phi, self.r[ih:ih+2], j)
        drdm = 1./(4*pi*self.r[ih:ih+2]**2*rhotmp)
        rmc_and_derivs = numpy.vstack([[self.r[ih:ih+2]],[drdm]]).T

        self.rh = BPoly.from_derivatives(self.mc[ih:ih+2], rmc_and_derivs)(0.5*self.mc[-1])
        self.rv = -0.5*self.G*self.M**2/self.U

        # Additional stuff
        if (not potonly):
            # Calculate kinetic energy (total, radial, tangential)
            self.K = numpy.sum(sol.y[2+self.nmbin:2+2*self.nmbin])
            self.Kr = numpy.sum(sol.y[2+2*self.nmbin:2+3*self.nmbin])
            self.Kt = self.K - self.Kr

            # Calculate density and velocity dispersion components
            if (not self.multi):
                self.rhohat = self._rhohat(self.phihat, self.r, 0)
                self.rho = self.rhohat*1.0
                self.v2, self.v2r, self.v2t = \
                        self._get_v2(self.phihat, self.r, self.rhohat, 0)

            # For multi-mass models, calculate quantities for each mass bin
            if (self.multi):
                for j in range(self.nmbin):
                    phi = self.phihat/self.s2j[j]
                    rhohatj = self._rhohat(phi, self.r, j)
                    v2j, v2rj, v2tj = self._get_v2(phi, self.r, rhohatj, j)
                    v2j, v2rj, v2tj = (q*self.s2j[j] for q in [v2j,v2rj,v2tj])
                    betaj = self._beta(self.r, v2rj, v2tj)

                    kj = self._y[2+self.nmbin+j,:]
                    krj = self._y[2+2*self.nmbin+j,:]
                    ktj = kj - krj

                    mcj = -numpy.r_[self._y[1+j,:], self._y[1+j,-1]]/self.G
                    rhj = numpy.interp(0.5*mcj[-1], mcj, self.r)

                    if (j==0):
                        self.rhohatj = rhohatj
                        self.rhohat = self.alpha[0] * self.rhohatj
                        self.v2j, self.v2rj, self.v2tj = v2j, v2rj, v2tj
                        self.v2 = self._Mjtot[j]*v2j/self.M
                        self.v2r = self._Mjtot[j]*v2rj/self.M
                        self.v2t = self._Mjtot[j]*v2tj/self.M

                        self.betaj = betaj
                        self.kj, self.krj, self.ktj = kj, krj, ktj
                        self.Kj, self.Krj = kj[-1], krj[-1]
                        self.ktj = self.kj - self.krj
                        self.Ktj = self.Kj - self.Krj
                        self.rhj, self.mcj = rhj, mcj
                    else:
                        self.rhohatj = numpy.vstack((self.rhohatj, rhohatj))
                        self.rhohat += self.alpha[j]*rhohatj

                        self.v2j = numpy.vstack((self.v2j, v2j))
                        self.v2rj = numpy.vstack((self.v2rj, v2rj))
                        self.v2tj = numpy.vstack((self.v2tj, v2tj))
                        self.v2 += self._Mjtot[j]*v2j/self.M
                        self.v2r += self._Mjtot[j]*v2rj/self.M
                        self.v2t += self._Mjtot[j]*v2tj/self.M

                        self.betaj = numpy.vstack((self.betaj, betaj))
                        self.kj = numpy.vstack((self.kj, kj))
                        self.krj = numpy.vstack((self.krj, krj))
                        self.ktj = numpy.vstack((self.ktj, ktj))
                        self.Kj = numpy.r_[self.Kj, kj[-1]]
                        self.Krj = numpy.r_[self.Krj, krj[-1]]
                        self.Ktj = numpy.r_[self.Ktj, ktj[-1]]
                        self.rhj = numpy.r_[self.rhj,rhj]
                        self.mcj = numpy.vstack((self.mcj, mcj))

                self.rho = self.rhohat*1.0
                self.rhoj = self.rhohatj*1.0

            # Calculate anisotropy profile (equation 32 of GZ15)
            self.beta = self._beta(self.r, self.v2r, self.v2t)
Example #31
 def __init__(self, x, y, yp=None, method='parabola', monotone=False):
     if yp is None:
         yp = slopes(x, y, method, monotone=monotone)
     yyp = [z for z in zip(y, yp)]
     bpoly = BPoly.from_derivatives(x, yyp)
     super(StinemanInterp2, self).__init__(bpoly.c, x)
Example #32
 def __init__(self, x, y, yp=None, method='Catmull-Rom'):
     if yp is None:
         yp = slopes(x, y, method, monotone=False)
     yyp = [z for z in zip(y, yp)]
     bpoly = BPoly.from_derivatives(x, yyp, orders=3)
     super(CubicHermiteSpline, self).__init__(bpoly.c, x)
Example #33
 def test_yi_trailing_dims(self):
     m, k = 7, 5
     xi = np.sort(np.random.random(m+1))
     yi = np.random.random((m+1, k, 6, 7, 8))
     pp = BPoly.from_derivatives(xi, yi)
     assert_equal(pp.c.shape, (2*k, m, 6, 7, 8))
Example #34
 def __init__(self, x, y, yp=None, method='secant'):
     if yp is None:
         yp = slopes(x, y, method=method, monotone=True)
     yyp = [z for z in zip(y, yp)]
     bpoly = BPoly.from_derivatives(x, yyp, orders=3)
     super(Pchip, self).__init__(bpoly.c, x)
Example #35
 def test_yi_trailing_dims(self):
     m, k = 7, 5
     xi = np.sort(np.random.random(m + 1))
     yi = np.random.random((m + 1, k, 6, 7, 8))
     pp = BPoly.from_derivatives(xi, yi)
     assert_equal(pp.c.shape, (2 * k, m, 6, 7, 8))
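
As this test shows, trailing dimensions of yi are carried through to the coefficient array, which is how a vector-valued curve (for example 3-D positions) can be interpolated with a single BPoly. A small sketch:

import numpy as np
from scipy.interpolate import BPoly

xi = np.array([0.0, 1.0, 2.0])
yi = np.random.random((3, 2, 4))   # 3 knots, value + 1st derivative, 4-component curve
pp = BPoly.from_derivatives(xi, yi)
print(pp.c.shape)                  # (4, 2, 4): cubic coefficients, 2 intervals, 4 components
print(pp(0.5).shape)               # (4,): one 4-vector per evaluation point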
Example #36
def find_resonance(file_path, file_alpha):
    """
        The function takes Acoustic Reflex mesurement pivot table as a workbook from the\
        `write_xls` function and does basic data analysis in separate document:
        
        writes down the pivot for each subject, whenever the resonant frequency has been\
        changed by 5Hz during the experiment.
    
    """
    xl_file = pd.ExcelFile(file_path)
    wb = xlsxwriter.Workbook(file_alpha)
    sheets = xl_file.sheet_names
    arr_sheets = list(dict.fromkeys([sheets[i] for i in range(len(sheets))]))
    ws1 = wb.add_worksheet('Total_alpha_max')
    ws2 = wb.add_worksheet('Total_R_min')
    ws3 = wb.add_worksheet('Total_Y0')
    # Dummy counter
    total_index_row = 1

    for arr_sheet in tqdm(arr_sheets):
        # Retrieving all participant names from worksheets
        ws1.write(total_index_row, 0, arr_sheet)
        ws2.write(total_index_row, 0, arr_sheet)
        ws3.write(total_index_row, 0, arr_sheet)
        merge_format = wb.add_format({'align': 'center'})
        df = pd.read_excel(file_path, sheet_name=arr_sheet, index_col=0)
        # Writing down y,alpha,r
        df_y = df.iloc[:, [1, 4, 7, 10, 13]]
        df_alpha = df.iloc[:, [0, 3, 6, 9, 12]]
        df_r = df.iloc[:, [2, 5, 8, 11, 14]]
        alpha_names = df_alpha.keys()

        max_alpha_values = []
        max_alpha_frequencis = []
        roots_y = []
        min_r_values = []
        min_r_frequencis = []
        i = 1

        # Perform analysis for each frequency in index
        for a_index in range(len(alpha_names)):

            x_coords = np.array(
                df_alpha[df_alpha.columns[a_index]].keys().tolist())
            y_coords_alpha = np.array(
                df_alpha[df_alpha.columns[a_index]].tolist())
            y_coords_y = np.array(df_y[df_y.columns[a_index]].tolist())
            y_coords_r = np.array(df_r[df_r.columns[a_index]].tolist())

            # Write down the approximating functions as cubic polynomials
            cubic_alpha = CubicSpline(x_coords, y_coords_alpha)
            cubic_r = CubicSpline(x_coords, y_coords_r)

            xnew = np.arange(330, 570, 0.1)
            ynew_alpha = cubic_alpha(xnew)

            ynew_r = cubic_r(xnew)
            bpoly = BPoly.from_derivatives(x_coords,
                                           y_coords_y[:, np.newaxis],
                                           extrapolate=None)

            max_index_alpha = np.argmax(ynew_alpha)
            max_value_alpha = ynew_alpha[max_index_alpha]
            max_x_alpha = xnew[max_index_alpha]

            min_index_r = np.argmin(ynew_r)
            min_value_r = ynew_r[min_index_r]
            min_x_r = xnew[min_index_r]

            try:
                root_y = brentq(bpoly, 350, max_x_alpha + 50)
            except ValueError:
                root_y = '-'
            max_alpha_values.append('%0.2f' % max_value_alpha)
            max_alpha_frequencis.append('%0.1f' % max_x_alpha)
            roots_y.append(root_y)
            min_r_values.append('%0.2f' % min_value_r)
            min_r_frequencis.append('%0.1f' % min_x_r)

            ws1.write(total_index_row, i, max_x_alpha)
            ws1.write(0, i, alpha_names[i - 1][6:] + 'dB')
            ws2.write(total_index_row, i, min_x_r)
            ws2.write(0, i, alpha_names[i - 1][6:] + 'dB')
            ws3.write(total_index_row, i, root_y)
            ws3.write(0, i, alpha_names[i - 1][6:] + 'dB')
            i += 1
            max_alpha_values.append('%0.2f' % max_value_alpha)
        # Defining symbolic notation for each participant
        try:
            for f_y in roots_y:
                if float(f_y) >= float(roots_y[0]) + 5:
                    report = 'Частота повысилась'  # 'Frequency increased'
                    symbol = u'\u2191'
                    break
                elif float(f_y) + 5 <= float(roots_y[0]):
                    report = 'Частота понизилась'  # 'Frequency decreased'
                    symbol = u'\u2193'
                    break
                else:
                    report = 'Частота не изменилась'  # 'Frequency did not change'
                    symbol = u'\u2192'
                    continue
                return f_y, report, symbol
        except ValueError:
            report = '-'
            symbol = '-'
        ws3.write(total_index_row, 7, symbol)
        ws3.write(total_index_row, 6, alpha_names[roots_y.index(f_y)][6:])
        for f_alpha in max_alpha_frequencis:
            if float(f_alpha) > float(max_alpha_frequencis[0]) + 5:
                report = 'Частота повысилась'
                symbol = u'\u2191'
                break
            elif float(f_alpha) + 5 < float(max_alpha_frequencis[0]):
                report = 'Частота понизилась'
                symbol = u'\u2193'
                break
            else:
                report = 'Частота не изменилась'
                symbol = u'\u2192'
                continue
            return f_alpha, report, symbol
        ws1.write(total_index_row, 7, symbol)
        ws1.write(total_index_row, 6,
                  alpha_names[max_alpha_frequencis.index(f_alpha)][6:])
        for f_r in min_r_frequencis:
            if float(f_r) > float(min_r_frequencis[0]) + 5:
                report = 'Частота повысилась'
                symbol = u'\u2191'
                break
            elif float(f_r) + 5 < float(min_r_frequencis[0]):
                report = 'Частота понизилась'
                symbol = u'\u2193'
                break
            else:
                report = 'Частота не изменилась'
                symbol = u'\u2192'
                continue
            return f_r, report, symbol
        ws2.write(total_index_row, 7, symbol)
        ws2.write(total_index_row, 6,
                  alpha_names[min_r_frequencis.index(f_r)][6:])
        total_index_row += 1
    wb.close()
Example #37
 def __init__(self, x, y, yp=None, method='parabola', monotone=False):
     if yp is None:
         yp = slopes(x, y, method, monotone=monotone)
     yyp = [z for z in zip(y, yp)]
     bpoly = BPoly.from_derivatives(x, yyp)
     super(StinemanInterp2, self).__init__(bpoly.c, x)
Example #38
 def __init__(self, x, y, yp=None, method='Catmull-Rom'):
     if yp is None:
         yp = slopes(x, y, method, monotone=False)
     yyp = [z for z in zip(y, yp)]
     bpoly = BPoly.from_derivatives(x, yyp, orders=3)
     super(CubicHermiteSpline, self).__init__(bpoly.c, x)