def getCurvatureForPoints(arcLengthList, fx_s, fy_s, smoothing=None):
	# first and second derivatives of x(s), y(s) at the given arc lengths
	x, x_, x__, y, y_, y__ = getFirstAndSecondDerivForTPoints(arcLengthList, fx_s, fy_s)
	# unsigned curvature: |x'y'' - y'x''| / (x'^2 + y'^2)^(3/2)
	curvature = abs(x_ * y__ - y_ * x__) / np.power(x_**2 + y_**2, 3 / 2)
	# smooth the curvature with a spline so it can be differentiated
	fCurvature = UnivariateSpline(arcLengthList, curvature, s=smoothing)
	dxcurvature = fCurvature.derivative(1)(arcLengthList)
	dx2curvature = fCurvature.derivative(2)(arcLengthList)
	return curvature, dxcurvature, dx2curvature
    def curvature_spline(self, x, y, error=0.1):
        """
        Calculate the signed curvature of a 2D curve at each point
        using interpolating splines.

        Parameters
        ----------
        x, y  : numpy.array(dtype = float) shape (n_points, )
        error : float - The admissible error when interpolating the splines

        Returns
        -------
        curvature: numpy.array shape (n_points, )
        """

        # create range for array length
        t = np.arange(x.shape[0])
        std = error * np.ones_like(x)

        fx = UnivariateSpline(t, x, k=4, w=1 / np.sqrt(std))
        fy = UnivariateSpline(t, y, k=4, w=1 / np.sqrt(std))

        xl = fx.derivative(1)(t)
        xl2 = fx.derivative(2)(t)
        yl = fy.derivative(1)(t)
        yl2 = fy.derivative(2)(t)

        curvature = (xl * yl2 - yl * xl2) / np.power(xl**2 + yl**2, 3 / 2)

        return curvature
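
A quick sanity check of the formula above (a minimal sketch, with the method body lifted out of its class): points sampled from a circle of radius 5 should give a near-constant curvature of 1/5.

import numpy as np
from scipy.interpolate import UnivariateSpline

# hypothetical test data: a circle of radius 5
theta = np.linspace(0, 2 * np.pi, 200)
x, y = 5 * np.cos(theta), 5 * np.sin(theta)

t = np.arange(x.shape[0])
std = 0.1 * np.ones_like(x)
fx = UnivariateSpline(t, x, k=4, w=1 / np.sqrt(std))
fy = UnivariateSpline(t, y, k=4, w=1 / np.sqrt(std))
xl, xl2 = fx.derivative(1)(t), fx.derivative(2)(t)
yl, yl2 = fy.derivative(1)(t), fy.derivative(2)(t)
curvature = (xl * yl2 - yl * xl2) / np.power(xl**2 + yl**2, 3 / 2)
print(curvature[50:150].mean())  # ~0.2, i.e. 1/radius, away from the endpoints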
Example #3
def main(dt):
    global X_ref, Y_ref, psi_ref, vx_ref, vy_ref, Waypoints_received, stateEstimate_mark
    rospy.init_node('waypoints_interface', anonymous=True)
    rospy.Subscriber('waypoints', Waypoints, WaypointsCallback)
    rospy.Subscriber('state_estimate', state_Dynamic, stateEstimateCallback)
    pub = rospy.Publisher('final_trajectory', Trajectory2D, queue_size=1)
    rate = rospy.Rate(1 / dt)

    while not rospy.is_shutdown():
        if stateEstimate_mark and Waypoints_mark:
            num_steps_received = len(Waypoints_received.points) - 1
            dt_received = Waypoints_received.dt
            horizon = dt_received * num_steps_received
            points = np.zeros((2, num_steps_received + 2))
            points[0, 0] -= vx_ref * dt_received
            #points[1, 0] -= vy_ref * dt_received
            for i in range(num_steps_received):
                points[0, i + 2] = Waypoints_received.points[i].x
                points[1, i + 2] = Waypoints_received.points[i].y
            t_received = np.linspace(-dt_received, horizon,
                                     num_steps_received + 2)
            num_points = int(floor(horizon / dt + 1)) + 40
            t = np.linspace(0, (num_points - 1) * dt, num_points)
            w = np.ones(num_steps_received + 2)
            w[0:2] *= 10
            w[-1] *= 5
            spl_x = UnivariateSpline(t_received, points[0, :], k=3, w=w)
            spl_y = UnivariateSpline(t_received, points[1, :], k=3, w=w)
            spl_x_dot = spl_x.derivative()
            spl_y_dot = spl_y.derivative()
            spl_x_val = spl_x(t)
            spl_y_val = spl_y(t)
            spl_x_dot_val = spl_x_dot(t)
            spl_y_dot_val = spl_y_dot(t)
            spl_v_val = np.sqrt(spl_x_dot_val**2 + spl_y_dot_val**2)
            spl_theta_val = np.arctan2(spl_y_dot_val, spl_x_dot_val)
            spl_theta_val[0] = 0
            w_theta = np.ones(spl_theta_val.shape[0])
            w_theta[0] = w_theta[0] * 10
            spl_theta_fn = UnivariateSpline(t, spl_theta_val, k=3, w=w_theta)
            spl_theta_val = spl_theta_fn(t)
            spl_yr_fn = spl_theta_fn.derivative()
            spl_yr_val = spl_yr_fn(t)

            traj = Trajectory2D()
            for i in range(num_points):
                pt = TrajectoryPoint2D()
                pt.t = t[i]
                pt.x = spl_x_val[i] * cos(psi_ref) - spl_y_val[i] * sin(
                    psi_ref) + X_ref
                pt.y = spl_x_val[i] * sin(psi_ref) + spl_y_val[i] * cos(
                    psi_ref) + Y_ref
                pt.theta = spl_theta_val[i] + psi_ref
                pt.v = spl_v_val[i]
                pt.kappa = spl_yr_val[i]
                traj.point.append(pt)
            traj.header = Header()
            traj.header.stamp = rospy.get_rostime()
            pub.publish(traj)
        rate.sleep()
def expand_1d_static_fieldmap(z0, fz0):
    """
    Expands 1D static fieldmap z, fz into r, z using splines.
    
    Cylindrically symmetric geometry. 
    
    This is valid for both electric and magnetic fields. 
    
    Returns functions: 
        fr(r, z), fz(r, z)
    
    """

    # Make spline and derivatives
    S = UnivariateSpline(z0, fz0, k=5, s=0)
    Sp = S.derivative()
    Sp2 = S.derivative(2)
    Sp3 = S.derivative(3)

    def fz(r, z):
        return S(z) - r**2 * Sp2(z) / 4

    def fr(r, z):
        return -r * Sp(z) / 2 + r**3 * Sp3(z) / 16

    return fr, fz
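
A minimal usage sketch, assuming a Gaussian on-axis profile for fz0 (hypothetical data, not from the original project):

import numpy as np

z0 = np.linspace(-0.1, 0.1, 401)
fz0 = np.exp(-z0**2 / (2 * 0.02**2))  # hypothetical on-axis field profile

fr, fz = expand_1d_static_fieldmap(z0, fz0)
print(fz(0.0, 0.0))     # ~1.0 on axis
print(fr(0.005, 0.01))  # small off-axis radial component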
Example #5
class AlphaInterpolator(object):
    def __init__(self, a, x, y):

        # Drop NaN values to avoid fitpack errors
        self._data = pd.DataFrame(np.array([a, x, y]).T, columns=["a", "x", "y"])
        self._data.dropna(inplace=True)

        self._create_interpolating_polynomials()
        self._find_path_length()

    def _create_interpolating_polynomials(self):
        self.x_interp = UnivariateSpline(self._data.a, self._data.x, s=0)
        self.y_interp = UnivariateSpline(self._data.a, self._data.y, s=0)

    def _find_path_length(self):
        dx_interp = self.x_interp.derivative()
        dy_interp = self.y_interp.derivative()

        ts = np.linspace(0, 1, 200)
        line_length = cumtrapz(np.sqrt(dx_interp(ts) ** 2 + dy_interp(ts) ** 2), x=ts, initial=0.0)

        line_length /= line_length.max()

        # Here we invert the line_length (ts) function, in order to evenly
        # sample the pareto front
        self.l_interp = UnivariateSpline(line_length, ts, s=0)

    def sample(self, num):
        """ Return estimates of alpha values that evenly sample the pareto
        front """

        out = self.l_interp(np.linspace(0, 1, num))
        out[0] = 0.0
        out[-1] = 1.0
        return out
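
A minimal usage sketch with hypothetical data; it assumes pandas as pd, UnivariateSpline, and scipy.integrate's cumtrapz are imported at module level, as the class body requires:

import numpy as np

a = np.linspace(0, 1, 50)    # parameter along a pareto front (hypothetical)
x = np.cos(0.5 * np.pi * a)  # objective 1
y = np.sin(0.5 * np.pi * a)  # objective 2

interp = AlphaInterpolator(a, x, y)
alphas = interp.sample(10)   # alpha values evenly spaced along the front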
Example #6
def smoothing(x,y,err=None,k=5,s=None,newx=None,derivative_order=0):
  # remove NaNs
  idx = np.isfinite(x) & np.isfinite(y)
  if idx.sum() != len(x): x=x[idx]; y=y[idx]

  # if we don't need to interpolate, use same x as input
  if newx is None: newx=x

  if err is None:
    w=None
  elif err == "auto":
    n=len(x)
    imin = int(max(0,n/2-20))
    imax = imin + 20
    idx = range(imin,imax)
    p = np.polyfit(x[idx],y[idx],2)
    e = np.std( y[idx] - np.polyval(p,x[idx] ) )
    w = np.ones_like(x)/e
  else:
    w=np.ones_like(x)/err
  from scipy.interpolate import UnivariateSpline
  if (s is not None):
    s = len(x)*s
  s = UnivariateSpline(x, y,w=w, k=k,s=s)
  if (derivative_order==0):
    return s(newx)
  else:
    try:
      len(derivative_order)
      return np.asarray([s.derivative(d)(newx) for d in derivative_order])
    except:
      return s.derivative(derivative_order)(newx)
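
A minimal usage sketch with synthetic noisy data (hypothetical values):

import numpy as np

x = np.linspace(0, 10, 200)
y = x**2 + np.random.normal(scale=2.0, size=x.size)  # noisy parabola

y_smooth = smoothing(x, y, err=2.0)                      # smoothed y at x
dy = smoothing(x, y, err=2.0, derivative_order=1)        # first derivative, ~2x
d12 = smoothing(x, y, err=2.0, derivative_order=(1, 2))  # stacked first and second derivatives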
Example #7
def smoothing(x,y,err=None,k=5,s=None,newx=None,derivative_order=0):
  idx = np.isnan(x)|np.isnan(y)
  idx = ~ idx
  if newx is None: newx=x
  if idx.sum() > 0:
    x=x[idx]
    y=y[idx]
  if idx.sum() < 3:
    return np.ones(len(newx))
  if err is None:
    w=None
  elif err == "auto":
    n=len(x)
    imin = max(0,n/2-20)
    imax = min(n,n/2+20)
    idx = range(imin,imax)
    p = np.polyfit(x[idx],y[idx],4)
    e = np.std( y[idx] - np.polyval(p,x[idx] ) )
    w = np.ones_like(x)/e
  else:
    w=np.ones_like(x)/err
  from scipy.interpolate import UnivariateSpline
  if (s is not None):
    s = len(x)*s
  s = UnivariateSpline(x, y,w=w, k=k,s=s)
  if (derivative_order==0):
    return s(newx)
  else:
    try:
      len(derivative_order)
      return [s.derivative(d)(newx) for d in derivative_order]
    except:
      return s.derivative(derivative_order)(newx)
Example #8
def calculate_rates(galaxy, **kwargs):
    print "calculating rates...."

    print """****calculate_rates assumes that galaxy masses are in Msun.
****actual units are: galaxy['Ms'].unit = """ + str(galaxy['Ms'].unit) + """ and galaxy['Mg'].unit = """ + str(galaxy['Mg'].unit)

    Nspline = kwargs.get("Nspline",10)

    given_age = galaxy['age'].to(u.yr).value
    indices = np.append(np.arange(0,len(galaxy),len(galaxy)/(Nspline-1)), len(galaxy)-1)
    short_age = given_age[indices]

    assert ("Mg" in galaxy.colnames), "galaxy column 'Mg' not found, galaxy lacks gas :-("
    print galaxy['Mg'][indices].value
    dMg = UnivariateSpline(short_age,galaxy['Mg'][indices].value,k=3)
    dMgdt_func = dMg.derivative(n=1)
    dMgdt = dMgdt_func(given_age) * u.Msun / u.yr
    col_dMgdt = Column(data=dMgdt,name='dMgdt')
    galaxy.add_column(col_dMgdt)


    if "Mhi" in galaxy.colnames:
        dMhi = UnivariateSpline(short_age,galaxy['Mhi'][indices].value,k=3)
        dMhidt_func = dMhi.derivative(n=1)
        dMhidt = dMhidt_func(given_age) * u.Msun / u.yr
        col_dMhidt = Column(data=dMhidt,name='dMhidt')
        galaxy.add_column(col_dMhidt)

    if "Mh2" in galaxy.colnames:
        dMh2 = UnivariateSpline(short_age,galaxy['Mh2'][indices].value,k=3)
        dMh2dt_func = dMh2.derivative(n=1)
        dMh2dt = dMh2dt_func(given_age) * u.Msun / u.yr
        col_dMh2dt = Column(data=dMh2dt,name='dMh2dt')
        galaxy.add_column(col_dMh2dt)

    assert ("Ms" in galaxy.colnames), "galaxy column 'Ms' not found, galaxy that lacks stars is not a galaxy :-("
    dMs = UnivariateSpline(short_age,galaxy['Ms'][indices].value,k=3)
    dMsdt_func = dMs.derivative(n=1)
    dMsdt = dMsdt_func(given_age) * u.Msun / u.yr
    col_dMsdt = Column(data=dMsdt,name='dMsdt')
    galaxy.add_column(col_dMsdt)

    if "Mh" in galaxy.colnames and "dMhdt" not in galaxy.colnames:
        dMh = UnivariateSpline(short_age,galaxy['Mh'][indices].value,k=3)
        dMhdt_func = dMh.derivative(n=1)
        dMhdt = dMhdt_func(given_age) * u.Msun / u.yr
        col_dMhdt = Column(data=dMhdt,name='dMhdt')
        galaxy.add_column(col_dMhdt)
    if "dMhdt" in galaxy.colnames:
        galaxy['dMcgmdt'] = galaxy['dMhdt'] - galaxy['dMsdt'] - galaxy['dMgdt']

    if "dt" not in galaxy.colnames:
        dt = np.zeros(len(galaxy))*u.yr
        for i in range(len(galaxy)):
            dt[i] = calculate_timestep(galaxy['age'].to(u.yr),i,len(galaxy))
        col_dt = Column(data=dt, name='dt')
        galaxy.add_column(col_dt)

    return galaxy
Example #9
def get_velocity_and_acceleration(scalars):
    """ https://stackoverflow.com/questions/40226357/second-derivative-in-python-scipy-numpy-pandas
    """
    x = np.linspace(0, len(scalars), len(scalars))
    ys = np.array(scalars, dtype=np.float32)
    y_spl = UnivariateSpline(x, ys, s=0, k=4)
    velocity = y_spl.derivative(n=1)
    acceleration = y_spl.derivative(n=2)
    return velocity(x), acceleration(x)
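
A minimal usage sketch (hypothetical scalar trace):

import numpy as np

scalars = np.sin(np.linspace(0, 2 * np.pi, 100))  # e.g. a position trace
velocity, acceleration = get_velocity_and_acceleration(scalars)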
Example #10
def main(dt, horizon):
    global vx, vy, X, Y, psi, wz, d_f,stateEstimate_mark

    # import track file
    track = Tra('Tra_1', horizon)
    rospy.init_node('toy_planner', anonymous=True)
    rospy.Subscriber('state_estimate', state_Dynamic, stateEstimateCallback)
    pub = rospy.Publisher('final_trajectory', Trajectory2D, queue_size=1)

    rate = rospy.Rate(1/dt)

    # first set the horizon to be very large to get where the vehicle is
    track.horizon = horizon
    track.currentIndex = 0

    num_points = 400
    w = np.ones(num_points) * 0.1
    w[0] = w[0] * 30
    w[-1] = w[-1] * 30

    while not rospy.is_shutdown():
        if stateEstimate_mark:
            track.currentIndex, _ = track.searchClosestPt(X, Y, track.currentIndex)
            if track.currentIndex + num_points < track.size:
                index_list = np.arange(track.currentIndex, track.currentIndex+num_points, 1)
                t = track.t[track.currentIndex:track.currentIndex+num_points]
                t_sub = track.t[index_list]
                x_sub = track.x[index_list] #- 0.16
                y_sub = track.y[index_list] #+ 0.44
                spl_x = UnivariateSpline(t_sub, x_sub, k=2)
                spl_y = UnivariateSpline(t_sub, y_sub, k=2)
                spl_x_dot = spl_x.derivative()
                spl_y_dot = spl_y.derivative()
                spl_x_val = spl_x(t)
                spl_y_val = spl_y(t)
                spl_x_dot_val = spl_x_dot(t)
                spl_y_dot_val = spl_y_dot(t)
                spl_v_val = np.sqrt(spl_x_dot_val**2 + spl_y_dot_val**2)
                spl_theta_val = np.arctan2(spl_y_dot_val, spl_x_dot_val)
                spl_yr_fn = UnivariateSpline(t, spl_theta_val, k=2).derivative()
                spl_yr_val = spl_yr_fn(t)

                traj = Trajectory2D()
                for i in range(num_points):
                    pt = TrajectoryPoint2D()
                    pt.t = t[i]
                    pt.x = spl_x_val[i]
                    pt.y = spl_y_val[i]
                    pt.theta = spl_theta_val[i]
                    pt.v = spl_v_val[i]
                    pt.kappa = spl_yr_val[i]
                    traj.point.append(pt)
                traj.header = Header()
                traj.header.stamp = rospy.get_rostime()
                pub.publish(traj)
        rate.sleep()
Example #11
    def phase_corr(self, p, MfRD, freq, aligned_phase):
        # NOTE: The sign of the phase used here is opposite of that in lal but they both follow the same convention
        from scipy.interpolate import UnivariateSpline
        dphi = UnivariateSpline(freq, aligned_phase, k=3, s=0)
        self.dphilist = dphi.derivative(1)(freq)
        f_final = MftoHz(MfRD, p['Mtot'])
        # Time correction is t(f_final) = 1/(2pi) dphi/df (f_final)
        t_corr = dphi.derivative(1)(f_final) / (2. * Constants.LAL_PI)
        logger.info("t_corr = {0}".format(t_corr))
        return t_corr
Example #12
File: util.py Project: depra/cana
def curvature(func, x, ftype='polynomial', order=4):
    r"""
    Measure the curvature of a function.

    Parameters
    ----------
    func: coefficients, spline or function
        The function for deriving the curvature. F(x)

    x:
        The x axis for the F(x) function.

    ftype: string
        'polynomial': func is the polynomial coefficients array
        'spline': func is a scipy.UnivariateSpline object
        'analytical': a self defined function

    order: int (optional)
        The order for fitting the analytical function.
        Only used if ftype='analytical'

    Returns
    -------
    The curvature array

    """
    if ftype == 'polynomial':
        # getting polynomial derivatives
        func_ = np.polyder(func)
        func_2 = np.polyder(func, m=2)
        # building arrays with derived polynomial
        y_ = np.polyval(func_, x)
        y_2 = np.polyval(func_2, x)
    if ftype == 'analytical':
        # fitting the analytical function with a spline
        func = UnivariateSpline(x, func(x), k=order)
        ftype = 'spline'
    if ftype == 'spline':
        # deriving the spline (func is a scipy UnivariateSpline here)
        func_ = func.derivative()
        func_2 = func.derivative(n=2)
        y_ = func_(x)
        y_2 = func_2(x)
    # normalizing vectors according to the first derivative
    y_norm = np.median(y_)
    y_ = y_/y_norm
    y_2 = y_2/y_norm
    # building terms of the curvature
    aux = np.power((np.power(y_, 2) + 1), 1.5)
    aux2 = y_2
    # calculating curvature and radius of curvature
    r = aux/aux2
    k = 1/r
    return k
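
A minimal usage sketch with polynomial coefficients (hypothetical data; note that the result is normalized by the median first derivative, as in the code above):

import numpy as np

x = np.linspace(0.1, 1, 50)
k = curvature([1, 0, 0], x, ftype='polynomial')  # curvature of y = x**2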
Example #13
    def _build_energy_curve(self):
        x, y = bezier(self.control_polygon.points).T
        t = np.linspace(0, 1, num=200)
        bezier_spline_x = UnivariateSpline(t, x, k=5)
        bezier_spline_y = UnivariateSpline(t, y, k=5)
        dxdt = bezier_spline_x.derivative(n=self.control_polygon.degree)(t)
        dydt = bezier_spline_y.derivative(n=self.control_polygon.degree)(t)
        norm = dxdt * dxdt + dydt * dydt

        norm_spline = UnivariateSpline(t, norm, k=5)
        energy = norm_spline.integral(0, 1)
        return x, y, norm, energy
Example #14
class VelocityComponents:

    def __init__(self, s, U, C, D, spline=False, smoothing=1.0):

        # Store the data
        self.s = s
        self.U = U
        self.C = C
        self.D = D
        self.ds = abs(s[1] - s[0])

        # Check if using a spline representation for C and D components
        self.spline = spline
        if spline:
            self._cspline = UnivariateSpline(self.s, self.C, k=5, s=smoothing)
            self._dspline = UnivariateSpline(self.s, self.D, k=5, s=smoothing)

    @property
    def Vk(self):
        """
        Kinematic wave speed.
        """
        return self.C - self.Dp

    @property
    def Cp(self):
        """
        The spatial derivative of C component.
        """
        if self.spline:
            return self._cspline.derivative(n=1)(self.s)
        else:
            return np.gradient(self.C, self.ds, edge_order=2)

    @Cp.setter
    def Cp(self, value):
        raise ValueError('Cannot set Cp explicitly')

    @property
    def Dp(self):
        """
        The spatial derivative of D component.
        """
        if self.spline:
            return self._dspline.derivative(n=1)(self.s)
        else:
            return np.gradient(self.D, self.ds, edge_order=2)

    @Dp.setter
    def Dp(self, value):
        raise ValueError('Cannot set Dp explicitly')
Example #15
def get_vertical_acceleration(skeleton, frames, joint_name):
    """ https://stackoverflow.com/questions/40226357/second-derivative-in-python-scipy-numpy-pandas
    """
    ps = []
    for frame in frames:
        p = skeleton.nodes[joint_name].get_global_position(frame)
        ps.append(p)
    ps = np.array(ps)
    x = np.linspace(0, len(frames), len(frames))
    ys = np.array(ps[:, 1])
    y_spl = UnivariateSpline(x, ys, s=0, k=4)
    velocity = y_spl.derivative(n=1)
    acceleration = y_spl.derivative(n=2)
    return ps, velocity(x), acceleration(x)
Example #16
def curvature_spline(x, y):
    """
    this function gets the curvature of the road.
    """

    t = numpy.arange(x.shape[0])
    fx = UnivariateSpline(t, x, k=4)
    fy = UnivariateSpline(t, y, k=4)
    xDer = fx.derivative(1)(t)
    xDerDer = fx.derivative(2)(t)
    yDer = fy.derivative(1)(t)
    yDerDer = fy.derivative(2)(t)
    curvature = (xDer * yDerDer - yDer * xDerDer) / numpy.power(xDer ** 2 + yDer ** 2, 3/2)
    return curvature
Example #17
def _get_curvature(vertex_x_padded_metres, vertex_y_padded_metres):
    """Computes signed curvature at each vertex, using interpolating splines.

    Curvature = inverse of turning radius.

    This method is based on curvature.py, found here:
    https://gist.github.com/elyase/451cbc00152cb99feac6

    V_p = total number of vertices (including duplicates used for padding)
    V_u = number of unique vertices

    :param vertex_x_padded_metres: numpy array (length V_p) with x-coordinates
        of vertices.
    :param vertex_y_padded_metres: numpy array (length V_p) with y-coordinates
        of vertices.
    :return: vertex_curvatures_metres01: numpy array (length V_u) of curvatures
        (inverse metres).
    """

    num_padded_vertices = len(vertex_x_padded_metres)
    vertex_indices_padded = numpy.linspace(0,
                                           num_padded_vertices - 1,
                                           num=num_padded_vertices,
                                           dtype=int)

    interp_object_for_x = UnivariateSpline(vertex_indices_padded,
                                           vertex_x_padded_metres,
                                           k=SPLINE_DEGREE)
    interp_object_for_y = UnivariateSpline(vertex_indices_padded,
                                           vertex_y_padded_metres,
                                           k=SPLINE_DEGREE)

    vertex_indices_unique = vertex_indices_padded[SPLINE_DEGREE:-SPLINE_DEGREE]
    x_derivs_metres_per_vertex = interp_object_for_x.derivative(1)(
        vertex_indices_unique)
    x_derivs_metres2_per_vertex2 = interp_object_for_x.derivative(2)(
        vertex_indices_unique)

    y_derivs_metres_per_vertex = interp_object_for_y.derivative(1)(
        vertex_indices_unique)
    y_derivs_metres2_per_vertex2 = interp_object_for_y.derivative(2)(
        vertex_indices_unique)

    numerators = (x_derivs_metres_per_vertex * y_derivs_metres2_per_vertex2
                  ) - (y_derivs_metres_per_vertex *
                       x_derivs_metres2_per_vertex2)
    denominators = numpy.power(
        x_derivs_metres_per_vertex**2 + y_derivs_metres_per_vertex**2, 1.5)
    return numerators / denominators
Example #18
def get_critical_points_dist(series):
    spline_4 = UnivariateSpline(series.keys(), series, k=4)
    spline_5 = UnivariateSpline(series.keys(), series, k=5)
    first_derivative = spline_4.derivative()
    second_derivative = spline_5.derivative(2)
    roots = first_derivative.roots()

    #return [(series.keys()[0], series.iloc[0], cp_labels[3])] + [(r, series[r], second_derivative_test(second_derivative, r))
    #                                                                        for r in roots] + [(series.keys()[-1], series.iloc[-1], cp_labels[3])]

    return ([(series.keys()[0], spline_4(series.keys()[0]).item(), cp_labels[3])] +
            [(r, spline_4(r).item(), second_derivative_test(second_derivative, r)) for r in roots] +
            [(series.keys()[-1], spline_4(series.keys()[-1]).item(), cp_labels[3])])
Example #19
def histogram_maxima(peak_hist, plot_bins, hist_sigma, plot):
    lg.function_log()

    #hist_filter_1 = savgol_filter(peak_hist,5,3)
    #hist_filter = gaussian_filter1d(hist_filter_1,0.1)

    gauss_sigma = max(plot_bins) * hist_sigma
    """interpolate histogram for higher resolution"""

    interp_hist = interpolate.interp1d(plot_bins, peak_hist)

    new_inc = (plot_bins[1] - plot_bins[0]) / 10

    new_bins = np.arange(min(plot_bins), max(plot_bins), new_inc)

    hist_filter = gaussian_filter1d(interp_hist(new_bins), gauss_sigma)
    """interpolation of histogram"""
    hist_spline = UnivariateSpline(new_bins, hist_filter, k=4, s=0)
    """higher number of bins for analysis of interpolation curve"""
    #new_bins = np.arange(0,max(peak_hist),0.1)
    """calculation of derivatives to find local maxima"""
    d_hist_spline = hist_spline.derivative()
    d2_hist_spline = hist_spline.derivative(2)
    d_hist_roots = d_hist_spline.roots()
    """only extract roots with positive values for d2/d2x(root)"""
    find_peaks_result = []

    for n in range(0, len(d_hist_roots)):
        if d_hist_roots[n] > min(new_bins):
            if d_hist_roots[n] < max(new_bins):
                if d2_hist_spline(d_hist_roots[n]) < 0:
                    find_peaks_result.append(d_hist_roots[n])

    if plot == True:
        plt.plot(plot_bins, peak_hist, label='center of mass histogram')
        #plt.plot(plot_bins, hist_filter_1, label = 'savgol_filter')
        plt.plot(new_bins, hist_filter, label='gauss_filter')
        plt.plot(new_bins, d_hist_spline(new_bins), label='derivative')
        plt.legend()
        plt.show()

    cen_list = []

    for n in range(0, len(find_peaks_result)):
        if hist_spline(find_peaks_result[n]) > 0:
            cen_list.append(np.round(find_peaks_result[n], 0))

    return cen_list
Example #20
    def softening_scale(self,mq=70,auto=True,r=None,dens=None,mass=None,kernel='Gadget'):

        opt_dict={'Gadget':0.698352, 'spline':0.977693}
        rq=self.qmass(mq)

        if auto==True:
            prof=Profile(self.p,Ngrid=512,xmin=0.001*rq,xmax=10*rq,kind='lin')
            r=prof.grid.gx
            dens=prof.dens



        dens_spline=UnivariateSpline(r, dens, k=1,s=0,ext=1)
        der_dens=dens_spline.derivative()




        derdens=der_dens(r)
        ap=UnivariateSpline(r, r*r*dens*derdens*derdens, k=1,s=0,ext=1)
        bp=UnivariateSpline(r, r*r*dens*dens, k=1,s=0,ext=1)

        B=bp.integral(0,rq)
        A=ap.integral(0,rq)/(mass*mq/100.)
        C=(B/(A))**(1/5)

        cost=opt_dict[kernel]
        N=len(self.p.Id)**(1/5)

        return C*cost/N
Example #21
def threshold_evaluation(scores):
    precision = scores[0]
    recall = scores[1]
    f1 = scores[2]

    max_f1 = max(f1, key=lambda x: x[1])[0]

    x, y = zip(*f1)
    y_spl = UnivariateSpline(x, y, s=0, k=4)
    x_range = np.linspace(x[0], x[-1], 1000)
    y_spl_2d = y_spl.derivative(n=2)
    # plt.plot(x_range, y_spl_2d(x_range))
    max_f1_2d = [x for x in x_range if y_spl_2d(x) == max(y_spl_2d(x_range))]

    p_x, p_y = zip(*precision)
    r_x, r_y = zip(*recall)

    p_x = np.array(p_x)
    p_y = np.array(p_y)
    r_y = np.array(r_y)

    idx = np.argwhere(np.diff(np.sign(p_y - r_y))).flatten()
    inter = p_x[idx]  # A bit off, interpolation should make it better

    return max_f1, max_f1_2d[0], inter
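
A minimal usage sketch with synthetic (threshold, score) pairs (hypothetical data):

import numpy as np

th = np.linspace(0.1, 0.9, 9)
precision = list(zip(th, th))          # rises with threshold (hypothetical)
recall = list(zip(th, 1 - th))         # falls with threshold
f1 = list(zip(th, 2 * th * (1 - th)))  # peaks in the middle
best_t, knee_t, crossing = threshold_evaluation([precision, recall, f1])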
Example #22
def find_extrema_spline(x,y,k=4,s=0,**kwargs):
    """
    find local extrema of y(x) by taking derivative of spline

    Parameters
    ----------

    x,y : array-like
      find extrema of y(x)

    k : int
      order of spline interpolation [must be >3]

    s : number
      s parameter sent to scipy spline interpolation (used for smoothing)

    **kwargs : extra arguments to UnivariateSpline


    Returns
    -------
    sp : UnivariateSpline object

    x_max,y_max : array
      value of x and y at extrema(s)
    """


    sp = UVSpline(x,y,k=k,s=s,**kwargs)

    x_max = sp.derivative().roots()

    y_max = sp(x_max)

    return sp,x_max,y_max
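
A minimal usage sketch, assuming UVSpline is an alias for scipy.interpolate.UnivariateSpline:

import numpy as np
# UVSpline is assumed to alias scipy.interpolate.UnivariateSpline
from scipy.interpolate import UnivariateSpline as UVSpline

x = np.linspace(0, 2 * np.pi, 100)
y = np.sin(x)
sp, x_max, y_max = find_extrema_spline(x, y)
# x_max is close to [pi/2, 3*pi/2]; y_max close to [1, -1]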
Example #23
def ang_mom_circ(r, v, phi, rmax=200, bins=100):
    '''
    :param r: radius
    :param v: total velocity
    :param phi: potential
    :return: j_circ(E)
    '''
    r_phi = np.array(sorted([x for x in zip(r, phi)]))
    r_edges = np.linspace(0, rmax, bins+1)  # check what is max(r); should I apply limits?
    r_bin = 0.5*(r_edges[1:] + r_edges[:-1])
    phi_bin = np.zeros_like(r_bin)

    for i in range(len(r_bin)-1):
        idxs_bin = np.where((r_phi.T[0] > r_edges[i]) & (r_phi.T[0] < r_edges[i+1]))
        phi_bin[i] = np.mean(r_phi.T[1][idxs_bin])

    phi_spl = UnivariateSpline(r_bin, phi_bin)
    phi_der = phi_spl.derivative()

    # j(E) interpolation
    r = np.linspace(0, rmax, 1000)
    j_circ_E_spl = interp1d(0.5 * r * phi_der(r) + phi_spl(r), np.sqrt(r * phi_der(r)) * r,
                        fill_value=0, bounds_error=False)

    j_circ_E = j_circ_E_spl(0.5 * v ** 2 + phi)

    return j_circ_E
Example #24
def retrieve_data(dataset, Y, derivative=False, maxlen=96):
    obs = dataset.shape[0]
    num_feature = 20
    newdata = []
    newY = []
    t = [0.2 * i for i in range(maxlen)]

    for i in range(obs):
        data = dataset[i]
        datalen = data.shape[0]
        if datalen >= maxlen:
            D = data[(datalen - maxlen):(datalen), ]
            if derivative == True:
                #D_deri = np.gradient(D,axis=0)
                #D = np.concatenate((D,D_deri),axis=1)
                D_deri = []
                for j in range(num_feature):
                    sdata = D[:, j]
                    localvar = np.std(sdata)
                    f_spline = UnivariateSpline(t,
                                                sdata,
                                                s=2 * len(t) * localvar)
                    f_deri = f_spline.derivative()
                    D_deri.append(f_deri(t))

                D_deri = np.array(D_deri).transpose()
                D = np.concatenate((D, D_deri), axis=1)

            newdata.append(D)
            newY.append(Y[i])

    return np.array(newdata), np.array(newY)
Example #25
    def calc_Lz(cls, inp, data):
        """creates the emissivity function Lz over a range of temperatures"""
        def kev_J(x):
            return x * 1E3 * 1.6021E-19

        tei_lin = np.linspace(inp.nmin_T, inp.nmax_T, inp.nte)
        Lz_tot = np.zeros(inp.nte)

        # for each charge state,
        for cs in np.arange(inp.z_imp + 1):
            # get fractional abundances of each as a function of ne and Te
            frac_abun_interp = interp1d(tei_lin,
                                        cls.frac_abun(inp, data)[0, :, cs])

            Lz_cs_interp = interp1d(tei_lin, data.alradr[cs, 0, :])

            for i, T in enumerate(tei_lin):
                Lz_tot[i] += 10**Lz_cs_interp(T) * frac_abun_interp(T)

        # spline fit the logarithm of the emissivity. We'll spline fit the actual values next
        Lz_tot_interp = UnivariateSpline(inp.tei,
                                         np.log10(Lz_tot) + 100.0,
                                         s=0)

        # the above function gives the right values, but we also need the derivatives
        # we will now do a spline fit of the values themselves, rather than their base-10 logarithm
        T_kev_vals = np.logspace(-3, 2, 10000)
        T_J_vals = kev_J(T_kev_vals)
        Lz_vals = 10.0**(Lz_tot_interp(T_kev_vals) - 100.0)

        Lz_interp = UnivariateSpline(T_J_vals, Lz_vals, s=0)
        dLzdT_interp = Lz_interp.derivative()

        return Lz_interp, dLzdT_interp
Example #26
def omega_phi_kerr_converge(time, data, N=1, minN=0):
    splrep = UnivariateSpline(time, data, s=0, k=4)
    tpks = splrep.derivative().roots()[1:]
    deltat = tpks[N] - tpks[minN]
    defint = splrep.integral(tpks[minN], tpks[N])
    omega_phi = defint / deltat
    return omega_phi
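
A minimal sketch, assuming `data` is an oscillatory series whose average value between two of its extrema is wanted (hypothetical signal):

import numpy as np

time = np.linspace(0, 20, 4000)
data = 2.0 + 0.1 * np.sin(3.0 * time)  # hypothetical oscillatory series
avg = omega_phi_kerr_converge(time, data, N=2, minN=1)  # ~2.0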
Example #27
    def response(self, disturbance_vector):
        """ Returns the response of the sensor due to a disturbance

            The acceleration imposed on the sensor is estimated with the following equation:
                acceleration(t) = d2stress(t)/dt2 * material_depth/material_prop['modulus']
            And the sensor response takes into account the frequency response, estimated by a normal curve
                freq_response(f) = norm(scale = self.bandwidth/2, loc=self.resonant_freq).pdf(f_array)
                freq_response(f) /= max(freq_response)
                response(t) = ifft(fft(acceleration) * freq_response)


            Args:
                disturbance_vector (list): list with a temporal array, in the 0 index, and a 
                stress array, in the 1 index.

            Returns:
                list: with two arrays, the temporal array and the voltage response array.
        """
        const = self.material_depth / self.material_prop['modulus']
        t_vector = disturbance_vector[0]
        # using the scipy UnivariateSpline to compute the second derivative
        data_spl = UnivariateSpline(t_vector, disturbance_vector[1], s=0, k=3)
        acceleration = data_spl.derivative(n=2)(t_vector) * const
        # we need to take the frequency response of the acceleration stimuli
        N = len(disturbance_vector[1])
        T = t_vector[1] - t_vector[0]
        f_array = np.fft.fftfreq(N, T)
        freq_acc = np.fft.fft(acceleration)
        # we need to apply a filter factor related to the frequency response of the sensor
        freq_response = self.frequency_response(N, (0, max(f_array)), mirror=True)[1]
        voltage = np.fft.ifft(freq_acc * freq_response) * self.sensitivity
        return voltage
Example #28
def GetPeaks():
    x = ds_analysis.data['x']
    y = ds_analysis.data['y']
    spl = UnivariateSpline(x, y, k=4, s=splineSlider.value)
    spl_prime = spl.derivative()
    spl_prime_vals = spl_prime(x)
    spl_prime2 = spl_prime.derivative()

    peaks_valleys = spl_prime.roots()
    peaks_valleys_ndx = [np.abs(x - i).argmin() for i in peaks_valleys]

    inflection_points_ndx = []
    for i in range(1, len(spl_prime_vals) - 1):
        if spl_prime_vals[i] < spl_prime_vals[i - 1] and spl_prime_vals[i] < spl_prime_vals[i + 1]:
            inflection_points_ndx.append(i)
        elif spl_prime_vals[i] > spl_prime_vals[i - 1] and spl_prime_vals[i] > spl_prime_vals[i + 1]:
            inflection_points_ndx.append(i)

    ds_splinefit_peaks.data = dict(
        x=np.concatenate((peaks_valleys, x[inflection_points_ndx])),
        y=spl(x)[np.concatenate((peaks_valleys_ndx, inflection_points_ndx))])
Example #29
def get_derivatives(xs, ys, fd=False):
    """
    Return the derivatives of y(x) at the points x.
    If scipy is available, a spline is generated to calculate the derivatives;
    if scipy is not available, the left and right slopes are calculated and, if both exist, averaged.
    Setting fd to True always forces the finite difference slopes.
    """
    try:
        if fd:
            raise SplineInputError("no spline wanted")
        if len(xs) < 4:
            er = SplineInputError("too few data points")
            raise er
        from scipy.interpolate import UnivariateSpline

        spline = UnivariateSpline(xs, ys)
        d = spline.derivative(1)(xs)
    except (ImportError, SplineInputError):
        d = []
        for n in range(len(xs)):
            m, left, right = 0, 0, 0
            if n > 0:
                left = (ys[n] - ys[n - 1]) / (xs[n] - xs[n - 1])
                m += 1
            if n < len(xs) - 1:
                right = (ys[n + 1] - ys[n]) / (xs[n + 1] - xs[n])
                m += 1
            # average of whichever one-sided slopes exist
            d.append((left + right) / m)
    return d
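
A minimal usage sketch; SplineInputError is referenced but not defined above, so a stand-in definition is assumed here:

import numpy as np

class SplineInputError(Exception):
    """Stand-in for the original project's exception class."""

xs = np.linspace(0, 1, 20)
ys = xs**3
d_spline = get_derivatives(xs, ys)       # spline-based, ~3x^2
d_fd = get_derivatives(xs, ys, fd=True)  # finite-difference fallback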
Example #30
    def filter(self, ):
        """Update StruM to ignore uninformative position-specific features.

		Features with a high variance, i.e. non-specific features, do not
		contribute to the specificity of the StruM model. Filtering them
		out may increase the signal-to-noise ratio. The position-specific-
		features are rank ordered by their variance, and a univariate 
		spline is fit to the distribution. The point of inflection is 
		used as the threshold for masking less specific features.

		Once this method is run and the attribute `self.filter_mask` is 
		generated, two additional methods will become available:
		:func:`score_seq_filt` and :func:`eval_filt`.
		"""
        from scipy.interpolate import UnivariateSpline
        idx = np.argsort(self.strum[1])[::-1]
        variance = self.strum[1][idx]

        n = len(idx)
        xvals = np.arange(n)

        spl = UnivariateSpline(xvals, variance, s=n / 10.)
        d_spl = spl.derivative(1)
        d_ys = d_spl(xvals)

        min_i = np.argmax(d_ys)
        self.var_thresh = spl(min_i)
        self.filter_mask = idx[min_i:]
Example #31
    def averageProfile(self,
                       trange=[2, 3],
                       interelm=False,
                       elm=False,
                       **kwargs):

        _idx = np.where((self.time >= trange[0]) & (self.time <= trange[1]))[0]

        ne = self.ne[_idx, :]
        neN = self.neNorm[_idx, :]
        if interelm:
            logging.warning('Computing inter-ELM profiles')
            self._maskElm(trange=trange, **kwargs)
            ne = ne[self._interElm, :]
            neN = neN[self._interElm, :]
        if elm:
            self._maskElm(trange=trange, **kwargs)
            logging.warning('Computing ELM profiles')
            ne = ne[self._Elm, :]
            neN = neN[self._Elm, :]

        profiles = np.nanmean(ne, axis=0)
        error = np.nanstd(ne, axis=0)
        profilesN = np.nanmean(neN, axis=0)
        errorN = np.nanstd(neN, axis=0)
        # now build the appropriate Efolding length on the
        # recomputed profiles using an UnivariateSpline
        # interpolation weighted on the error
        Rmid = self.eq.rho2rho('sqrtpsinorm', 'Rmid', self.rho,
                               self.time[_idx].mean())
        #
        S = UnivariateSpline(Rmid, profiles, w=1. / error, s=0)
        Efold = np.abs(S(Rmid) / S.derivative()(Rmid))
        return profiles, error, Efold, profilesN, errorN
Example #32
def curvature_splines(x, y=None, error=0.1):
    if y is None:
        x, y = x.real, x.imag

    t = np.arange(x.shape[0])
    std = error * np.ones_like(x)

    fx = UnivariateSpline(t, x, k=4, w=1 / np.sqrt(std))
    fy = UnivariateSpline(t, y, k=4, w=1 / np.sqrt(std))

    xˈ = fx.derivative(1)(t)
    xˈˈ = fx.derivative(2)(t)
    yˈ = fy.derivative(1)(t)
    yˈˈ = fy.derivative(2)(t)
    curvature = abs((xˈ * yˈˈ - yˈ * xˈˈ)) / np.power(xˈ**2 + yˈ**2, 3 / 2)
    return curvature
Example #33
def smooth_elevation(df, smooth=4):
    if not present(df, N.ELEVATION):
        log.debug(f'Smoothing {N.RAW_ELEVATION} to get {N.ELEVATION}')
        unique = df.loc[~df[N.DISTANCE].isna() & ~df[N.RAW_ELEVATION].isna(),
                        [N.DISTANCE, N.RAW_ELEVATION]].drop_duplicates(
                            N.DISTANCE)
        # the smoothing factor is from eyeballing results only.  maybe it should be a parameter.
        # it seems better to smooth along the route rather than smooth the terrain model since
        # 1 - we expect the route to be smoother than the terrain in general (roads / tracks)
        # 2 - smoothing the 2d terrain is difficult to control and can give spikes
        # 3 - we better handle errors from mismatches between terrain model and position
        #     (think hairpin bends going up a mountainside)
        # the main drawbacks are
        # 1 - speed on loading
        # 2 - no guarantee of consistency between routes (or even on the same route retracing a path)
        spline = UnivariateSpline(unique[N.DISTANCE],
                                  unique[N.RAW_ELEVATION],
                                  s=len(unique) * smooth)
        df[N.ELEVATION] = spline(df[N.DISTANCE])
        df[N.GRADE] = spline.derivative()(df[N.DISTANCE]) / 10  # distance in km
        df[N.GRADE] = df[N.GRADE].rolling(5, center=True).median().ffill().bfill()
        # avoid extrapolation / interpolation
        df.loc[df[N.RAW_ELEVATION].isna(), [N.ELEVATION]] = None
    return df
Example #34
def get_derivatives(xs, ys, fd=False):
    """
    Return the derivatives of y(x) at the points x.
    If scipy is available, a spline is generated to calculate the derivatives;
    if scipy is not available, the left and right slopes are calculated and, if both exist, averaged.
    Setting fd to True always forces the finite difference slopes.
    """
    try:
        if fd:
            raise SplineInputError('no spline wanted')
        if len(xs) < 4:
            er = SplineInputError('too few data points')
            raise er
        from scipy.interpolate import UnivariateSpline
        spline = UnivariateSpline(xs, ys)
        d = spline.derivative(1)(xs)
    except (ImportError, SplineInputError):
        d = []
        for n in range(len(xs)):
            m, left, right = 0, 0, 0
            if n > 0:
                left = (ys[n] - ys[n-1]) / (xs[n] - xs[n-1])
                m += 1
            if n < len(xs) - 1:
                right = (ys[n+1] - ys[n]) / (xs[n+1] - xs[n])
                m += 1
            # average of whichever one-sided slopes exist
            d.append((left + right) / m)
    return d
Example #35
def spl1d(pr, fs, kk=3, ss=.001):
    """Use scipy.interpolate.UnivariateSpline to fit the data and
    calculate velocity and acceleration from the spline

    Parameters:
    pr = (ntime x nmark x 3) raw data array
    fs = sampling rate
    kk = spline order (default is 3)
    ss = spline smoothing parameter (default is .001)

    Returns:
    out = dict that holds filtered position and calculated velocity
        and acceleration
    """

    ntime, nmark, ncoord = pr.shape
    dt = 1 / fs
    times = np.arange(ntime) * dt

    # iterate through each marker and coordinate, smooth, and calculate
    # velocities and accelerations
    pf, vf, af = pr.copy(), pr.copy(), pr.copy()
    for j in np.arange(nmark):
        for k in np.arange(ncoord):
            d = pr[:, j, k]
            spl = UnivariateSpline(times, d, k=kk, s=ss)
            pf[:, j, k] = spl(times)
            vf[:, j, k] = spl.derivative(1)(times)
            af[:, j, k] = spl.derivative(2)(times)

    out = {'p': pf, 'v': vf, 'a': af}
    return out
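
A minimal usage sketch with random-walk marker data (hypothetical values):

import numpy as np

pr = np.random.randn(100, 2, 3).cumsum(axis=0)  # 100 frames, 2 markers, xyz
out = spl1d(pr, fs=100.0)
print(out['p'].shape, out['v'].shape, out['a'].shape)  # all (100, 2, 3)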
Example #36
def dRdS1(S1, m_DM, cp_random, cn_random, Nevents=False, **kwargs):
    eff1, eff2 = np.loadtxt("../Swordfish_Xenon1T/Efficiency-1705.06655.txt",
                            unpack=True)
    efficiency = UnivariateSpline(eff1, eff2, ext="zeros", k=1, s=0)
    S1_vals, E_vals = np.loadtxt("../Swordfish_Xenon1T/S1vsER.txt",
                                 unpack=True)
    CalcER = UnivariateSpline(S1_vals, E_vals, k=4, s=0)
    dERdS1 = CalcER.derivative()
    ER_keV = CalcER(S1)
    prefactor = 0.475 * efficiency(ER_keV)

    def dRdE(ER_keV, m_x, cp, cn, **kwargs):
        #Load in the list of nuclear spins, atomic masses and mass fractions
        nuclei_Xe = [
            "Xe128", "Xe129", "Xe130", "Xe131", "Xe132", "Xe134", "Xe136"
        ]
        nuclei_list = np.loadtxt("Nuclei.txt", usecols=(0, ), dtype='string')
        frac_list = np.loadtxt("Nuclei.txt", usecols=(3, ))
        frac_vals = dict(zip(nuclei_list, frac_list))

        dRdE = np.zeros_like(ER_keV)
        for nuc in nuclei_Xe:
            dRdE += frac_vals[nuc] * DMU.dRdE_NREFT(ER_keV, m_x, cp, cn, nuc,
                                                    **kwargs)
        return dRdE

    dRdEXe = dRdE(ER_keV, m_DM, cp_random, cn_random, **kwargs)
    s = prefactor * dRdEXe * dERdS1(S1)

    if Nevents:
        return s, sum(s * s1width)
    else:
        return s
Example #37
def find_char_separators(img, num_chars):

    f = (img > 0).mean(axis=0)
    a, b = f.min(), f.max()
    f = (f - a) / (b - a)
    n, k = len(f), num_chars
    x = np.arange(0, n)

    # Initial guess
    s0 = np.linspace(0, n, k + 1)[1:-1]

    # Value boundaries for each delimiter.
    char_min_size = 15
    delimiter_margin = 8
    bounds = np.transpose(
        np.stack([
            np.maximum(s0 - delimiter_margin, 0),
            np.minimum(s0 + delimiter_margin, n - 1)
        ],
                 axis=0))

    # Now we move the delimiters to divide the chars better

    y_spl = UnivariateSpline(x, f, s=0, k=4)
    y_spl_df = y_spl.derivative(n=1)

    F = lambda s: np.sum(y_spl(s))
    dF = lambda s: y_spl_df(s) / (k - 1)
    jac = lambda s, *args: dF(s)

    result = minimize(F, s0, jac=jac, method='SLSQP', bounds=bounds)
    s = np.round(result.x)

    separators = s.astype(np.uint16)
    return separators
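
A minimal usage sketch with a synthetic binarized line image (hypothetical layout; scipy.optimize's minimize is assumed imported at module level, as the function requires):

import numpy as np

img = np.zeros((32, 120), dtype=np.uint8)
for c in range(4):  # four evenly spaced "characters" (hypothetical)
    img[8:24, 10 + 30 * c : 25 + 30 * c] = 255
seps = find_char_separators(img, num_chars=4)  # three separator columns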
Example #38
	def getspline_Sold(self):
		"""Cubic spline interpolation of entropy and convective velocity.
		"""
		want = self.mass < max(self.mass)*self.mass_cut
		S_old = UnivariateSpline(self.mass[want], self.Sgas[want], k=self.spline_k, s=self.spline_s, ext=self.spline_ext)
		dS_old = S_old.derivative()
		vconv_Sold = UnivariateSpline(self.mass[want], self.vconv[want], k=self.spline_k, s=self.spline_s, ext=self.spline_ext)
		return [S_old, dS_old, vconv_Sold] 
Example #39
    def adiabat_steeper(self):
        '''
        Checks whether the adiabat is steeper than the liquidus. Ultimately this
        should be part of determining the adiabat in the snow regime.
        '''

        p_oc = self.pressure[self.outer_core()]
        t_oc = self.temperature[self.outer_core()]

        t_func = UnivariateSpline(p_oc,t_oc)

        return t_func.derivative()(p_oc) > self.liquidus.derivative()(p_oc)
Example #40
def find_ending_point(X, y, start_point, use_alternative = False):
    """
    ================
    INPUT: array of X data (temperature), array of Y data (heat flow), int
    OUTPUT: int
    ================

    Finds the end point of the reaction to estimate enthalpy.

    - NOTE: use_alternative: this variable decides which "end point finder" you
    want to use. if you want to take the post-peak max point, select True. If
    you want to use the end-point derived from the second derivative, select
    False.

    In order, this function:

    1. Isolates data after the peak

    2. Approximates the data with a spline.

    3. Finds the point where the first derivative is closest to zero and nearest
    to the end of the reaction. This is the end point.
        ## note, you could also set the end point to be when the function
        ## has returned closest to the gradient at the start_point

    4. If this point is < post-peak max point, choose the maximum point.

    """
    reaction_x = X[start_point:] # x's after the start_point
    reaction_y = y[start_point:] # y's after the start_point
    index_of_peak = start_point + reaction_y.argmin()


    if use_alternative:
        end_point_id = index_of_peak + y[index_of_peak : ].argmax()
    else:
        pp_X, pp_y = X[index_of_peak: ], y[index_of_peak: ]
        spline = UnivariateSpline(pp_X, pp_y, k = 5)
        second_derivative = spline.derivative(2)(pp_X)
        relevant_end_points = np.abs(second_derivative)
        end_point = np.max(relevant_end_points.argsort()[:50]) # get the largest of all the zero points
        end_point_id = index_of_peak + end_point

        # just in case there are larger points between peak & end point
        if np.max(y[index_of_peak : end_point_id]) > y[end_point_id]:
            index = y[index_of_peak : end_point_id].argmax()
            end_point_id = index_of_peak + index

    return end_point_id
Example #41
    def get_splines(self, floor=25 * 1e-9, ceil=25 * 1e-6):
        from scipy.interpolate import UnivariateSpline

        mx, my = zip(*self.scatter_data)
        yy, xx = zip(*lmseq(zip(my, mx)))  # filter with LMS
        spl = UnivariateSpline(xx, yy)
        spld = spl.derivative()

        def spl_derivative(x):
            s = abs(spld(x))
            s[s < floor] = floor
            s[s > ceil] = ceil
            return s

        self.spl = spl
        self.spld = spl_derivative
Example #42
    def softening_scale(self,mq=70,auto=True,r=None,dens=None,mass=None,kernel='Gadget',type=None):
        """
        Calculate the optimal softening scale following Dehnen, 2012 eps=cost*a(dens)*N^-0.2. The output will be in unit
        of r. If Auto==True, r and dens will be not considered.
        :param mq: Mass fraction where calcualte the softening_scale.
        :param auto: If True calculate the r-dens gride using the grid and Profile class
                wit 512 points from 0.001*rq to 10*rq.
        :param r: Array with the sampling radii.
        :param dens: Array with the density at the sampling radii. Its unity need to be the same of mass/r^3
        :param mass: Total mass of the system, the method will calculate in automatic the fraction mq/100*mass
        :param kernel: Kernel to use. Different kernel have different constant C. The implemented kernels are:
                        -spline: generic cubic spline (as in Dehnen, 2012)
                        -Gadget: to calculate the softening_scale using the spline kernel of Gadget2
        :return: the softening scale.
        """
        opt_dict={'Gadget':0.698352, 'spline':0.977693}
        rq=self.qmass(mq,type=type)

        if auto==True:
            prof=Profile(self.p,Ngrid=512,xmin=0.01*rq,xmax=10*rq,kind='lin',type=type)
            r=prof.grid.gx
            dens=prof.dens



        dens_spline=UnivariateSpline(r, dens, k=1,s=0,ext=1)
        der_dens=dens_spline.derivative()




        derdens=der_dens(r)
        ap=UnivariateSpline(r, r*r*dens*derdens*derdens, k=1,s=0,ext=1)
        bp=UnivariateSpline(r, r*r*dens*dens, k=1,s=0,ext=1)

        B=bp.integral(0,rq)
        A=ap.integral(0,rq)/(mass*mq/100.)
        C=(B/(A))**(1/5)

        cost=opt_dict[kernel]
        N=len(self.p.Id)**(1/5)

        return C*cost/N
Example #43
def GroundDataTrendingPlotJSON(site,crack,end = None):
    
    ##### Federico et al. constants######    
    slope = 1.49905955613175
    intercept = -3.00263765777028
    t_crit = 4.53047399738543
    var_v_log = 215.515369339559
    v_log_mean = 2.232839766
    sum_res_square = 49.8880017417971
    n = 30.    
    ####################################
    
    if end == None:
        end = datetime.now()
    df = get_latest_ground_df(site,end)
    df['site_id'] = map(lambda x: x.lower(),df['site_id'])
    df['crack_id'] = map(lambda x: x.title(),df['crack_id'])
    end = pd.to_datetime(end)    
    
    df = df[df.crack_id == crack.title()]
    
    cur_t = (df.timestamp.values - df.timestamp.values[0])/np.timedelta64(1,'D')
    cur_x = df.meas.values
    cur_ts = df.timestamp.values
    cur_ts = pd.to_datetime(cur_ts)
    
    ##### Interpolate the last 10 data points
    _,var = moving_average(cur_x)
    w = 1/np.sqrt(var)
    if 0 in var:
        w = None

    
    t_n = np.linspace(cur_t[0],cur_t[-1],20)
    ts_n = pd.to_datetime(cur_ts[0]) + np.array(map(lambda x: timedelta(days = x), t_n))
    try:
        sp = UnivariateSpline(cur_t,cur_x,w=w)
        x_n = sp(t_n)
        v_s = abs(sp.derivative(n=1)(cur_t))
        a_s = abs(sp.derivative(n=2)(cur_t))
        
        v_t = np.linspace(min(v_s),max(v_s),num = 20)
        unc = t_crit*np.sqrt(1/(n-2)*sum_res_square*(1/n + (np.log(v_t) - v_log_mean)**2/var_v_log))
        
        a_t = slope * np.log(v_t) + intercept
        a_tu = a_t + unc
        a_td = a_t - unc
        
                
        
    except:
        print "Interpolation Error for site {} crack {} at timestamp ".format(site,crack,end)
        t_n = np.linspace(cur_t[0],cur_t[-1],len(cur_t))
        ts_n = pd.to_datetime(cur_ts[0]) + np.array(map(lambda x: timedelta(days = x), t_n))
        x_n = cur_x
        v_s = np.zeros(len(x_n))
        a_s = np.zeros(len(x_n))
        v_t = np.zeros(20)
        a_t = np.zeros(20)
        a_tu = np.zeros(20)
        a_td = np.zeros(20)
        
    #logarithmic axes
    v_s = np.log(v_s)
    v_s = v_s[~np.logical_or(np.isnan(v_s),np.isinf(v_s))]
    
    a_s = np.log(a_s)
    a_s = a_s[~np.logical_or(np.isnan(a_s),np.isinf(a_s))]
    a_t = a_t[~np.logical_or(np.isnan(a_t),np.isinf(a_t))]
    a_tu = a_tu[~np.logical_or(np.isnan(a_tu),np.isinf(a_tu))]
    a_td = a_td[~np.logical_or(np.isnan(a_td),np.isinf(a_td))]
    v_t = np.log(v_t)
    v_t = v_t[~np.logical_or(np.isnan(v_t),np.isinf(v_t))]
    
    
    
    ts_n = map(lambda x: mytime.mktime(x.timetuple())*1000, ts_n)
    cur_ts = map(lambda x: mytime.mktime(x.timetuple())*1000, cur_ts)
    to_json = {'av' : {'v':list(v_s),'a':list(a_s),'v_threshold':list(v_t),'a_threshold_line':list(a_t),'a_threshold_up':list(a_tu),'a_threshold_down':list(a_td)},'dvt':{'gnd':{'ts':list(cur_ts),'surfdisp':list(cur_x)},'interp':{'ts':list(ts_n),'surfdisp':list(x_n)}}}
    print json.dumps(to_json)
Example #44
def check_trending(df,out_folder,plot = False):
    ##### Get the data from the crack dataframe    
    cur_t = (df.timestamp.values - df.timestamp.values[0])/np.timedelta64(1,'D')
    cur_x = df.meas.values
    
    ##### Interpolate the last 10 data points
    _,var = moving_average(cur_x)
    sp = UnivariateSpline(cur_t,cur_x,w=1/np.sqrt(var))
    
    t_n = np.linspace(cur_t[0],cur_t[-1],100)
    x_n = sp(t_n)
    v_n = sp.derivative(n=1)(t_n)
    a_n = sp.derivative(n=2)(t_n)
    
    x_s = sp(cur_t)
    v_s = abs(sp.derivative(n=1)(cur_t))
    a_s = abs(sp.derivative(n=2)(cur_t))
    
    SS_res,r2,RMSE = goodness_of_fit(cur_t,cur_x,x_s)
    text = 'SSE = {} \nR-square = {} \nRMSE = {}'.format(round(SS_res,4),round(r2,4),round(RMSE,4))

    
    ##### Federico et al. constants    
    slope = 1.49905955613175
    intercept = -3.00263765777028
    t_crit = 4.53047399738543
    var_v_log = 215.515369339559
    v_log_mean = 2.232839766
    sum_res_square = 49.8880017417971
    n = 30.
    
    ##### Trending Alert Evaluation
    cur_v = v_s[-1]
    cur_a = a_s[-1]
    delta = t_crit*np.sqrt(1/(n-2)*sum_res_square*(1/n + (np.log(cur_v) - v_log_mean)**2/var_v_log))
    
    log_a_t = slope * np.log(cur_v) + intercept
    log_a_t_up = log_a_t + delta
    log_a_t_down = log_a_t - delta
    
    a_t_up = np.e**log_a_t_up
    a_t_down = np.e**log_a_t_down
    
    ##### Plot points in the confidence interval envelope
    if plot == True:
        ##### Plotting Colors
        tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),    
                     (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),    
                     (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),    
                     (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),    
                     (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
        
        for i in range(len(tableau20)):    
            r, g, b = tableau20[i]    
            tableau20[i] = (r / 255., g / 255., b / 255.)
        #################
        
        v_theo = np.linspace(min(v_s),max(v_s),10000)
        uncertainty = t_crit*np.sqrt(1/(n-2)*sum_res_square*(1/n + (np.log(v_theo) - v_log_mean)**2/var_v_log))
        
        log_a_theo = slope * np.log(v_theo) + intercept
        log_a_theo_up = log_a_theo + uncertainty
        log_a_theo_down = log_a_theo - uncertainty
        
        a_theo = np.e**log_a_theo
        a_theo_up = np.e**log_a_theo_up
        a_theo_down = np.e**log_a_theo_down
        
        fig = plt.figure()
        fig.set_size_inches(15,8)
        fig.suptitle('{} Crack {} {}'.format(str(df.site_id.values[0]).upper(),str(df.crack_id.values[0]).title(),pd.to_datetime(df.timestamp.values[-1]).strftime("%b %d, %Y %H:%M")))
        ax1 = fig.add_subplot(121)
        ax1.get_xaxis().tick_bottom()    
        ax1.get_yaxis().tick_left()
        ax1.grid()
        l1 = ax1.plot(v_theo,a_theo,c = tableau20[0],label = 'Fukuzono (1985)')
        ax1.plot(v_theo,a_theo_up,'--',c = tableau20[0])
        ax1.plot(v_theo,a_theo_down,'--', c = tableau20[0])
        ax1.plot(v_s,a_s,c = tableau20[10])
        l2 = ax1.plot(v_s[:-1],a_s[:-1],'o',c = tableau20[19],label = 'Previous')
        l3 = ax1.plot(v_s[-1],a_s[-1],'*',c = tableau20[6],label = 'Current')

        
        lns = l1 + l2 + l3
        labs = [l.get_label() for l in lns]
        ax1.legend(lns,labs,loc = 'upper left',fancybox = True, framealpha = 0.5)        
        
        ax1.set_xlabel('velocity (cm/day)')
        ax1.set_ylabel('acceleration (cm/day$^2$)')
        ax1.set_xscale('log')
        ax1.set_yscale('log')
        
        ax2 = fig.add_subplot(222)
        ax2.grid()
        ax2.plot(cur_t,cur_x,'.',c = tableau20[0],label = 'Data')
        ax2.plot(t_n,x_n,c = tableau20[12],label = 'Interpolation')
        cur_range = max(list(cur_x) + list(x_n)) - min(list(cur_x) + list(x_n))
        ylim_max = max(list(cur_x) + list(x_n)) + cur_range*0.05
        ylim_min = min(list(cur_x) + list(x_n)) - cur_range*0.05
        ax2.set_ylim([ylim_min,ylim_max])
        ax2.legend(loc = 'upper left',fancybox = True, framealpha = 0.5)
        ax2.set_ylabel('disp (meters)')
        
        ax3 = fig.add_subplot(224, sharex = ax2)
        ax3.grid()
        ax3.plot(t_n,v_n, color = tableau20[4],label = 'Velocity')
        l3_vel, l2_vel = velocity_alert_values(cur_t[-2]-cur_t[-1])
        ylim_values = ax3.get_ylim()
        ax3.plot(ax3.get_xlim(),[l3_vel,l3_vel],'--',lw = 2., color = tableau20[2],label = 'L3 Velocity')
        ax3.plot(ax3.get_xlim(),[-l3_vel,-l3_vel],'--',lw = 2., color = tableau20[2])
        ax3.plot(ax3.get_xlim(),[l2_vel,l2_vel],'--',lw = 2., color = tableau20[16],label = 'L2 Velocity')
        ax3.plot(ax3.get_xlim(),[-l2_vel,-l2_vel],'--',lw = 2., color = tableau20[16])
        ax3.set_ylim(ylim_values)
        ax3.set_ylabel('velocity (cm/day)')
        ax3.set_xlabel('time (days)')
        ax3.legend(loc = 'upper left',fancybox = True, framealpha = 0.5)
        
        ax4 = ax3.twinx()
        ax4.plot(t_n,a_n,'-r',label = 'Acceleration')
        ax4.set_ylabel('acceleration (m/day$^2$)')
        ax4.legend(loc = 'upper right',fancybox = True, framealpha = 0.5)

        tsn = pd.to_datetime(df.timestamp.values[-1]).strftime("%Y-%m-%d_%H-%M-%S")
        out_filename = out_folder + '{} {} {}'.format(tsn,str(df.site_id.values[0]),str(df.crack_id.values[0]))
        print out_filename        
        print text
        plt.savefig(out_filename,facecolor='w', edgecolor='w',orientation='landscape',mode='w',bbox_inches = 'tight')
        
        print cur_a, a_t_up, a_t_down
    if (cur_a <= a_t_up and cur_a >= a_t_down):
        #Reject alert if v and a have opposite signs
        if v_n[-1]*a_n[-1] >= 0:
            return 'Legit'
        else:
            return 'Reject'
    else:
        return 'Reject'    
Example #45
class corePlanet(cm_Planet):
    def __init__(self,  masses, compositions, temperatures, liquidus=None,materials=None,**kwargs):
        """
        Parameters
        ----------
        masses: list of layer masses ordered in to out

        compositions: list of burnman.Composite or burnman.Material describing 
            the material of each layer

        temperatures: list of temperatures at the upper boundary of each layer

        Optional
        ----------
        liquidus: A function describing the icb temperature (for a given 
        composition).

        methods: list of burnman EOS methods to be used for each material
            in compositions, default: 'slb'
        """
        super(corePlanet,self).__init__(masses, compositions, temperatures,**kwargs)

        # liquidus model (optional; instantiate only if provided)
        self.liquidus_model = liquidus() if liquidus is not None else None

        self.materials = materials

        # Make sure the number of layers is consistent with having a growing core
        assert self.Nlayer >= 3

    def set_liquidus_model(self,liquidus):
        self.liquidus_model = liquidus()

    def set_liquidus(self):
        '''
        Set the liquidus for the current liquid composition as a UnivariateSpline.
        '''
        liq_arr = np.array([ self.liquidus_model.T_SP(self.w_l[0],p) for p in self.pressure])
        self.liquidus = UnivariateSpline(self.pressure[::-1],liq_arr[::-1])

    def find_icb_temp(self,idx=0):
        '''
        Use an FeS liquidus model to find a thermodynamically consistent
        temperature for the icb. Assumes that the 0th index refers to the
        inner core and inner core boundary.
        '''
        assert self.liquidus_model is not None
        self.set_liquidus()

        m_inner = self.massBelowBoundary[idx]
        p_func = UnivariateSpline(self.int_mass, self.pressure) 
        p_icb = p_func(m_inner)
        t_icb = self.liquidus(p_icb)

#         print 'liquidus:',p_icb,t_icb
        self.temperature[idx] = t_icb
        return t_icb
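        # The mass -> pressure lookup above is a simple 1-D spline inversion;
        # a standalone sketch (arrays below are illustrative only):
        #   int_mass = np.linspace(0.0, 2.0e24, 100)   # kg, increasing
        #   pressure = np.linspace(3.6e11, 0.0, 100)   # Pa, decreasing outwards
        #   p_func = UnivariateSpline(int_mass, pressure)
        #   p_icb = p_func(1.0e23)                     # pressure at enclosed mass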
        
    def compute_temperature(self,inner_isotherm=False,outer_isotherm=False,
            mantle_isotherm=False):
        '''
        Calculate a core adiabat consistent with the size of the inner core
        using the model FeS liquidus.

        Defaults to calculating adiabatic profiles, starting at the icb for
        both inner and outer core.
        '''

        t_icb = self.find_icb_temp()

        # compute inner_core
        if not inner_isotherm:
            self.compute_adiabat_layer(0,t_icb)
        else:
            self.compute_isotherm_layer(0,t_icb)
        
        if not outer_isotherm:
            self.compute_adiabat_layer(1,t_icb,fromLowerBound=True)
        else:
            self.compute_isotherm_layer(1,t_icb,fromLowerBound=True)

        last_temp = self.temperature[self.get_layer(1)][-1]
        self.boundary_temperatures[0] = t_icb
        self.boundary_temperatures[1] = last_temp

        if not mantle_isotherm:
            for i in range(2,self.Nlayer):
                self.compute_adiabat_layer(i,last_temp,fromLowerBound=True)
                last_temp = self.temperature[self.get_layer(i) ][-1]
                self.boundary_temperatures[i] = last_temp

        else:
            for i in range(2,self.Nlayer):
                self.compute_isotherm_layer(i,last_temp,fromLowerBound=True)
                last_temp = self.temperature[self.get_layer(i) ][-1]
                self.boundary_temperatures[i] = last_temp

    def inner_core(self):
        return self.get_layer(0)
    def outer_core(self):
        return self.get_layer(1)
    def mantle(self):
        # logical complement of the core (get_layer returns boolean masks)
        return ~(self.inner_core() | self.outer_core())
    def core(self):
        return self.inner_core() | self.outer_core()

    def icb(self):
        x = len(self.int_mass) 
#         layer = np.linspace(0,x-1,x)[self.inner_core()]
        layer = np.arange(x)[self.inner_core()]
        return layer[-1]
        
    def cmb(self):
        x = len(self.int_mass) 
#         layer = np.linspace(0,x-1,x)[self.outer_core()]
        layer = np.arange(x)[self.outer_core()]
        return layer[-1]

    def print_state(self,i=150):
        '''
        For debugging
        '''
        print(self.int_mass[i], self.radius[i], self.density[i], self.gravity[i],
              self.pressure[i], self.temperature[i])

    def gravitational_energy_over_r(self):
        '''
        Calculate the Eg per change in core radius.
        
        Delta Eg = int ( [ (rho_l - rho_s)*g*4pi*r^2 ] dr )

        Returns the bracketed parameter.
        '''

        for c in self.compositions:
            assert( isinstance(c,burnman.Material) ), "Expected burnman.Material object"

        # Parameters at the inner core boundary
        idx_icb = self.icb()
        p_icb = self.pressure[idx_icb]
        rho_icb = self.density[idx_icb]
        g_icb = self.gravity[idx_icb]
        r_icb = self.boundaries[0]
        t_icb = self.temperature[idx_icb]

        # is this necessary? or is taking rho_ic[-1] - rho_oc[0] sufficient?
        rho_s,rho_l = density_coexist(self.w_l,[self.DS,self.DSi],p_icb,t_icb,\
                self.materials[0],self.materials[1])
#         print rho_s - rho_l

        # return dEg / dr
        return (rho_s - rho_l) * g_icb * r_icb * 4. * np.pi * r_icb**2.

    def specific_gravitational_energy(self):
        '''
        Calculate the gravitational energy released per unit mass at the icb.
        '''
        rho = self.density[self.inner_core()][-1]
        r = self.boundaries[0]
        dm_dr = rho * 4. * np.pi * r**2
        return self.gravitational_energy_over_r() / dm_dr

    def light_element_release_over_r(self):
        '''
        Calculate the mass of light element released per change in core radius. (kg)
        '''
        rho = self.density[self.inner_core()][-1]
        r = self.boundaries[0]
        dm_dr = rho * 4. * np.pi * r**2
        return np.sum(self.w_l[:-1]) * dm_dr

    def latent_heat_over_r(self):
        '''
        Compute latent heat released per growth of inner core radius
        '''
        p = self.pressure[self.icb()]
        t = self.temperature[self.icb()]
        rho = self.density[self.inner_core()][-1]
        r = self.boundaries[0]
        dm_dr = rho * 4. * np.pi * r**2
        return iron_latent_heat(p,t,self.w_l) * dm_dr

    def specific_latent_heat(self):
        '''
        Compute latent heat per change in mass
        '''
        p = self.pressure[self.icb()]
        t = self.temperature[self.icb()]
        return iron_latent_heat(p,t,self.w_l)


    def detect_snow(self):
        '''
        Test whether points in the liquid outer core are above the liquidus.

        This is a very simple check and doesn't consider how liquidus should
        perturb the adiabat
        '''
        p_oc = self.pressure[self.outer_core()]
        t_oc = self.temperature[self.outer_core()]

        t_liq = self.liquidus(p_oc)
        
        # Boolean. True if temperature is below the liquidus (snowing)
        snow = t_liq > t_oc
        self.has_snow = snow.any()

        return snow

    def adiabat_steeper(self):
        '''
        Checks whether the adiabat is steeper than the liquidus. Ultimately this
        should be part of determining the adiabat in the snow regime.
        '''

        p_oc = self.pressure[self.outer_core()]
        t_oc = self.temperature[self.outer_core()]

        t_func = UnivariateSpline(p_oc,t_oc)

        return t_func.derivative()(p_oc) > self.liquidus.derivative()(p_oc)
        


    def integrate(self,n_slices,P0,n_iter=5,profile_type='adiabatic',plot=False,
            verbose=True):
        """
        Iteratively determine the pressure, density temperature and gravity profiles
        for the planet as a function of radius within a planet, with a consistent
        temperature and pressure for the inner core boundary.

        Sets pressure, temperature, radius, boundaries, gravity, and density, for 
        given profile in integrated mass (int_mass).

        Also sets differences for quantities between the last two iterations 
        for convergence analysis.

        Parameters
        ----------
        n_slices : number of steps in integrated mass

        P0 : initial guess for central pressure in Pa

        Optional
        ----------
        n_iter : number of iterations (default: 5)

        profile_type : temperature profile type ('adiabatic' or 'isothermal',
            default: 'adiabatic')

        plot : create plot of density, gravity, pressure and temperature as a 
            function of radius (default: False)
        
        verbose : (default: True)
        """
        if verbose:
            self.display_input(n_slices,P0,n_iter,profile_type)

        self.int_mass = np.linspace(0.,self.massBelowBoundary[-1], n_slices)
        self.pressure = np.linspace(P0, 0.0, n_slices) # initial guess at pressure profile
        # take isothermal starting T profile
        self.temperature = np.ones_like(self.pressure)*self.boundary_temperatures[-1]

        self.radius = np.zeros_like(self.int_mass)
        self.boundaries = np.zeros_like(self.massBelowBoundary)

        self.gravity = np.zeros_like(self.int_mass)

        # eos parameters
        self.density = np.zeros_like(self.int_mass)
        self.vp = np.zeros_like(self.int_mass)
        self.vs = np.zeros_like(self.int_mass)
        self.vphi = np.zeros_like(self.int_mass)
        self.K = np.zeros_like(self.int_mass)
        self.G = np.zeros_like(self.int_mass)

        if plot:
            # (plt.hold was removed from matplotlib; repeated plot calls
            # overlay by default, so no hold(True) is needed)
            ax1 = plt.subplot(141); ax1.set_title('rho')
            ax2 = plt.subplot(142); ax2.set_title('g')
            ax3 = plt.subplot(143); ax3.set_title('P')
            ax4 = plt.subplot(144); ax4.set_title('T')

        for i in range(n_iter): 

            # Keep track of the previous iteration for an idea of the uncertainty
            self.last_state = np.vstack((self.int_mass.copy(),self.radius.copy(),
                self.pressure.copy(), self.temperature.copy(),self.gravity.copy(),
                self.density.copy()) )
            self.last_boundaries = self.boundaries.copy()
            self.last_icb_temp = self.boundary_temperatures[0]

            if verbose: print('Initial'); self.print_state()

            # Calculate temperature and density before finding radii.
            if verbose: print('Iteration #', i + 1)

            # calculate temperature profile with consistent ICB temp
            if profile_type == 'adiabatic':
                self.compute_temperature()
            elif profile_type == 'isothermal':
                self.compute_temperature(inner_isotherm=False,outer_isotherm=False,
                        mantle_isotherm=False)
            else:
                raise ValueError('Invalid profile_type: ' + profile_type)

            if verbose: print('compute_temperature'); self.print_state()

            self.evaluate_eos()
            if verbose: print('evaluate_eos'); self.print_state()
            
            # find radii from the calculated density profile.
            self.compute_radii()
            if verbose: print('compute_radii'); self.print_state()

            self.compute_boundaries()
            if verbose: print('compute_boundaries'); self.print_state()

            # compute gravity and pressure from radii
            self.compute_gravity()
            if verbose: print('compute_gravity'); self.print_state()

            self.compute_pressure()
            if verbose: print('compute_pressure'); self.print_state()

            if plot:
                ax1.plot(self.radius, self.density)
                ax2.plot(self.radius, self.gravity)
                ax3.plot(self.radius, self.pressure)
                ax4.plot(self.radius, self.temperature)
        
        # compare differences between last two iterations
        present_state = np.vstack((self.int_mass,self.radius,self.pressure,
                    self.temperature,self.gravity,self.density))

        self.diff_state = present_state - self.last_state
        self.diff_mean = np.mean(self.diff_state,axis=1)
        self.diff_max = max_magnitude(self.diff_state,axis=1)
        self.diff_bounds = self.boundaries - self.last_boundaries
        self.diff_icb_temp = self.boundary_temperatures[0] - self.last_icb_temp

        # Check if snow encountered
        self.detect_snow()
        self.adiabat_steeper()

        # compute quantities for the energy/entropy budget

        # print diagnostics for the last iteration
        if verbose:
            print('Change during last iteration:')
            print('mean: ', self.diff_mean)
            print('max: ', self.diff_max)
            print('boundaries: ', self.diff_bounds)
            print('ICB temp: ', self.diff_icb_temp)

        if plot:
            plt.show()
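# A hypothetical usage sketch for corePlanet (the masses, materials, and
# liquidus callable below are placeholders; the real objects come from burnman
# and an FeS liquidus model not shown in this excerpt):
#
#   planet = corePlanet([m_ic, m_oc, m_mantle],
#                       [fe_solid, fe_liquid, rock],
#                       [t_icb_guess, t_cmb_guess, t_surface],
#                       liquidus=FeSLiquidus, materials=[fe_solid, fe_liquid])
#   planet.integrate(n_slices=300, P0=3.6e11, n_iter=5,
#                    profile_type='adiabatic', verbose=False)
#   print(planet.boundary_temperatures[0])   # converged ICB temperature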
def PlotTrendingAnalysis(site,marker,end):
    
    monitoring_out_path = output_file_path(site,'surficial',end = pd.to_datetime(end))['monitoring_output']
    print_out_path = monitoring_out_path
    print_out_path2 = monitoring_out_path + 'TrendingPlots/'
    for path in [print_out_path,print_out_path2]:
        if not os.path.exists(path):
            os.makedirs(path)
    
    #### Get marker data
    df = get_latest_ground_df2(site,end)
    df = df.loc[df.crack_id == marker]

    ##### Get the data from the crack dataframe    
    cur_t = (df.timestamp.values - df.timestamp.values[0])/np.timedelta64(1,'D')
    cur_x = df.meas.values
    
    ##### Interpolate the last 10 data points
    _,var = moving_average(cur_x)
    sp = UnivariateSpline(cur_t,cur_x,w=1/np.sqrt(var))
    
    t_n = np.linspace(cur_t[0],cur_t[-1],1000)
    x_n = sp(t_n)
    v_n = sp.derivative(n=1)(t_n)
    a_n = sp.derivative(n=2)(t_n)
    
    v_s = abs(sp.derivative(n=1)(cur_t))
    a_s = abs(sp.derivative(n=2)(cur_t))
    
    
    ##### Federico et al. constants    
    slope = 1.49905955613175
    intercept = -3.00263765777028
    t_crit = 4.53047399738543
    var_v_log = 215.515369339559
    v_log_mean = 2.232839766
    sum_res_square = 49.8880017417971
    n = 30.
            
    ##### Plotting Colors
    tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),    
                 (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),    
                 (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),    
                 (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),    
                 (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
    
    for i in range(len(tableau20)):    
        r, g, b = tableau20[i]    
        tableau20[i] = (r / 255., g / 255., b / 255.)
    #################
    
    v_theo = np.linspace(min(v_s),max(v_s),10000)
    uncertainty = t_crit*np.sqrt(1/(n-2)*sum_res_square*(1/n + (np.log(v_theo) - v_log_mean)**2/var_v_log))
    
    log_a_theo = slope * np.log(v_theo) + intercept
    log_a_theo_up = log_a_theo + uncertainty
    log_a_theo_down = log_a_theo - uncertainty
    
    a_theo = np.e**log_a_theo
    a_theo_up = np.e**log_a_theo_up
    a_theo_down = np.e**log_a_theo_down
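    # The trend line comes from a linear regression in log space,
    # log(a) = slope*log(v) + intercept, and the dashed band is the Student-t
    # confidence interval for the fitted line:
    #   t_crit * sqrt( s^2 * (1/n + (log v - v_log_mean)^2 / var_v_log) ),
    # with s^2 = sum_res_square / (n - 2) taken from the Federico et al. fit.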
    
    fig = plt.figure()
    fig.set_size_inches(15,8)
    fig.suptitle('{} Marker {} {}'.format(str(df.site_id.values[0]).upper(),str(df.crack_id.values[0]).title(),pd.to_datetime(df.timestamp.values[-1]).strftime("%b %d, %Y %H:%M")))
    ax1 = fig.add_subplot(121)
    ax1.get_xaxis().tick_bottom()    
    ax1.get_yaxis().tick_left()
    ax1.grid()
    l1 = ax1.plot(v_theo,a_theo,c = tableau20[0],label = 'Fukuzono (1985)')
    ax1.plot(v_theo,a_theo_up,'--',c = tableau20[0])
    ax1.plot(v_theo,a_theo_down,'--', c = tableau20[0])
    ax1.plot(v_s,a_s,c = tableau20[10])
    l2 = ax1.plot(v_s[:-1],a_s[:-1],'o',c = tableau20[19],label = 'Previous')
    l3 = ax1.plot(v_s[-1],a_s[-1],'*',c = tableau20[6],label = 'Current')

    
    lns = l1 + l2 + l3
    labs = [l.get_label() for l in lns]
    ax1.legend(lns,labs,loc = 'upper left',fancybox = True, framealpha = 0.5)        
    
    ax1.set_xlabel('velocity (cm/day)')
    ax1.set_ylabel('acceleration (cm/day$^2$)')
    ax1.set_xscale('log')
    ax1.set_yscale('log')
    
    ax2 = fig.add_subplot(222)
    ax2.grid()
    ax2.plot(cur_t,cur_x,'.',c = tableau20[0],label = 'Data')
    ax2.plot(t_n,x_n,c = tableau20[12],label = 'Interpolation')
    ax2.legend(loc = 'upper left',fancybox = True, framealpha = 0.5)
    ax2.set_ylabel('disp (meters)')
    
    ax3 = fig.add_subplot(224, sharex = ax2)
    ax3.grid()
    ax3.plot(t_n,v_n, color = tableau20[4],label = 'Velocity')
    ax3.set_ylabel('velocity (cm/day)')
    ax3.set_xlabel('time (days)')
    ax3.legend(loc = 'upper left',fancybox = True, framealpha = 0.5)
    
    ax4 = ax3.twinx()
    ax4.plot(t_n,a_n,'-r',label = 'Acceleration')
    ax4.set_ylabel('acceleration (m/day$^2$)')
    ax4.legend(loc = 'upper right',fancybox = True, framealpha = 0.5)

    tsn = pd.to_datetime(df.timestamp.values[-1]).strftime("%Y-%m-%d_%H-%M-%S")
    out_filename = print_out_path2 + '{} {} {}'.format(tsn,str(df.site_id.values[0]),str(df.crack_id.values[0]))
    plt.savefig(out_filename, facecolor='w', edgecolor='w', orientation='landscape', bbox_inches='tight')
Example #47
0
def get_parameter_limits(xval, loglike, ul_confidence=0.95, tol=1E-3):
    """Compute upper/lower limits, peak position, and 1-sigma errors
    from a 1-D likelihood function.  This function uses the
    delta-loglikelihood method to evaluate parameter limits by
    searching for the point at which the change in the log-likelihood
    value with respect to the maximum equals a specific value.  A
    parabolic spline fit to the log-likelihood values is used to
    improve the accuracy of the calculation.

    Parameters
    ----------

    xval : `~numpy.ndarray`
       Array of parameter values.

    loglike : `~numpy.ndarray`
       Array of log-likelihood values.

    ul_confidence : float
       Confidence level to use for limit calculation.

    tol : float
       Tolerance parameter for spline.

    """

    deltalnl = onesided_cl_to_dlnl(ul_confidence)

    # EAC FIXME, added try block here b/c sometimes xval is np.nan
    try:
        spline = UnivariateSpline(xval, loglike, k=2, s=tol)
    except Exception:
        print("Failed to create spline: ", xval, loglike)
        return {'x0': np.nan, 'ul': np.nan, 'll': np.nan,
                'err_lo': np.nan, 'err_hi': np.nan, 'err': np.nan,
                'lnlmax': np.nan}
    # m = np.abs(loglike[1:] - loglike[:-1]) > delta_tol
    # xval = np.concatenate((xval[:1],xval[1:][m]))
    # loglike = np.concatenate((loglike[:1],loglike[1:][m]))
    # spline = InterpolatedUnivariateSpline(xval, loglike, k=2)

    sd = spline.derivative()

    imax = np.argmax(loglike)
    ilo = max(imax - 1, 0)
    ihi = min(imax + 1, len(xval) - 1)

    # Find the peak
    x0 = xval[imax]

    # Refine the peak position
    if np.sign(sd(xval[ilo])) != np.sign(sd(xval[ihi])):
        x0 = find_function_root(sd, xval[ilo], xval[ihi])

    lnlmax = float(spline(x0))

    fn = lambda t: spline(t) - lnlmax
    fn_val = fn(xval)
    if np.any(fn_val[imax:] < -deltalnl):
        xhi = xval[imax:][fn_val[imax:] < -deltalnl][0]
    else:
        xhi = xval[-1]

    if np.any(fn_val[:imax] < -deltalnl):
        xlo = xval[:imax][fn_val[:imax] < -deltalnl][-1]
    else:
        xlo = xval[0]

    ul = find_function_root(fn, x0, xhi, deltalnl)
    ll = find_function_root(fn, x0, xlo, deltalnl)
    err_lo = np.abs(x0 - find_function_root(fn, x0, xlo, 0.5))
    err_hi = np.abs(x0 - find_function_root(fn, x0, xhi, 0.5))

    if np.isfinite(err_lo):
        err = 0.5 * (err_lo + err_hi)
    else:
        err = err_hi

    o = {'x0': x0, 'ul': ul, 'll': ll,
         'err_lo': err_lo, 'err_hi': err_hi, 'err': err,
         'lnlmax': lnlmax}
    return o
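# A quick sanity check (sketch; assumes the module helpers onesided_cl_to_dlnl
# and find_function_root, which are not shown in this excerpt): for a Gaussian
# log-likelihood lnL = -0.5*((x - x0)/sigma)**2 the routine should recover x0,
# err ~ sigma, and a 95% upper limit near x0 + 1.64*sigma.
#
#   xval = np.linspace(0.0, 2.0, 51)
#   loglike = -0.5 * ((xval - 1.0) / 0.2) ** 2
#   o = get_parameter_limits(xval, loglike)
#   print(o['x0'], o['err'], o['ul'])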
def kernel(ell, nu, eig, fgong):
    """Returns a dict of structural kernels.  I have tried to make this as
    notationally similar to Gough & Thompson (1991) as possible.

    Parameters
    ----------
    ell: int
        The angular degree of the mode.
    nu: float
        The cyclic frequency of the mode.
    eig: np.array, shape(N,7)
        Eigenfrequency data for the mode, as produced by ADIPLS.
    fgong: dict
        Stellar model data in a dictionary, as per load_fgong()
        above.

    Returns
    -------
    kernels: dict
        Pairs of structure kernels (each a np.array of length N), keyed by
        variable pair, e.g. ('c', 'rho'), ('Gamma1', 'rho'), ('u', 'Y').
    """
    omega = 2.*np.pi*nu                      # convert cyclic to angular frequency
    G = 6.672e-8                             # gravitational constant
    L2 = ell*(ell+1)
    L = np.sqrt(L2)
    M, R = fgong['glob'][:2]                 # mass and radius from FGONG
    sigma = np.sqrt(R**3/G/M)*omega          # dimensionless frequency

    ## unpack fgong file (c.f. page 6 section 2.1.3 of above doc)
    r = fgong['var'][::-1,0]                 # radial co-ordinate
    m = M*np.exp(fgong['var'][::-1,1])       # mass co-ordinate
    P = fgong['var'][::-1,3]                 # pressure
    rho = fgong['var'][::-1,4]               # density
    Gamma1 = fgong['var'][::-1,9]            # first adiabatic index
    cs2 = Gamma1*P/rho                       # square of the sound speed
    Y = 1 - fgong['var'][::-1,5] - fgong['var'][::-1,16] # helium abundance
    
    # Gamma_1,Y = ( partial ln Gamma_1 / partial ln Y ) _ {P, rho} etc
    Gamma_1rho = fgong['var'][::-1,25]
    Gamma_1p = fgong['var'][::-1,26]
    Gamma_1Y = fgong['var'][::-1,27]
    
    ## equilibrium model (c.f. ADIPLS - The Aarhus adi. osc. pack., section 2.1)
    A = fgong['var'][::-1,14] # 1/Gamma_1 (dln p / dln r) - (dln rho / dln r)
    A1 = (m/M)/(r/R)**3                      # fractional volume
    Vg = (G*m*rho)/(Gamma1*P*r)
    drho_dr = -(Vg+A)*rho/r                  # density gradient
    
    ## unpack eigenfunction (c.f. page 7 of Notes on adi. osc. prog.)
    x = eig[:,0]                             # dimensionless radius (i.e. r/R)
    y1 = eig[:,1]                            # xi_r / R
    y2 = eig[:,2]                            # l(l+1)/R * xi_h
    y3 = eig[:,3]                            # -x * Phi' / (g * r)
  # y4 = eig[:,4]                            # x^2 * d/dx (y_3 / x)
    
    xi_r = y1*R                              # radial component of eigenfunction
    
    dxi_r_dr = np.hstack((0., np.diff(xi_r)/np.diff(r)))
    
    # chi is the "dilatation"
    if ell == 0:
        xi_h = 0.*xi_r 
      # eta = G*m/r**3/omega**2
        chi = Vg/x*(y1-sigma**2/A1/x*y2)
    elif ell > 0:
        xi_h = y2*R/L2
      # eta = L2*G*m/r**3/omega**2
        chi = Vg/x*(y1-y2*sigma**2/(A1*L2)-y3)
    else:
        raise ValueError('ell must be non-negative')
    
    # most stellar models include the central point, at which the
    # numerical value of chi and drho_dr might be buggy.
    chi[0] = 0.
    drho_dr[0] = 0.
    
    S = np.trapz((xi_r**2+L2*xi_h**2)*rho*r**2, r)
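    # S is the mode inertia, the normalisation integral
    # S = int rho (xi_r^2 + L^2 xi_h^2) r^2 dr,
    # which divides every kernel constructed below.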
    
    
    ### Calculate c, rho pair
    # c.f. Gough & Thompson 1991 equation (60)
    K_c_rho = rho*cs2*chi**2*r**2 / (S*omega**2)
    
    # first compute the huge bracketed terms 
    # in last two lines of equation (61) 
    K_rho_c = (ell+1.)/r**ell*(xi_r-ell*xi_h) \
        *integrate((rho*chi+xi_r*drho_dr)*r**(ell+2.), r) \
        - ell*r**(ell+1.)*(xi_r+(ell+1.)*xi_h) \
        *complement((rho*chi+xi_r*drho_dr)*r**(1.-ell), r)
    # then combine it with the rest
    K_rho_c = -0.5*(xi_r**2+L2*xi_h**2)*rho*omega**2*r**2 \
        +0.5*rho*cs2*chi**2*r**2 \
        -G*m*(chi*rho+0.5*xi_r*drho_dr)*xi_r \
        -4*np.pi*G*rho*r**2*complement((chi*rho+0.5*xi_r*drho_dr)*xi_r, r) \
        +G*m*rho*xi_r*dxi_r_dr \
        +0.5*G*(m*drho_dr+4.*np.pi*r**2*rho**2)*xi_r**2 \
        -4*np.pi*G*rho/(2.*ell+1.)*K_rho_c
    K_rho_c = K_rho_c / (S*omega**2)
    
    
    ### Calculate c^2, rho pair
    # d(c^2)/c^2 = 2 * d(c)/c
    # so K_c2_rho = K_c_rho / 2
    K_c2_rho = K_c_rho / 2.
    K_rho_c2 = K_rho_c
    
    
    ### Calculate Gamma_1, rho pair
    # K_Gamma1_rho = K_c2_rho (c.f. Basu, Studying Stars 2011, eq 3.20)
    # K_rho_Gamma1 (c.f. InversionKit v. 2.2, eq. 105)
    K_rho_Gamma1 = K_rho_c2
    integrand = Gamma1*chi**2*r**2 / (2*S*omega**2)
    first_int = integrate(integrand , r)
    second_int = complement(4*np.pi*G*rho/r**2*integrate(integrand, r), r)
    K_Gamma1_rho = K_rho_c2 - K_c2_rho + G*m*rho/r**2 * \
        first_int + rho*r**2 * second_int
    
    
    ### Calculate u, Y pair
    # K_Y_u = Gamma_1,Y * K_Gamma1_rho (c.f. Basu & Chaplin 2016, eq 10.40)
    # K_u_Y = P * d(P^-1 psi)/dr + Gamma_1,p * K_Gamma1_rho
    # where psi is obtained by solving
    # d[psi(r)]/dr - 4 pi G rho r^2 int_r^R rho/(r^2 P) psi dr = -z(r)
    # z(r) = K_rho_Gamma1 + (Gamma_1,p + Gamma_1,rho) K_Gamma1_rho
    K_Y_u = Gamma_1Y * K_Gamma1_rho

    z = K_rho_Gamma1 + ( Gamma_1p + Gamma_1rho ) * K_Gamma1_rho
    
    rho_spl = UnivariateSpline(r, rho)
    P_spl = UnivariateSpline(r, P)
    z_spl = UnivariateSpline(r, z)
    
    drho = rho_spl.derivative()
    dz = z_spl.derivative()
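    # The integral relation for psi quoted above (d psi/dr minus the
    # complement integral equals -z) is differentiated once more in r to
    # eliminate the integral term, giving the second-order ODE
    # psi'' + a(r) psi' + b(r) psi = f(r) solved below; at, bt and ft
    # are its coefficients.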
    
    def bvp(t, psi):
        # psi'' + a(r) psi' + b(r) psi = f(r)
        # where
        # a(r) =  P/rho (r rho' - 2 rho)
        # b(r) = -P/rho [ 1 / ( 4 pi G r^2 rho ) ]
        # f(r) =  P/rho ( z r rho' - rho r z' + 2 z rho )
        # rearrange into y_1' = y_2
        #                y_2' = f(t) - a(t) y_2 - b(t) psi 
        
        # remesh onto variable x from r
        rhot = rho_spl(t) 
        Pt = P_spl(t) 
        zt = z_spl(t) 
        
        # differentiate with respect to x
        drhot = drho(t) 
        dzt = dz(t) 
        
        # calculate variables on mesh x
        #Prhot = Pt / rhot
        #at = Prhot * ( t * rhot - 2 * rhot )
        #bt = - Prhot * ( 1 / ( 4 * np.pi * G * t**2 * rhot ) )
        #ft = Prhot * ( zt * t * drhot - \
        #               rhot * x * dzt + \
        #               2 * zt * rhot )
        
        drhorho = drhot / rhot
        
        at = drhorho - 2 / t
        bt = 4 * np.pi * G * t * rhot**2 / Pt
        ft = zt * (2/t + drhorho) - dzt
        
        return np.vstack(( psi[1], 
                           ft - at * psi[1] - bt * psi[0] ))
    
    result = solve_bvp(bvp, 
        lambda ya, yb: np.array([ ya[0]-1, yb[0]-1 ]), 
        r, 
        np.array([ np.ones(len(r)), np.ones(len(r)) ]), 
        max_nodes=1000000)#, tol=1e-5)
    
    
    
    
    
    #def dpsi_dr(psi, s):
    #    return 4*np.pi*G * np.interp(s, r, rho) * s**2 \
    #        * trapz(rho[r>=s] / (r[r>=s]**2 * P[r>=s]) * psi, r[r>=s]) \
    #        - ( np.interp(s, r, K_rho_Gamma1) \
    #            + ( np.interp(s, r, Gamma_1p) + np.interp(s, r, Gamma_1rho) ) \
    #              * np.interp(s, r, K_Gamma1_rho) \
    #          )
    #psi = odeint(dpsi_dr, 1.0, r).T[0]
    #print(psi)
    
    
    
    #def dpsi_dr(r2, psi):
    #    print('r2 == r', np.all(r2 == r))
    #    psi = psi[0]
    #    print('len(psi) == len(r)', len(psi) == len(r))
    #    return np.vstack((4*np.pi*G*rho*r**2 \
    #        * complement(rho/(r**2*P)*psi, r) \
    #        + ( K_rho_Gamma1 + ( Gamma_1p + Gamma_1rho ) * K_Gamma1_rho ))).T
    
    #def dpsi_dr(r2, psi):
    #    psi = psi[0]
    #    return np.vstack((4*np.pi*G*np.interp(r2, r, rho)*r2**2 \
    #        * complement(np.interp(r2, r, rho)/(r2**2*np.interp(r2,r,P))*psi, r2) \
    #        + ( np.interp(r2, r, K_rho_Gamma1) + ( np.interp(r2, r, Gamma_1p) \
    #        + np.interp(r2, r, Gamma_1rho) ) * np.interp(r2, r, K_Gamma1_rho) ))).T
    
    ##psi = np.zeros(len(r))
    ##psi[0] = r[0] * f(0, 0)
    ##for ii in range(1, len(r)):
    ##    h = r[ii] - r[ii-1]
    ##    psi[ii] = psi[ii-1] + h * f(psi[ii-1], ii-1)
    ##print(psi)
    
    #def resid(psi):
    #    return np.gradient(psi, r) - f(psi)
    
    
    
    #result = solve_bvp(dpsi_dr, 
    #    lambda ya,yb: np.array([ (ya[0]-1)**2 + (yb[0]-1)**2 ]),
    #    r, [np.ones(len(r))], max_nodes=100000, tol=1e-9)
    
    
    # dPsi1_dr = 4 pi G r^2 rho Psi_2 + 
    #            [ K_rho_Gamma1 + ( Gamma_1P + Gamma_1rho ) * K_Gamma1_rho ]
    # dPsi2_dr = -rho/(r^2 P) Psi_1
    
    #def bvp(t, psi):
    #    rhot = rho_spl(t) 
    #    psi1 = 4 * np.pi * G * rhot * t**2 * psi[1] + z_spl(t)
    #    psi2 = - rhot / (t * P_spl(t)) * psi[0]
    #    return np.vstack(( psi1, psi2 ))
    
    #result = solve_bvp(bvp, 
    #    lambda ya, yb: np.array([ ya[0]-1, yb[0]-1 ]), 
    #    r, 
    #    np.array([ np.ones(len(r)), np.zeros(len(r)) ]), 
    #    max_nodes=100000000, tol=1e-4)
    
#    def bvp(t, psi):
#        rhot = rho_spl(t) 
#        dpsi = 4 * np.pi * G * rhot * t**2 \
#            * complement(rhot / ( t**2 * P_spl(t) ) * psi[0], t) - z_spl(t)
#        print('dpsi', dpsi)
#        return np.vstack(( dpsi )).T
    
#    result = solve_bvp(bvp, 
#        lambda ya, yb: np.array([ yb[0]-1 ]), 
#        r, 
#        np.array([ np.ones(len(x)) ]), 
#        max_nodes=100000000, tol=1e-3) 
    
    #result = solve_bvp(dPsi2_dr2, 
    #    lambda ya, yb: np.array([ (ya[0]-1)**2, (yb[0]-1)**2 ]),
    #    r, [ np.ones(len(r)), np.zeros(len(r)) ], 
    #    max_nodes=1e6)#, tol=1e-5)
    
    print(result)
    psi = result.sol(r)[0] #interp1d(result['x'], result['y'][0])(r)
    print('psi', psi)
    
    #result = least_squares(resid, np.ones(len(r)))
    #print(result)
    #psi = result['x']
    #psi = minimize(sqdiff, np.ones(len(r)), method='Nelder-Mead')['x']
    
    # d(P^-1 psi)/dr: spline of P**-1 * psi over r (the argument order was
    # inverted in the original)
    K_u_Y = P * UnivariateSpline(r, P**-1 * psi).derivative()(r) \
        + Gamma_1p * K_Gamma1_rho
    
    
    return { 
        ('c',      'rho'): (K_c_rho,      K_rho_c2),
        ('c2',     'rho'): (K_c2_rho,     K_rho_c2),
        ('Gamma1', 'rho'): (K_Gamma1_rho, K_rho_Gamma1),
        ('u',      'Y'):   (K_u_Y,        K_Y_u),
        ('psi', 'psi'):    (psi,          psi)
    }
    #data splicing    
    cur_t = t[i-num_pts:i]
    cur_x = x[i-num_pts:i]
    cur_timestamp = timestamp[i-num_pts:i]
    
    #data spline
    try:
        #Take the gaussian average of data points and its variance
        _,var = moving_average(cur_x)
        sp = UnivariateSpline(cur_t,cur_x,w=c/np.sqrt(var))
        t_n = np.linspace(cur_t[0],cur_t[-1],1000)
        
        #spline results    
        x_n = sp(t_n)
        v_n = sp.derivative(n=1)(t_n)
        a_n = sp.derivative(n=2)(t_n)
        
        #compute for velocity (cm/day) vs. acceleration (cm/day^2) in log axes
        x_s = sp(cur_t)
        v_s = abs(sp.derivative(n=1)(cur_t) * 100)
        a_s = abs(sp.derivative(n=2)(cur_t) * 100)
    except Exception:
        print("Interpolation Error {}".format(pd.to_datetime(str(cur_timestamp[-1])).strftime("%m/%d/%Y %H:%M")))
        # define the output grids even when the spline fit fails,
        # so downstream code sees NaN arrays of the right shape
        t_n = np.linspace(cur_t[0], cur_t[-1], 1000)
        x_n = np.ones(len(t_n))*np.nan
        v_n = np.ones(len(t_n))*np.nan
        a_n = np.ones(len(t_n))*np.nan
        x_s = np.ones(len(cur_t))*np.nan
        v_s = np.ones(len(cur_t))*np.nan
        a_s = np.ones(len(cur_t))*np.nan
    
Example #50
0
def interpolate_derivative(xdat, ydat):
    # fit an exact (s=0) quartic spline; k=4 makes the derivative a cubic,
    # whose roots (the extrema of the fit) UnivariateSpline can locate
    spl = UnivariateSpline(xdat, ydat, k=4, s=0)
    roots = spl.derivative().roots()
    yfit = spl(xdat)
    return yfit, roots
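# Usage sketch: UnivariateSpline.roots() only supports cubic splines, so the
# quartic (k=4) fit above is deliberate -- its derivative is cubic, and the
# roots of the derivative are the extrema of the fitted curve.
import numpy as np

x = np.linspace(0, np.pi, 50)
y = np.sin(x)
yfit, roots = interpolate_derivative(x, y)
print(roots)  # a single root near pi/2, the maximum of sin(x)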
Example #51
0
from scipy.interpolate import UnivariateSpline
from EOS import rho, cp
from const import *
from ext_data import opac


for i in range(0, z.size):
	FconvArr[i] = Fconv(T[i], P[i], z[i])
	FradArr[i] = Frad(T[i], P[i], z[i])
	rhoArr[i] = rho(T[i], P[i], z[i])
	cpArr[i] = cp(T[i], P[i])
	opacArr[i] = opac(T[i],rhoArr[i])

F = FconvArr+FradArr
F_spline = UnivariateSpline(z, F, s=0)
dFdz = F_spline.derivative()
dFdz = dFdz(z)

#dFdz = np.diff(F)/h
#dFdz = np.append(dFdz, dFdz[dFdz.size-1])
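# (the spline derivative above gives a smooth dF/dz at every grid point,
# whereas this np.diff estimate is one-sided and must duplicate the last
# value to keep the array length)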

#  semi-implicit method for the time step

#d_main = np.empty(500)
#d_sub = np.zeros(500)
#d_sup = np.zeros(500)
#b = np.zeros(500)
#
#h = 25000	#grid spacing
#
#Bz0 = np.sqrt(8e-7*np.pi*(bcg.P[499]-P[499]))
Example #52
0
    def _autofocus(self,
                   seconds,
                   focus_range,
                   focus_step,
                   thumbnail_size,
                   keep_files,
                   dark_thumb,
                   merit_function,
                   merit_function_kwargs,
                   coarse,
                   plots,
                   start_event,
                   finished_event,
                   smooth=0.4, *args, **kwargs):
        # If passed a start_event wait until Event is set before proceeding
        # (e.g. wait for coarse focus to finish before starting fine focus).
        if start_event:
            start_event.wait()

        initial_focus = self.position
        if coarse:
            self.logger.debug(
                "Beginning coarse autofocus of {} - initial position: {}",
                self._camera, initial_focus)
        else:
            self.logger.debug(
                "Beginning autofocus of {} - initial position: {}", self._camera, initial_focus)

        # Set up paths for temporary focus files, and plots if requested.
        image_dir = self.config['directories']['images']
        start_time = current_time(flatten=True)
        file_path_root = "{}/{}/{}/{}".format(image_dir,
                                              'focus',
                                              self._camera.uid,
                                              start_time)

        # Take an image before focusing, grab a thumbnail from the centre and add it to the plot
        file_path = "{}/{}_{}.{}".format(file_path_root, initial_focus,
                                         "initial", self._camera.file_extension)
        thumbnail = self._camera.get_thumbnail(seconds, file_path, thumbnail_size, keep_file=True)

        if plots:
            thumbnail = images.mask_saturated(thumbnail)
            if dark_thumb is not None:
                thumbnail = thumbnail - dark_thumb
            fig = plt.figure(figsize=(9, 18), tight_layout=True)
            ax1 = fig.add_subplot(3, 1, 1)
            im1 = ax1.imshow(thumbnail, interpolation='none', cmap=palette, norm=colours.LogNorm())
            fig.colorbar(im1)
            ax1.set_title('Initial focus position: {}'.format(initial_focus))

        # Set up encoder positions for autofocus sweep, truncating at focus travel
        # limits if required.
        if coarse:
            focus_range = focus_range[1]
            focus_step = focus_step[1]
        else:
            focus_range = focus_range[0]
            focus_step = focus_step[0]

        focus_positions = np.arange(max(initial_focus - focus_range / 2, self.min_position),
                                    min(initial_focus + focus_range / 2, self.max_position) + 1,
                                    focus_step, dtype=int)
        n_positions = len(focus_positions)

        metric = np.empty((n_positions))

        for i, position in enumerate(focus_positions):
            # Move focus, updating focus_positions with actual encoder position after move.
            focus_positions[i] = self.move_to(position)

            # Take exposure
            file_path = "{}/{}_{}.{}".format(file_path_root,
                                             focus_positions[i], i, self._camera.file_extension)
            thumbnail = self._camera.get_thumbnail(
                seconds, file_path, thumbnail_size, keep_file=keep_files)
            thumbnail = images.mask_saturated(thumbnail)
            if dark_thumb is not None:
                thumbnail = thumbnail - dark_thumb
            # Calculate focus metric
            metric[i] = images.focus_metric(thumbnail, merit_function, **merit_function_kwargs)
            self.logger.debug("Focus metric at position {}: {}".format(position, metric[i]))

        fitted = False

        # Find maximum values
        imax = metric.argmax()

        if imax == 0 or imax == (n_positions - 1):
            # TODO: have this automatically switch to coarse focus mode if this happens
            self.logger.warning(
                "Best focus outside sweep range, aborting autofocus on {}!".format(self._camera))
            best_focus = focus_positions[imax]

        elif not coarse:
            # Crude guess at a standard deviation for focus metric, 40% of the maximum value
            weights = np.ones(len(focus_positions)) / (smooth * metric.max())

            # Fit smoothing spline to focus metric data
            fit = UnivariateSpline(focus_positions, metric, w=weights, k=4, ext='raise')

            try:
                stationary_points = fit.derivative().roots()
            except ValueError as err:
                self.logger.warning('Error finding extrema of spline fit: {}'.format(err))
                best_focus = focus_positions[imax]
            else:
                extrema = fit(stationary_points)
                if len(extrema) > 0:
                    best_focus = stationary_points[extrema.argmax()]
                    fitted = True
                else:
                    # no stationary points: fall back to the sampled maximum
                    best_focus = focus_positions[imax]

        else:
            # Coarse focus, just use max value.
            best_focus = focus_positions[imax]

        if plots:
            ax2 = fig.add_subplot(3, 1, 2)
            ax2.plot(focus_positions, metric, 'bo', label='{}'.format(merit_function))
            if fitted:
                fs = np.arange(focus_positions[0], focus_positions[-1] + 1)
                ax2.plot(fs, fit(fs), 'b-', label='Smoothing spline fit')

            ax2.set_xlim(focus_positions[0] - focus_step / 2, focus_positions[-1] + focus_step / 2)
            u_limit = 1.10 * metric.max()
            l_limit = min(0.95 * metric.min(), 1.05 * metric.min())
            ax2.set_ylim(l_limit, u_limit)
            ax2.vlines(initial_focus, l_limit, u_limit, colors='k', linestyles=':',
                       label='Initial focus')
            ax2.vlines(best_focus, l_limit, u_limit, colors='k', linestyles='--',
                       label='Best focus')
            ax2.set_xlabel('Focus position')
            ax2.set_ylabel('Focus metric')
            if coarse:
                ax2.set_title('{} coarse focus at {}'.format(self._camera, start_time))
            else:
                ax2.set_title('{} fine focus at {}'.format(self._camera, start_time))
            ax2.legend(loc='best')

        final_focus = self.move_to(best_focus)

        file_path = "{}/{}_{}.{}".format(file_path_root, final_focus,
                                         "final", self._camera.file_extension)
        thumbnail = self._camera.get_thumbnail(seconds, file_path, thumbnail_size, keep_file=True)

        if plots:
            thumbnail = images.mask_saturated(thumbnail)
            if dark_thumb is not None:
                thumbnail = thumbnail - dark_thumb
            ax3 = fig.add_subplot(3, 1, 3)
            im3 = ax3.imshow(thumbnail, interpolation='none', cmap=palette, norm=colours.LogNorm())
            fig.colorbar(im3)
            ax3.set_title('Final focus position: {}'.format(final_focus))
            if coarse:
                plot_path = file_path_root + '_coarse.png'
            else:
                plot_path = file_path_root + '_fine.png'

            fig.savefig(plot_path)
            plt.close(fig)
            if coarse:
                self.logger.info('Coarse focus plot for camera {} written to {}'.format(
                    self._camera, plot_path))
            else:
                self.logger.info('Fine focus plot for camera {} written to {}'.format(
                    self._camera, plot_path))

        self.logger.debug(
            'Autofocus of {} complete - final focus position: {}', self._camera, final_focus)

        if finished_event:
            finished_event.set()

        return initial_focus, final_focus
Example #53
0
    secondterm = np.cross(n, np.cross(n - v, a)) / (1 - np.dot(n, v))**3 / R
    E = firstterm + secondterm
    B = np.cross(n, E)
    return {'E': E, 'B': B}

times = np.linspace(0,100,1000)

tauguess = 0.
for time in times:
    sol = optimize.root(properinterval, tauguess,args=(fieldx,fieldy,fieldz,time))
    soltau = sol.x
    tauguess = soltau
    rRet = np.array([xs(tau),ys(tau),zs(tau)])
    #dtdtau = ts.derivative()(tau)
    #v = np.array([xs.derivative()(tau),ys.derivative()(tau),zs.derivative()(tau)     ]) / dtdtau
    v = np.array([xts.derivative()(time),yts.derivative()(time) ,zts.derivative()(time)])
    a = np.array([xts.derivative(2)(time),yts.derivative(2)(time) ,zts.derivative(2)(time)])
    myEB = EB(fieldr, rRet, v, a)
    E = myEB['E']   # EB() returns a dict, so index rather than attribute access
    B = myEB['B']
    S = np.cross(E, B)




print(soltau)

# light-cone lines: x = t and x = -t
plt.plot(t,t+fieldx-fieldt)
plt.plot(t,-t-fieldx+fieldt)
plt.show()
Example #54
0
def get_parameter_limits(xval, loglike, cl_limit=0.95, cl_err=0.68269, tol=1E-2,
                         bounds=None):
    """Compute upper/lower limits, peak position, and 1-sigma errors
    from a 1-D likelihood function.  This function uses the
    delta-loglikelihood method to evaluate parameter limits by
    searching for the point at which the change in the log-likelihood
    value with respect to the maximum equals a specific value.  A
    quadratic spline fit to the log-likelihood values is used to
    improve the accuracy of the calculation.

    Parameters
    ----------

    xval : `~numpy.ndarray`
       Array of parameter values.

    loglike : `~numpy.ndarray`
       Array of log-likelihood values.

    cl_limit : float
       Confidence level to use for limit calculation.

    cl_err : float
       Confidence level to use for two-sided confidence interval
       calculation.

    tol : float
       Absolute precision of likelihood values.

    Returns
    -------

    x0 : float
        Coordinate at maximum of likelihood function.

    err_lo : float    
        Lower error for two-sided confidence interval with CL
        ``cl_err``.  Corresponds to point (x < x0) at which the
        log-likelihood falls by a given value with respect to the
        maximum (0.5 for 1 sigma).  Set to nan if the change in the
        log-likelihood function at the lower bound of the ``xval``
        input array is less than the value for the given CL.

    err_hi : float
        Upper error for two-sided confidence interval with CL
        ``cl_err``. Corresponds to point (x > x0) at which the
        log-likelihood falls by a given value with respect to the
        maximum (0.5 for 1 sigma).  Set to nan if the change in the
        log-likelihood function at the upper bound of the ``xval``
        input array is less than the value for the given CL.

    err : float
        Symmetric 1-sigma error.  Average of ``err_lo`` and ``err_hi``
        if both are defined.

    ll : float
        Lower limit evaluated at confidence level ``cl_limit``.

    ul : float
        Upper limit evaluated at confidence level ``cl_limit``.

    lnlmax : float
        Log-likelihood value at ``x0``.

    """

    dlnl_limit = onesided_cl_to_dlnl(cl_limit)
    dlnl_err = twosided_cl_to_dlnl(cl_err)

    try:
        # Pad the likelihood function
        # if len(xval) >= 3 and np.max(loglike) - loglike[-1] < 1.5*dlnl_limit:
        #    p = np.polyfit(xval[-3:], loglike[-3:], 2)
        #    x = np.linspace(xval[-1], 10 * xval[-1], 3)[1:]
        #    y = np.polyval(p, x)
        #    x = np.concatenate((xval, x))
        #    y = np.concatenate((loglike, y))
        # else:
        x, y = xval, loglike
        spline = UnivariateSpline(x, y, k=2,
                                  #k=min(len(xval) - 1, 3),
                                  w=(1 / tol) * np.ones(len(x)))
    except Exception:
        print("Failed to create spline: ", xval, loglike)
        return {'x0': np.nan, 'ul': np.nan, 'll': np.nan,
                'err_lo': np.nan, 'err_hi': np.nan, 'err': np.nan,
                'lnlmax': np.nan}

    sd = spline.derivative()

    imax = np.argmax(loglike)
    ilo = max(imax - 1, 0)
    ihi = min(imax + 1, len(xval) - 1)

    # Find the peak
    x0 = xval[imax]

    # Refine the peak position
    if np.sign(sd(xval[ilo])) != np.sign(sd(xval[ihi])):
        x0 = find_function_root(sd, xval[ilo], xval[ihi])

    lnlmax = float(spline(x0))

    def fn(t): return spline(t) - lnlmax
    fn_val = fn(xval)
    if np.any(fn_val[imax:] < -dlnl_limit):
        xhi = xval[imax:][fn_val[imax:] < -dlnl_limit][0]
    else:
        xhi = xval[-1]        
    # EAC: brute force check that xhi is greater than x0
    # The fabs is here in case x0 is negative
    if xhi <= x0:
        xhi = x0 + np.fabs(x0)

    if np.any(fn_val[:imax] < -dlnl_limit):
        xlo = xval[:imax][fn_val[:imax] < -dlnl_limit][-1]
    else:
        xlo = xval[0]
    # EAC: brute force check that xlo is less than x0
    # The fabs is here in case x0 is negative        
    if xlo >= x0:
        xlo = x0 - 0.5*np.fabs(x0)

    ul = find_function_root(fn, x0, xhi, dlnl_limit, bounds=bounds)
    ll = find_function_root(fn, x0, xlo, dlnl_limit, bounds=bounds)
    err_lo = np.abs(x0 - find_function_root(fn, x0, xlo, dlnl_err,
                                            bounds=bounds))
    err_hi = np.abs(x0 - find_function_root(fn, x0, xhi, dlnl_err,
                                            bounds=bounds))

    err = np.nan
    if np.isfinite(err_lo) and np.isfinite(err_hi):
        err = 0.5 * (err_lo + err_hi)
    elif np.isfinite(err_hi):
        err = err_hi
    elif np.isfinite(err_lo):
        err = err_lo

    o = {'x0': x0, 'ul': ul, 'll': ll,
         'err_lo': err_lo, 'err_hi': err_hi, 'err': err,
         'lnlmax': lnlmax}
    return o
s_0_5_6 = lut.sp[0,:,0,5,6]
s,n = nanmasked(s_0_5_6)
snorm = norm2max(s_0_5_6)
[i1000,i1077,i1493,i1600,i1200,i1300,i530,i610,
 i1565,i1634,i1193,i1198,i1236,i1248,i1270,i1644,
 i1050,i1040,i1065,i600,i870,i515] = find_closest(lut.wvl,np.array([1000,1077,1493,1600,1200,1300,530,
                                                     610,1565,1634,1193,1198,1236,1248,
                                                     1270,1644,1050,1040,1065,600,870,515]))
norm2 = s_0_5_6/s_0_5_6[i1000]
dsp = smooth(np.gradient(norm2,lut.wvl/1000.),2)

# <codecell>

norm2_uni = UnivariateSpline(lut.wvl/1000.0,norm2,k=5)
norm2_uni.set_smoothing_factor(1)
dnorm2 = norm2_uni.derivative()

# <codecell>

norm2_bspline = splrep(lut.wvl/1000.0,norm2,k=5)
norm2_b = splev(lut.wvl/1000.0,norm2_bspline,der=0)
dbnorm2 = splev(lut.wvl/1000.0,norm2_bspline,der=1)

# <codecell>

dsp2 = smooth(deriv(norm2,lut.wvl/1000.),2)

# <codecell>

plt.figure()
plt.plot(lut.wvl,norm2)
Example #56
0
		fa = a.reshape(-1,a.shape[-1])
		fo = np.zeros([fa.shape[0],nbin])
		for i in range(len(fa)):
			fo[i] = np.bincount(pix, fa[i], minlength=nbin)
		return fo.reshape(a.shape[:-1]+(nbin,))
	# Gapfill poltod in regions with far too few hits, to
	# avoid messing up the poltod power spectrum
	mask = poldiv[0,0] < np.mean(poldiv[0,0])*0.1
	for i in range(poltod.shape[0]):
		poltod[i] = gapfill.gapfill_copy(poltod[i], rangelist.Rangelist(mask))

	# Calc phase which is equal to az while az velocity is positive
	# and 2*max(az) - az while az velocity is negative
	x = np.arange(len(az))
	az_spline = UnivariateSpline(x, az, s=1e-4)
	daz  = az_spline.derivative(1)(x)
	ddaz = az_spline.derivative(2)(x)
	phase = az.copy()
	phase[daz<0] = 2*np.max(az)-az[daz<0]
	# Bin by az and phase
	apix = build_bins(az, nbin)
	ppix = build_bins(phase, nbin)
	for i, pix in enumerate([apix, ppix]):
		tod_eq.rhs[i] += bin_by_pix(polrhs, pix, nbin)
		tod_eq.div[i] += bin_by_pix(poldiv, pix, nbin)
		acc_eq.rhs[i] += bin_by_pix(ddaz,   pix, nbin)
		acc_eq.div[i] += bin_by_pix(1,      pix, nbin)
		for j in range(ndet):
			di = d.dets[j]
			det_eq.rhs[i,di] += bin_by_pix(d.tod[j]*weight[j], pix, nbin)
			det_eq.div[i,di] += bin_by_pix(weight[j], pix, nbin)
Example #57
0
import numpy as np
from scipy import integrate
from scipy.interpolate import UnivariateSpline
import matplotlib.pyplot as plt


N=100
v = .5

t = np.linspace(0,50,N)
x = t * v
y = np.zeros(N)
z = np.zeros(N)

#find splines
xs = UnivariateSpline(t, x, s=1)
ys = UnivariateSpline(t, y, s=1)
zs = UnivariateSpline(t, z, s=1)

dxdt = xs.derivative()(t)
dydt = ys.derivative()(t)
dzdt = zs.derivative()(t)

gamma = np.sqrt(1 - dxdt**2 - dydt**2 - dzdt**2)  # dtau/dt (units c = 1); this is 1/gamma, despite the name

tau = integrate.cumtrapz(gamma, t)
tau = np.insert(tau,0,0.)

plt.figure()
plt.plot(t,tau)
plt.figure()
plt.plot(t,x)
plt.show()
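# Sanity check (sketch): for constant speed v the proper time is exactly
# tau = t * sqrt(1 - v**2) in units where c = 1, so the integral above should
# reproduce that line; with v = 0.5, tau[-1] is close to 50*sqrt(0.75).
print(tau[-1], t[-1] * np.sqrt(1 - v**2))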
def preprocess(filename, num_resamplings = 25):

	# read data
	#filename = "../data/MarieTherese_jul31_and_Aug07_all.pkl"

	pkl_file = open(filename, 'rb')
	data1 = pickle.load(pkl_file)    # cPickle in the original Python 2 code
	num_strokes = len(data1)

	# get the unique stroke labels, map them to class labels (ints) using a dictionary
	stroke_dict = dict()
	value_index = 0
	for i in range(0, num_strokes):
		current_key = data1[i][0]
		if current_key not in stroke_dict:
			stroke_dict[current_key] = value_index
			value_index = value_index + 1

	# save the dictionary to file, for later use
	dict_filename = "../data/stroke_label_mapping.pkl"
	dict_file = open(dict_filename, 'wb')
	pickle.dump(stroke_dict, dict_file)

	# - smooth data
	# 	for each stroke, get the vector of data, smooth/interpolate it over time, store sampling from smoothed signal in vector
	# - sample at regular intervals (1/30 of total time, etc.) -> input vector X


	num_params = len(data1[0][1][0]) #accelx, accely, etc.
	#num_params = 16 #accelx, accely, etc.

	# re-sample the interpolated spline this many times (25 or so seems ok,
	# since most letters have about this many points)

	# build an output array large enough to hold the vectors for each stroke
	# plus the (unicode -> int) stroke value (1 element)
	output_array = np.zeros((num_strokes, (5 * num_resamplings) * num_params + 1))
	print(output_array.size)

	print(filename)
	print(num_params)
	print(num_resamplings)
	print()

	for i in range(0, num_strokes):

		# progress indicator
		if i % 100 == 0:
			print(float(i) / num_strokes)

		# array to store original data plus 2 derivatives and 2 integrals
		X_matrix = np.zeros((num_params, num_resamplings * 5))

		# (the outer-product X^2 features are disabled below, so the
		# X_2_vector_scaled array -- whose length num_resamplings_2 was
		# never defined in this version -- is not needed)

		concatenated_X_X_2 = np.zeros((num_params, num_resamplings * 5))

		# map the unicode stroke label to an int class value
		curr_stroke_val = stroke_dict[data1[i][0]]

		curr_data = data1[i][1]

		# fix if too short for interpolation - pad current data with 3 zeros
		if len(curr_data) <= 3:
			curr_data = np.concatenate([curr_data, np.zeros((3, num_params))])

		time = np.arange(0, len(curr_data), 1) # the sample 'times' (0 to number of samples)
		time_new = np.arange(0, len(curr_data), float(len(curr_data))/num_resamplings) # the resampled time points

		for j in range(0, num_params): # iterate through parameters

			signal = curr_data[:,j] # one signal (accelx, etc.) to interpolate

			# interpolate the signal with a spline so it can be resampled
			# at arbitrary points (~30 seems reasonable based on the data)
			#tck = interpolate.splrep(time, signal, s=0)  # B-spline alternative
			tck = UnivariateSpline(time, signal, s=0)

			# sample the interpolation num_resamplings times to get values
			#resampled_data = interpolate.splev(time_new, tck, der=0)
			resampled_data = tck(time_new)

			# scale data (center, norm)
			resampled_data = preprocessing.scale(resampled_data)

			# first integral (antiderivative), scaled
			integral_1 = tck.antiderivative()
			resampled_data_integral = preprocessing.scale(integral_1(time_new))

			# second integral, scaled
			integral_2 = tck.antiderivative(2)
			resampled_data_integral_2 = preprocessing.scale(integral_2(time_new))

			# first derivative, scaled
			deriv_1 = tck.derivative()
			resampled_data_deriv = preprocessing.scale(deriv_1(time_new))

			# second derivative, scaled
			deriv_2 = tck.derivative(2)
			resampled_data_deriv_2 = preprocessing.scale(deriv_2(time_new))

			# concatenate into one feature vector
			concatenated_resampled_data = np.concatenate((resampled_data,
			                                              resampled_data_integral,
			                                              resampled_data_integral_2,
			                                              resampled_data_deriv,
			                                              resampled_data_deriv_2))

			# store for the correct parameter, to be used later as inputs to the SVM
			X_matrix[j] = concatenated_resampled_data

			# outer-product (X^2) features: square the resampled vector,
			# vectorize, scale, and concatenate with X -- disabled for now
			#X_2_matrix = np.outer(concatenated_resampled_data, concatenated_resampled_data)
			#X_2_vector = np.reshape(X_2_matrix, -1)
			#X_2_vector_scaled[j] = preprocessing.scale(X_2_vector)
			#concatenated_X_X_2[j] = np.concatenate([X_matrix[j], X_2_vector_scaled[j]])

			# for now, only use X, not the outer product
			concatenated_X_X_2[j] = X_matrix[j]

		# reshape into one big vector per stroke and append the stroke class value
		this_sample = np.concatenate((np.reshape(concatenated_X_X_2, -1), np.array([curr_stroke_val])))
		concatenated_samples = np.reshape(this_sample, -1)

		# add to output array
		output_array[i] = concatenated_samples
        
	print(output_array.size)

	return output_array
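# Hypothetical usage (the pickle path is the one mentioned in the comments above):
#   features = preprocess("../data/MarieTherese_jul31_and_Aug07_all.pkl")
#   X, y = features[:, :-1], features[:, -1]   # feature vectors and stroke class labels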