Example #1
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline


def findInflection(x, y, threshold=0.9):
    """
    Fits y = m*x to Zimm data before onset of Taylor instability.
    Returns m.
    Last updated by Kazem Edmond on Feb. 26, 2013.
    
    Inflection is found by identifying sudden change in slope.  Slope
    between each interval is calculated using a spline fit.  Data is 
    cleaned by removing outliers using a simple "threshold", where difference
    in between adjacent x is within 5 times of first interval.
    The threshold parameter defines size of inflection to look for.
    
    """
    # Clean data by removing outliers to help spline fit
    wout = np.where(np.abs(x[0:-1] - x[1:]) > 5 * np.abs(x[0] - x[1]))
    wout2 = wout[0][1::2]    # Note: only use the odd-indexed entries from np.where()
    
    # If necessary, new arrays for cleaned data:
    if np.size(wout2) > 0:
        xcln = np.delete(x, wout2)
        ycln = np.delete(y, wout2)
    else:
        xcln = x
        ycln = y
    
    # Fit a spline to cleaned data, giving slopes over intervals:
    spl = InterpolatedUnivariateSpline(xcln, ycln)
    
    # Store slopes in new array:
    nvals = int(np.floor(max(xcln)))
    spVals = np.zeros([nvals, 4])
    
    for i in range(1, nvals):
        spVals[i] = spl.derivatives(i)
    
    # Find range over which it's Newtonian.
    # Only consider data that is within threshold of initial slope.
    wlin = np.where(spVals[1:, 1] > threshold * spVals[1, 1])
    
    wid = np.where(np.subtract(wlin, range(0, np.size(wlin)))[0] > 0)

    # Check if all of the data is Newtonian:    
    if np.size(wid) > 0:
        idd = int(wid[0][0])
        xlin = xcln[0:idd]
        ylin = ycln[0:idd]
    else:
        xlin = xcln[wlin]
        ylin = ycln[wlin]
    
    # 2.27.2013: Alternative to spline fitting
    # Remove overall trend of data, resulting peak is inflection point.
    # Requires that inflection peak actually exists
    # m, _, _, _ = np.linalg.lstsq(x[:, np.newaxis], y)
    # xp = x[yp == np.max(y - m * (x - np.mean(x)))]
    # mf, _, _, _ = np.linalg.lstsq(x[x<xp][:, np.newaxis], y[x<xp])
    
    # Least squares linear fit to get the slope:
    m, _, _, _ = np.linalg.lstsq(xlin[:, np.newaxis], ylin, rcond=None)
    
    return m
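
# A minimal usage sketch of findInflection (the synthetic arrays and the 0.9 threshold
# below are illustrative only, not data from the original analysis): the toy y(x) grows
# linearly at first and then bends over, so the recovered slope should be close to 2.
x_demo = np.linspace(1.0, 20.0, 50)
y_demo = np.where(x_demo < 10.0, 2.0 * x_demo, 2.0 * x_demo - 0.1 * (x_demo - 10.0) ** 2)
m_demo = findInflection(x_demo, y_demo, threshold=0.9)  # slope of the initial linear regime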
Example #2
def FindABS(Det_A):
	"""	determines ABS energies as zeroes of GF determinant """
	DetG = InterpolatedUnivariateSpline(En_A[EdgePos1+1:EdgePos2],sp.real(Det_A[:]))
	RootsG_A = DetG.roots()
	NABS = len(RootsG_A)
	ABSpos_A = sp.zeros(2)
	Diff_A = sp.zeros(2)
	if NABS == 0:	
		## assumes ABS states too close to gap edges
		## this also happens when using brentq to calculate densities and
		## it starts from wrong initial guess
		print("# - Warning: FindABS: no ABS found: Probably too close to band edges.")
		ABS_A = sp.array([-Delta+2.0*dE,Delta-2.0*dE])
		ABSpos_A = sp.array([EdgePos1+1,EdgePos2-1])
		Diff_A = sp.array([DetG.derivatives(ABS_A[0])[1],DetG.derivatives(ABS_A[1])[1]])
	elif NABS == 1: 
		## ABS too close to each other?
		print("# - Warning: FindABS: only one ABS found: {0: .6e}".format(RootsG_A[0]))
		print("# -          Assuming they are too close to Fermi energy.")
		print("# -          Using mirroring to get the other ABS, please check the result.")
		ABS_A = [-sp.fabs(RootsG_A[0]),sp.fabs(RootsG_A[0])]
		for i in range(2):
			ABSpos_A[i] = FindInEnergies(ABS_A[i],En_A)
			Diff_A[i] = DetG.derivatives(ABS_A[i])[1]
	elif NABS == 2:	
		## two ABS states, ideal case
		ABS_A = sp.copy(RootsG_A)
		for i in range(2):
			ABSpos_A[i] = FindInEnergies(RootsG_A[i],En_A)
			Diff_A[i] = DetG.derivatives(RootsG_A[i])[1]
	else:
		print("# - Error: FindABS: Too many zeroes of the determinant.")
		exit()
	if sp.fabs(ABS_A[0]+ABS_A[1]) > 1e-6:
		print("# - Warning: FindABS: positive and negative ABS energies don't match, diff = {0: .6e}"\
		.format(sp.fabs(ABS_A[0]+ABS_A[1])))
	if sp.fabs(ABS_A[0])<dE:
		## ABS energy smaller than the energy resolution
		print("# - Warning: FindABS: ABS energies smaller than energy resolution")
		print("# -          We put the poles to lowest possible energies.")
		ABSpos_A = [Nhalf-1,Nhalf+1]
	return [ABS_A,Diff_A,ABSpos_A]
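
# A minimal, self-contained sketch of the root-finding idea used above.  The quadratic
# "determinant" below is a stand-in for Re[det G(w)] (En_A, Delta, dE and the real Green's
# function are not reproduced here); its two zeroes play the role of the ABS energies.
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

w = np.linspace(-0.9, 0.9, 2001)                  # energy grid inside the gap
det = w ** 2 - 0.25                               # toy determinant with zeroes at +/- 0.5
DetG = InterpolatedUnivariateSpline(w, det)       # cubic by default, so .roots() is available
roots = DetG.roots()                              # approximately [-0.5, 0.5]
slopes = [DetG.derivatives(r)[1] for r in roots]  # d(det)/dw at each zero, as in Diff_A above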
Example #3
def synth(startpitch, synthparms, numpoints=100, plot=False):
    times = np.zeros(len(synthparms) * numpoints)
    contour = np.zeros(len(synthparms) * numpoints)
    for i, synthparm in enumerate(synthparms):
        if i == 0:
            p0 = startpitch
            dp0 = 0.0
            ddp0 = 0.0
        if synthparm[0] != synthparms[i - 1][1]:  # not contiguous (e.g. a pause is present)
            dp0 = 0.0
            ddp0 = 0.0
        if any(e is None for e in synthparm):  # no parameters available for this syllable, skip...
            dp0 = 0.0
            ddp0 = 0.0
            continue
        utt_t = np.linspace(synthparm[0],
                            synthparm[1],
                            numpoints,
                            endpoint=False)
        times[i * numpoints:i * numpoints + numpoints] = utt_t
        syl_t = utt_t - synthparm[0]  #start at 0.0
        #y = mx + c
        syltarget_m = synthparm[3]
        syltarget_c = get_intercept(syl_t[-1], synthparm[2], synthparm[3])
        scontour = sylcontour(syl_t, syltarget_m, syltarget_c, p0, dp0, ddp0,
                              synthparm[4])
        if plot:
            pl.plot(syl_t + synthparm[0],
                    np.polyval([syltarget_m, syltarget_c], syl_t),
                    linestyle="dashed",
                    color="red")
            pl.plot(syl_t + synthparm[0], scontour, color="green")
        spline = InterpolatedUnivariateSpline(syl_t, scontour)
        contour[i * numpoints:i * numpoints + numpoints] = scontour
        p0, dp0, ddp0, temp = spline.derivatives(syl_t[-1])
    synthtrack = Track()
    synthtrack.times = times[contour.nonzero()].copy()
    synthtrack.values = contour[contour.nonzero()].reshape((-1, 1)).copy()
    return synthtrack
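
# A minimal sketch of how the per-syllable state (p0, dp0, ddp0) is carried across segments
# above.  The quadratic below is a stand-in for one synthesised syllable contour
# (sylcontour, get_intercept and Track are not reproduced here).
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

t = np.linspace(0.0, 0.2, 100)
seg = 100.0 + 20.0 * t - 50.0 * t ** 2         # toy pitch contour for one syllable
spline = InterpolatedUnivariateSpline(t, seg)
p0, dp0, ddp0, _ = spline.derivatives(t[-1])   # value, slope and curvature at the segment
                                               # end seed the next syllable's initial state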
Example #4
class MassFunction(object):
    """Object representing a mass function for a given input cosmology.

    A MassFunction object can return a properly normalized halo abundance or
    halo bias as a function of halo mass or as a function of nu, as well as
    translate between mass and nu. The current definition is from Sheth & Tormen.

    Attributes:
        redshift: float redshift at which to compute the mass function
        cosmo_single_epoch: SingleEpoch cosmology object from cosmology.py
        halo_dict: dictionary of floats defining halo and mass function
            parameters (see defaults.py for details)
    """
    def __init__(self, redshift=0.0, cosmo_single_epoch=None, 
                 halo_dict=None, **kws):
        self._redshift = redshift
        #self.cosmo = cosmology.SingleEpoch(self._redshift, cosmo_dict)
        if cosmo_single_epoch is None:
            cosmo_single_epoch = cosmology.SingleEpoch(self._redshift)
        self.cosmo = cosmo_single_epoch
        self.cosmo.set_redshift(self._redshift)
        self.delta_c = self.cosmo.delta_c()

        if halo_dict is None:
            halo_dict = defaults.default_halo_dict
        self.halo_dict = halo_dict

        self.delta_v = self.halo_dict['delta_v']
        if self.delta_v == -1:
            self.delta_v = self.cosmo.delta_v()
        self.stq = halo_dict["stq"]
        self.st_little_a = halo_dict["st_little_a"]
        self.c0 = halo_dict["c0"]/(1.0 + redshift)

        self._set_mass_limits()
        self._initialize_splines()
        self._normalize()
        
    def get_redshift(self):
        """
        Return the internal redshift variable.
        """
        return self._redshift

    def set_redshift(self, redshift):
        """
        Reset mass function parameters at redshift.

        Args:
            redshift: float value of redshift
        """
        self._redshift = redshift

        self.cosmo.set_redshift(redshift)

        self.delta_c = self.cosmo.delta_c()
        self.c0 = self.halo_dict["c0"]/(1.0 + redshift)
        self.delta_v = self.halo_dict['delta_v']
        if self.delta_v == -1:
            self.delta_v = self.cosmo.delta_v()

        self._set_mass_limits()
        self._initialize_splines()
        self._normalize()
        
    def get_cosmology(self):
        """
        Return the internal cosmology dictionary.
        """
        return self.cosmo.get_cosmology()

    def set_cosmology(self, cosmo_dict, redshift = None):
        """
        Reset mass function parameters for cosmology cosmo_dict.

        Args:
            cosmo_dict: dictionary of floats defining a cosmology (see
                defaults.py for details)
            redshift: float value of redshift
        """
        if redshift is None:
            redshift = self._redshift
        self.cosmo.set_cosmology(cosmo_dict, redshift)

        self.delta_c = self.cosmo.delta_c()
        self.delta_v = self.halo_dict['delta_v']
        if self.delta_v == -1:
            self.delta_v = self.cosmo.delta_v()
        self.c0 = self.halo_dict["c0"]/(1.0 + redshift)

        self._set_mass_limits()
        self._initialize_splines()
        self._normalize()

    def set_cosmology_object(self, cosmo_single_epoch):
        self._redshift = cosmo_single_epoch.redshift()
        self.cosmo = cosmo_single_epoch

        self.delta_c = self.cosmo.delta_c()
        self.delta_v = self.halo_dict['delta_v']
        if self.delta_v == -1:
            self.delta_v = self.cosmo.delta_v()
        self.c0 = self.halo_dict["c0"]/(1.0 + self._redshift)

        self._set_mass_limits()
        self._initialize_splines()
        self._normalize()
        
    def get_halo(self):
        """
        Return the internal dictionary defining a halo.
        """
        return self.halo_dict

    def set_halo(self, halo_dict):
        """
        Reset mass function parameters for halo_dict.

        Args:
            halo_dict: dictionary of floats defining halos (see
                defaults.py for details)
        """
        self.halo_dict = halo_dict

        self.stq = self.halo_dict["stq"]
        self.st_little_a = self.halo_dict["st_little_a"]
        self.c0 = self.halo_dict["c0"]/(1.0 + self._redshift)
        self.delta_v = self.halo_dict['delta_v']
        if self.delta_v == -1:
            self.delta_v = self.cosmo.delta_v()

        self._normalize()

    def _set_mass_limits(self):
        mass_min = 1.0e9
        mass_max = 1.0e16
        if (defaults.default_limits["mass_min"] > 0 and 
            defaults.default_limits["mass_max"] > 0):
            self.ln_mass_min = numpy.log(defaults.default_limits["mass_min"])
            self.ln_mass_max = numpy.log(defaults.default_limits["mass_max"])
            self._ln_mass_array = numpy.linspace(
                self.ln_mass_min, self.ln_mass_max,
                defaults.default_precision["mass_npoints"])
            return None

        mass_limit_not_set = True
        while mass_limit_not_set:
            if 0.1*(1.0+0.05) < self.cosmo.nu_m(mass_min):
                #print "Min mass", mass_min,"too high..."
                mass_min = mass_min/1.05
                #print "\tSetting to",mass_min,"..."
                continue
            elif 0.1*(1.0-0.05) > self.cosmo.nu_m(mass_min):
                #print "Min mass", mass_min,"too low..."
                mass_min = mass_min*1.05
                #print "\tSetting to",mass_min,"..."
                continue
            if  50.0*(1.0-0.05) > self.cosmo.nu_m(mass_max):
                #print "Max mass", mass_max,"too low..."
                mass_max = mass_max*1.05
                #print "\tSetting to",mass_max,"..."
                continue
            elif 50.0*(1.0+0.05) < self.cosmo.nu_m(mass_max):
                #print "Max mass", mass_max,"too high..."
                mass_max = mass_max/1.05
                #print "\tSetting to",mass_max,"..."
                continue
            mass_limit_not_set = False

        #print "Mass Limits:",mass_min*(0.95),"-",mass_max*(1.05)

        self.ln_mass_min = numpy.log(mass_min)
        self.ln_mass_max = numpy.log(mass_max)

        self._ln_mass_array = numpy.linspace(
            self.ln_mass_min, self.ln_mass_max,
            defaults.default_precision["mass_npoints"])

    def _initialize_splines(self):
        self._nu_array = numpy.zeros_like(self._ln_mass_array)

        for idx in range(self._ln_mass_array.size):
            mass = numpy.exp(self._ln_mass_array[idx])
            self._nu_array[idx] = self.cosmo.nu_m(mass)

        self.nu_min = 1.001*self._nu_array[0]
        self.nu_max = 0.999*self._nu_array[-1]

        #print "nu_min:",self.nu_min,"nu_max:",self.nu_max

        self._nu_spline = InterpolatedUnivariateSpline(
            self._ln_mass_array, self._nu_array)
        self._ln_mass_spline = InterpolatedUnivariateSpline(
            self._nu_array, self._ln_mass_array)

        # Set M_star, the mass for which nu == 1
        self.m_star = self.mass(1.0)

    def _normalize(self):
        self.f_norm = 1.0
        norm = integrate.romberg(
            self.f_nu, self.nu_min, self.nu_max, vec_func=True,
            tol=defaults.default_precision["global_precision"],
            rtol=defaults.default_precision["mass_precision"],
            divmax=defaults.default_precision["divmax"])
        self.f_norm = 1.0/norm

        self.bias_norm = 1.0
        norm = integrate.romberg(
            lambda x: self.f_nu(x)*self.bias_nu(x),
            self.nu_min, self.nu_max, vec_func=True,
            tol=defaults.default_precision["global_precision"],
            rtol=defaults.default_precision["mass_precision"],
            divmax=defaults.default_precision["divmax"])
        self.bias_norm = 1.0/norm

    def f_nu(self, nu):
        """
        Halo mass function as a function of normalized mass over-density nu

        Args:
            nu: float array normalized mass over-density nu
        Returns:
            float array number of halos
        """
        nu_prime = nu*self.st_little_a
        return (
            self.f_norm*(1.0 + nu_prime**(-1.0*self.stq))*
            numpy.sqrt(nu_prime)*numpy.exp(-0.5*nu_prime)/nu)

    def f_m(self, mass):
        """
        Halo mass function as a function of halo mass

        Args:
            mass: float array halo mass
        Returns:
            float array number of halos
        """
        return self.f_nu(self.nu(mass))
    
    def dndm(self, mass):
        """
        Convenience function for computing the number of halos per mass.
        
        Args:
            mass: float value or array of halo mass in M_solar/h
        Returns:
            float value or array number of halos per mass per (Mpc/h)^3
            
        """
        try:
            _dndm = numpy.empty(len(mass))
            for idx, m in enumerate(mass):
                _dndm[idx] = 0.5*(self.cosmo.rho_bar()/(m*m)*
                                  self.f_m(m)*
                                  self._nu_spline.derivatives(numpy.log(m))[1])
            return _dndm
        except TypeError:
            return 0.5*(self.cosmo.rho_bar()/(mass*mass)*
                        self.f_m(mass)*
                        self._nu_spline.derivatives(numpy.log(mass))[1])

    def bias_nu(self, nu):
        """
        Halo bias as a function of nu.

        Args:
            nu: float array normalized mass over-density nu
        Returns:
            float array halo bias
        """
        nu_prime = nu*self.st_little_a
        return self.bias_norm*(
            1.0 + (nu_prime - 1.0)/self.delta_c +
            2.0*self.stq/(self.delta_c*(1.0 + nu_prime**self.stq)))
        
    def bias_m(self, mass):
        """
        Halo bias as a function of mass.

        Args:
            mass: float array halo mass
        Returns:
            float array halo bias
        """
        return self.bias_nu(self.nu(mass))

    def nu(self, mass):
        """
        nu as a function of halo mass.

        Args:
            mass: float array halo mass [M_solar/h]
        Returns:
            float array normalized mass over-density nu
        """
        return self._nu_spline(numpy.log(mass))

    def ln_mass(self, nu):
        """
        Natural log of halo mass as a function of nu.

        Args:
            nu: float array normalized mass over-density
        Returns:
            float array natural log mass [M_Solar]
        """
        return self._ln_mass_spline(nu)

    def mass(self, nu):
        """
        Halo mass as a function of nu.

        Args:
            nu: float array normalized mass over-density
        Returns:
            float array halo mass [M_Solar]
        """
        return numpy.exp(self.ln_mass(nu))

    def write(self, output_file_name):
        """
        Write current mass function values

        Args:
            output_file_name: string file name to write mass function parameters
        """
        print "M* = 10^%1.4f M_sun" % numpy.log10(self.m_star)
        output_file = open(output_file_name, "w")
        output_file.write("#ttype1 = mass [M_solar/h]\n#ttype2 = nu\n"
                          "#ttype3 = f(nu)\n#ttype4 = bias(nu)\n")
        for ln_mass, nu in zip(self._ln_mass_array, self._nu_array):
            output_file.write("%1.10f %1.10f %1.10f %1.10f\n" % (
                numpy.exp(ln_mass), nu, self.f_nu(nu), self.bias_nu(nu)))
        output_file.close()
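
# A minimal sketch of the forward/inverse spline pair built in _initialize_splines above.
# The power law below is a stand-in for nu(M) from the cosmology module (not reproduced
# here); it only needs to be monotonic for the inverse spline to be well defined.
import numpy
from scipy.interpolate import InterpolatedUnivariateSpline

ln_mass = numpy.linspace(numpy.log(1.0e9), numpy.log(1.0e16), 200)
nu = numpy.exp(0.4*(ln_mass - numpy.log(1.0e13)))           # toy, monotonically increasing nu(ln M)
nu_spline = InterpolatedUnivariateSpline(ln_mass, nu)       # ln M -> nu
ln_mass_spline = InterpolatedUnivariateSpline(nu, ln_mass)  # nu -> ln M (inverse map)
m_star = numpy.exp(ln_mass_spline(1.0))                     # mass at which nu == 1, as in m_star above
dnu_dlnm = nu_spline.derivatives(numpy.log(1.0e12))[1]      # slope used by dndm above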
Example #5
def synth2(startpitch,
           synthparms,
           numpoints=100,
           plot=False,
           minlambd=10.0,
           dlambd=5.0):
    """ Limit the strength of articulation to avoid acceleration in
        opposite direction of endheight target...
    """
    times = np.zeros(len(synthparms) * numpoints)
    contour = np.zeros(len(synthparms) * numpoints)
    for i, synthparm in enumerate(synthparms):
        if i == 0:
            p0 = startpitch
            dp0 = 0.0
            ddp0 = 0.0
        if synthparm[0] != synthparms[i - 1][1]:  # not contiguous (e.g. a pause is present)
            dp0 = 0.0
            ddp0 = 0.0
        if any(e is None for e in synthparm):  # no parameters available for this syllable, skip...
            dp0 = 0.0
            ddp0 = 0.0
            continue
        utt_t = np.linspace(synthparm[0],
                            synthparm[1],
                            numpoints,
                            endpoint=False)
        times[i * numpoints:i * numpoints + numpoints] = utt_t
        syl_t = utt_t - synthparm[0]  #start at 0.0
        #y = mx + c
        syltarget_m = synthparm[3]
        syltarget_c = get_intercept(syl_t[-1], synthparm[2], synthparm[3])
        while True:  #resynthesise with lower strength until constraint met
            scontour = sylcontour(syl_t, syltarget_m, syltarget_c, p0, dp0,
                                  ddp0, synthparm[4])
            spline = InterpolatedUnivariateSpline(syl_t, scontour)
            #check acceleration
            if synthparm[4] <= minlambd:
                break
            accels = spline(syl_t, 2)
            if synthparm[2] > p0:
                if np.all(accels > 0.0):
                    break
            elif synthparm[2] < p0:
                if np.all(accels < 0.0):
                    break
            else:
                break
            synthparm[4] -= dlambd
            if synthparm[4] < minlambd:
                synthparm[4] = minlambd
        if plot:
            pl.plot(syl_t + synthparm[0],
                    np.polyval([syltarget_m, syltarget_c], syl_t),
                    linestyle="dashed",
                    color="red")
            pl.plot(syl_t + synthparm[0], scontour, color="green")
        contour[i * numpoints:i * numpoints + numpoints] = scontour
        p0, dp0, ddp0, temp = spline.derivatives(syl_t[-1])
    synthtrack = Track()
    synthtrack.times = times[contour.nonzero()].copy()
    synthtrack.values = contour[contour.nonzero()].reshape((-1, 1)).copy()
    return synthtrack
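
# A minimal sketch of the acceleration check above.  The toy rising contour stands in for
# one resynthesised syllable; the constraint is that the spline's second derivative keeps
# a single sign (pointing toward the target) over the whole syllable.
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

t = np.linspace(0.0, 0.2, 100)
seg = 100.0 + 30.0 * t + 200.0 * t ** 2   # toy contour rising toward a higher target
spline = InterpolatedUnivariateSpline(t, seg)
accels = spline(t, 2)                     # evaluate the second derivative at every sample
constraint_met = bool(np.all(accels > 0.0))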
Example #6
def get_synthparms(syls, f0, startpitch, qtaspecs):
    syls = [syl.gir("Syllable") for syl in syls]

    endheights = np.linspace(qtaspecs["pitch"][0], qtaspecs["pitch"][1],
                             qtaspecs["pitch"][2])
    slopes = np.linspace(qtaspecs["slope"][0], qtaspecs["slope"][1],
                         qtaspecs["slope"][2])
    lambds = np.linspace(qtaspecs["lambd"][0], qtaspecs["lambd"][1],
                         qtaspecs["lambd"][2])
    xcepts = qtaspecs["xcept"]

    times = []
    contiguous = []
    for i, syl in enumerate(syls):
        times.append([syl[STARTLAB], syl[ENDLAB]])
        if i == 0:
            contiguous.append(False)
        else:
            if times[-2][1] != times[-1][0]:
                contiguous.append(False)
            else:
                contiguous.append(True)

    synthparms = []
    sylmses = []
    for i, iscont, tbounds, syl in zip(range(len(syls)), contiguous, times,
                                       syls):
        #set initial conditions
        if i == 0:
            p0 = startpitch
            dp0 = 0.0
            ddp0 = 0.0
        if not iscont:  #if not contiguous, we reset the pitch dynamics but keep "p0"
            dp0 = 0.0
            ddp0 = 0.0
        #special context parameter ranges:
        sylcontext = syl["qtaparmclass"]  #can be None
        if sylcontext in xcepts:
            xparms = xcepts[sylcontext]
            l_endheights = np.linspace(xparms["pitch"][0], xparms["pitch"][1],
                                       xparms["pitch"][2])
            l_slopes = np.linspace(xparms["slope"][0], xparms["slope"][1],
                                   xparms["slope"][2])
            l_lambds = np.linspace(xparms["lambd"][0], xparms["lambd"][1],
                                   xparms["lambd"][2])
        else:
            l_endheights = endheights
            l_slopes = slopes
            l_lambds = lambds
        #get f0 contour for this syllable, interpolate if completely
        #unvoiced (this should theoretically not be possible but may
        #occur because of extraction and/or alignment errors):
        f0syl = f0.slice(f0.index_at(tbounds[0]), f0.index_at(tbounds[1]))
        try:
            assert len(f0syl.values.ravel().nonzero()[0]) > 0
        except AssertionError:
            f0syl = f0.newtrack_from_linearinterp(
                f0.times[f0.index_at(tbounds[0]):f0.index_at(tbounds[1])],
                ignore_zeros=True)
        #search over parameters to minimise MSE
        parms = np.zeros(
            (len(l_endheights) * len(l_slopes) * len(l_lambds), 3))
        mses = np.zeros(len(l_endheights) * len(l_slopes) * len(l_lambds))
        j = 0  # grid counter, kept separate from the outer syllable index i
        syl_t = f0syl.times - f0syl.times[0]
        for endheight in l_endheights:
            for slope in l_slopes:
                for l in l_lambds:
                    parms[j][0] = endheight
                    parms[j][1] = slope
                    parms[j][2] = l
                    #syltarget = mx + c
                    syltarget_m = slope
                    syltarget_c = get_intercept(syl_t[-1], endheight, slope)
                    a = sylcontour(syl_t, syltarget_m, syltarget_c, p0, dp0,
                                   ddp0, l)
                    nonzeroindices = f0syl.values.ravel().nonzero()
                    mses[j] = np.mean((f0syl.values.ravel()[nonzeroindices] -
                                       a[nonzeroindices])**2)
                    j += 1
        #process the best parms:
        bestparms = parms[np.argmin(mses)]
        syltarget_m = bestparms[1]
        syltarget_c = get_intercept(syl_t[-1], bestparms[0], bestparms[1])
        a = sylcontour(syl_t, syltarget_m, syltarget_c, p0, dp0, ddp0,
                       bestparms[2])
        synthparms.append(
            [tbounds[0], tbounds[1], bestparms[0], bestparms[1], bestparms[2]])
        sylmses.append(np.min(mses))
        #calculate initial conditions for next syllable
        spline = InterpolatedUnivariateSpline(syl_t, a)
        p0, dp0, ddp0, unused = spline.derivatives(syl_t[-1])
    return synthparms, sylmses
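
# A minimal sketch of the grid search over target parameters above.  The straight-line
# "model" below is a stand-in for sylcontour, and the observed contour and parameter
# grids are illustrative only.
import numpy as np

t = np.linspace(0.0, 0.2, 50)
observed = 100.0 + 25.0 * t                                  # toy observed f0 for one syllable
grid = [(h, s) for h in np.linspace(95.0, 110.0, 4)
        for s in np.linspace(0.0, 50.0, 6)]
mses = [np.mean((observed - (h + s * (t - t[-1]))) ** 2)     # line reaching height h at the end
        for h, s in grid]
best_height, best_slope = grid[int(np.argmin(mses))]         # lowest-MSE parameter pair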