Example #1
def particle_motion_odr(stream, noise_thres=0):
    """
    Computes the orientation of the particle motion vector based on an
    orthogonal regression algorithm.

    :param stream: ZNE sorted trace data
    :type stream: :class:`~obspy.core.stream.Stream`
    :param noise_thres: variance of the noise sphere; data points are excluded
        when falling within the sphere of radius sqrt(noise_thres)
    :type noise_thres: float
    :returns: azimuth, incidence, error of azimuth, error of incidence
    """
    Z = []
    N = []
    E = []
    comp, npts = np.shape(stream)

    for i in range(0, npts):
        if (stream[0][i] ** 2 + stream[1][i] ** 2 + stream[2][i] ** 2) \
                > noise_thres:
            Z.append(stream[0][i])
            N.append(stream[1][i])
            E.append(stream[2][i])

    def fit_func(beta, x):
        # XXX: Eventually this is correct: return beta[0] * x + beta[1]
        return beta[0] * x

    data = scipy.odr.Data(E, N)
    model = scipy.odr.Model(fit_func)
    odr = scipy.odr.ODR(data, model, beta0=[1.])
    out = odr.run()
    az_slope = out.beta[0]
    az_error = out.sd_beta[0]

    N = np.asarray(N)
    E = np.asarray(E)
    Z = np.asarray(Z)
    R = np.sqrt(N ** 2 + E ** 2)

    data = scipy.odr.Data(R, abs(Z))
    model = scipy.odr.Model(fit_func)
    odr = scipy.odr.ODR(data, model, beta0=[1.0])
    out = odr.run()
    in_slope = out.beta[0]
    in_error = out.sd_beta[0]

    azim = math.atan2(1.0, az_slope)
    inc = math.atan2(1.0, in_slope)
    az_error = 1.0 / ((1.0 ** 2 + az_slope ** 2) * azim) * az_error
    in_error = 1.0 / ((1.0 ** 2 + in_slope ** 2) * inc) * in_error

    return math.degrees(azim), math.degrees(inc), az_error, in_error
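A quick way to exercise particle_motion_odr is with a synthetic, mostly rectilinear ZNE array; the sketch below is illustrative only and assumes the usual imports (numpy, scipy.odr, math) that the snippet relies on.

import math
import numpy as np
import scipy.odr

# Illustrative synthetic data: one rectilinear signal split across Z, N, E with a little noise.
rng = np.random.default_rng(0)
t = np.linspace(0.0, 1.0, 500)
wave = np.sin(2.0 * np.pi * 5.0 * t)
zne = np.vstack([
    0.5 * wave + 0.01 * rng.standard_normal(t.size),  # Z
    0.8 * wave + 0.01 * rng.standard_normal(t.size),  # N
    0.6 * wave + 0.01 * rng.standard_normal(t.size),  # E
])

azimuth, incidence, az_err, in_err = particle_motion_odr(zne, noise_thres=1e-4)
print(azimuth, incidence, az_err, in_err)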
Example #2
    def LinearFit(self, full_scan, points, pose):
        using_adjacent = False
        if len(points) == 1:
            # try to find adjacent points
            for neighbor in full_scan:
                if (abs(neighbor[0] - points[0][0]) < self.disc * 2.0
                        and abs(neighbor[1] - points[0][1]) < self.disc * 2.0
                        and neighbor[0] != points[0][0]
                        and neighbor[1] != points[0][1]):
                    using_adjacent = True
                    points.append(neighbor)

        if len(points) == 1:
            # get perpendicular fit
            A = -1.0 * (points[0][0] - pose[0][2]) / (points[0][1] -
                                                      pose[1][2])
            b = points[0][1] - (A * points[0][0])

        else:
            # get fit from orthogonal regression (thanks scipy!)
            points_arr = np.array(points)
            data = scipy.odr.RealData(points_arr[:, 0], points_arr[:, 1])
            odr = scipy.odr.ODR(data, scipy.odr.polynomial(1))
            output = odr.run()
            b = output.beta[0]
            A = output.beta[1]

        b /= self.disc

        if using_adjacent:
            while len(points) > 1:
                del points[-1]

        return A, b
Example #3
def fit_gaussians_to_isotopes():
	for isotope in isotopes:
		# except for Na-22, because Na-22 sucks
		if isotope.name == 'Na-22':
			continue

		for peak in isotope.peaks:
			guess_channel = int(energy_to_channel_estimation(peak.energy))
			guess = (guess_channel, isotope.count[guess_channel], peak.fitradius)

			fitrange = slice(guess_channel - peak.fitradius,
	                    guess_channel + peak.fitradius)

			peak.channel_fit = channel[fitrange]
			peak.count_fit = isotope.count[fitrange]

			data = scipy.odr.RealData(
				peak.channel_fit, peak.count_fit, sy=np.sqrt(peak.count_fit))

			odr = scipy.odr.ODR(data, scipy.odr.Model(
				lambda p, x: gaussian(x, *p)), beta0=guess)

			result = odr.run()

			peak.popt = result.beta
			peak.center = result.beta[0]
			peak.error = result.sd_beta[0]

			if _verbose:
				print(
					f'{isotope.name},\t{peak.energy/1e6:0.3f} MeV,\tchannel guess: {guess_channel}\tfit: {peak.center:0.2f}\tuncertainty: {peak.error:0.2f}')
Example #4
def fit_final():
	data = scipy.odr.RealData(X, Y, sx=Y_err)

	odr = scipy.odr.ODR(data, scipy.odr.Model(
		lambda p, x: linear(x, *p)), beta0=[2e-3, 1e-3])

	out = odr.run()

	return out.beta, out.sd_beta
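fit_final leans on module-level X, Y, Y_err and a linear helper that are not part of the excerpt; one plausible, purely illustrative setup (hypothetical names and values) could be the following. Note that the excerpt passes Y_err as sx, i.e. as an x-uncertainty.

import numpy as np
import scipy.odr

def linear(x, m, c):
    # assumed helper matching the lambda p, x: linear(x, *p) call above
    return m * x + c

# hypothetical data
X = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
Y = np.array([2.1e-3, 4.0e-3, 6.2e-3, 7.9e-3, 10.1e-3])
Y_err = np.full_like(Y, 2e-4)

beta, sd_beta = fit_final()
print(beta, sd_beta)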
Example #5
def linear_regression(x, y, uncert_x=None, uncert_y=None):
    """
    Linear regression with uncertainties in both directions.

    Parameters
    ----------
    x : array_like
        x-values
    y : array_like
        y-values
    uncert_x : array_like or None
        uncertainty of x-values
    uncert_y : array_like or None
        uncertainty of y-values

    Takes two arrays of x-values and y-values and, optionally, two arrays of
    uncertainties. The uncertainty arguments default to None, which is
    treated as zero uncertainty on every value.
    Fits a function y=m*x+c to the values and returns the results for m and c
    with uncertainty as well as their correlation and the chisquared of the
    fit. It returns a tuple containing:
        m, sigma_m, c, sigma_c, correlation, chisquared
    
    If both uncertainties are nonzero, the calculation cannot be
    performed analytically and the package scipy.odr is used.
    """
    # if only one uncertainty is given, calculate analytically
    if uncert_x is None:
        return linear_regression_1(x, y, uncert_y)
    elif uncert_y is None:
        # as linear_regression_1 assumes uncertainties in y, switch the axes
        m, um, c, uc, corr, chisq = linear_regression_1(y,x,uncert_x)
        sigma_c = sqrt(uc**2/m**2+c**2*um**2/m**4-c/m**3*corr*um*uc)
        return 1/m, um/m**2, -c/m, sigma_c, corr, chisq
    
    # For a first guess, assume a slope around 1 and weight both uncertainties
    # equal. Calculate initial values analytically.
    uncert_sum = uncert_x + uncert_y
    m0, um0, c0, uc0, corr0, chisq0 = linear_regression_1(x, y, uncert_sum)

    def f(B, x):
        return B[0]*x + B[1]
    
    model  = scipy.odr.Model(f)
    data   = scipy.odr.RealData(x, y, sx=uncert_x, sy=uncert_y)
    odr    = scipy.odr.ODR(data, model, beta0=[m0, c0])
    output = odr.run()
    ndof = len(x)-2
    chiq = output.res_var*ndof
    sigma_m = sqrt(output.cov_beta[0,0])
    sigma_c = sqrt(output.cov_beta[1,1])
    corr = output.cov_beta[0,1] /sigma_m /sigma_c
    
    return (output.beta[0], sigma_m, output.beta[1], sigma_c, corr, chiq)
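A hedged usage sketch; it assumes the companion linear_regression_1 routine from the same module is available for the initial analytic estimate.

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
y = np.array([2.2, 3.9, 6.1, 8.0, 9.8])
ux = np.full_like(x, 0.05)
uy = np.full_like(y, 0.10)

m, sigma_m, c, sigma_c, corr, chisq = linear_regression(x, y, uncert_x=ux, uncert_y=uy)
print(m, sigma_m, c, sigma_c, corr, chisq)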
Example #6
def lineare_regression_minus1(x, y, ex, ey):
    def f(B, x):
        return -x + B[0]

    model = scipy.odr.Model(f)
    data = scipy.odr.RealData(x, y, sx=ex, sy=ey)
    odr = scipy.odr.ODR(data, model, beta0=[0])
    output = odr.run()
    ndof = len(x) - 1
    chiq = output.res_var * ndof

    return output.beta[0], np.sqrt(output.cov_beta[0, 0]), chiq
Example #7
    def histogram(self, bin_number, guess=300):
        plt.figure()

        counts, bins, patch = plt.hist(gas.spd,
                                       bins=bin_number,
                                       label="Simulation Result",
                                       density=True,
                                       color="skyblue",
                                       ec="skyblue")
        step = (bins[1] - bins[0]) / 2
        bins = [i + step for i in bins[:-1]]
        k = 1.38e-23
        m = self.__particle_m
        '''This is the normalised 2D Maxwell-Boltzmann distribution.'''
        def boltzmann_2d(p, x):
            T = p
            return (m / T / k) * x * np.exp((-m * x**2.) / 2. / k / T)

        '''This is the unnormalised 3D Maxwell-Boltzmann distribution.'''

        def boltzmann_3d(p, x):
            T = p
            return ((m /
                     (2. * np.pi * k * T))**1.5) * 4 * np.pi * (x**2) * np.exp(
                         (-m * x**2.) / 2. / k / T)

        func_list = [None, None, boltzmann_2d, boltzmann_3d]
        '''Use orthogonal distance regression to fit the histogram.
        The initial estimate must be close to the true value and cannot be too large.'''
        init_guess = [guess]
        model = scipy.odr.Model(func_list[self.__d])
        data = scipy.odr.RealData(bins, counts)
        odr = scipy.odr.ODR(data, model, beta0=init_guess)
        out = odr.run()
        out.pprint()

        x_fit = np.linspace(0, bins[-1], 2000)
        y_fit = func_list[self.__d](out.beta, x_fit)
        fit_label = [
            None, None, "Unnormalised 2D Maxwell-\nBoltzmann Distribution Fit",
            "Unnormalised 3D Maxwell-\nBoltzmann Distribution Fit"
        ]
        plt.plot(x_fit, y_fit, c='b', label=fit_label[self.__d])
        plt.title("Probability Density   v.s.   Particle Speed")
        plt.xlabel("Particle Speed (m/s)")
        plt.ylabel("Probability Density")
        plt.scatter(bins, counts, c='k', s=4)
        plt.legend(loc='best')
        plt.show()

        return None
Example #8
def odr_linear(x, y, intercept=None, beta0=None):
    """
    Performs orthogonal linear regression on x, y data.

    Parameters
    ----------
    x: array_like
        x-data, 1D array.  Must be the same lengths as `y`.
    y: array_like
        y-data, 1D array.  Must be the same lengths as `x`.
    intercept: float, default None
        If not None, fixes the intercept.
    beta0: array_like, shape (2,)
        Guess at the slope and intercept, respectively.

    Returns
    -------
    output: ndarray, shape (2,)
        Array containing slope and intercept of ODR line.
    """

    def linear_fun(p, x):
        return p[0] * x + p[1]

    def linear_fun_fixed(p, x):
        return p[0] * x + intercept

    # Set the model to be used for the ODR fitting
    if intercept is None:
        model = scipy.odr.Model(linear_fun)
        if beta0 is None:
            beta0 = (0.0, 1.0)
    else:
        model = scipy.odr.Model(linear_fun_fixed)
        if beta0 is None:
            beta0 = (1.0,)

    # Make a Data instance
    data = scipy.odr.Data(x, y)

    # Instantiate ODR
    odr = scipy.odr.ODR(data, model, beta0=beta0)

    # Perform ODR fit
    try:
        result = odr.run()
    except scipy.odr.OdrError:
        raise scipy.odr.OdrError('ODR failed.')

    return result.beta
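An illustrative call of odr_linear, assuming numpy and scipy.odr are imported at module level; the fixed-intercept branch returns a single-element parameter array.

import numpy as np

rng = np.random.default_rng(1)
x = np.linspace(0.0, 10.0, 50)
y = 2.0 * x + 1.0 + rng.normal(0.0, 0.2, x.size)

slope, intercept = odr_linear(x, y)                # slope and intercept both free
(slope_fixed,) = odr_linear(x, y, intercept=1.0)   # intercept held at 1.0
print(slope, intercept, slope_fixed)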
Example #9
def wilson(data):
    '''
    Calculate useful things like mass ratio and systemic velocity, taking
    into account the uncertainties in both the primary and secondary velocities.

    Parameters
    ----------
    data : list
        Radial velocity pairs in a 2D list.

    Returns
    -------
    out.beta[0] : float
        Mass Ratio of the system. The ratio of the secondary
        component mass to the primary.
    
    out.beta[1] : float
        y-intercept of the line which fits the data.

    out.sd_beta[0] : float
        Standard error of the estimated mass ratio.

    '''
    import scipy.odr
    
    # Primary RVs on y.
    y = [datum[1] for datum in data if not np.isnan(datum[1]+datum[3])]
    y_err = [datum[2] for datum in data if not np.isnan(datum[1]+datum[3])]
    # Secondary RVs on x.
    x = [datum[3] for datum in data if not np.isnan(datum[1]+datum[3])]
    x_err = [datum[4] for datum in data if not np.isnan(datum[1]+datum[3])]

    # "line" will be used by scipy.odr to determine the mass_ratio best fit.
    def line(p, x):
        q, gamma = p
        return -q * x + gamma

    # Create a model for fitting.
    line_model = scipy.odr.Model(line)

    # Create a RealData object using the data arguments.
    model_data = scipy.odr.RealData(x, y, sx=x_err, sy=y_err)

    # Set up ODR with the model and model_data.
    odr = scipy.odr.ODR(model_data, line_model, beta0=[0.,1.])

    # Run the regression.
    out=odr.run()

    return [out.beta[0], out.beta[1], out.sd_beta[0]]
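The list comprehensions suggest each row of data holds [label or epoch, primary RV, primary error, secondary RV, secondary error]; the meaning of column 0 is an assumption here, and the values below are illustrative only.

import numpy as np

# Assumed row layout: [epoch, primary RV, primary RV error, secondary RV, secondary RV error]
rv_data = [
    [0.0,  30.0, 1.0, -45.0, 1.5],
    [1.0, -20.0, 1.0,  25.0, 1.5],
    [2.0,  10.0, 1.0, -17.0, 1.5],
    [3.0,  42.0, 1.0, -62.0, 1.5],
]
mass_ratio, intercept, mass_ratio_err = wilson(rv_data)
print(mass_ratio, intercept, mass_ratio_err)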
Example #10
def _odr_linear(x, y, intercept=None, beta0=None):
    """
    Performs orthogonal linear regression on x, y data.

    Parameters
    ----------
    x: array_like
        x-data, 1D array.  Must be the same lengths as `y`.
    y: array_like
        y-data, 1D array.  Must be the same lengths as `x`.
    intercept: float, default None
        If not None, fixes the intercept.
    beta0: array_like, shape (2,)
        Guess at the slope and intercept, respectively.

    Returns
    -------
    output: ndarray, shape (2,)
        Array containing slope and intercept of ODR line.
    """

    def linear_fun(p, x):
        return p[0] * x + p[1]

    def linear_fun_fixed(p, x):
        return p[0] * x + intercept

    # Set the model to be used for the ODR fitting
    if intercept is None:
        model = scipy.odr.Model(linear_fun)
        if beta0 is None:
            beta0 = (0.0, 1.0)
    else:
        model = scipy.odr.Model(linear_fun_fixed)
        if beta0 is None:
            beta0 = (1.0,)

    # Make a Data instance
    data = scipy.odr.Data(x, y)

    # Instantiate ODR
    odr = scipy.odr.ODR(data, model, beta0=beta0)

    # Perform ODR fit
    try:
        result = odr.run()
    except scipy.odr.OdrError:
        raise scipy.odr.OdrError("ODR failed.")

    return result.beta
Example #11
def odrfit(x, y, sx, sy, func, start_vector):
    model = odr.Model(func)
    data = odr.RealData(x, y, sx=sx, sy=sy)
    # keep a distinct name so the scipy.odr module reference `odr` is not shadowed
    fitter = odr.ODR(data, model, beta0=start_vector)
    out = fitter.run()
    pols = out.beta
    sds = out.sd_beta

    yp = func(pols, x)
    mean = sum(y) / len(y)
    ssres = sum((y - yp)**2)
    sstot = sum((y - mean)**2)
    r2 = 1 - ssres / sstot

    return pols, sds, r2
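odrfit expects func in scipy.odr's (beta, x) convention; a hedged usage sketch, assuming the module was imported as import scipy.odr as odr.

import numpy as np

def quadratic(beta, x):
    # model in the (beta, x) order required by scipy.odr.Model
    return beta[0] * x ** 2 + beta[1] * x + beta[2]

rng = np.random.default_rng(3)
x = np.linspace(-3.0, 3.0, 40)
y = 0.5 * x ** 2 - 1.0 * x + 2.0 + rng.normal(0.0, 0.1, x.size)
sx = np.full_like(x, 0.05)
sy = np.full_like(y, 0.1)

pols, sds, r2 = odrfit(x, y, sx, sy, quadratic, [1.0, 0.0, 0.0])
print(pols, sds, r2)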
Example #12
def runTotalLeastSquaresRegression(x,y):
    # Create a model for fitting.
    linear_model = scipy.odr.Model(fitFunction)

    # Create a RealData object using our initiated data from above.
    data = scipy.odr.RealData(x, y)

    # Set up ODR with the model and data.
    odr = scipy.odr.ODR(data, linear_model, beta0=[0., 1.])

    # Run the regression.
    out = odr.run()

    # Use the in-built pprint method to give us results.
    out.pprint()

    return out.beta
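fitFunction is defined elsewhere in that project; a plausible stand-in (the parameter order matching beta0=[0., 1.] is an assumption) and a call might look like this.

import numpy as np
import scipy.odr

def fitFunction(p, x):
    # assumed linear model: p[0] is the intercept, p[1] the slope
    return p[0] + p[1] * x

rng = np.random.default_rng(4)
x = np.linspace(0.0, 5.0, 30)
y = 0.5 + 3.0 * x + rng.normal(0.0, 0.1, x.size)
beta = runTotalLeastSquaresRegression(x, y)
print(beta)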
Example #13
	def LinearFit(self, points, pose):
		if len(points) == 1:
			# get perpendicular fit
			A = -1.0 * (points[0][0] - pose[0][2]) / (points[0][1] - pose[1][2])
			b = points[0][1] - (A * points[0][0])
		else:
			# get fit from orthogonal regression (thanks scipy!)
			points = np.array(points)
			data = scipy.odr.RealData(points[:,0],points[:,1])
			odr = scipy.odr.ODR(data, scipy.odr.polynomial(1))
			output = odr.run()
			b = output.beta[0]
			A = output.beta[1]

		b /= self.disc

		return A,b
Example #14
def fit_amplitude(lineamplitudes, err_lineamplitudes, crdt_invariant,
                  err_crdt_invariant):
    def fun(p, x):
        return p * x * 2  # factor 2 to get to complex amplitudes again

    fit_model = scipy.odr.Model(fun)
    data_model = scipy.odr.RealData(
        x=crdt_invariant,
        y=lineamplitudes,
        sx=err_crdt_invariant,
        sy=None if all(err == 0.
                       for err in err_lineamplitudes) else err_lineamplitudes,
    )
    odr = scipy.odr.ODR(data_model,
                        fit_model,
                        beta0=[0.5 * lineamplitudes[0] / crdt_invariant[0]])
    odr_output = odr.run()
    return odr_output.beta[0], odr_output.sd_beta[0]
Example #15
def odr_fit(xi, yi, dxi, dyi):
    """Perform weighted orthogonal distance regression.
    https://docs.scipy.org/doc/scipy/reference/odr.html (valid on 2019-04-16)
    Parameters:
    xi, yi      np.array, x and y values
    dxi, dyi    np.array, x and y errors
    Returns:
    slope       regression slope estimate
    intercept   regression intercept estimate
    """
    def f(B, x):
        """Define linear function y = a * x + b for ODR.
        Parameters:
        B   [slope, intercept]
        x   x values
        """
        return B[0] * x + B[1]

    # define the model for ODR
    linear = scipy.odr.Model(f)

    # formalize the data
    data = scipy.odr.RealData(
        xi,
        yi,
        sx=dxi,
        sy=dyi)

    # make OLS fit to get initial guesses for slope and intercept
    slope_ols, intercept_ols = np.polyfit(xi, yi, 1)

    # instantiate ODR with your data, model and initial parameter estimate
    # use OLS regression coefficients as initial guess
    odr = scipy.odr.ODR(
        data,
        linear,
        beta0=[slope_ols, intercept_ols])

    # run the fit
    output = odr.run()
    slope, intercept = output.beta

    return slope, intercept
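A hedged usage sketch of odr_fit with synthetic values and per-point uncertainties.

import numpy as np

rng = np.random.default_rng(2)
xi = np.linspace(0.0, 10.0, 25)
yi = 1.5 * xi + 4.0 + rng.normal(0.0, 0.3, xi.size)
dxi = np.full_like(xi, 0.1)
dyi = np.full_like(yi, 0.3)

slope, intercept = odr_fit(xi, yi, dxi, dyi)
print(slope, intercept)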
Example #16
def fit_energy_scale():
	energies = []
	channels = []
	channel_uncertainties = []

	for isotope in isotopes:
		for peak in isotope.peaks:
			if peak.center:
				energies.append(peak.energy)
				channels.append(peak.center)
				channel_uncertainties.append(peak.error)

	data = scipy.odr.RealData(channels, energies, sx=channel_uncertainties)

	odr = scipy.odr.ODR(data, scipy.odr.Model(
		lambda p, x: linear(x, *p)), beta0=[1 / etc_slope_guess, 0])

	out = odr.run()

	return out.beta, out.sd_beta
Example #17
def lineare_regression_xy(x,y,ex,ey):
    '''

    Linear regression with errors in both x and y.

    Parameters
    ----------
    x : array_like
        x-values of the data points
    y : array_like
        y-values of the data points
    ex : array_like
        errors on the x-values of the data points
    ey : array_like
        errors on the y-values of the data points

    This function takes four lists as arguments:
    the x-values, the y-values, and one list each with the errors
    of the x- and y-values.
    It fits a straight line to the values and returns the
    slope a and y-intercept b with their errors,
    as well as the chi^2 and the correlation of a and b,
    as a list in the order
    [a, ea, b, eb, chiq, corr].

    The function uses scipy's ODR algorithm.
    '''
    a_ini,ea_ini,b_ini,eb_ini,chiq_ini,corr_ini = lineare_regression(x,y,ey)

    def f(B, x):
        return B[0]*x + B[1]

    model  = scipy.odr.Model(f)
    data   = scipy.odr.RealData(x, y, sx=ex, sy=ey)
    odr    = scipy.odr.ODR(data, model, beta0=[a_ini, b_ini])
    output = odr.run()
    ndof = len(x)-2
    chiq = output.res_var*ndof
    corr = output.cov_beta[0,1]/np.sqrt(output.cov_beta[0,0]*output.cov_beta[1,1])

    return output.beta[0],output.sd_beta[0],output.beta[1],output.sd_beta[1],chiq,corr
Example #18
def fit_gaussian():
	for dataset in datasets:
		guess_channel = int(dataset.center)
		guess = (guess_channel, dataset.count_diff[guess_channel], fitradius)

		fitrange = slice(guess_channel - fitradius,
	                  guess_channel + fitradius)

		dataset.channel_fit = channel[fitrange]
		dataset.count_fit = dataset.count_diff[fitrange]

		data = scipy.odr.RealData(
			dataset.channel_fit, dataset.count_fit, sy=np.sqrt(dataset.count_fit))

		odr = scipy.odr.ODR(data, scipy.odr.Model(
			lambda p, x: gaussian(x, *p)), beta0=guess)

		result = odr.run()

		dataset.popt = result.beta
		dataset.center = result.beta[0]
		dataset.center_error = result.sd_beta[0]
		dataset.center_energy, dataset.center_energy_error = channel_to_energy(
			dataset.center, dataset.center_error)
Example #19
def particle_motion_odr(stream, noise_thres=0):
    """
    Computes the orientation of the particle motion vector based on an
    orthogonal regression algorithm.

    :param stream: ZNE sorted trace data
    :type stream: :class:`~obspy.core.stream.Stream`
    :param noise_thres: variance of the noise sphere; data points are excluded
        when falling within the sphere of radius sqrt(noise_thres)
    :type noise_thres: float
    :returns: azimuth, incidence, error of azimuth, error of incidence
    """
    z = []
    n = []
    e = []
    comp, npts = np.shape(stream)

    for i in range(0, npts):
        if (stream[0][i] ** 2 + stream[1][i] ** 2 + stream[2][i] ** 2) \
                > noise_thres:
            z.append(stream[0][i])
            n.append(stream[1][i])
            e.append(stream[2][i])

    def fit_func(beta, x):
        # XXX: Eventually this is correct: return beta[0] * x + beta[1]
        return beta[0] * x

    data = scipy.odr.Data(e, n)
    model = scipy.odr.Model(fit_func)
    odr = scipy.odr.ODR(data, model, beta0=[1.])
    out = odr.run()
    az_slope = out.beta[0]
    az_error = out.sd_beta[0]

    n = np.asarray(n)
    e = np.asarray(e)
    z = np.asarray(z)
    r = np.sqrt(n**2 + e**2)

    data = scipy.odr.Data(r, abs(z))
    model = scipy.odr.Model(fit_func)
    odr = scipy.odr.ODR(data, model, beta0=[1.0])
    out = odr.run()
    in_slope = out.beta[0]
    in_error = out.sd_beta[0]

    azimuth = math.atan2(1.0, az_slope)
    incidence = math.atan2(1.0, in_slope)

    az_error = 1.0 / ((1.0**2 + az_slope**2) * azimuth) * az_error
    # az_error = math.degrees(az_error)
    in_error = 1.0 / ((1.0**2 + in_slope**2) * incidence) * in_error
    # in_error = math.degrees(in_error)

    azimuth = math.degrees(azimuth)
    incidence = math.degrees(incidence)

    if azimuth < 0.0:
        azimuth = 360.0 + azimuth
    if incidence < 0.0:
        incidence += 180.0
    if incidence > 90.0:
        incidence = 180.0 - incidence
        if azimuth > 180.0:
            azimuth -= 180.0
        else:
            azimuth += 180.0
    if azimuth > 180.0:
        azimuth -= 180.0

    return azimuth, incidence, az_error, in_error
Example #20
				y[ind] /= y[ind_norm]
				x_err = x*0.05
				if resultfile is not None:
					f = open("%s_tm%e_tau%e_de%e.dat"%(resultfile[:-4], t, ta, de),'w')
					f.write("#bfac sig sig_err delta tau tm dac\n")
					N.savetxt(f, results[tbvt][ind])
					f.close()
				result_d =  N.polyfit(x[ind*(y>min_value)],N.log(y[ind*(y>min_value)]),1)
				P.semilogy(x[(y>min_value)*ind], y[ind*(y>min_value)],'o')
				P.semilogy(x[(y>min_value)*ind], N.exp(N.polyval(result_d,x[ind*(y>min_value)])),'-')

				#assume 5% error from calibration
				data = scipy.odr.Data(x=x[ind],y=y[ind],wd=y_err[ind])
				model = scipy.odr.Model(diffusion)
				odr = scipy.odr.ODR(data,model,beta0=[2e-11,0.0], ifixx=(0,))
				odr.run()
				print "ODR Result"
				odr.output.pprint()

				results_d.append([t,result_d[0]])
	results_d = N.array(results_d)

	saveresults = open("PFGSTE_Myoglobin-0p3_2009-10-28.result",'a')
	saveresults.write("#bfac sig sig_err delta tau tm dac\n")
	N.savetxt(saveresults, results_d )
	saveresults.close()

	P.subplot(212)
	P.loglog(results_d[:,0],-1*results_d[:,1],'o')
	P.show()
	
Example #21
# test data and error
x0 = np.linspace(-10, 10, 100)
y0 = - 0.07 * x0 * x0 + 0.5 * x0 + 2.
noise_x = np.random.normal(0.0, 1.0, len(x0))
noise_y = np.random.normal(0.0, 1.0, len(x0))
y = y0 + noise_y
x = x0 + noise_x

# Create a RealData object
data = odr.RealData(x, y, sx=noise_x, sy=noise_y)

# Set up ODR with the model and data.
odr = odr.ODR(data, quad_model, beta0=[0., 1., 1.])

# Run the regression.
out = odr.run()

#print fit parameters and 1-sigma estimates
popt = out.beta
perr = out.sd_beta
print("fit parameter 1-sigma error")
print('-----------------------------')
for i in range(len(popt)):
	print(str(popt[i])+ ' +- '+str(perr[i]))

# prepare confidence level curves
nstd = 5. # to draw 5-sigma intervals
popt_up = popt + nstd * perr
popt_dw = popt - nstd * perr

x_fit = np.linspace(min(x), max(x), 100)
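The excerpt ends before the band is drawn; one way the 5-sigma curves might be plotted, assuming quad_model wraps a quadratic like the one below (the function name and the matplotlib import are assumptions), is:

import matplotlib.pyplot as plt

def quad_func(beta, x):
    # assumed form behind quad_model, matching beta0=[0., 1., 1.]
    return beta[0] * x ** 2 + beta[1] * x + beta[2]

fit = quad_func(popt, x_fit)
fit_up = quad_func(popt_up, x_fit)
fit_dw = quad_func(popt_dw, x_fit)

plt.errorbar(x, y, xerr=np.abs(noise_x), yerr=np.abs(noise_y), fmt='k.', label='data')
plt.plot(x_fit, fit, 'r', lw=2, label='best fit')
plt.fill_between(x_fit, fit_up, fit_dw, alpha=0.25, label='5-sigma interval')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.show()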
Example #22
def plot_scatter_2d(df,
                    structure,
                    mapping,
                    color_selection,
                    plot_foi,
                    *doi,
                    x_lab='dna_volume',
                    odrreg=False,
                    addtotitle=None,
                    savegraphs=False,
                    savedir=Path(''),
                    y_lim=None):
    """
    Plots 2D scatter plots with the y-axis being each of the features provided and the x-axis being dna_volume by
    default (or the user's input). If plotting any drug group is skipped, a message with the error is printed to the console.

    :param df: pd.dataframe

    :param structure: string name of structure currently being analyzed

    :param mapping: List of drug_label strings

    :param color_selection: list of strings denoting plotting colors, with length equal to the number of drug_labels
    in that structure group

    :param plot_foi: list of features of interest to plot. Generates one plot per foi

    :param doi: string extension of drug of interest to plot

    :param x_lab: string name to go on x-axis. Default "dna_volume"

    :param odrreg: boolean. Default False. True plots an ODR regression line with the R^2 value and slope

    :param addtotitle: optional. string to tag to graph title

    :param savegraphs: boolean. Default False does not save graphs; True saves graphs out to png.

    :param savedir: Path object of directory to save out graphs to

    :param y_lim: tuple. parameter with ymin and ymax for normalizing scale on all graphs

    :return: None
    """
    if isinstance(doi[0], list):
        doi = doi[0]

    for foi in plot_foi:

        # To make sure all plots are on same scale, hard-code y-limit
        if y_lim is not None:
            plt.ylim(*y_lim)

        for drug in doi:
            index = mapping.index(drug)
            fig = plt.figure()
            ax = fig.add_subplot(111)
            y_lab = foi

            if addtotitle is None:
                title = f'{structure}__{foi}__{drug}'
            else:
                title = f'{structure}__{foi}__{addtotitle}__{drug}'

            ax.set_title(title)
            ax.set_xlabel(f'{x_lab}')
            ax.set_ylabel(f'{y_lab}')

            try:

                drug_group = df.groupby('drug_label').get_group(drug)

                color = color_selection[index]

                ax.scatter(drug_group[x_lab],
                           drug_group[y_lab],
                           c=color,
                           label=drug)

                x = drug_group[x_lab]
                y = drug_group[y_lab]

                x_norm = x / np.mean(x)
                y_norm = y / np.mean(y)

                slope, intercept, r_value, p_value, std_err = stats.linregress(
                    x_norm, y_norm)
                r_sq = r_value**2

                if odrreg:
                    # Model for fitting
                    linear_model = scipy.odr.Model(linear_f)

                    # Real Data Object
                    data = scipy.odr.Data(x, y)

                    # Set up ODR with model and data
                    odr = scipy.odr.ODR(data, linear_model, beta0=[0, 1])
                    odr.set_job(fit_type=0)
                    out = odr.run()

                    # Generate fitted data
                    y_fit = linear_f(out.beta, x)
                    # y_fit = linear_f(out.beta, x_norm)
                    # ax.plot(x, y_fit*np.mean(y), c='k', label='ODR')
                    odrslope = out.beta[0]
                    ax.plot(
                        x,
                        y_fit,
                        c='k',
                        label=f'ODR. $R^2$: {r_sq:.3f}, slope={odrslope:.3f}')

                plt.legend()

            except Exception as err:
                print(f'Skipped plotting {drug}: ({err})')
                pass

            if savegraphs:
                fig.set_size_inches(10, 6)
                fig.savefig(savedir / f'{title}.png')
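linear_f is defined elsewhere in that module; given that out.beta[0] is reported as the slope, a plausible stand-in (an assumption, not the project's actual helper) is:

import scipy.odr

def linear_f(p, x):
    # assumed linear model in scipy.odr's (params, x) order: p[0] slope, p[1] intercept
    return p[0] * x + p[1]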
Example #23
    def selectorPlot(self):

        roi = self.selector[0].getRegion()

        if 1 < np.abs(roi[1] - roi[0]) < 30 or \
             (1 < np.abs(roi[1] - roi[0]) and \
                (self.plot_type.currentText() == 'Azimuth Window' or \
                    self.plot_type.currentText() == 'Jurkevic Azimuth') or \
                    self.plot_type.currentText() == 'Azimuth Colourmap'):

            ### TEMP
            stime = UTCDateTime(self.bam.setup.fireball_datetime) + roi[0]
            etime = UTCDateTime(self.bam.setup.fireball_datetime) + roi[1]

            win_len = float(self.win_len_e.text())
            win_frac = float(self.win_frac_e.text())

            try:

                pol_res = polarization_analysis(self.condensed_stream, win_len, win_frac, \
                                                    float(self.low_edits.text()), float(self.high_edits.text()), stime, etime, adaptive=True)
            except ValueError:
                pol_res = {}
                pol_res['azimuth'] = np.nan
                pol_res['timestamp'] = np.nan
            except IndexError:
                pol_res = {}
                pol_res['azimuth'] = np.nan
                pol_res['timestamp'] = np.nan
                print(
                    "Too many indices for array - Not sure on this error yet")

            self.particle_motion_canvas.clear()
            self.waveform_canvas.clear()
            self.waveform_fft_canvas.clear()

            st_data = [None] * 3
            ti_data = [None] * 3
            raw_data = [None] * 3
            noise_data = [None] * 3

            for i in range(len(self.condensed_stream)):

                st = self.condensed_stream[i].copy()
                stn = self.stn

                delta = st.stats.delta
                start_datetime = st.stats.starttime.datetime
                end_datetime = st.stats.endtime.datetime

                stn.offset = (
                    start_datetime -
                    self.bam.setup.fireball_datetime).total_seconds()

                self.current_waveform_delta = delta
                self.current_waveform_time = np.arange(0, st.stats.npts / st.stats.sampling_rate, \
                     delta)

                time_data = np.copy(self.current_waveform_time)

                st.detrend()

                resp = stn.response
                st2 = st.copy()
                st2 = st2.remove_response(inventory=resp, output="DISP")
                # st2.remove_sensitivity(resp)

                waveform_data = st2.data

                waveform_data = waveform_data[:len(time_data)]
                time_data = time_data[:len(waveform_data)] + stn.offset

                self.current_waveform_processed = waveform_data

                number_of_pts_per_s = st.stats.sampling_rate

                len_of_region = roi[1] - roi[0]

                num_of_pts_in_roi = len_of_region * number_of_pts_per_s

                num_of_pts_in_offset = np.abs(number_of_pts_per_s * stn.offset)

                num_of_pts_to_roi = roi[0] * number_of_pts_per_s

                pt_0 = int(num_of_pts_in_offset + num_of_pts_to_roi)
                pt_1 = int(pt_0 + num_of_pts_in_roi)

                raw_data[i] = waveform_data[pt_0:pt_1]
                noise_data[i] = waveform_data[0:(pt_1 - pt_0)]

                #####
                # Bandpass
                #####
                low = float(self.low_edits.text())
                high = float(self.high_edits.text())

                if low > 0 and high > 0:
                    # Init the butterworth bandpass filter
                    butter_b, butter_a = butterworthBandpassFilter(low, high, \
                        1.0/self.current_waveform_delta, order=2)

                    # Filter the data
                    waveform_data = scipy.signal.filtfilt(
                        butter_b, butter_a, np.copy(waveform_data))

                # waveform_data = st.data

                # butter_b, butter_a = butterworthBandpassFilter(float(self.low_edits.text()), float(self.high_edits.text()), \
                # 1.0/self.current_waveform_delta, order=6)

                # # Filter the data
                # waveform_data = scipy.signal.filtfilt(butter_b, butter_a, np.copy(waveform_data))

                st_data[i] = waveform_data[pt_0:pt_1]
                ti_data[i] = time_data[pt_0:pt_1]

            e_r, n_r, z_r = raw_data[0], raw_data[1], raw_data[2]
            e_n, n_n, z_n = noise_data[0], noise_data[1], noise_data[2]
            e, n, z = st_data[0], st_data[1], st_data[2]
            et, nt, zt = ti_data[0], ti_data[1], ti_data[2]

            e = np.array(e)
            n = np.array(n)

            def fit_func(beta, x):
                return beta[0] * x

            data = scipy.odr.Data(e, n)
            model = scipy.odr.Model(fit_func)
            odr = scipy.odr.ODR(data, model, beta0=[1.0])
            out = odr.run()
            az_slope = out.beta[0]
            az_error = out.sd_beta[0]
            azimuth = np.arctan2(1.0, az_slope)
            az_error = np.degrees(1.0 / ((1.0**2 + az_slope**2) * azimuth) *
                                  az_error)

            z = np.array(z)
            r = np.sqrt(n**2 + e**2) * np.cos(azimuth - np.arctan2(n, e))

            data = scipy.odr.Data(r, z)
            model = scipy.odr.Model(fit_func)
            odr = scipy.odr.ODR(data, model, beta0=[1.0])
            out = odr.run()
            in_slope = out.beta[0]
            in_error = out.sd_beta[0]

            x_h, y_h = np.abs(scipy.signal.hilbert(st_data[0])), np.abs(
                scipy.signal.hilbert(st_data[1]))

            data = scipy.odr.Data(x_h, y_h)
            model = scipy.odr.Model(fit_func)
            odr = scipy.odr.ODR(data, model, beta0=[1.0])
            out = odr.run()
            h_az_slope = out.beta[0]
            h_az_error = out.sd_beta[0]

            h_azimuth = np.arctan2(1.0, h_az_slope)
            h_az_error = np.degrees(
                1.0 / ((1.0**2 + h_az_slope**2) * h_azimuth) * h_az_error)

            incidence = np.arctan2(1.0, in_slope)

            in_error = np.degrees(1.0 / ((1.0**2 + in_slope**2) * incidence) *
                                  in_error)

            azimuth = np.degrees(azimuth)
            incidence = np.degrees(incidence)

            # Fit lines using orthogonal distance regression to a linear function
            if self.group_no == 0:
                pen = QColor(0, 255, 0)
                brush = QColor(0, 255, 0, 125)
            else:
                pen = QColor(0, 0, 255)
                brush = QColor(0, 0, 255, 125)

            if self.plot_type.currentText() == 'Azimuth':
                p_mot_plot = pg.PlotDataItem()
                p_mot_plot.setData(x=e, y=n)
                self.particle_motion_canvas.addItem(
                    pg.InfiniteLine(pos=(0, 0), angle=90 - azimuth, pen=pen))
                self.particle_motion_canvas.setLabel(
                    'bottom', "Channel: {:}".format(
                        self.condensed_stream[0].stats.channel))
                self.particle_motion_canvas.setLabel(
                    'left', "Channel: {:}".format(
                        self.condensed_stream[1].stats.channel))

                self.particle_motion_canvas.addItem(p_mot_plot)
                self.particle_motion_canvas.addItem(pg.TextItem(text='Azimuth = {:.2f}° ± {:.2f}°'.format(azimuth, az_error), color=(255, 0, 0), \
                                        anchor=(0, 0)))
                self.particle_motion_canvas.setXRange(np.min(e),
                                                      np.max(e),
                                                      padding=0)
                self.particle_motion_canvas.setYRange(np.min(n),
                                                      np.max(n),
                                                      padding=0)

            elif self.plot_type.currentText() == 'Incidence':
                p_mot_plot = pg.PlotDataItem()
                p_mot_plot.setData(x=r, y=z)
                self.particle_motion_canvas.addItem(
                    pg.InfiniteLine(pos=(0, 0), angle=90 - incidence, pen=pen))
                self.particle_motion_canvas.setLabel(
                    'bottom', "Horizontal in Direction of Azimuth")
                self.particle_motion_canvas.setLabel(
                    'left', "Channel: {:}".format(
                        self.condensed_stream[2].stats.channel))
                self.particle_motion_canvas.addItem(p_mot_plot)
                self.particle_motion_canvas.addItem(pg.TextItem(text='Incidence = {:.2f}° ± {:.2f}°'.format(incidence, in_error), color=(255, 0, 0), \
                                        anchor=(0, 0)))

                self.particle_motion_canvas.setXRange(np.min(r),
                                                      np.max(r),
                                                      padding=0)
                self.particle_motion_canvas.setYRange(np.min(z),
                                                      np.max(z),
                                                      padding=0)

            elif self.plot_type.currentText() == 'Jurkevic Azimuth':
                # fix this
                bndps = [[0.001, 10], [0.1, 20], [1, 30]]

                for bb, b in enumerate(bndps):
                    az_list, t_list = jurkevicWindows(z_r,
                                                      n_r,
                                                      e_r,
                                                      st.stats,
                                                      window_size=win_len,
                                                      window_overlap=win_frac,
                                                      bandpass=b)

                    p_mot_plot = pg.ScatterPlotItem()
                    p_mot_plot.setData(x=t_list, y=az_list, pen=pg.intColor(bb), \
                                brush=pg.intColor(bb), name="Bandpass: {:} - {:} Hz".format(b[0], b[1]))
                    self.particle_motion_canvas.addItem(p_mot_plot)

                    # print("{:} -> Bandpass: {:} - {:} Hz".format(pg.intColor(bb), b[0], b[1]))

                self.particle_motion_canvas.setLabel('bottom', "Time")
                self.particle_motion_canvas.setLabel('left', "Azimuth")

                self.particle_motion_canvas.setXRange(0,
                                                      np.max(t_list),
                                                      padding=0)
                self.particle_motion_canvas.setYRange(0, 180, padding=0)
                self.particle_motion_canvas.setLimits(xMin=0,
                                                      xMax=np.max(t_list),
                                                      yMin=0,
                                                      yMax=180)

            elif self.plot_type.currentText() == 'Azimuth Colourmap':

                bandpass_low = float(self.low_edits.text())
                bandpass_high = float(self.high_edits.text())

                bins = 15
                bin_overlap = 0.5

                # this equation is wrong
                size_of_bin = ((bandpass_high - bandpass_low) -
                               (bins - 1) * bin_overlap) / bins

                lows = np.linspace(bandpass_low, bandpass_high - size_of_bin,
                                   bins)
                highs = np.linspace(bandpass_low + size_of_bin, bandpass_high,
                                    bins)

                bndps = []

                for i in range(len(lows)):
                    bndps.append([lows[i], highs[i]])

                total_list = []
                for bb, b in enumerate(bndps):
                    az_list, t_list = jurkevicWindows(z_r,
                                                      n_r,
                                                      e_r,
                                                      st.stats,
                                                      window_size=win_len,
                                                      window_overlap=1.0,
                                                      bandpass=b)
                    total_list.append(az_list)

                img = pg.ImageItem()
                self.particle_motion_canvas.addItem(img)

                az_img = np.array(total_list)
                img.setImage(np.transpose(az_img))
                img.scale(t_list[-1]/np.size(az_img, axis=1),\
                            lows[-1]/np.size(az_img, axis=0))
                img.translate(0, bandpass_low)
                self.hist = pg.HistogramLUTItem()

                # Link the histogram to the image
                self.hist.setImageItem(img)
                if self.added:
                    self.waveform_hist_view.clear()

                self.waveform_hist_view.addItem(self.hist)
                self.added = True
                # Fit the min and max levels of the histogram to the data available
                self.hist.setLevels(np.min(az_img), np.max(az_img))
                # This gradient is roughly comparable to the gradient used by Matplotlib
                # You can adjust it and then save it using hist.gradient.saveState()
                self.hist.gradient.restoreState({
                    'mode':
                    'rgb',
                    'ticks': [(0.5, (0, 182, 188, 255)),
                              (1.0, (246, 111, 0, 255)),
                              (0.0, (75, 0, 113, 255))]
                })
                self.particle_motion_canvas.setLimits(xMin=0,
                                                      xMax=t_list[-1] +
                                                      bandpass_low,
                                                      yMin=bandpass_low,
                                                      yMax=bandpass_high)
                self.hist.setHistogramRange(0, 180)
                # self.particle_motion_canvas.setLookupTable(self.hist)

            # elif self.plot_type.currentText() == 'Abercrombie 1995':
            #     STEP_DEG = 1 #deg

            #     ba_list = np.arange(0, 360, STEP_DEG)

            #     raw_stream = self.condensed_stream[i].copy()

            #     for ba in ba_list:
            #         raw_stream.rotate('NE->RT', back_azimuth=ba)

            else:
                az_window = pol_res['azimuth']

                # times are in unix time, casting UTCDatetime to float makes it also unix
                t_window = pol_res['timestamp'] - float(stime)
                az_win_error = pol_res['azimuth_error']

                p_mot_plot = pg.ScatterPlotItem()
                p_mot_plot.setData(x=t_window, y=az_window)
                p_mot_plot_err = pg.ErrorBarItem()
                p_mot_plot_err.setData(x=t_window,
                                       y=az_window,
                                       height=az_win_error)
                azimuth = np.mean(az_window)
                az_error = np.std(az_window)
                self.particle_motion_canvas.setLabel('bottom', "Time")
                self.particle_motion_canvas.setLabel('left', "Azimuth")
                self.particle_motion_canvas.addItem(
                    pg.InfiniteLine(pos=(0, azimuth), angle=0, pen=pen))
                self.particle_motion_canvas.addItem(pg.TextItem(text='Azimuth = {:.2f}° ± {:.2f}°'.format(azimuth, az_error), color=(255, 0, 0), \
                        anchor=(0, 0)))
                self.particle_motion_canvas.addItem(p_mot_plot_err)
                self.particle_motion_canvas.addItem(p_mot_plot)
                self.particle_motion_canvas.setXRange(0,
                                                      np.max(t_window),
                                                      padding=0)
                self.particle_motion_canvas.setYRange(0, 180, padding=0)
                # self.particle_motion_canvas.getViewBox().setLimits(xMin=0, xMax=15,
                #              yMin=range_[1][0], yMax=range_[1][1])

            self.azimuth = azimuth
            self.az_error = az_error

            roi_waveform = pg.PlotDataItem()
            pts = np.linspace(pt_0/st.stats.sampling_rate + stn.offset - roi[0], \
                                pt_1/st.stats.sampling_rate + stn.offset - roi[0], num=len(z))
            roi_waveform.setData(x=pts,
                                 y=bandpassFunc(z, 2, 8, st.stats.delta))
            self.waveform_canvas.addItem(roi_waveform)

            sps = st.stats.sampling_rate
            dt = 1 / st.stats.sampling_rate
            length = len(z_r)
            freq = np.linspace(1 / len_of_region,
                               (sps / 2), length) * sps / length

            FAS = abs(fft(z_r))
            FAS_n = abs(fft(z_n))
            fas_data = pg.PlotDataItem()
            fas_data.setData(x=freq, y=FAS, pen=(255, 0, 0))

            fas_noise_data = pg.PlotDataItem()
            fas_noise_data.setData(x=freq, y=FAS_n, pen=(255, 255, 255))

            fas_diff_data = pg.PlotDataItem()
            fas_diff_data.setData(x=freq,
                                  y=np.abs(FAS / FAS_n),
                                  pen=(0, 125, 255))

            print("Red - Data", "White - Noise", "Blue - Data/Noise")
            self.waveform_fft_canvas.addItem(fas_data)
            self.waveform_fft_canvas.addItem(fas_noise_data)
            self.waveform_fft_canvas.addItem(fas_diff_data)
Example #24
def particle_motion_odr(stream, noise_thres=0):
    """
    Computes the orientation of the particle motion vector based on an
    orthogonal regression algorithm.

    :param stream: ZNE sorted trace data
    :type stream: :class:`~obspy.core.stream.Stream`
    :param noise_thres: variance of the noise sphere; data points are excluded
        when falling within the sphere of radius sqrt(noise_thres)
    :type noise_thres: float
    :returns: azimuth, incidence, error of azimuth, error of incidence
    """
    z = []
    n = []
    e = []
    comp, npts = np.shape(stream)

    for i in range(0, npts):
        if (stream[0][i] ** 2 + stream[1][i] ** 2 + stream[2][i] ** 2) > noise_thres:
            z.append(stream[0][i])
            n.append(stream[1][i])
            e.append(stream[2][i])

    def fit_func(beta, x):
        # XXX: Eventually this is correct: return beta[0] * x + beta[1]
        return beta[0] * x

    data = scipy.odr.Data(e, n)
    model = scipy.odr.Model(fit_func)
    odr = scipy.odr.ODR(data, model, beta0=[1.0])
    out = odr.run()
    az_slope = out.beta[0]
    az_error = out.sd_beta[0]

    n = np.asarray(n)
    e = np.asarray(e)
    z = np.asarray(z)
    r = np.sqrt(n ** 2 + e ** 2)

    data = scipy.odr.Data(r, abs(z))
    model = scipy.odr.Model(fit_func)
    odr = scipy.odr.ODR(data, model, beta0=[1.0])
    out = odr.run()
    in_slope = out.beta[0]
    in_error = out.sd_beta[0]

    azimuth = math.atan2(1.0, az_slope)
    incidence = math.atan2(1.0, in_slope)

    az_error = 1.0 / ((1.0 ** 2 + az_slope ** 2) * azimuth) * az_error
    # az_error = math.degrees(az_error)
    in_error = 1.0 / ((1.0 ** 2 + in_slope ** 2) * incidence) * in_error
    # in_error = math.degrees(in_error)

    azimuth = math.degrees(azimuth)
    incidence = math.degrees(incidence)

    if azimuth < 0.0:
        azimuth = 360.0 + azimuth
    if incidence < 0.0:
        incidence += 180.0
    if incidence > 90.0:
        incidence = 180.0 - incidence
        if azimuth > 180.0:
            azimuth -= 180.0
        else:
            azimuth += 180.0
    if azimuth > 180.0:
        azimuth -= 180.0

    return azimuth, incidence, az_error, in_error
Example #25
	ax.get_xaxis().set_major_formatter(xformatter)
################################################################################
elif args.plot == '5+6':
	# Arrhenius representation for band gap
	def linear(p, x):
		return p[0] * x + p[1]

	import scipy.odr  # too lazy to scroll up

	data = scipy.odr.RealData(
            1 / limited_temp, np.log(-n(limited_hall_coeff, limited_temp) / limited_temp**(3 / 2)))  # , sx=(1 / limited_temp**2))

	odr = scipy.odr.ODR(data, scipy.odr.Model(
            linear), beta0=(-4458.922336972104, 50.90372804322858))

	result = odr.run()

	slp, inter = result.beta
	slp_err, inter_err = result.sd_beta

	slpb, interb, r, __, __ = scipy.stats.linregress(
            1 / limited_temp, np.log(-n(limited_hall_coeff, limited_temp) / limited_temp**(3 / 2)))

	plt.plot(1 / limited_temp, np.log(-n(limited_hall_coeff,
                                      limited_temp) / limited_temp**(3. / 2)), '+', markersize=12, label='data')
	plt.plot(1 / limited_temp, slp * 1
          / limited_temp + inter, '--', label=f'linear fit\n $R^2={r**2:.4f}$', color='red')
	plt.xlabel('$\\frac{1}{T} \\left(\\frac{1}{K}\\right)$')
	plt.ylabel('$\\log\\ \\frac{n_\\mathrm{i}}{T^{\\frac{3}{2}}}$')

	if args.verbose: