Example #1
def mypolyfit(x, y, order=1, verbose=1):
    """
    coeffs, yfit, model = mypolyfit(x, y, order=1, verbose=1)
    """
    from numpy.lib.polynomial import polyfit, poly1d
    coeffs = polyfit(x, y, order)
    polyModel = poly1d(coeffs)
    if verbose: print("Fit coeffs:", coeffs)
    return coeffs, polyModel(x), polyModel
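A minimal usage sketch (the data below is made up for illustration; only numpy is needed):

import numpy as np

x = np.linspace(0, 10, 50)
y = 3.0 * x + 1.5 + np.random.normal(scale=0.1, size=x.size)
coeffs, yfit, model = mypolyfit(x, y, order=1)
# coeffs[0] is close to the slope 3.0, coeffs[1] to the intercept 1.5
# model(12.0) evaluates the fitted polynomial at a new point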
Example #2
    def C44_Calculator(self, EMT, PARAMETERS):
        # Returning 0 here disables this method
        # return 0
        """ This method uses the given EMT calculator to calculate and return the value of the matrix element C44 for
            a system of atoms of a given element type. The calculation uses:
            C44 = 1 / Volume * d^2/depsilon^2 (E_system), where epsilon is the displacement of the system in one
            direction along one axis divided by the height of the system. """

        # An atom object is created and the calculator attached
        atoms = FaceCenteredCubic(size=(self.Size, self.Size, self.Size),
                                  symbol=self.Element)
        atoms.set_calculator(EMT)

        # The volume of the sample is calculated
        Vol = atoms.get_volume()

        # The value of the relative displacement, epsilon, is set
        epsilon = 1. / 1000

        # The matrix used to change the unit cell by n * epsilon is initialized
        LMM = numpy.array([[1, 0, -10 * epsilon], [0, 1, 0], [0, 0, 1]])

        # The original unit cell is stored
        OCell = atoms.get_cell()

        # The array for storing the energies is initialized
        E_calc = numpy.zeros(20)

        # The numerical value of C44 is calculated
        for i in range(20):
            # The new system cell based on the perturbation epsilon is set
            atoms.set_cell(numpy.dot(OCell, LMM), scale_atoms=True)
            # The energy of the system is calculated
            E_calc[i] = atoms.get_potential_energy()
            # The value of LMM is updated
            LMM[0, 2] += epsilon

        # A polynomial fit is made for the energy as a function of epsilon

        # The displaced axis is defined
        da = numpy.arange(-10, 10) * epsilon

        # The fit is made
        Poly = numpy.polyfit(da, E_calc, 2)

        # C44 can now be estimated from this fit: with Poly = a * x^2 + b * x + c the second derivative is 2 * a,
        # which divided by the volume of the system gives C44
        C44 = 2. / Vol * Poly[0]

        return C44
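The key step above is reading the curvature off the quadratic fit: with E(epsilon) ~ a * epsilon^2 + b * epsilon + c, the second derivative is 2 * a, so C44 = 2 * a / Volume. A minimal sketch of just that step, with made-up numbers standing in for the EMT energies and the cell volume:

import numpy as np

epsilon = 1. / 1000
da = np.arange(-10, 10) * epsilon        # the 20 strain values used in the loop above
E_calc = 7.5 * da**2 + 0.2 * da + 3.0    # stand-in for atoms.get_potential_energy()
Poly = np.polyfit(da, E_calc, 2)
Vol = 100.0                              # stand-in for atoms.get_volume()
C44 = 2. / Vol * Poly[0]                 # second derivative (2 * a) divided by the volume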
Example #3
def G_reg1d(xx, yy, ww=None):
    '''
    Compute the slope and intercept of a linear fit
    (returns [slope, intercept, std(yy)/std(xx), matching intercept, r^2]).
    ww: weights
    '''
    rtn = []
    ab = polyfit(xx, yy, 1, w=ww)
    rtn.append(ab[0])
    rtn.append(ab[1])
    rtn.append(std(yy) / std(xx))
    rtn.append(mean(yy) - rtn[2] * mean(xx))
    r = corrcoef(xx, yy)
    rr = r[0, 1] * r[0, 1]
    rtn.append(rr)
    return rtn
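G_reg1d expects polyfit, std, mean and corrcoef to be in scope (presumably imported from numpy or pylab in the original module). A hedged usage sketch with explicit numpy imports, placed in the same module as G_reg1d, and made-up data:

from numpy import polyfit, std, mean, corrcoef
import numpy as np

xx = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
yy = np.array([2.1, 3.9, 6.2, 8.1, 9.8])
slope, intercept, rma_slope, rma_intercept, r2 = G_reg1d(xx, yy)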
Example #4
def G_reg1d(xx, yy, ww=None):
    """
    description needed
    ww: weights
    """
    rtn = []
    ab = polyfit(xx, yy, 1, w=ww)
    rtn.append(ab[0])
    rtn.append(ab[1])
    rtn.append(std(yy) / std(xx))
    rtn.append(mean(yy) - rtn[2] * mean(xx))
    r = corrcoef(xx, yy)
    rr = r[0, 1] * r[0, 1]
    rtn.append(rr)
    return rtn
Example #5
def linearRegression(x, y, deg=1, plotData=False):
    '''returns the least-squares coefficients of the fit y = ax+b (or a higher-degree
    polynomial if deg > 1) and optionally plots the data together with the fitted curve'''
    from numpy.lib.polynomial import polyfit
    from matplotlib.pyplot import plot
    from numpy.core.multiarray import arange
    coef = polyfit(x, y, deg)
    if plotData:
        def poly(x):
            result = 0
            for i in range(len(coef)):
                result += coef[i]*x**(len(coef)-i-1)
            return result
        plot(x, y, 'x')
        xx = arange(min(x), max(x),(max(x)-min(x))/1000)
        plot(xx, [poly(z) for z in xx])
    return coef
Example #6
def linearRegression(x, y, deg=1, plotData=False):
    '''returns the least-squares coefficients of the fit y = ax+b (or a higher-degree
    polynomial if deg > 1) and optionally plots the data together with the fitted curve'''
    from numpy.lib.polynomial import polyfit
    from numpy.core.multiarray import arange
    import matplotlib.pyplot as plt
    coef = polyfit(x, y, deg)
    if plotData:

        def poly(x):
            result = 0
            for i in range(len(coef)):
                result += coef[i] * x**(len(coef) - i - 1)
            return result

        plt.plot(x, y, 'x')
        xx = arange(min(x), max(x), (max(x) - min(x)) / 1000)
        plt.plot(xx, [poly(z) for z in xx])
    return coef
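A short usage sketch (synthetic data; matplotlib is only needed when plotData=True):

import numpy as np

x = np.linspace(0, 5, 30)
y = 2.0 * x - 1.0 + np.random.normal(scale=0.05, size=x.size)
coef = linearRegression(x, y)    # coef[0] ~ 2.0 (slope), coef[1] ~ -1.0 (intercept)
# linearRegression(x, y, plotData=True) additionally draws the data and the fitted line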
Example #7
def list_derivative(f, x, n, o=3, p=5):
    """
    Returns the nth derivative of f with respect to x, where zip(x, f) are
    data points, using polynomials of order o fitted to p points at a time.
    """
    from numpy import polyfit, polyval, polyder, array
    assert p % 2 == 1   # p should be odd so the fit window can be centred on each point
    assert len(f) >= p  # at least p data points are needed for every local fit
    derivative = []
    for i in range(len(f)):
        # Centre a window of p points on index i, clamped to the ends of the data
        start = i - (p - 1) // 2
        if start < 0:
            start = 0
        end = start + p
        if end > len(f):
            end = len(f)
            start = end - p
        spline = polyfit(x[start:end], f[start:end], o)
        derivative.append(polyval(polyder(spline, n), x[i]))
    print("diff in list lengths: " + str(len(f) - len(derivative)))
    return array(derivative)
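A quick check on synthetic data; away from the endpoints the result should track the analytic derivative:

import numpy as np

x = np.linspace(0, 2 * np.pi, 100)
f = np.sin(x)
df = list_derivative(f, x, 1)    # numerical first derivative, should be close to np.cos(x)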
Example #8
    def L12_BM_LC_Calculator(self, EMT, PARAMETERS):
        # Returning 0 here disables this method
        # return 0
        """ The method L12_BM_LC_Calculator uses the EMT calculator to find the lattice constant which gives
            the lowest possible energy for an alloy of two elements. From a polynomial fit of the energy as a
            function of the lattice constant it then calculates and returns the bulk modulus, the volume of the
            system at the energy minimum, and that minimum energy. """
        # Three values for the lattice constant, a0, are chosen. The highest and lowest values are picked so that
        # they are certainly too high and too low, respectively, compared to the "correct" value. This is done by
        # using the experimental value of the lattice constant, a0_exp, as the middle value; the high and low values
        # are then a0_exp +- a0_mod, with a0_mod = a0_exp / 5
        a0_exp = ((PARAMETERS[self.Element[0]][1] +
                   3 * PARAMETERS[self.Element[1]][1]) * numpy.sqrt(2) * beta *
                  Bohr / 4)
        a0_mod = a0_exp * 0.20
        a0_guesses = numpy.array([a0_exp - a0_mod, a0_exp, a0_exp + a0_mod])

        # An atoms object consisting of atoms of the chosen element is initialized
        atoms = L1_2(size=(self.Size, self.Size, self.Size),
                     symbol=self.Element,
                     latticeconstant=a0_exp)
        atoms.set_calculator(EMT)

        # An identity matrix for the system is saved to a variable
        IdentityMatrix = numpy.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])

        # An array for the energies of the guesses for a0 is initialized (five entries; only the first three are
        # used until the final refinement below):
        E_guesses = numpy.zeros(5)

        # The energies are calculated
        for i in range(3):
            # Changes the lattice constant for the atoms object
            atoms.set_cell(a0_guesses[i] * self.Size * IdentityMatrix,
                           scale_atoms=True)
            # Calculates the energy of the system for the new lattice constant
            E_guesses[i] = atoms.get_potential_energy()

        # Bisection is used to find a small interval of the lattice constant containing the energy minimum (an
        # arbitrary interval length is chosen). This is possible because theory guarantees only one minimum of the
        # energy in the interval of interest, so the search cannot accidentally get stuck in a local minimum.
        while (a0_guesses[2] - a0_guesses[0]) >= self.Tol:
            if min([E_guesses[0], E_guesses[2]]) == E_guesses[0]:
                # A new guess for the lattice constant is introduced
                a0_new_guess = 0.67 * a0_guesses[1] + 0.33 * a0_guesses[2]
                # The energy for this new guess is calculated
                atoms.set_cell(a0_new_guess * self.Size * IdentityMatrix,
                               scale_atoms=True)
                E_new_guess = atoms.get_potential_energy()
                # A check for changes in the energy minimum is made and the guesses for a0 and their corresponding
                # energies are adjusted.
                if min(E_new_guess, min(E_guesses[0:3])) != E_new_guess:
                    a0_guesses[2] = a0_new_guess
                    E_guesses[2] = E_new_guess
                else:
                    a0_guesses[0] = a0_guesses[1]
                    a0_guesses[1] = a0_new_guess
                    E_guesses[0] = E_guesses[1]
                    E_guesses[1] = E_new_guess

            elif min([E_guesses[0], E_guesses[2]]) == E_guesses[2]:
                # A new guess for the lattice constant is introduced
                a0_new_guess = 0.33 * a0_guesses[0] + 0.67 * a0_guesses[1]
                # The energy for this new guess is calculated
                atoms.set_cell(a0_new_guess * self.Size * IdentityMatrix,
                               scale_atoms=True)
                E_new_guess = atoms.get_potential_energy()
                # A check for changes in the energy minimum is made and the guesses for a0 and their corresponding
                # energies are adjusted.
                if min(E_new_guess, min(E_guesses[0:3])) != E_new_guess:
                    a0_guesses[0] = a0_new_guess
                    E_guesses[0] = E_new_guess
                else:
                    a0_guesses[2] = a0_guesses[1]
                    a0_guesses[1] = a0_new_guess
                    E_guesses[2] = E_guesses[1]
                    E_guesses[1] = E_new_guess

        # An estimate of the minimum energy can now be found from a second degree polynomial fit through the three
        # current guesses for a0 and the corresponding values of the energy.
        Poly = numpy.polyfit(a0_guesses, E_guesses[0:3], 2)

        # The lattice constant corresponding to the lowest energy of the polynomial fit is found
        a0 = -Poly[1] / (2 * Poly[0])

        # Now five guesses for a0, and the corresponding energies, are evaluated at and around the current a0.
        a0_guesses = a0 * numpy.array([
            1 - 2 * self.Tol / 5, 1 - self.Tol / 5, 1, 1 + self.Tol / 5,
            1 + 2 * self.Tol / 5
        ])

        for i in range(5):
            # Changes the lattice constant for the atoms object
            atoms.set_cell(a0_guesses[i] * self.Size * IdentityMatrix,
                           scale_atoms=True)
            # Calculates the energy of the system for the new lattice constant
            E_guesses[i] = atoms.get_potential_energy()

        # The method EquationOfState is now used to find the Bulk modulus and the minimum energy for the system.

        # The volume of the sample for the given a0_guesses
        Vol = (self.Size * a0_guesses)**3

        # The equilibrium volume, energy and bulk modulus are calculated
        (Vol0, E0, B) = EquationOfState(Vol.tolist(), E_guesses.tolist()).fit()

        return (Vol0, E0, B, Vol0**(1. / 3) / self.Size)
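The refinement above hinges on the vertex of the quadratic fit: for E(a0) ~ A * a0^2 + B * a0 + C the minimum lies at a0 = -B / (2 * A). A tiny sketch of that step with made-up energies:

import numpy as np

a0_guesses = np.array([3.90, 4.00, 4.10])
E_guesses = np.array([-3.30, -3.36, -3.29])    # stand-ins for atoms.get_potential_energy()
Poly = np.polyfit(a0_guesses, E_guesses, 2)
a0 = -Poly[1] / (2 * Poly[0])                  # lattice constant at the fitted energy minimum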
Example #9
plt.title("Best Fit Line")
showPlot()

# Print s_square
error_var = squared_error(yVals, regLine) / len(yVals)
print("s_square= " + str(error_var))
print("RMSE= " + str((mean_squared_error(yVals, regLine, p))**0.5))

r_squared = coefficient_of_determination(yVals, regLine)
print("R_squared: " + str(r_squared))

F_val = calc_F(yVals, regLine, p)
print("F Statistics: " + str(F_val))

# Trying polynomial fits
pl1 = polyfit(xVals, yVals, 1)
pl2 = polyfit(xVals, yVals, 2)
pl3 = polyfit(xVals, yVals, 3)
# print pl1
# print pl2
# print pl3

# plt.plot(xVals, yVals, 'o')
# plt.plot(xVals, np.polyval(pl1,xVals), 'r', linewidth=0.1)
# plt.plot(xVals, np.polyval(pl2,xVals), 'b', linewidth=0.5)
# plt.plot(xVals, np.polyval(pl3,xVals), 'g', linewidth=0.1)
# showPlot()

yfit = pl1[0] * xVals + pl1[1]
# yfit = pl2[0] * xVals + pl2[1]
# yfit = pl3[0] * xVals + pl3[1]
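Evaluating the fits with np.polyval (as the commented-out plotting lines above already do) works for any degree and avoids the manual pl1[0] * xVals + pl1[1] form; a small equivalent sketch, assuming numpy is imported as np as those lines suggest:

yfit1 = np.polyval(pl1, xVals)    # same as pl1[0] * xVals + pl1[1]
yfit2 = np.polyval(pl2, xVals)    # correct evaluation of the degree-2 fit
yfit3 = np.polyval(pl3, xVals)    # correct evaluation of the degree-3 fit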
Example #10
def analyze_olfaction_covariance(covariance, receptors):
    ''' covariance: n x n covariance matrix.
        receptors:  list of n (pose, sensor) pairs; 2-D positions are read from the poses. '''
    
    positions = [pose.get_2d_position() for pose, sens in receptors] #@UnusedVariable
    positions = array(positions).transpose().squeeze()
    
    require_shape(square_shape(), covariance)
    n = covariance.shape[0]
    require_shape((2, n), positions)
    
    distances = create_distance_matrix(positions)
    correlation = cov2corr(covariance)
    flat_distances = distances.reshape(n * n) 
    flat_correlation = correlation.reshape(n * n)
    
    # let's fit a polynomial
    deg = 4
    poly = polyfit(flat_distances, flat_correlation, deg=deg)  
    
    knots = linspace(min(flat_distances), max(flat_distances), 2000)
    poly_int = polyval(poly, knots)
    
    poly_fder = polyder(poly)
    
    fder = polyval(poly_fder, distances)
    
    Ttheta = create_olfaction_Ttheta(positions, fder)
    Tx, Ty = create_olfaction_Txy(positions, fder, distances)
    
    
    report = Node('olfaction-theory')
    report.data('flat_distances', flat_distances)
    report.data('flat_correlation', flat_correlation)
    
    
    with report.data_file('dist_vs_corr', 'image/png') as filename:
        pylab.figure()
        pylab.plot(flat_distances, flat_correlation, '.')
        pylab.plot(knots, poly_int, 'r-')
        pylab.xlabel('distance')
        pylab.ylabel('correlation')
        pylab.title('Correlation vs distance')
        pylab.legend(['data', 'interpolation deg = %s' % deg]) 
        pylab.savefig(filename)
        pylab.close()
    
    with report.data('fder', fder).data_file('fder', 'image/png') as filename:
        pylab.figure()
        pylab.plot(knots, polyval(poly_fder, knots), 'r-')
        pylab.title('f der')
        pylab.savefig(filename)
        pylab.close() 
    
    report.data('distances', distances)
    report.data('correlation', correlation)
    report.data('covariance', covariance)
    report.data('f', polyval(poly, distances))

    f = report.figure(id='cor-vs-distnace', caption='Estimated kernels',
                       shape=(3, 3))
    f.sub('dist_vs_corr')
    f.sub('fder')
    f.sub('f', display='scale')
    f.sub('distances', display='scale')
    f.sub('correlation', display='posneg')
    f.sub('covariance', display='posneg')
    
    T = numpy.zeros(shape=(3, Tx.shape[0], Tx.shape[1]))
    T[0, :, :] = Tx
    T[1, :, :] = Ty
    T[2, :, :] = Ttheta
    
    T_report = create_report_figure_tensors(T, report_id='tensors',
        caption="Predicted learned tensors")
    
    report.add_child(T_report)

    return report
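The numerical core of the function is the polyfit / polyder / polyval chain on the flattened distance and correlation arrays. A self-contained sketch of just that part, with synthetic data standing in for the covariance analysis:

import numpy as np

flat_distances = np.random.uniform(0.0, 1.0, 400)
flat_correlation = np.exp(-3.0 * flat_distances) + np.random.normal(scale=0.02, size=400)
poly = np.polyfit(flat_distances, flat_correlation, deg=4)
knots = np.linspace(flat_distances.min(), flat_distances.max(), 2000)
poly_int = np.polyval(poly, knots)     # fitted correlation-vs-distance curve
poly_fder = np.polyder(poly)
fder = np.polyval(poly_fder, knots)    # derivative of the fitted curve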
Example #11
# Imports assumed by this fragment (the original file's imports are not shown)
import numpy as np
import matplotlib.pyplot as pt
from scipy.optimize import leastsq, fsolve
from numpy import polyfit


def redise(p0, x, y):
    # Residuals of the linear model y = a * x + b, for use with leastsq
    a, b = p0
    return (y - (a * x + b))


data = np.genfromtxt("web_traffic.tsv", delimiter="\t")

y = data[:, 1]
x = data[:, 0]

x_clean = x[np.where(y > -1)]
y_clean = y[np.where(y > -1)]

rs1 = leastsq(redise, [0, 0], args=(x_clean, y_clean))
rs2 = polyfit(x_clean, y_clean, 2, full=True)
rs5 = polyfit(x_clean, y_clean, 25, full=True)

y_predit1 = rs1[0][0] * x_clean + rs1[0][1]
y_predit2 = np.poly1d(rs2[0])
y_predit5 = np.poly1d(rs5[0])

print(y_predit2)
result = fsolve(y_predit2 - 10000, 0)
print(result)
pt.plot(x_clean, y_clean, '.')
pt.plot(x_clean, y_predit1, '-', label="predit1")
pt.plot(x_clean, y_predit2(x_clean), '-', label="predit2")
pt.plot(x_clean, y_predit5(x_clean), '-', label='predit5')
pt.legend()
pt.show()
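Because y_predit2 is a poly1d object, the crossing with 10000 can also be read off analytically from the roots of the shifted polynomial instead of calling fsolve; a small sketch using the objects above:

shifted = y_predit2 - 10000    # poly1d minus a scalar is still a poly1d
print(shifted.roots)           # x values where the degree-2 fit crosses 10000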