import numpy
import kernel


def polynomialFit(Y, X, x0, h):
    # Local polynomial fit of degree 1 (local linear regression).
    numObs = X.shape[0]
    l = numpy.zeros((1, numObs))
    L = numpy.zeros((2, numObs))
    constant = numpy.ones((numObs, 1))
    dX = numpy.asarray(X - x0)
    regressor = numpy.hstack([constant, dX])
    # make the weights
    wVec = kernel.tricube(X, x0, h)
    # sanity check: wVec must be a column vector
    assert wVec.shape[1] == 1
    if wVec.sum() == 0:
        # NOTE: returning zeros when no observation receives any weight is
        # not really correct, but there is no obviously elegant way to
        # handle this case.
        return 0, l, 0
    # standardize the weights so they sum to one
    wVec = wVec / wVec.sum()
    # sparsify W: keep only observations with nonzero weight
    sampleIdx = wVec.nonzero()[0]
    effectiveWVec = wVec[sampleIdx]
    regressor = regressor[sampleIdx, :]
    wMat = numpy.diag(effectiveWVec.ravel())
    # calculate the coefficients: betaHat = (X'WX)^{-1} X'W Y
    XTW = regressor.T.dot(wMat)
    try:
        effectiveL = numpy.linalg.solve(XTW.dot(regressor), XTW)
        l[:, sampleIdx] = effectiveL[0, :]
        L[:, sampleIdx] = effectiveL
    except numpy.linalg.LinAlgError:
        # When X'WX is singular, switch to the Nadaraya-Watson estimator,
        # whose smoothing weights are just wVec (the slope row stays zero).
        l = wVec.T
        L = numpy.zeros((2, numObs))
        L[0, :] = wVec.ravel()
    # take the constant term as the forecast at x0; the second coefficient
    # is the local slope
    betaHat = numpy.dot(L, Y)
    return betaHat[0], l, betaHat[1]
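`polynomialFit` solves a locally weighted least-squares problem, so the returned constant is the smoothed value at `x0` and the second coefficient is the local slope. A minimal usage sketch, assuming `X` and `Y` are column vectors and the `kernel` module is importable; the synthetic sine data and the chosen `x0` and `h` are purely illustrative:

import numpy as np

# Hypothetical data: 50 noisy observations of a sine curve.
np.random.seed(0)
X = np.linspace(0.0, 1.0, 50).reshape(-1, 1)
Y = np.sin(2 * np.pi * X) + 0.1 * np.random.randn(50, 1)

# Fit at x0 = 0.5 with bandwidth h = 0.2; yHat is the local level,
# slope the local derivative, l the smoothing weights at x0.
yHat, l, slope = polynomialFit(Y, X, x0=0.5, h=0.2)
print(yHat, slope)

Note that `l` sums to one by construction, which is what makes the fit a linear smoother.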
'''
Unit test for the kernel module.
'''
import numpy as np
import kernel as kw

'''
check kernelInt
'''
assert kw.kernelInt(0) == 1
assert kw.kernelInt(-0.9) == 1
assert kw.kernelInt(0.9) == 1
assert kw.kernelInt(1.1) == 0
assert kw.kernelInt(-1.1) == 0

# test for longer arrays
x = np.array([0, -1.1, 0.8])
trueVal = np.array([1, 0, 1], dtype=int)
z = kw.kernelInt(x)
assert (z == trueVal).all()

'''
check tricube
'''
x = np.array([-1.1, -1., -0.5, 0, 0.5, 1, 1.1])
z = kw.tricube(x, 0, 1)
trueVal = np.array([0, 0, (7. / 8.)**3 * (70. / 81.), 70. / 81.,
                    (7. / 8.)**3 * (70. / 81.), 0, 0])
# compare floats with a tolerance rather than exact equality
assert np.allclose(z, trueVal)

print('Kernel passes unit test.')
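The tests pin down the expected behavior of the two kernel functions: `kernelInt` is the 0/1 indicator of the kernel's support, and `tricube` evaluates the tricube kernel K(u) = (70/81)(1 - |u|^3)^3 for |u| <= 1 at u = (X - x0)/h. The implementation itself is not shown here; below is a minimal sketch consistent with the tests. The closed boundary |u| <= 1 in `kernelInt` is an assumption, since the tests only probe 0.9 and 1.1, and tricube is zero at |u| = 1 either way:

import numpy as np


def kernelInt(x):
    # 0/1 indicator of the kernel support; the closed interval |x| <= 1
    # is assumed (the tests do not probe the boundary exactly).
    return (np.abs(np.asarray(x)) <= 1).astype(int)


def tricube(X, x0, h):
    # Tricube kernel weights K((X - x0) / h). Elementwise operations
    # preserve the shape of X, so a column vector input stays a column
    # vector, as polynomialFit's assert expects.
    u = (np.asarray(X, dtype=float) - x0) / h
    return (70.0 / 81.0) * (1.0 - np.abs(u) ** 3) ** 3 * kernelInt(u)

Multiplying by `kernelInt(u)` zeroes the weight outside the bandwidth, which is what lets `polynomialFit` sparsify the weight matrix via `wVec.nonzero()`.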