def __init__(self):
    self.dict = {3: [1, 2, 4, 5, 8]}
    self.d = os.getcwd() + '\\Datas'
    self.fn = FN.FN(d=self.d + '\\RawXlsx')
    self.fn.analyzeExtensions()
    self.filesName = []
    for i in self.fn.filesName:
        self.filesName.append(self.fn.analyzeFN(i))
def __getFilesName(self, dir, typeList):
    filesName = []
    fn = FN.FN(dir)
    fn.analyzeExtensions()
    for i in fn.filesName:
        temp = fn.analyzeFN(i)
        if temp[2] in typeList:
            filesName.append(temp)
    return filesName
def __init__(self):
    self.dict = {3: [1, 2, 4, 5, 8]}
    self.wordsType = [
        'vi.', 'vt.', 'a.', 'n.', 'ad.', 'v.', 'prep.', 'suffix', 'infml.'
    ]
    self.wordsType2 = []
    self.d = os.getcwd() + '\\Datas'
    self.fn = FN.FN(d=self.d + '\\RawXlsx')
    self.fn.analyzeExtensions()
    self.filesName = []
    for i in self.fn.filesName:
        self.filesName.append(self.fn.analyzeFN(i))
def __init__(self):
    self.d = r'D:\0COCO\本科\英语四六级\六级词汇词根+联想记忆法(MP3+文本)'
    self.fn = FN.FN(d=self.d)
    self.fn.analyzeExtensions()
    self.filesName = []
    self.wordsType = [
        'vt.', 'adj.', 'n.', 'v.', 'vi.', 'adv.', 'prep.', 'conj.'
    ]
    self.wordsType2 = []
    for i in self.fn.filesName:
        temp = self.fn.analyzeFN(i)
        if temp[2] == '.lrc':
            self.filesName.append(temp)
def __init__(self, recreate=False):
    self.d = os.getcwd() + '\\Datas'
    self.fn = FN.FN(d=self.d + '\\ModifyXlsx')
    self.fn.analyzeExtensions()
    self.font = Font(name='Arial', size=11, bold=False, italic=False,
                     vertAlign=None, underline='none', strike=False,
                     color='FF000000')
    self.space = [' ', '　']
    if recreate:
        md = ModifyData.MD()
        md.test()
    self.spy = Spyder.Spy()
    if not os.path.exists(self.d + '\\Mp3'):
        os.mkdir(self.d + '\\Mp3')
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
import FN
import PCA
import PDP

data = sio.loadmat('ex7data1')
X = data['X']

# Part One: Load Example Dataset
print('One: ======== Load Example Dataset1 ... ')
plt.plot(X[:, 0], X[:, 1], 'bo')
plt.axis(xmin=0.5, xmax=6.5, ymin=2, ymax=8)
plt.title('Example Dataset1')

# Part Two: Principal Component Analysis
print('Two: ================ Running PCA on example dataset...')
result = FN.featureNormalize(X)
X_norm = result[0]
mu = result[1]
res = PCA.pca(X_norm)
U = res[0]
S = res[1]
# Expand the 1-D array of singular values into a diagonal matrix.
S = np.eye(S.shape[0]) * S
print('Top eigenvector: ')
print('U[:,0] = %f %f ' % (U[0, 0], U[1, 0]))
print('(You should expect to see -0.707107, -0.707107)')
tmp1 = mu + 1.5 * np.dot(S[0, 0], U[:, 0].transpose())
tmp2 = mu + 1.5 * np.dot(S[1, 1], U[:, 1].transpose())
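# PCA.pca itself is not listed in this section. Below is a minimal sketch of
# what the calling code above implies it computes: the SVD of the covariance
# matrix of the already-normalized data, returning U and the 1-D array of
# singular values S. The body is an assumption, not the project's actual code.
import numpy as np

def pca(X_norm):
    # Covariance matrix of the normalized data: Sigma = (1/m) * X'X
    m = X_norm.shape[0]
    Sigma = np.dot(X_norm.T, X_norm) / m
    # Columns of U are the principal directions; S holds the singular values
    # (the caller expands S into a diagonal matrix itself).
    U, S, V = np.linalg.svd(Sigma)
    return U, S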
import psyneulink as pnl
import FN

# Note: the next assignment rebinds the name FN, shadowing the module
# imported above for the rest of this script.
FN = pnl.Composition(name='FN')
FNpop_0 = pnl.IntegratorMechanism(
    name='FNpop_0',
    function=pnl.FitzHughNagumoIntegrator(
        name='Function_FitzHughNagumoIntegrator', d_v=1, initial_v=-1))
FN.add_node(FNpop_0)
import FN
import CF
import os

if __name__ == '__main__':
    wordDir = r'D:\temp\project\01Tests\3-fightPlane\Classes'
    scrEnc = 'utf-8'
    desEnc = 'gbk'
    Type = ['.cpp', '.h', '.lrc']
    fn1 = FN.FN(wordDir)
    fn1.analyzeExtensions()
    c = 1
    error = []
    oldFiles = []
    for i in fn1.filesName:
        t = fn1.analyzeFN(i)
        if t[2] in Type:
            try:
                # Read with the source encoding, write a copy with the
                # destination encoding under a suffixed file name.
                with open(t[0] + t[1] + t[2], 'r', encoding=scrEnc) as f:
                    text = f.read()
                fileDir = t[0] + t[1] + t[2]
                if fileDir.find('_coco56_GBK_To_UTF-8') == -1:
                    print('c=', c, sep='')
                    print(t)
                    c += 1
                    oldFiles.append(fileDir)
                    newFileDir = t[0] + t[1] + '_coco56_GBK_To_UTF-8' + t[2]
                    if not os.path.exists(newFileDir):
                        with open(newFileDir, 'w', encoding=desEnc) as f2:
                            f2.write(text)
            except UnicodeDecodeError:
                # The original snippet is truncated here; recording the
                # offending file in the error list is one plausible handling.
                error.append(t[0] + t[1] + t[2])
import FN
import CF
import os

if __name__ == '__main__':
    fn1 = FN.FN(r'D:\0COCO\System\桌面\SpeedPan\C++ Primer视频教程(初级中级高级)')
    fn1.analyzeExtensions()
    d = fn1.filesNum
    d = sorted(d.items(), key=lambda item: item[1], reverse=True)
    '''
    Here d.items() turns the dict into an iterable whose elements are tuples
    such as ('lilee', 25), ('wangyan', 21), ('liqun', 32), ('lidaming', 19);
    items() converts each dict entry into a tuple. The lambda passed as the
    key argument picks the second element of each tuple as the sort key
    (writing key=lambda item: item[0] would instead sort by the first
    element, i.e. the dict key; in lambda x: y, x is the parameter and y is
    the return value). This is how a dict can be sorted by its values. Note
    that sorted() returns a list: the original key-value pairs become tuples
    inside that list.
    '''
    print(d)
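# A self-contained illustration of the sort explained above; the names and
# counts are made-up sample data, not values from this project.
counts = {'lilee': 25, 'wangyan': 21, 'liqun': 32, 'lidaming': 19}
ranked = sorted(counts.items(), key=lambda item: item[1], reverse=True)
print(ranked)  # [('liqun', 32), ('lilee', 25), ('wangyan', 21), ('lidaming', 19)]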
import FN
import CF
import os

if __name__ == '__main__':
    fn1 = FN.FN(r'H:\OneDrive - revolutionize B2C bandwidth\视频教程\架构')
    fn1.analyzeExtensions()
    d = fn1.filesNum
    # Sorts the dict by value, descending, returning a list of (key, value)
    # tuples; see the fuller note in the companion script above.
    d = sorted(d.items(), key=lambda item: item[1], reverse=True)
    print(d)
result = LC.learningCurve(tmp1, y, tmp2, yval, Lambda)
l1 = plt.plot(np.arange(1, m + 1), result[0], 'r',
              np.arange(1, m + 1), result[1], 'b')
plt.title('Learning curve for linear regression')
plt.legend(l1, ('Train', 'Cross Validation'), loc=1)
plt.xlabel('Number of training examples')
plt.ylabel('Error')
plt.show()

# Part Five: Feature Mapping for Polynomial Regression
print('Five: ==========================Feature Mapping for Polynomial Regression...')
p = 8
X_poly = PF.polyFeatures(X, p)
result = FN.featureNormalize(X_poly)
X_poly = result[0]
mu = result[1]
sigma = result[2]
X_poly = np.hstack((np.ones((m, 1)), X_poly))

Xtest = data['Xtest']
X_poly_test = PF.polyFeatures(Xtest, p)
X_poly_test = X_poly_test - mu
X_poly_test = X_poly_test / sigma
X_poly_test = np.hstack((np.ones((X_poly_test.shape[0], 1)), X_poly_test))

X_poly_val = PF.polyFeatures(Xval, p)
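# PF.polyFeatures is not listed in this fragment. A minimal sketch, assuming
# the usual mapping of each value x to the powers [x, x**2, ..., x**p], one
# power per column; the body is an assumption, not the project's actual code.
import numpy as np

def polyFeatures(X, p):
    # Map a column vector X to polynomial features X, X**2, ..., X**p.
    X = X.reshape(-1, 1)
    X_poly = np.zeros((X.shape[0], p))
    for i in range(p):
        X_poly[:, i] = (X ** (i + 1)).flatten()
    return X_poly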
@author: aa
"""
import numpy as np
import FN
import GDM
import NE

print('Loading data ...')
data = np.loadtxt('ex1data2.txt', delimiter=',')
X = data[:, 0:2]
y = data[:, 2]
m = len(y)

print('Normalizing Features ...')
tmp = FN.featureNormalize(X)
X = tmp[0]
mu = tmp[1]
sigma = tmp[2]
X = np.hstack((np.ones((m, 1)), X))

print('Running gradient descent ...')
alpha = 0.1
num_iters = 1000
theta = np.zeros((3, 1))
tmp = GDM.gradientDescentMulti(X, y, theta, alpha, num_iters)
theta = tmp[0]
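# FN.featureNormalize is used by several scripts above but not listed. A
# minimal sketch, assuming the standard zero-mean / unit-variance scaling
# implied by how its three return values (X_norm, mu, sigma) are consumed;
# an assumption, not the project's actual code.
import numpy as np

def featureNormalize(X):
    # Shift each column to zero mean and scale it to unit standard deviation.
    mu = np.mean(X, axis=0)
    sigma = np.std(X, axis=0)
    X_norm = (X - mu) / sigma
    return X_norm, mu, sigma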