def download_file(file):
    """Fetch raw content for *file* and each of its parents (best effort).

    Sets ``file.raw`` to the fetched raw content ('' when the fetch does not
    return HTTP 200) and stores one raw string per parent url via
    ``file.set_parent_raws()``.  Any exception is reported and swallowed,
    matching the original best-effort behavior.
    """
    U.p(file.raw_url)
    try:
        raw, status_code = NetUtil.fetch_file_info(file.raw_url)
        file.raw = raw if status_code == 200 else ''

        parent_raws = []
        # Hoisted: call get_parent_raw_urls() once instead of re-invoking it
        # on every loop iteration as the original did.
        for parent_url in file.get_parent_raw_urls():
            raw, status_code = NetUtil.fetch_file_info(parent_url)
            # '' on network error or missing file
            parent_raws.append(raw if status_code == 200 else '')
        file.set_parent_raws(parent_raws)
    except Exception:
        # Deliberate best-effort: report the failure and continue.
        print(Message.internet_error)
def name(a):
    """Anti abs: strip the directory part of *a*, returning a relative name.

    A single trailing '/' or '\\' is removed first; absolute paths have
    their directory prefix subtracted and any leading separator dropped.
    """
    if U.inMuti(a, '/', '\\', f=str.endswith):
        a = a[:-1]
    if not isAbs(a):
        return a
    a = T.sub(a, dir(a))
    # U.repl()
    return a[1:] if U.inMuti(a, '/', '\\', f=str.startswith) else a
def execute(windSpeed, windDir, accum_windSpeed24, accum_windDir24):
    """24-hour wind change speed: magnitude of the U/V component deltas."""
    u_now = U.execute(windSpeed, windDir)
    v_now = V.execute(windSpeed, windDir)
    u_prev = U.execute(accum_windSpeed24, accum_windDir24)
    v_prev = V.execute(accum_windSpeed24, accum_windDir24)
    # hypot(du, dv) == sqrt(du**2 + dv**2)
    return hypot(u_now - u_prev, v_now - v_prev)
def execute(windSpeed, windDir, accum_windSpeed24, accum_windDir24):
    """24-hour wind change speed from U/V component deltas (calculate API)."""
    u_now = U.calculate(windSpeed, windDir)
    v_now = V.calculate(windSpeed, windDir)
    u_prev = U.calculate(accum_windSpeed24, accum_windDir24)
    v_prev = V.calculate(accum_windSpeed24, accum_windDir24)
    # hypot(du, dv) == sqrt(du**2 + dv**2)
    return hypot(u_now - u_prev, v_now - v_prev)
def execute(windSpeed, windDir, accum_windSpeed24, accum_windDir24):
    """24-hour wind change direction in degrees, normalized to [0, 360).

    Assumes arctan2 returns an array supporting boolean-mask assignment
    (numpy-style) -- TODO confirm against callers.
    """
    u_now = U.execute(windSpeed, windDir)
    v_now = V.execute(windSpeed, windDir)
    u_prev = U.execute(accum_windSpeed24, accum_windDir24)
    v_prev = V.execute(accum_windSpeed24, accum_windDir24)
    du = u_now - u_prev
    dv = v_now - v_prev
    # 57.2957 ~= degrees per radian; negate components to flip quadrant.
    WD = 57.2957 * arctan2(-du, -dv)
    WD[WD < 0] += 360
    return WD
def execute(windSpeed, windDir, accum_windSpeed24, accum_windDir24):
    """24-hour wind change direction in degrees, normalized to [0, 360)."""
    u_now = U.calculate(windSpeed, windDir)
    v_now = V.calculate(windSpeed, windDir)
    u_prev = U.calculate(accum_windSpeed24, accum_windDir24)
    v_prev = V.calculate(accum_windSpeed24, accum_windDir24)
    du = u_now - u_prev
    dv = v_now - v_prev
    # 57.2957 ~= degrees per radian; negate components to flip quadrant.
    WD = 57.2957 * arctan2(-du, -dv)
    return numpy.where(WD < 0, WD + 360, WD)
def query(self):
    """Validate self.url as a GitHub URL, fetch it, and trigger downloads.

    Returns a triple (status_code, message, payload) where payload is
    (file_list, meta) on success and None otherwise.
    """
    U.p(self.url)
    if not re.match('^https://github.com/', self.url):
        return -1, Message.invalid_url, None

    status_code, message, content = fetch(self.url)
    if status_code != 200:
        return status_code, Message.internet_error, None
    if message is not Message.success:
        # page fetched but carried no usable content
        return status_code, message, None

    file_list, meta = content[0], content[1]
    download(file_list)
    return status_code, Message.success, (file_list, meta)
def makeDirs(ap):
    '''Create each missing directory level along path *ap*.

    Returns False when any mkdir call fails, True otherwise.
    NOTE(review): reconstructed from a collapsed one-line source; the
    pairing of the else-branch below is inferred -- confirm against the
    original file's indentation.
    '''
    sp=getSplitor(ap)  # separator used inside ap ('/' or '\\')
    if not _p.isabs(ap):
        if ap.startswith('.'):
            # NOTE(review): relative './'-style paths drop into an
            # interactive repl helper instead of being created -- looks
            # like debug leftover; confirm intent.
            fr=U.__frame
            U.repl()
    else:
        ls=ap.split(sp)
        base=''
        for i in ls:
            base+=(i+sp)  # rebuild the path one level at a time
            if exist(base):continue
            else:
                try:
                    _os.mkdir(base)
                except:return False  # bare except: any mkdir failure aborts
    return True
def download_file(file):
    """Fetch the file's own raw text and the raw text of every parent.

    Sets ``file.raw`` ('' when the fetch is not HTTP 200) and stores one
    raw string per parent url via ``file.set_parent_raws()``.
    """
    raw_soup, status_code = NetUtil.fetch_info(file.raw_url)
    file.raw = raw_soup.text if status_code == 200 else ''

    parent_raws = []
    for parent_url in file.get_parent_raw_urls():
        soup, code = NetUtil.fetch_info(parent_url)
        # '' when the parent file is missing / fetch failed
        parent_raws.append(soup.text if code == 200 else '')
    U.p(file.name)
    file.set_parent_raws(parent_raws)
def ll(ap='.',stime=True,type='',t='',d=False,dir=False,f=False,file=False):
    '''Return {path : [size, atime, mtime, ctime, st_mode]} for entries under *ap*.

    When *stime* is true, float fields (the three timestamps) are converted
    to formatted strings via U.stime().  The remaining keyword args are
    passed straight through to ls() to select dirs/files.
    linux struct stat:
    http://pubs.opengroup.org/onlinepubs/009695399/basedefs/sys/stat.h.html'''
    dr={}
    for i in ls(ap,type=type,t=t,d=d,dir=dir,f=f,file=file):
        s=_os.stat(i)
        dr[i]=[size(i),s.st_atime,s.st_mtime,s.st_ctime,s.st_mode]
        if stime:
            # import U
            # Only the float entries are timestamps; st_mode/size stay ints.
            for j in py.range(len(dr[i])):
                if py.type(dr[i][j]) is py.float:dr[i][j]=U.stime(time=dr[i][j])
    return dr
def list(ap='.',r=False,type='',t='',d=False,dir=False,f=False,file=False): '''Parms:boll r recursion str (type,t) '(d,f,a,r)' default return all''' # print ap if dir:d=True if file:f=True if t and not type:type=t if 'd' in type:d=True if 'f' in type:f=True if 'a' in type:d=True;f=True if 'r' in type:r=True if d or dir or f or file:pass else:d=f=True #default return all if py.type(ap)!=py.type('') or py.len(ap)<1:ap='.' # if len(ap)==2 and ap.endswith(':'):ap+='/' if not U.inMuti(ap,'/','\\',f=str.endswith):ap+='/' # print ap # U.repl() ########## below r is result rls=[] try:r3=py.list(_os.walk(ap).next()) except Exception as ew: # print ap;raise ew return [] if ap=='./':ap='' # U.repl() r3[1]=[ap+i for i in r3[1]] r3[2]=[ap+i for i in r3[2]] if d:rls.extend(r3[1]) # if r: for i in r3[1]:rls.extend(list(i,r=r,d=d,f=f)) if f:rls.extend(r3[2]) return rls
def main():
    """Ad-hoc manual test driver.

    Prints network info and exits; everything after the first ``exit()``
    is unreachable historical experimentation kept for reference.
    """
    U = py.importU()
    py.pdb()  # drop into the debugger for interactive poking
    # U=globals()['U']# why can't globals be referenced automatically here
    import U
    U.pln(getAllNetwork())
    exit()
    # ---- unreachable below this point ----
    import sys, os
    sys.path.append('d:\pm')
    from qgb import U, T, F
    o = getVersionInfo()
    U.pln(o.dwMajorVersion, o.dwMinorVersion)
    # CreateProcessWithLogonW(
    #     lpUsername='******',
    #     lpPassword='******',
    #     lpApplicationName=r'C:\WINDOWS\system32\calc.exe')
    U.pln('[%s]' % getTitle(), getProcessPath(), U.getModPath())
def convert_commit_info(soup, url):
    """Parse a GitHub commit page (BeautifulSoup *soup* fetched from *url*).

    Returns:
        (None, None)      -- the page has no parent-branch links.
        (False, None)     -- a required page element is missing.
        (file_list, meta) -- on success: filtered changed files and Meta.
    """
    # Parent-branch ids: list of <a class="sha"> elements.
    parent_branch_id_html = soup.findAll('a', attrs={'class': 'sha'})
    if parent_branch_id_html is None or len(parent_branch_id_html) == 0:
        # No parent branch on this commit page.
        U.p("No Parent Branch")
        return None, None

    children = None  # not derivable from this page; stored as None in Meta
    parents = []

    # <a data-pjax="#js-repo-pjax-container" href="/owner/repo">repo</a>
    project_name = soup.find(
        'a', attrs={'data-pjax': '#js-repo-pjax-container'})
    if project_name is None:  # was `== None`; use identity test for None
        return False, None
    project_name = project_name.text

    # <a class="url fn" rel="author" href="/owner">owner</a>  -- author
    author = soup.find('a', attrs={'class': 'url fn', 'rel': 'author'})
    if author is None:
        return False, None
    author = author.text

    # <relative-time datetime="2016-01-11T03:09:39Z" ...> -- commit time
    date = soup.find('relative-time').get('datetime')

    # <a class="commit-author ... user-mention">name</a> -- committer
    committer = soup.find(
        'a', attrs={'class': 'commit-author user-mention'})
    if committer is None:
        # BUG FIX: originally returned bare `False`; every sibling branch
        # (and callers that unpack) expects a 2-tuple.
        return False, None
    committer = committer.text

    # <span class="sha user-select-contain">...</span> -- commit hash
    commit_hash = soup.find(
        'span', attrs={'class': 'sha user-select-contain'})
    if commit_hash is None:
        return False, None  # BUG FIX: was bare `False`
    commit_hash = commit_hash.text

    # <p class="commit-title">...</p> -- commit log title
    commit_log = soup.find('p', attrs={'class': 'commit-title'})
    if commit_log is None:
        return False, None  # BUG FIX: was bare `False`
    commit_log = commit_log.text

    U.p(author)
    U.p(date)
    U.p(committer)
    U.p(commit_hash)
    U.p(commit_log)

    parent_ids = []
    parent_urls = []

    # Per-file change status (added / modified), read from octicon svgs:
    # <svg title="added"    class="octicon octicon-diff-added" ...>
    # <svg title="modified" class="octicon octicon-diff-modified" ...>
    actions = soup.findAll(
        'svg', attrs={'class': re.compile('^octicon octicon-diff-')})
    actions_list = [action.get('title') for action in actions]  # page order

    for html in parent_branch_id_html:
        parent_branch_id = html.string
        parent_branch_url = html.get('href')
        parents.append(parent_branch_url.split('/')[-1])
        parent_ids.append(parent_branch_id)
        parent_urls.append('https://github.com' + parent_branch_url)

    meta = Meta(author, date, committer, commit_hash, commit_log, children,
                parents, project_name, actions_list)

    # Changed-file list via the diffs endpoint.  Shape:
    #   <repo>/diffs?commit=<hash>&sha1=<parent>&sha2=<hash>&start_entry=0
    # sha1 is the first parent commit id, sha2 is this commit's id,
    # start_entry is 0.  The incoming url looks like
    #   https://github.com/<owner>/<repo>/commit/<hash>
    diff_url = url.split('/commit/')[0]
    diff_url = diff_url + '/diffs?commit=' + commit_hash + '&sha1=' + parents[
        0] + "&sha2=" + commit_hash + "&start_entry=0"
    print(diff_url)
    diffResponse, code = NetUtil.fetch_info(diff_url)
    file_list = []
    file_list_htmls = diffResponse.findAll(
        name='a', attrs={'class': 'link-gray-dark'})
    print(len(file_list_htmls))
    for html in file_list_htmls:
        file_name = html.get("title")
        print(file_name)
        if filter_file(file_name):
            file_list.append(
                File.File(file_name, "", url, parent_urls, parent_ids, ""))
    return file_list, meta
# exit()
# NOTE(review): presumably calls the project-local dir() helper defined
# elsewhere in this file, not the builtin -- confirm; result is discarded.
dir('d:/test/t.py')

# int -> two-digit uppercase hex string, for every byte value 0..255.
# Generated here instead of the original hand-written 256-entry literal;
# '%02X' % i yields exactly the same '00'..'FF' strings.
DIH = DINT_HEX = INT_HEX = {i: '%02X' % i for i in range(256)}
# Reverse table: '00'..'FF' -> 0..255.  `.items()` works on both Python 2
# and 3 (the original used Python-2-only `iteritems()`).
DHI = DHEX_INT = HEX_INT = {y: x for x, y in DIH.items()}

if __name__ == '__main__':
    # Ad-hoc experiments; exits immediately -- everything after the first
    # exit() is unreachable historical code kept for reference.
    # import T,U
    l = csvAsList('process.csv')
    # U.shtml(l)
    # print getSourcePath()
    exit()
    r = hexToBytes(T.HEX.lower())
    # U.write('h.r',r)
    print(bytesToHex(r))  # was py2 `print x`; parenthesized form runs on both
    s = '''%3Ctextarea+stylew3equalsign%22width%3A100%25%3Bheight%3A100%25%22%3Epublic+class+Int%7B%0D%0A%0D%0A%09public+static+void+main%28String%5B%5D+ays%29%7B%0D%0A%09%09int+aw3equalsignays.lengthw3equalsignw3equalsign1%3FInteger.valueOf%28ays%5B0%5D%29%3A%0D%0A%09%09%2F**xjw++a**%2F++2%3B%0D%0A%09%09%0D%0A%09%09for%28int+iw3equalsign0%2Csw3equalsign0%2Ctw3equalsign0%3Bi%3Ca%3Bi%2B%2B%29%7B%0D%0A%09%09%09t%2Bw3equalsigna*new+Object%28%29%7B%0D%0A%09%09%09%09int+pow%28int+a%2Cint+n%29%7B%0D%0A%09%09%09%09%09if%28nw3equalsignw3equalsign0%29return+1%3B%0D%0A%09%09%09%09%09if%28nw3equalsignw3equalsign1%29return+a%3B%0D%0A%09%09%09%09%09return+pow%28a%2Cn-1%29*a%3B%0D%0A%09%09%09%09%7D%09%0D%0A%09%09%09%7D.pow%2810%2Ci%29%3B%0D%0A%09%09%09s%2Bw3equalsignt%3B%0D%0A%09%09%09if%28iw3equalsignw3equalsigna-1%29System.out.printf%28%22a+w3equalsign+%25s%5Cns+w3equalsign+%25s%5Cn%22%2Ca%2Cs%29%3B%09%09%0D%0A%09%09%7D%0D%0A%09%7D%0D%0A%0D%0A%7D%3C%2Ftextarea%3E%0D%0A'''
    import T, N, U
    su = 'http://www.w3school.com.cn/tiy/v.asp?code='
    # Probe the w3school "try it yourself" viewer with every 2-hex-digit code.
    for i in T.HEX:
        for j in T.HEX:
            url = su + '%' + i + j
            s = N.http(url)
            if len(s) > 0:
                U.pln(url, c=s)
    exit()