def on_post_save(self, view):
    filename = view.file_name()
    if not filename:
        return
    if not helper.checkFileExt(filename, "lua"):
        return
    # rebuild user definitions, throttled to once every 2 seconds
    curTime = time.time()
    if curTime - self.lastTime < 2:
        return
    self.lastTime = curTime
    arr, path = rebuild.rebuildSingle(filename, TEMP_PATH)
    # drop the previous definitions recorded for this file
    global USER_DEFINITION_LIST
    for i in range(len(USER_DEFINITION_LIST) - 1, -1, -1):  # stop at -1, not 0, so index 0 is included
        item = USER_DEFINITION_LIST[i]
        if item[2] == path:
            USER_DEFINITION_LIST.remove(item)
    USER_DEFINITION_LIST.extend(arr)
    path = os.path.join(TEMP_PATH, "user_definition.json")
    data = json.dumps(USER_DEFINITION_LIST)
    if not os.path.exists(TEMP_PATH):
        os.makedirs(TEMP_PATH)
    helper.writeFile(path, data)
    sublime.status_message("Current file definition rebuild complete!")
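# The snippets in this file lean on a small `helper` module that is not shown.
# Below is a minimal sketch of the calls the Sublime plugin snippets assume --
# writeFile(path, data), readFile(path), checkFileExt(filename, ext),
# loadSettings(name) -- with signatures inferred from usage here, not taken
# from any actual helper.py:

import codecs
import os

import sublime


def writeFile(path, data):
    # write UTF-8 text, creating parent directories as needed
    parent = os.path.dirname(path)
    if parent and not os.path.exists(parent):
        os.makedirs(parent)
    with codecs.open(path, "w", "utf-8") as f:
        f.write(data)


def readFile(path):
    # read a whole file as UTF-8 text
    with codecs.open(path, "r", "utf-8") as f:
        return f.read()


def checkFileExt(filename, ext):
    # True if filename ends with the given extension (without the dot)
    return filename.lower().endswith("." + ext.lower())


def loadSettings(name):
    # thin wrapper over Sublime's settings loader
    return sublime.load_settings(name + ".sublime-settings")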
def run(self, dirs):
    settings = helper.loadSettings("QuickXDev")
    quick_cocos2dx_root = settings.get("quick_cocos2dx_root", "")
    if len(quick_cocos2dx_root) == 0:
        sublime.error_message("quick_cocos2dx_root not set")
        return
    cmdPath = ""
    if sublime.platform() == "osx":
        cmdPath = quick_cocos2dx_root + "/bin/compile_scripts.sh"
    elif sublime.platform() == "windows":
        cmdPath = quick_cocos2dx_root + "/bin/compile_scripts.bat"
        # the .bat is not shipped with quick; write our bundled copy on demand
        if not os.path.exists(cmdPath):
            helper.writeFile(cmdPath, compile_scripts_bat)
    if cmdPath == "" or not os.path.exists(cmdPath):
        sublime.error_message("compile_scripts does not exist")
        return
    self.cmdPath = cmdPath
    self.compile_scripts_key = settings.get("compile_scripts_key", "")
    self.window.run_command("hide_panel")
    output = "res/game.zip"
    on_done = functools.partial(self.on_done, dirs[0])
    v = self.window.show_input_panel("Output File:", output, on_done, None, None)
    v.sel().clear()
    v.sel().add(sublime.Region(4, 8))  # preselect "game" in "res/game.zip"
def on_done(self, path, name):
    filePath = os.path.join(path, name)
    if os.path.exists(filePath):
        sublime.error_message("Unable to create file, file exists.")
    else:
        code = luaTemplate
        # fill in template attributes
        settings = helper.loadSettings("quick-comminuty-dev")
        format = settings.get("date_format", "%Y-%m-%d %H:%M:%S")
        date = datetime.datetime.now().strftime(format)
        code = code.replace("${date}", date)
        author = settings.get("author", "Your Name")
        code = code.replace("${author}", author)
        _name = settings.get("_name", name)
        code = code.replace("${_name}", _name)
        _myclass = _name.split('.')[0]
        code = code.replace("${_class}", _myclass)
        # save
        helper.writeFile(filePath, code)
        v = sublime.active_window().open_file(filePath)
        # place the cursor via the snippet's tab stops
        v.run_command("insert_snippet", {"contents": code})
        sublime.status_message("Lua file created successfully!")
def _saveSettings(self):
    self.main.setHessian(self.hessian_scale.get())
    self.main.setMinMatch(self.min_match_scale.get())
    self.main.setDistance(self.good_dist_scale.get())
    self.main.setObjects(list(self.objects))
    setListboxItems(self.main.getObjects(), self.main.select_li)
    h.writeFile('objects.csv', self.objects)
    # print(self.main.getDistance())
    # print(self.main.getMinMatch())
    # print(self.main.getHessian())
    self.master.destroy()
def run(self, dirs):
    # throttle rebuilds to once every 3 seconds
    curTime = time.time()
    if curTime - self.lastTime < 3:
        sublime.status_message("Rebuilding too frequently!")
        return
    self.lastTime = curTime
    global USER_DEFINITION_LIST
    USER_DEFINITION_LIST = rebuild.rebuild(dirs[0], TEMP_PATH)
    path = os.path.join(TEMP_PATH, "user_definition.json")
    data = json.dumps(USER_DEFINITION_LIST)
    if not os.path.exists(TEMP_PATH):
        os.makedirs(TEMP_PATH)
    helper.writeFile(path, data)
    sublime.status_message("User definition rebuild complete!")
def on_done(self, path, name):
    filePath = os.path.join(path, name)
    if os.path.exists(filePath):
        sublime.error_message("Unable to create file, file exists.")
    else:
        code = template.buildTemplate
        settings = helper.loadSettings("lufylegendDev")
        compiler_path = settings.get("google_closure_compiler_path", "")
        if len(compiler_path) == 0:
            compiler_path = "<path>/compiler-latest/compiler.jar"
        code = code.replace("${compiler}", compiler_path)
        # save
        helper.writeFile(filePath, code)
        sublime.active_window().open_file(filePath)
        sublime.status_message(name + " created successfully!")
def create_jsc_folder(self):
    jsc_folder = self.target_directory + '/' + 'jsc'
    helper.create_directory(jsc_folder)
    helper.create_directory(jsc_folder + '/cocos2d_js')
    helper.copytree(sublime.packages_path() + "/Cocos2dJSDev/cocos2d_js_lib/jsc/cocos2d_js",
                    jsc_folder + '/cocos2d_js')
    # create compiler_config.json
    compiler_config_file = sublime.packages_path() + "/Cocos2dJSDev/cocos2d_js_lib/jsc/compiler_config.json"
    compiler_config_content = helper.readFile(compiler_config_file)
    helper.writeFile(jsc_folder + "/compiler_config.json", compiler_config_content)
    # create generate_jsc.py, pointing it at the bundled bindings and jsbcc binary
    generate_jsc_file = sublime.packages_path() + "/Cocos2dJSDev/cocos2d_js_lib/jsc/generate_jsc.py"
    generate_jsc_content = helper.readFile(generate_jsc_file)
    generate_jsc_content = generate_jsc_content.replace(
        "%BINDING_JS_FOLDER%",
        sublime.packages_path() + "/Cocos2dJSDev/cocos2d_js_lib/jsc/bindings")
    generate_jsc_content = generate_jsc_content.replace(
        "%BIN_EXE_PATH%",
        sublime.packages_path() + "/Cocos2dJSDev/cocos2d_js_lib/jsc/bin/jsbcc")
    helper.writeFile(jsc_folder + "/generate_jsc.py", generate_jsc_content)
def fetchGallery(url, title, cartoonPage, page=1, urlArr=None):
    print('%s => now cartoonPage => %d' % (helper.now(), cartoonPage))
    # print('now cartoon => %s' % title)
    if not urlArr:
        urlArr = []
    pq = helper.get('%s/%d' % (url, page))
    if not pq:
        return False
    for img in pq('p>img'):
        src = img.get('src')
        if src in urlArr:
            # a repeated image URL means we have wrapped around: dump the list and stop
            dirName = os.path.join('cartoon', title)
            helper.writeFile('\n'.join(urlArr), u'%s/url.txt' % dirName)
            return True
        urlArr.append(src)
    return fetchGallery(url, title, cartoonPage, page + 1, urlArr)
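# Note the flipped argument order: the crawler snippets here call
# helper.writeFile(content, path) -- content first -- while the Sublime
# plugin snippets above use writeFile(path, data). They come from different
# projects with different helper modules. A sketch of the content-first
# variant these crawlers appear to assume:

import codecs
import os


def writeFile(content, path):
    # content-first variant used by the crawler snippets
    parent = os.path.dirname(path)
    if parent and not os.path.exists(parent):
        os.makedirs(parent)
    with codecs.open(path, "w", "utf-8") as f:
        f.write(content)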
def create_ant_folder(self):
    ant_folder = self.target_directory + '/' + 'ant'
    helper.create_directory(ant_folder)
    # create build.xml from the template, filling in the ant settings
    ant_settings = helper.loadSettings("Cocos2dJSDev").get("ant")
    build_file = sublime.packages_path() + "/Cocos2dJSDev/cocos2d_js_lib/ant/build.xml"
    build_content = helper.readFile(build_file)
    build_content = build_content.replace("%MODE%", ant_settings.get("mode"))
    build_content = build_content.replace("%DEBUG%", ant_settings.get("debug"))
    build_content = build_content.replace("%OUTPUT.js%", ant_settings.get("output"))
    build_content = build_content.replace(
        "%COMPLIER_JAR_PATH%",  # placeholder spelling must match the template
        sublime.packages_path() + "/Cocos2dJSDev/cocos2d_js_lib/ant")
    build_content = build_content.replace(
        "%COCOS2D_ROOT_PATH%",
        helper.loadSettings("Cocos2dJSDev").get("cocos2d_html5_root"))
    helper.writeFile(ant_folder + "/build.xml", build_content)
    # create cocos2d_externs.js
    cocos2d_externs_file = sublime.packages_path() + "/Cocos2dJSDev/cocos2d_js_lib/ant/cocos2d_externs.js"
    cocos2d_externs_content = helper.readFile(cocos2d_externs_file)
    helper.writeFile(ant_folder + "/cocos2d_externs.js", cocos2d_externs_content)
def input_filename_done(self, path, name):
    filePath = os.path.join(path, name)
    print(filePath)
    if os.path.exists(filePath):
        sublime.error_message("Unable to create file, file exists.")
    else:
        # derive the class name from the file name and instantiate the template
        file_name = os.path.basename(os.path.splitext(name)[0])
        template_content = helper.readFile(self.selected_template)
        template_content = template_content.replace("Template", file_name[0].upper() + file_name[1:])
        template_content = template_content.replace("template", file_name[0].lower() + file_name[1:])
        helper.writeFile(filePath, template_content)
        v = sublime.active_window().open_file(filePath)
        # done
        sublime.status_message("JS file created!")
        sublime.message_dialog("JS file created!")
def parseJs(filePath):
    # remove previously generated files
    md5filename = helper.md5(filePath)
    saveDir = os.path.join(SAVE_DIR, md5filename)
    deleteFiles(saveDir, saveDir)
    # create dir
    if not os.path.exists(saveDir):
        os.makedirs(saveDir)
    # record the source path in filepath.txt for debugging
    filepath = os.path.join(saveDir, "filepath.txt")
    helper.writeFile(filepath, filePath)
    f = codecs.open(filePath, "r", "utf-8")
    lineNum = 0
    for line in f:
        lineNum += 1
        # functions: plain declarations, object-literal members, dotted assignments
        m = re.match(r"^\s*function\s*(\w+)\s*\((.*)\)", line)
        m2 = re.match(r"^\s*(\w+)\:\s*function\s*\((.*)\)", line)
        m3 = re.match(r"^\s*(\w+\.\w+)\s*=\s*function\s*\((.*)\)", line)
        if m2:
            m = m2
        elif m3:
            m = m3
        if m:
            handleFunction(saveDir, "", m.group(1), m.group(2))
            handleDefinition(m.group(1), filePath, lineNum)
            continue
        # prototype methods
        m = re.match(r"^\s*(\w+)\.prototype\.(\w+)\s*=\s*function\s*\((.*)\)", line)
        if m:
            handleFunction(saveDir, m.group(1), m.group(2), m.group(3))
            handleDefinition(m.group(2), filePath, lineNum)
            continue
        # vars
        m = re.match(r"^\s*var\s+(\w+)", line)
        m2 = re.match(r"^\s*(\w+\.\w+)\s*=", line)
        if m2:
            m = m2
        if m:
            handleVar(saveDir, m.group(1))
            handleDefinition(m.group(1), filePath, lineNum)
            continue
    f.close()
def on_done(self, path, name):
    filePath = os.path.join(path, name)
    if os.path.exists(filePath):
        sublime.error_message("Unable to create file, file exists.")
    else:
        code = jsTemplate
        # fill in template attributes
        format = "%Y-%m-%d %H:%M:%S"
        date = datetime.datetime.now().strftime(format)
        code = code.replace("${date}", date)
        author = getpass.getuser()
        code = code.replace("${author}", author)
        # save
        helper.writeFile(filePath, code)
        v = sublime.active_window().open_file(filePath)
        # place the cursor via the snippet's tab stops
        v.run_command("insert_snippet", {"contents": code})
        sublime.status_message("JS file created successfully!")
def on_done(self, path, name):
    filePath = os.path.join(path, name)
    if os.path.exists(filePath):
        sublime.error_message("Unable to create file, file exists.")
    else:
        code = luaTemplate
        # fill in template attributes
        settings = helper.loadSettings("QuickXDev")
        format = settings.get("date_format", "%Y-%m-%d %H:%M:%S")
        date = datetime.datetime.now().strftime(format)
        code = code.replace("${date}", date)
        author = settings.get("author", "Your Name")
        code = code.replace("${author}", author)
        # save
        helper.writeFile(filePath, code)
        v = sublime.active_window().open_file(filePath)
        # place the cursor via the snippet's tab stops
        v.run_command("insert_snippet", {"contents": code})
        sublime.status_message("Lua file created successfully!")
def fetchGallery(url):
    pq = helper.get(url)
    # e.g. "SexArt – Alexis Crystal & Michael Fly – Call | AdultPhotoSets.ru"
    title = pq('title').text()
    title = title.split(' | ')[0]
    dirName = os.path.join('imgs', '0error', title)
    i = 0
    tag = None
    imgUrl = []
    aArr = pq('a.externalLink')
    if not aArr or len(aArr) < 1:
        aArr = pq('div.content>p>a')
    if not aArr or len(aArr) < 1:
        # e.g. http://imgtrex.com/8kbfdzphqsr1/daniela-dressed-for-sex-02-10000px
        arr = re.compile(r'http://imgtrex\.com/\w+/[a-z0-9-]+\.jpg').findall(pq.html())
        if len(arr) == 0:
            print('can\'t find any <a>')
            return None
        aArr = [{'href': a} for a in arr]
    if aArr and len(aArr) > 0:
        if 'imgchili.net' in aArr[0].get('href'):
            imgArr = pq('div.content>p>a>img')
            # thumbnail hosts look like http://t10.imgchili...; extract the "10"
            tag = imgArr[0].get('src').replace('http://', '').split('.imgchili')[0].replace('t', '')
        for a in aArr:
            print('%s image index => %d' % (helper.now(), i))
            url = fetchLargeImageUrl(a.get('href'), tag)
            if url is None:
                print('fetchLargeImageUrl failed')
                return None
            if url != '':
                imgUrl.append(url)
            i += 1
    if len(imgUrl) > 0:
        helper.writeFile('\n'.join(imgUrl), '%s/url.txt' % dirName)
    return title
def parseJs(file):
    # remove previously generated files
    md5filename = helper.md5(file)
    saveDir = os.path.join(SAVE_DIR, md5filename)
    deleteFiles(saveDir, saveDir)
    # create dir
    if not os.path.exists(saveDir):
        os.makedirs(saveDir)
    filepath = os.path.join(saveDir, "filepath.txt")
    helper.writeFile(filepath, file)
    end_index = file.rfind("/")
    className = file[end_index + 1:-3]
    completionsList = []
    with open(file, "r", encoding='utf-8') as f:
        for line in f:
            # sample: onLoad: function () {
            m = re.match(r' +(\w+): *function *\((.*?)\).*', line)
            if m:
                saveFunction(saveDir, className, m.group(1), m.group(2))
                continue
            # sample: ComFun.dump = function(arr, maxLevel) {
            m = re.match(r' *([a-zA-Z0-9\.]*) = function *\((.*?)\).*', line)
            if m:
                saveFunction(saveDir, className, m.group(1), m.group(2))
                continue
            # sample: var Constant = {}
            m = re.match(r'^var (.*) *=.*', line)
            if m:
                completionsList.append(m.group(1).strip())
                continue
            # sample: Constant.Enums = something
            m = re.match(r'^([\w\.]*) *=.*', line)
            if m:
                completionsList.append(m.group(1).strip())
    if "module.exports" in completionsList:
        completionsList.remove("module.exports")
    saveCompletions(completionsList, saveDir, "c")
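# A quick self-contained check of what the parseJs patterns above capture;
# the sample lines are made up, the regexes are the ones used in parseJs:

import re

print(re.match(r' +(\w+): *function *\((.*?)\).*',
               '    onLoad: function () {').groups())
# -> ('onLoad', '')
print(re.match(r' *([a-zA-Z0-9\.]*) = function *\((.*?)\).*',
               'ComFun.dump = function(arr, maxLevel) {').groups())
# -> ('ComFun.dump', 'arr, maxLevel')
print(re.match(r'^var (.*) *=.*', 'var Constant = {}').group(1).strip())
# -> 'Constant'
print(re.match(r'^([\w\.]*) *=.*', 'Constant.Enums = something').group(1).strip())
# -> 'Constant.Enums'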
def fetchComic(webPage, comicIndex, url, page=1, comicDir=None):
    pq = helper.get('%s/%d' % (url, page))
    if page == 1:
        title = pq('title').text().replace('/', '&').split(' - ')[0]
        comicDir = os.path.join('animezilla', title)
        if os.path.exists(os.path.join('animezilla', '0uploaded', title, 'done.txt')):
            return True
        if os.path.exists(os.path.join(comicDir, 'done.txt')):
            return True
        helper.mkDir(comicDir)
        if webPage == 1 and comicIndex == 16:
            if page < 90:
                return fetchComic(webPage, comicIndex, url, 90, comicDir)
    img = pq('img#comic')
    print('[%s] downloading webPage page => %d, comic index => %d, comic page => %d'
          % (helper.now(), webPage, comicIndex, page))
    downloadImg(img.attr('src'), os.path.join(comicDir, '%03d.jpg' % page), url)
    time.sleep(3)
    # the last page has no link wrapping the comic image
    if len(img.parents('a')) == 0:
        helper.writeFile('done', os.path.join(comicDir, 'done.txt'))
        return True
    return fetchComic(webPage, comicIndex, url, page + 1, comicDir)
def on_done(self, path, name):
    filePath = os.path.join(path, name)
    if os.path.exists(filePath):
        sublime.error_message("Unable to create file, file exists.")
    else:
        # load template file
        tmplPath = os.path.join(CUR_PATH, LIB_PATH, "lua.tmpl")
        code = helper.readFile(tmplPath)
        # fill in template attributes
        settings = helper.loadSettings("quickx")
        format = settings.get("date_format", "%Y-%m-%d %H:%M:%S")
        date = datetime.datetime.now().strftime(format)
        code = code.replace("${date}", date)
        attr = settings.get("template_attr", {})
        for key in attr:
            code = code.replace("${%s}" % (key), attr.get(key, ""))
        # save
        helper.writeFile(filePath, code)
        v = sublime.active_window().open_file(filePath)
        # place the cursor via the snippet's tab stops
        v.run_command("insert_snippet", {"contents": code})
        sublime.status_message("Lua file created successfully!")
def runWithPlayer(srcDir):
    global process
    global PROJECT_ROOT
    arr = os.path.split(srcDir)
    workdir = arr[0]
    PROJECT_ROOT = workdir + "/frameworks/cocos2d-x"
    # root
    quick_cocos2dx_root = checkQuickxRoot()
    if not quick_cocos2dx_root:
        return
    # player path for the current platform
    playerPath = checkPlayerPath(workdir)
    if not playerPath:
        return
    args = [playerPath]
    # launch parameters
    srcDirName = arr[1]
    args.append("-workdir")
    args.append(workdir)
    args.append("-file")
    args.append(srcDirName + "/main.lua")
    configPath = srcDir + "/config.lua"
    if os.path.exists(configPath):
        f = codecs.open(configPath, "r", "utf-8")
        width = "640"
        height = "1136"
        for line in f:
            # debug level controls logging and the console window
            m = re.match(r"^DEBUG\s*=\s*(\d+)", line)
            if m:
                debug = m.group(1)
                if debug == "0":
                    args.append("-disable-write-debug-log")
                    args.append("-disable-console")
                elif debug == "1":
                    args.append("-disable-write-debug-log")
                    args.append("-console")
                else:
                    args.append("-write-debug-log")
                    args.append("-console")
            # resolution
            m = re.match(r"^CONFIG_SCREEN_WIDTH\s*=\s*(\d+)", line)
            if m:
                width = m.group(1)
            m = re.match(r"^CONFIG_SCREEN_HEIGHT\s*=\s*(\d+)", line)
            if m:
                height = m.group(1)
        # stamp a build version next to the config
        code = "return " + datetime.datetime.now().strftime("%Y%m%d%H%M%S")
        configPath2 = srcDir + "/BuildVersion.lua"
        helper.writeFile(configPath2, code)
        f.close()
        args.append("-size")
        args.append(width + "x" + height)
        args.append("-scale")
        args.append("0.5")
    # restart the player if it is already running
    if process:
        try:
            process.terminate()
        except Exception:
            pass
    if sublime.platform() == "osx":
        process = subprocess.Popen(args)
    elif sublime.platform() == "windows":
        process = subprocess.Popen(args)
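# For reference, with DEBUG = 1 and a 640x1136 config.lua, the args list
# assembled above comes out roughly as (paths illustrative):
#
#   /path/to/player -workdir /path/to/project -file src/main.lua
#       -disable-write-debug-log -console -size 640x1136 -scale 0.5
#
# i.e. the player is pointed at the project workdir and entry script, with
# logging/console switches and a window size read from config.lua.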
def oneOffFromSitemap(url_to_sitemap, check_limit, date_limit, naming, test_regime):
    """Initially only checks a site against Google Pagespeed API """
    urls = helper.fetchUrlsFromSitemap(url_to_sitemap, date_limit)
    i = 1
    output_file = ''
    for url in urls:
        mess_to_console = '{0}. {1}'.format(i, url[1])
        if i > check_limit:
            break
        try:
            if test_regime == 'googlePageSpeed':
                check_page = check_lighthouse(url[1])
                if bool(check_page):
                    print('{0} has been checked against Google Pagespeed API'.format(mess_to_console))
                    for key in check_page:
                        output_file = output_file + '{0},{1},{2}\n'.format(url[1], key, check_page[key])
                    i = i + 1
            elif test_regime == 'httpStatusCodeCheck':
                status_code = test.httpStatusCodeCheck(url[1], False)
                print('{0} has a status code: {1}'.format(mess_to_console, status_code))
                output_file += '{0}, {1}\n'.format(url[1].replace('\n', ''), status_code)
                i = i + 1
            elif test_regime == 'mobileFriendlyCheck':
                status_message = test.mobileFriendlyCheck(url[1], privatekeys.googleMobileFriendlyApiKey)
                print("{0}. Mobile-friendliness of URL '{1}' was evaluated as: {2}".format(i, url[1], status_message))
                output_file += '{0}, {1}\n'.format(url[1].replace('\n', ''), status_message)
                i = i + 1
            elif test_regime == 'thirdPartiesCheck':
                status_message = test.thirdPartiesCheck(url[1])
                print("{0}. Third parties of URL '{1}' were evaluated as: {2}".format(i, url[1], status_message))
                output_file += '{0}, {1}\n'.format(url[1].replace('\n', ''), status_message)
                i = i + 1
            elif test_regime == 'contentCheck':
                print("{0}. Checking content of URL '{1}'.".format(i, url[1]))
                for key, value in content_check(url[1]).items():
                    output_file = output_file + '{0},{1},{2}\n'.format(url[1], key, value)
                i = i + 1
        except:
            print('Error! The request for URL "{0}" failed.\nMessage:\n{1}'.format(url[1], sys.exc_info()))
        i = i + 1
    # Writing the report
    file_name = 'rapporter/{0}_{1}_{2}.csv'.format(str(datetime.today())[:10], naming, helper.getUniqueId())
    helper.writeFile(file_name, output_file)
    print('Report written to disk at {0}'.format(file_name))
def oneOffProcess(file, test_regime='httpStatusCodeCheck'):
    """
    Inspects a textfile, assuming there's URLs in there, one URL per line.

    attributes: file path to open
    """
    f = open(file, 'r')
    urlsInTextfile = []
    iteration_counter = 1
    keep_on = True
    time_to_sleep_in_seconds = 90  # TODO: figure out why Mobile Friendly cannot handle several checks in a row -- a rate limit?
    output_file = ""
    i = 1
    while keep_on:
        url = f.readline().replace('\n', '')
        mess_to_console = '{0}. {1}'.format(iteration_counter, url)
        if len(url) < 7:
            # stop when a line is shorter than seven characters
            keep_on = False
        elif not url.endswith('.pdf'):
            # depending on which test regime is chosen
            if test_regime == 'httpStatusCodeCheck':
                status_code = test.httpStatusCodeCheck(url, False)
                print('{0} has a status code: {1}'.format(mess_to_console, status_code).replace('\n', ''))
                output_file += '{0}, {1}\n'.format(url.replace('\n', ''), status_code)
            elif test_regime == 'sitemapCheck':
                # check the status code of domain.tld/sitemap.xml, assuming
                # the URL is only the domain, not a URI
                if url[-1:] == '/':
                    url = url[:-1]
                url = '{0}/{1}'.format(url, 'sitemap.xml')
                status_code = test.httpStatusCodeCheck(url, False)
                print('{0} has a status code: {1}'.format(mess_to_console, status_code).replace('\n', ''))
                is_sitemap = "undefined"
                if str(status_code)[:1] in ("2", "3"):
                    # status code is in the 200 or 300 series
                    is_sitemap = helper.is_sitemap(helper.httpRequestGetContent(url))
                    print('Is sitemap: {0}'.format(is_sitemap))
                output_file += '{0}, {1}, {2}\n'.format(url.replace('\n', ''), status_code, is_sitemap)
            elif test_regime == 'urlHarvest':
                # fetch URLs from the page's content
                i = 0
                print('Harvesting URLs from {0}'.format(url))
                try:
                    for found_url in helper.fetchUrlsFromPage(url, 50):
                        output_file += '{0}\n'.format(found_url)
                        i += 1
                except:
                    print('Error! The URL {0} failed.'.format(url))
                # print('Found {0} URLs from {1}'.format(i, url))
            elif test_regime == 'googlePageSpeed':
                check_page = check_lighthouse(url)
                if bool(check_page):
                    print('{0} has been checked against Google Pagespeed API'.format(mess_to_console))
                    for key in check_page:
                        output_file = output_file + '{0},{1},{2}\n'.format(url, key, check_page[key])
            elif test_regime == 'mobileFriendlyCheck':
                print(url)
                status_message = test.mobileFriendlyCheck(url, privatekeys.googleMobileFriendlyApiKey)
                print("Mobile-friendliness of URL '{0}' was evaluated as: {1}".format(url, status_message))
                output_file += '{0}, {1}\n'.format(url.replace('\n', ''), status_message)
                sleep(time_to_sleep_in_seconds)  # sleeping for n seconds
            elif test_regime == 'contentCheck':
                print("{0}. Checking content of URL '{1}'.".format(i, url))
                for key, value in content_check(url).items():
                    output_file = output_file + '{0},{1},{2}\n'.format(url, key, value)
                i = i + 1
            elif test_regime == 'findString':
                searching = find_string('piwik', url)
                print("{0}. Checking for string in URL '{1}' - {2}".format(i, url, searching))
                output_file = output_file + '{0},{1}\n'.format(url, searching)
                i = i + 1
            # sleep(time_to_sleep_in_seconds)  # sleeping for n seconds
        urlsInTextfile.append(url)
        iteration_counter += 1
    f.close()
    # Writing the report
    file_name = 'rapporter/{0}_{1}_{2}.csv'.format(str(datetime.today())[:10], test_regime, helper.getUniqueId())
    helper.writeFile(file_name, output_file)
    print('The report has now been written to a file named: {0}'.format(file_name))
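# A minimal usage sketch for oneOffProcess (the file name and URLs are made
# up): one URL per line, and since lines shorter than seven characters stop
# the loop, a trailing blank line acts as a terminator.

helper.writeFile('urls.txt', 'https://example.com\nhttps://example.org\n')
oneOffProcess('urls.txt', test_regime='httpStatusCodeCheck')
# -> writes a CSV report under rapporter/<date>_httpStatusCodeCheck_<id>.csv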
def fetchGallery(url, page):
    print('now page %d' % page)
    pq = helper.get(url)
    # e.g. "SexArt – Alexis Crystal & Michael Fly – Call | AdultPhotoSets.ru"
    title = pq('title').text()
    title = title.split(' | ')[0]
    # skip galleries that were already fetched into any known location
    for parts in (('imgs', '0uploaded'),
                  ('imgs', '0uploaded', '0baidu'),
                  ('imgs', '0uploaded', 'MetArt'),
                  ('imgs', '0nas', 'MetArt'),
                  ('imgs', '0nas', 'MetArtX'),
                  ('imgs', '0error')):
        dirName = filterDirName(os.path.join(*parts, title))
        if os.path.exists(dirName):
            print('exists!!! skip!')
            return True
    # create the local directory
    dirName = os.path.join('imgs', title)
    dirName = filterDirName(dirName)
    # if url.txt already exists, this gallery has been fetched; just return
    if os.path.exists('%s/url.txt' % dirName):
        print('exists!!! skip!')
        return True
    helper.mkDir(dirName)
    i = 0
    tag = None
    imgUrl = []
    aArr = pq('a.externalLink')
    if not aArr or len(aArr) < 1:
        aArr = pq('div.content>p>a')
    if not aArr or len(aArr) < 1:
        aArr = pq('div.content>a')
    if not aArr or len(aArr) < 1:
        # e.g. http://imgtrex.com/8kbfdzphqsr1/daniela-dressed-for-sex-02-10000px
        arr = re.compile(r'http://imgtrex\.com/\w+/[a-z0-9-]+\.jpg').findall(pq.html())
        if len(arr) == 0:
            print('can\'t find any <a>')
            # known-bad galleries that can safely be treated as done
            if url == 'http://adultphotosets.ru/met-art-lupita-gifera/':
                return True
            if url == 'http://adultphotosets.ru/rylskyart-oretha-mars-second-2-mars/':
                return True
            if url == 'http://adultphotosets.ru/met-art-nikolina-deirth/':
                return True
            return False
        aArr = [{'href': a} for a in arr]
    if aArr and len(aArr) > 0:
        if 'imgchili.net' in aArr[0].get('href'):
            imgArr = pq('div.content>p>a>img')
            if not imgArr or len(imgArr) < 1:
                imgArr = pq('div.content>a>img')
            # thumbnail hosts look like http://t10.imgchili...; extract the "10"
            if imgArr and len(imgArr) > 0:
                tag = imgArr[0].get('src').replace('http://', '').split('.imgchili')[0].replace('t', '')
        for a in aArr:
            print('%s image index => %d' % (helper.now(), i))
            url = fetchLargeImageUrl(a.get('href'), tag)
            if url is None:
                if i == 0:
                    print('fetchLargeImageUrl failed')
                    return True
            else:
                if url != '':
                    imgUrl.append(url)
                i += 1
    if len(imgUrl) > 0:
        helper.writeFile('\n'.join(imgUrl), '%s/url.txt' % dirName)
    return True
def parseLua(file):
    # remove previously generated files
    md5filename = helper.md5(file)
    saveDir = os.path.join(SAVE_DIR, md5filename)
    deleteFiles(saveDir, saveDir)
    # create dir
    if not os.path.exists(saveDir):
        os.makedirs(saveDir)
    # record the source path in filepath.txt for debugging
    filepath = os.path.join(saveDir, "filepath.txt")
    helper.writeFile(filepath, file)
    completionsList = []
    f = codecs.open(file, "r", "utf-8")
    lineNum = 0
    for line in f:
        lineNum += 1
        # class declarations
        m = re.match(r"^local\s+(\w+)\s*=\s*\{\}", line)
        if m:
            completionsList.append(m.group(1))
            handleDefinition(m.group(1), None, file, lineNum)
            continue
        m = re.match(r"^local\s+(\w+)\s*=\s*class\(", line)
        if m:
            completionsList.append(m.group(1))
            handleDefinition(m.group(1), None, file, lineNum)
            continue
        m = re.match(r"^(\w+)\s*=\s*class\(", line)
        if m:
            completionsList.append(m.group(1))
            handleDefinition(m.group(1), None, file, lineNum)
            continue
        # functions
        m = re.match(r"^function\s+(\w+\.*\w*)\s*\((.*)\)", line)
        if m:
            saveFunction(saveDir, "", m.group(1), m.group(2))
            handleDefinition(m.group(1), m.group(2), file, lineNum)
            continue
        # class methods (skip the constructor)
        m = re.match(r"^function\s+(\w+)\:(\w+)\s*\((.*)\)", line)
        if m:
            method = m.group(2)
            if method == "ctor":
                continue
            saveFunction(saveDir, m.group(1), method, m.group(3))
            handleDefinition(m.group(2), m.group(3), file, lineNum, m.group(1) + ":" + m.group(2))
            continue
        # local properties
        m = re.match(r"^\s*local\s+(\w+)\s*", line)
        if m:
            completionsList.append(m.group(1))
            continue
        m = re.match(r"^\s*(self\.\w+)\s*=", line)
        if m:
            completionsList.append(m.group(1))
            continue
        # global properties
        m = re.match(r"^(\w+\.?\w*)\s*=", line)
        if m:
            completionsList.append(m.group(1))
            handleDefinition(m.group(1), None, file, lineNum)
            continue
    f.close()
    saveCompletions(completionsList, saveDir, "c")
def content_check(check_url, strategy='mobile'):
    """
    Checks the page content: title, meta description, links, images,
    and readability. In addition to the 'mobile' strategy there is also
    'desktop', aimed at the desktop user's preferences.
    Returns a dictionary of the results.

    attributes: check_url, strategy
    """
    check_url = check_url.strip()
    return_dict = {}
    try:
        get_content = helper.httpRequestGetContent(check_url)
        soup = BeautifulSoup(get_content, "html.parser")
        pagetitle = soup.title.string
        return_dict['pagetitle'] = '"{0}"'.format(pagetitle)
        return_dict['pagetitle_length'] = len(pagetitle)
        return_dict['num_links'] = len(soup.find_all('a'))
        # checking images
        images = soup.find_all('img')
        num_images = len(images)
        return_dict['num_images'] = num_images
        i = 0
        for image in images:
            if image.get('alt') is not None:
                i = i + 1
        return_dict['num_images_without_alt'] = num_images - i
        try:
            meta_desc = soup.findAll(attrs={"name": "description"})[0]['content']
            return_dict['meta_desc'] = '"{0}"'.format(meta_desc)
            return_dict['meta_desc_length'] = len(meta_desc)
        except IndexError:
            return_dict['meta_desc'] = ''
            return_dict['meta_desc_length'] = 0
        except:
            print('Meta desc check for URL \'{0}\' failed, reason: {1}'.format(check_url, sys.exc_info()[0]))
        # checking readability: strip non-visible elements first
        for s in soup(['style', 'script', '[document]', 'head', 'title']):
            s.extract()
        if True:
            # readability for the whole page; switch to the else branch to
            # score only the main content instead
            visible_text = soup.getText()
        else:
            # "main" might be a "div" in your markup and "main-wrapper" is the
            # class to pull content from -- change both to whatever you use
            visible_text = soup.find("main", class_="main-wrapper").getText()
        # put sentence-ending punctuation on line boundaries
        visible_text = "?\n".join(visible_text.split("?"))
        visible_text = "!\n".join(visible_text.split("!"))
        visible_text = ".\n".join(visible_text.split("."))
        file_name = 'tmp/{0}_{1}_{2}.txt'.format(str(datetime.today())[:10], 'contentCheck', helper.getUniqueId())
        helper.writeFile(file_name, visible_text)
        readability = subprocess.check_output(['readability', file_name])
        readability = readability.decode("utf-8")
        helper.delete_file(file_name)  # comment out if you'd like to see the text files that are scored
        # helper.writeFile('tmp/readability-output.txt', readability)  # uncomment to inspect the readability output
        for line in readability.split('\n'):
            try:
                return_dict[line.split(':')[0].strip()] = line.split(':')[1].strip()
            except:
                pass
    except:
        # give up and hope for more luck with the next URL
        print('Error! Unfortunately the request for URL "{0}" failed, message:\n{1}'.format(check_url, sys.exc_info()[0]))
    return return_dict
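# The readability CLI output parsed above is assumed to be "key: value"
# lines; a toy illustration of the same split logic on fabricated output
# (not real readability results):

sample = 'Flesch-Kincaid Reading Ease: 64.2\nColeman-Liau index: 9.1\n'
parsed = {}
for line in sample.split('\n'):
    try:
        parsed[line.split(':')[0].strip()] = line.split(':')[1].strip()
    except IndexError:
        pass
print(parsed)  # {'Flesch-Kincaid Reading Ease': '64.2', 'Coleman-Liau index': '9.1'}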
            'Weight': modelData.get('weight'),
            'Measurements': '%s/%s/%s' % (modelData.get('chest', '0'),
                                          modelData.get('waist', '0'),
                                          modelData.get('hip', '0')),
            'Ethnicity': modelData.get('ethnicity')
        }
        modelArr.append(model)
        # download the avatar first
        helper.downloadImg(model.get('img'),
                           os.path.join(model_dir, '%s.jpg' % model.get('name')))
        helper.writeFile(json.dumps(model),
                         os.path.join(model_dir, '%s.json' % model.get('name')))
    page += 1
    if page > total_pages:
        break

photo_dir = '/Users/eddie104/Documents/hongjie/photosets/femjoy/photo'
helper.mkDir(photo_dir)
for model in modelArr:
    page = 1
    while True:
        url = 'https://www.femjoy.com/api/v2/actors/%s/galleries?thumb_size=481x642&limit=20&page=%d' % (
            model.get('slug'), page)
        txt = helper.get(url, returnText=True)
        jsonData = json.loads(txt)
        total_pages = jsonData.get('pagination').get('total_pages')
def run(self):
    '''Parse the site's page source.'''
    time.sleep(3.6)
    global platform
    global error_detail_url
    try:
        slug = self.url.replace('https://www.goat.com/sneakers/', '')
        html = helper.get(self.url, returnText=True, platform=platform)
        if html:
            # the product data is embedded as JSON in window.__context__
            json_data = re.compile(r'window.__context__.*').findall(html)[0]
            json_data = json_data.replace('window.__context__ = ', '')
            json_data = json_data.replace('</script>', '')
            json_data = json.loads(json_data)
            json_data = json_data.get('default_store')
            json_data = json_data.get('product-templates')
            product_json = json_data.get('slug_map').get(slug)
            name = product_json.get('name')
            number = product_json.get('sku')
            color_value = product_json.get('details')
            color_name = name.split('\'')[1] if '\'' in name else ''
            size_list = product_json.get('formatted_available_sizes_new_v2')
            size_price_list = [{
                'size': float(data.get('size')),
                'price': float(data.get('price_cents') / 100),
                'isInStock': True
            } for data in size_list]
            img_downloaded = mongo.is_pending_goods_img_downloaded(self.url)
            if not img_downloaded:
                img_url = product_json.get('original_picture_url')
                result = helper.downloadImg(img_url, os.path.join('.', 'imgs', platform, '%s.jpg' % number))
                if result == 1:
                    # upload to Qiniu
                    qiniuUploader.upload_2_qiniu(platform, '%s.jpg' % number,
                                                 './imgs/%s/%s.jpg' % (platform, number))
                    img_downloaded = True
            mongo.insert_pending_goods(name, number, self.url, size_price_list,
                                       ['%s.jpg' % number], self.gender, color_value,
                                       platform, '5bbf4561c7e854cab45218ba',
                                       self.crawl_counter, color_name, img_downloaded)
            fetched_url_list.append(self.url)
            helper.writeFile(json.dumps(fetched_url_list), './logs/goat-%s.json' % helper.today())
        else:
            error_counter = error_detail_url.get(self.url, 1)
            error_detail_url[self.url] = error_counter + 1
            helper.log('[ERROR] error timer = %s, url = %s' % (error_counter, self.url), platform)
            if error_counter < 3:
                self.q.put(self.url)
    except Exception as e:
        error_counter = error_detail_url.get(self.url, 1)
        error_detail_url[self.url] = error_counter + 1
        helper.log('[ERROR] error timer = %s, url = %s' % (error_counter, self.url), platform)
        helper.log(e, platform)
        if error_counter < 3:
            self.q.put(self.url)
    finally:
        helper.log('[INFO] %s is done' % self.url, platform)
    'thompson-sampling-with-hint'
]
args = parseArguements(algorithms)

# Get the true means from the file
means_true = helper.readFile(args.instance)

# Call the appropriate algorithm
regret = None
if args.algorithm == 'epsilon-greedy':
    regret = epsilonGreedy(args.randomSeed, args.horizon, means_true, args.epsilon, args.verbose)
elif args.algorithm == 'ucb':
    regret = ucb(args.randomSeed, args.horizon, means_true, args.verbose)
elif args.algorithm == 'kl-ucb':
    regret = ucbKL(args.randomSeed, args.horizon, means_true, args.verbose)
elif args.algorithm == 'thompson-sampling':
    regret = thompsonSampling(args.randomSeed, args.horizon, means_true, args.verbose)
elif args.algorithm == 'thompson-sampling-with-hint':
    regret = thompsonSamplingWithHint(args.randomSeed, args.horizon, means_true, args.verbose)
else:
    regret = float('inf')

if regret is not None:
    # Print output to console and write to file
    result = f'{args.instance}, {args.algorithm}, {args.randomSeed}, {args.epsilon}, {args.horizon}, {regret}'
    print(result)
    helper.writeFile(f'{args.output}.txt', result)
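# A hedged invocation sketch: the flag names below mirror the args.*
# attributes read above (instance, algorithm, randomSeed, epsilon, horizon,
# output), but their exact spellings live in parseArguements, which is not
# shown here:
#
#   python bandit.py --instance instances/i-1.txt --algorithm ucb \
#       --randomSeed 42 --epsilon 0.02 --horizon 10000 --output results
#
# The computed regret is printed and written via helper.writeFile to
# results.txt as a CSV-style line.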
def fetch_model(url, name, head_img):
    '''fetch model'''
    model_dir = os.path.join('vivthomas', 'model')
    helper.mkDir(model_dir)
    helper.mkDir(os.path.join('vivthomas', 'photo'))
    # download the avatar first
    helper.downloadImg(head_img, os.path.join(model_dir, '%s.jpg' % name))
    if os.path.exists(os.path.join('vivthomas', 'model', '%s.json' % name)):
        return
    # then fetch the detailed data
    model_info = {
        'name': name,
        'photos': []
    }
    pyquery = helper.get(url)
    country_span = pyquery('.custom-country')
    model_info['country'] = country_span.text() if country_span else 'unknow'
    # collect the photo data
    custom_content_list = pyquery('.custom-content-list')
    custom_content = None
    for item in custom_content_list:
        if item.getchildren()[0].getchildren()[0].text.startswith('Photos with'):
            custom_content = item
            break
    if custom_content is None:
        helper.writeFile(json.dumps(model_info),
                         os.path.join('vivthomas', 'model', '%s.json' % name))
        return
    list_group_item_list = custom_content.getchildren()[2].findall('li')
    for list_group_item in list_group_item_list:
        custom_list_item_detailed = list_group_item.getchildren()[1]
        img = custom_list_item_detailed.getchildren()[0].getchildren()[0].getchildren()[0]
        photo_name = img.get('alt')
        safe_name = photo_name.replace('/', ' ')  # a '/' in the name would break the path
        # e.g. "Released: Feb 26, 2016" -> "2016-2-26"
        date_str = custom_list_item_detailed.getchildren()[1].getchildren()[1].text_content().split(': ')[1]
        date_str = '%s-%d-%s' % (date_str.split(', ')[1],
                                 helper.getMonth(date_str.split(' ')[0]),
                                 date_str.split(' ')[1].replace(',', ''))
        # model names
        arr = custom_list_item_detailed.getchildren()[1].getchildren()[2].getchildren()
        model_name_arr = []
        for i in range(1, len(arr)):
            model_name_arr.append(arr[i].text)
        # download the photo-set cover
        photo_path = os.path.join('vivthomas', 'photo',
                                  '%s_%s' % (date_str, safe_name),
                                  '%s_%s.jpg' % (date_str, safe_name))
        helper.downloadImg(img.get('src'), photo_path)
        photo_json = {
            'date': date_str,
            'name': photo_name,
            'model': model_name_arr
        }
        photo_json_str = json.dumps(photo_json)
        model_info.get('photos').append(photo_json)
        helper.writeFile(photo_json_str,
                         os.path.join('vivthomas', 'photo',
                                      '%s_%s' % (date_str, safe_name),
                                      '%s_%s.json' % (date_str, safe_name)))
    helper.writeFile(json.dumps(model_info),
                     os.path.join('vivthomas', 'model', '%s.json' % name))
def fetchModel(url=None, headUrl=None, name='Abril C', score=8.97):
    if url is None:
        url = 'https://www.eternaldesire.com/model/abril-c/'
    if headUrl is None:
        headUrl = 'https://static.eternaldesire.com/media/headshots/abril-c.jpg?fv=e6f189022422389d377149f795d1da13'
    modelPath = os.path.join('eternaldesire', 'models', name)
    helper.mkDir(modelPath)
    helper.downloadImg(headUrl, os.path.join(modelPath, '%s_EternalDesire.jpg' % name))
    modelInfo = {
        'name': name,
        'score': score,
        'Age first shot': 0,
        'Eye color': '',
        'Hair color': '',
        'Breasts': '',
        'Shaved': '',
        'Measurements': '',
        'Height': '',
        'Weight': '',
        'Country': '',
        'Ethnicity': '',
        'photos': []
    }
    pq = helper.get(url, None, None, 1)
    infoLiArr = pq('.model_info > ul > li')
    for li in infoLiArr:
        arr = li.text_content().split(': ')
        for key in modelInfo:
            if key == arr[0]:
                modelInfo[key] = arr[1]
                break
    photoIndex = 1
    while photoIndex < 100:
        photoDivArr = pq('#latest_photo_update_%d .update_cell' % photoIndex)
        liArr = pq('#latest_photo_update_%d .hover_container_stats li' % photoIndex)
        if not liArr or len(liArr) == 0:
            break
        for i in range(0, len(photoDivArr)):
            photoInfo = {'name': '', 'date': '0.0.1970', 'model': ''}
            img = photoDivArr[i].find('a').find('img')
            coverUrl = img.get('src')
            photoInfo['name'] = img.get('alt')
            # each photo cell contributes three stat <li> entries
            photoInfo['date'] = liArr[i * 3].text_content().replace('Date published:', '')
            photoInfo['model'] = liArr[i * 3 + 1].text_content().replace('Featuring: ', '')
            jsonStr = json.dumps(photoInfo)
            photoPath = os.path.join('eternaldesire', 'photos',
                                     photoInfo.get('name') + '-' + photoInfo['model'])
            helper.mkDir(photoPath)
            helper.writeFile(jsonStr, os.path.join(photoPath, 'info.json'))
            helper.downloadImg(coverUrl,
                               os.path.join(photoPath, '%s_cover_EternalDesire.jpg' % photoInfo.get('name')))
            modelInfo['photos'].append(photoInfo)
        photoIndex += 1
    helper.writeFile(json.dumps(modelInfo), os.path.join(modelPath, 'info.json'))