def setInstance(self, instance):
    self.currentInstance = instance

    # Loads the theme in the instance (if any)
    config.ICON = utils.loadImage("favicon.ico", self.currentInstance)
    config.LOGO = utils.loadImage("logo.png", self.currentInstance)
    config.BACKGROUND = utils.loadImage("background.png", self.currentInstance)
    config.BLOG_BACKGROUND = utils.getFile("blogbackground.png", self.currentInstance)
    config.BLOG = utils.getFile("blog.html", self.currentInstance)

    # Sets the theme.
    self.setWindowIcon(QIcon(config.ICON))
    self.createImages()
    self.logo.setPixmap(config.LOGO.scaled(self.logo.size(), Qt.KeepAspectRatio))

    with open(config.BLOG, "r", encoding="utf-8") as file:
        content = file.read()
    self.theInternet.setHtml(content.replace("&background&", urllib.parse.quote(config.BLOG_BACKGROUND.replace("\\", "/"))))

    for index in range(len(self.guiElements)):
        self.guiElements[index].raise_()

    # Finishing touches. The logo needs raising because it isn't on the guiElements list.
    self.logo.raise_()

    self.instanceConfig = utils.loadInstanceSettings(self, self.currentInstance)

    # Discord Rich Presence may be unavailable (self.pres is None); ignore failures.
    try:
        self.pres.update(details="In launcher", large_image="pymcllogo512",
                         state="Selected modpack: " + self.currentInstance)
    except:
        pass
def __init__(self):
    super().__init__()
    try:
        self.currentInstance = os.listdir(config.MC_DIR + "/instances")[0]
    except:
        self.currentInstance = ""
    self.loggedIn = False  # Tells the launcher if the user is logged in.
    utils.areYouThere(config.MC_DIR + "/instances")
    screen_resolution = app.desktop().screenGeometry()  # Gets primary monitor resolution.
    self.title = config.NAME + " " + config.VER
    config.ICON = utils.loadImage("favicon.ico", self.currentInstance)
    self.setWindowIcon(QIcon(config.ICON))
    config.LOGO = utils.loadImage("logo.png", self.currentInstance)
    config.BACKGROUND = utils.loadImage("background.png", self.currentInstance)
    config.BLOG_BACKGROUND = utils.getFile("blogbackground.png", self.currentInstance)
    config.BLOG = utils.getFile("blog.html", self.currentInstance)
    self.left = (screen_resolution.width() / 2) - 427
    self.top = (screen_resolution.height() / 2) - 240
    self.launcherConfig = utils.loadSettings(self)
    self.instanceConfig = utils.loadInstanceSettings(self, self.currentInstance)
    try:
        self.pres = pypresence.Presence("548208354196062228")  # Go ahead and use this if you want, provided you are modifying the launcher. Not that I can really stop you.
        self.pres.connect()
        self.pres.update(details="In launcher", large_image="pymcllogo512",
                         state="Selected modpack: " + self.currentInstance)
    except:
        self.pres = None
    self.checkAlive(threadingEvent)
    self.update = utils.checkOnline()
    self.initUI()
def _warm(key, options):
    url = "http://static.stamped.com/%s" % key.name

    if options.verbose:
        utils.log("warming '%s' (%s)" % (key.name, url))

    try:
        utils.getFile(url)
    except:
        if options.verbose:
            utils.log("unable to download key '%s' (%s)" % (key.name, url))
def fetch(self):
    if not GitSource.fetch(self):
        return False
    if not utils.getFile(self.openssl, self.downloadDir()):
        return False
    if not utils.getFile(self.dbuslib, self.downloadDir()):
        return False
    if not utils.getFile(self.mysql, self.downloadDir()):
        return False
    return True
def _getLocalSearchResponse(self, query, latLng=None, optionalParams=None):
    params = {
        'v'   : '1.0',
        'q'   : query,
        'rsz' : 8,
        'mrt' : 'localonly',
        'key' : 'ABQIAAAAwHbLTrUsG9ibtIA3QrujsRRB6mhcr2m5Q6fm3mUuDbLfyI5H4xTNn-E18G_3Zu-sDQ3-BTh9hK2BeQ',
    }

    if latLng is not None:
        params['sll'] = self._geocoder.getEncodedLatLng(latLng)

    self._handleParams(params, optionalParams)

    url = "http://ajax.googleapis.com/ajax/services/search/local?%s" % urllib.urlencode(params)
    utils.log('[GoogleLocal] ' + url)

    try:
        # GET the data and parse the response as json
        request = urllib2.Request(url, None, {'Referer' : 'http://www.stamped.com'})
        return json.loads(utils.getFile(url, request)), url
    except:
        utils.log('[GoogleLocal] unexpected error searching "' + url + '"')
        utils.printException()
        return None, url

    return None, url
def main():
    options, args = parseCommandLine()
    getch = Getch()
    query = ''

    while True:
        diff = False
        ch = getch()

        if ch == '\x04':    # EOF
            break
        elif ch == '\x7f':  # backspace
            if len(query) > 0:
                query = query[:-1]
                diff = True
        elif ch in string.ascii_letters or ch in string.digits or ch == ' ' or ch == '.' or ch == '-':
            query = query + ch
            diff = True

        if diff:
            print query

            try:
                raw_result = utils.getFile('http://static.stamped.com/search/v2/%s.json.gz' % encode_s3_name(query))
            except urllib2.HTTPError:
                # ignore errors in the (likely) event of a non-existent autocomplete file
                continue

            if raw_result:
                results = json.loads(raw_result)

                for i in xrange(min(10, len(results))):
                    result = results[i]
                    print "%d) %s (%s - %s)" % (i + 1, result['title'], result['subtitle'], result['category'])
def listdir(self, path):
    paths = self.getTruePaths(path)
    if len(paths) != 0:
        return getFile(self.book_trees, paths)
    else:
        return self.book_trees
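# Hypothetical sketch only: the getFile helper used by the book_trees snippets above
# and below is not defined anywhere in this collection. Assuming book_trees is a
# nested dict keyed by path segments, a minimal version might walk the tree and
# return None when a segment is missing. The name and behavior here are assumptions
# for illustration, not the project's actual helper.
def getFile(tree, paths):
    node = tree
    for segment in paths:
        if not isinstance(node, dict) or segment not in node:
            return None  # missing segment: treat as "not found"
        node = node[segment]
    return node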
def addressToLatLng(self, address):
    if not self.isValid:
        return None

    params = {
        'location' : address,
        'flags'    : 'J',  # indicates json output format (defaults to xml)
    }

    (offset, count) = self._initAPIKeyIndices()
    url = ""

    while True:
        try:
            # try a different API key for each attempt
            apiIndex = self._getAPIIndex(offset, count)
            if apiIndex is None:
                #self._isValid = False
                return None

            apiKey = self._apiKeys[apiIndex]
            if apiKey is None:
                count += 1
                continue

            # construct the url
            params['appid'] = apiKey
            url = self.BASE_URL + '?' + urllib.urlencode(params)

            # GET the data and parse the response as json
            response  = json.loads(utils.getFile(url))
            resultSet = response['ResultSet']

            # extract the results from the json
            if resultSet['Error'] != 0:
                utils.log('[YahooGeocoderService] error converting "' + url + '"\n' +
                          'ErrorCode: ' + str(resultSet['Error']) + '\n' +
                          'ErrorMsg: ' + resultSet['ErrorMessage'] + '\n')
                return None

            if not 'Results' in resultSet or 0 == len(resultSet['Results']):
                return None

            primary = resultSet['Results'][0]

            # extract the lat / lng from the primary result
            latLng = (float(primary['latitude']), float(primary['longitude']))

            return self.getValidatedLatLng(latLng)
        except:
            #utils.log('[YahooGeocoderService] error converting "' + url + '"')
            # retry with another api key
            count += 1

    return None
def getdownloadurl(self, path, name):
    tmp = connect_path(path, name)
    files = getFile(self.book_trees, self.getTruePaths(tmp))
    return [connect_path(Config.SITE_BOOK_DONWLOAD, connect_path(path, ee)) for ee in files]
def handle_request(url, stats, count=8):
    print "processing '%s'" % url

    for i in xrange(count):
        try:
            utils.getFile(url, maxDelay=0)
            stats['200'] += 1
        except urllib2.HTTPError, e:
            print "error %s '%s'" % (e.code, url)
            stats['%s' % e.code] += 1
            time.sleep(.5)
            break
        except Exception, e:
            print "error %s '%s'" % (e, url)
            stats['error'] += 1
            time.sleep(.5)
            break
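# A minimal, hypothetical driver for handle_request above. The warm_urls name, the
# URL list argument, and the use of collections.defaultdict are illustrative
# assumptions; defaultdict(int) avoids the KeyError a plain dict would raise on the
# first stats['200'] += 1.
from collections import defaultdict

def warm_urls(urls):
    stats = defaultdict(int)
    for url in urls:
        handle_request(url, stats)
    print "summary: %s" % dict(stats)
    return stats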
def addressToLatLng(self, address):
    if not self.isValid:
        return False

    params = {
        'query'  : address,
        'output' : 'json',
    }

    (offset, count) = self._initAPIKeyIndices()
    url = ""

    while True:
        try:
            # try a different API key for each attempt
            apiIndex = self._getAPIIndex(offset, count)
            if apiIndex is None:
                self._isValid = False
                return None

            apiKey = self._apiKeys[apiIndex]
            if apiKey is None:
                count += 1
                continue

            # construct the url
            params['key'] = apiKey
            url = self.BASE_URL + '?' + urllib.urlencode(params)

            # GET the data and parse the response as json
            response = json.loads(utils.getFile(url))

            status = response['statusCode']
            if status != 200:
                if status != 400:
                    self._apiKeys[apiIndex] = None
                    continue
                else:
                    return None

            # extract the primary result from the json
            resource = (((response['resourceSets'])[0])['resources'])[0]
            result   = (resource['point'])['coordinates']

            # extract the lat / lng from the primary result
            latLng = (float(result[0]), float(result[1]))

            return self.getValidatedLatLng(latLng)
        except:
            #utils.log('[BingGeocoderService] error converting "' + url + '"')
            # retry with another api key
            count += 1

    return None
def render(self, asset_path, asset_type, context):
    if 'assets' not in context.render_context:
        context.render_context['assets'] = {}
    assets = context.render_context['assets']

    if asset_type not in ASSET_TYPES:
        raise Exception("unknown asset type '%s' for asset '%s'" % (asset_type, asset_path))

    if settings.DEBUG:
        return self.get_include(asset_path, asset_type)
    elif asset_type is None:
        asset_type = asset_path
        ctx = self._simplify_context(context)

        page = 'default'
        if 'page' in ctx:
            page = ctx['page']

        uri0 = 'gen/%s.%s' % (page, asset_type)
        uri1 = 'gen/%s.min.%s' % (page, asset_type)

        path0 = os.path.join(settings.STATIC_DOC_ROOT, uri0)
        path1 = os.path.join(settings.STATIC_DOC_ROOT, uri1)

        f = codecs.open(path0, encoding='utf-8', mode='w')
        f.write(assets[asset_path])
        f.close()

        munge = (asset_type == 'js')
        self._compress(path0, path1, asset_type, munge)

        return self.get_include(uri1, asset_type)
    else:
        urlinfo = urlparse.urlparse(asset_path)

        if self._is_local(asset_path):
            path = os.path.join(settings.STATIC_DOC_ROOT, asset_path)
            f = codecs.open(path, encoding='utf-8', mode='r')
            r = f.read()
            f.close()
        else:
            r = unicode(utils.getFile(asset_path))

        r = u"/*! %s */\n%s\n" % (asset_path, r)

        if asset_type in assets:
            assets[asset_type] += r
        else:
            assets[asset_type] = r

        return ''
def main():
    options, args = parseCommandLine()

    stampedAPI = MongoStampedAPI()
    accountDB  = stampedAPI._accountDB
    rs = accountDB._collection.find()

    for result in rs:
        account = accountDB._convertFromMongo(result)
        pprint(account)

        image = utils.getFile(account.profile_image)
        image = base64.encodestring(image)

        stampedAPI.updateProfileImage(account.screen_name, image)
def util_test_images(self, path, func):
    for index in xrange(len(self.images)):
        image = self.images[index]
        entityId = 'test_id_%d' % index
        func(entityId, image)

        baseurl = "%s/%s/%s" % (self.baseurl, path, entityId)
        url = "%s.jpg" % (baseurl, )

        try:
            f = utils.getFile(url)
        except HTTPError:
            logs.warn("unable to download '%s'" % url)
            raise

        image2 = self.imageDB.getImage(f)
        self.assertEqual(image.size, image2.size)
def getFiles():
    files = utils.getFile(DIR_DATA, ['txt'])

    # validation
    # ----------

    # query params
    initRotation = request.args.get('initRotation')
    if (initRotation is None):
        return bad_request('missing required query param \'initRotation\'')
    if initRotation not in ROTATE_TRANSLATE_MAP:
        return bad_request('invalid query param \'initRotation\', it must be one of ' +
                           str(list(ROTATE_TRANSLATE_MAP)) +
                           '. You can change this in \'/web-app/src/config.js\'')

    # no .txt files
    num_files = len(files)
    if (num_files == 0):
        return bad_request('no .txt files in ' + DIR_DATA)

    # no more than one .txt file
    if (num_files > 1):
        return bad_request('only one .txt file should be in ' + DIR_DATA)

    file = files[0]
    file_data = utils.parseFile(files[0]['path'], ACCEPTED_CHARS)

    # bad character in file
    if ('error' in file_data):
        return bad_request(file_data['error'])

    # process data
    # ------------
    file_data_processed = processor.processCharData(file_data, initRotation, ROTATE_TRANSLATE_MAP, CHAR_ACTION)

    # response
    # --------
    response = jsonify({
        'fileName': file['name'],
        'fileData': file_data_processed
    })
    return(response)
def addressToLatLng(self, address):
    if not self.isValid:
        return False

    params = {
        'address' : address,
        'sensor'  : 'false',
    }

    url = ""

    try:
        # construct the url
        url = self.BASE_URL + '?' + urllib.urlencode(params)

        # GET the data and parse the response as json
        response = json.loads(utils.getFile(url))

        # extract the primary result from the json
        if response['status'] != 'OK':
            if response['status'] == 'OVER_QUERY_LIMIT':
                utils.log("GoogleGeocoderService over quota usage")
                self._isValid = False
                return None
            else:
                utils.log('[GoogleGeocoderService] error converting "' + url + '"\n' +
                          'ErrorStatus: ' + response['status'] + '\n')
                return None

        result   = response['results'][0]
        location = result['geometry']['location']

        # extract the lat / lng from the primary result
        latLng = (float(location['lat']), float(location['lng']))

        return self.getValidatedLatLng(latLng)
    except:
        utils.log('[GoogleGeocoderService] error converting "' + url + '"')
        return None
def _parseResultsPage(self, pool, queue, url, name, base=False):
    utils.log('[%s] parsing page %s (%s)' % (self, name, url))

    try:
        html = utils.getFile(url)
        html = html.replace("header>", "div>")
        soup = BeautifulSoup(html)
    except:
        #utils.printException()
        utils.log("[%s] error downloading page %s (%s)" % (self, name, url))
        return

    # extract and parse the rest of the paginated results
    if base:
        page = soup.find('nav').find('span').getText()
        num_pages = int(self.page_re.match(page).groups()[0])

        for i in xrange(2, num_pages + 1):
            href = '%s&pg=%d' % (url, i)
            queue.put_nowait((href, name))

    results = soup.findAll('section', {'class' : 'CWListing'})

    for result in results:
        entity = Entity()
        entity.subcategory = "book"
        entity.awardAnnals = {}
        entity.title  = result.find('h4').find('a').getText().strip()
        entity.author = result.find('p', {'class' : 'creators'}).getText()

        key = (entity.title, entity.author)
        if key in self.seen:
            continue

        self.seen.add(key)
        self._output.put(entity)
def _getResource(self, url, token=None, parameters=None):
    # Avoid a mutable default argument: the original parameters={} would be shared
    # and mutated across calls.
    if parameters is None:
        parameters = {}

    if not re.match('http', url):
        url = "http://%s%s" % (HOST, url)

    parameters['output'] = 'json'

    oauthRequest = oauth.OAuthRequest.from_consumer_and_token(self.consumer, http_url=url, parameters=parameters, token=token)
    oauthRequest.sign_request(self.signature_method_hmac_sha1, self.consumer, token)

    if (self.verbose):
        print oauthRequest.to_url()

    self.connection.request('GET', oauthRequest.to_url())
    response = self.connection.getresponse()
    location = response.getheader('location')

    # follow the redirect if available
    if location is not None:
        return utils.getFile(location)

    return response.read()
def _parseResultsPage(self, pool, queue, url, name, base=False):
    utils.log('[%s] parsing results page %s (%s)' % (self, name, url))

    try:
        html = utils.getFile(url)
        html = html.replace('{"typeFilterHTML":"', '')[0:-2].replace('\\', '')
        soup = BeautifulSoup(html)
    except:
        #utils.printException()
        utils.log("[%s] error downloading page %s (%s)" % (self, name, url))
        return

    #self._globals['books'] = soup

    results = soup.findAll('div', {'class' : 'ev_result_block'})
    if 0 == len(results):
        return

    # extract and parse the rest of the paginated results
    if base:
        num_pages = 16
        page = int(self.page_re.match(url).groups()[0])
        url2 = url[0:url.find('&page=')]

        for i in xrange(1, num_pages):
            cur_page = page + i
            cur_url  = "%s&page=%d" % (url2, cur_page)
            name2    = "page %d" % cur_page

            queue.put_nowait((self._parseResultsPage, cur_url, name2, i == num_pages - 1))

    for result in results:
        link  = result.find('a')
        href  = link.get('href')
        name2 = link.getText().strip()

        queue.put_nowait((self._parseRestaurantPage, href, name2, False))
def exists(self, path):
    files = getFile(self.book_trees, self.getTruePaths(path))
    return files != None
def processImage():
    print("\n_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_")

    canvas = Canvas(back, width=r.winfo_width(), height=r.winfo_height(), highlightthickness=0, bg='black')
    canvas.pack()

    imgfile = utils.getFile()
    print("Processing: ", imgfile)

    try:
        print("Resizing...")
        img = utils.resizeImage(imgfile)
        print("Resize successful.")
    except Exception as e:
        print(type(e).__name__ + ":\n", e.args)
        canvas.create_text((r.winfo_width()/2), (r.winfo_height()/2), anchor='center', font=("Consolas", 16), fill='green',
                           text='Error: This filetype is not supported.\nPlease reset and try again.', justify='center')
        raise

    try:
        print("Preparing for ASCII Conversion...")
        optimizedImg = optimize.optimizeImage(img)
        print("Ready to convert.")
    except Exception as e:
        canvas.create_text((r.winfo_width()/2), (r.winfo_height()/2), anchor='center', font=("Consolas", 16), fill='green',
                           text='Something went wrong.\nPlease reset and try again.', justify='center')
        raise

    # get dimensions & resolution
    w, h = optimizedImg.width, optimizedImg.height
    res = w * h

    ### Use this to simplify the JPEG ###
    # optimizedImg.save("optimizedImg.jpeg", 'JPEG', quality=150)
    # optimizedImg = Image.open("optimizedImg.jpeg")

    px = optimizedImg.load()

    ### Use this in conjunction with above option to delete the copy it makes ###
    # import os
    # os.remove("optimizedImg.jpeg")

    try:
        asciiImg = convert.convert(w, h, px)
        print("Finished processing successfully!")
    except Exception as e:
        canvas.create_text((r.winfo_width()/2), (r.winfo_height()/2), anchor='center', font=("Consolas", 16), fill='green',
                           text='Something went wrong.\nPlease reset and try again.', justify='center')
        raise

    canvas.create_text((r.winfo_width()/2), ((r.winfo_height()/2)-20), anchor='center', font=("Consolas", 4), fill='green', text=asciiImg)

    try:
        print("Saving .txt file...")
        utils.saveAsTxt(asciiImg)
    except Exception as e:
        canvas.create_text((r.winfo_width()/2), (r.winfo_height()/2), anchor='center', font=("Consolas", 16), fill='green',
                           text='Something went wrong.\nPlease reset and try again.', justify='center')
        raise

    print("Process complete.")
    print("_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n")
def _parse_feed(self, feedname, **kwargs):
    webobject = feedname in self._webobject_feeds

    # extract keyword arguments and defaults
    region    = kwargs.pop("region", "us")
    limit     = kwargs.pop("limit", 10)
    genre     = kwargs.pop("genre", None)
    explicit  = kwargs.pop("explicit", True)
    transform = kwargs.pop("transform", True)
    format    = kwargs.pop("format", "xml" if webobject else self.DEFAULT_FORMAT)

    if format not in ["xml", "json"]:
        raise AppleRSSError("invalid request format")

    if webobject:
        url = "http://itunes.apple.com/WebObjects/MZStore.woa/wpa/MRSS/%s/sf=143441/" % (feedname,)
    else:
        url = "http://itunes.apple.com/%s/rss/%s/" % (region, feedname)

    if limit is not None:
        url = "%slimit=%d/" % (url, limit)

    if genre is not None:
        url = "%sgenre=%s/" % (url, genre)

    if explicit is not None:
        url = "%sexplicit=%s/" % (url, str(explicit).lower())

    if webobject:
        url += "rss.%s" % format
    else:
        url += format

    # attempt to download feed
    utils.log(url)
    data = utils.getFile(url)

    """
    f=open('out.xml', 'w')
    f.write(data)
    f.close()
    """

    if not transform:
        return data

    try:
        data = json.loads(data)
    except ValueError:
        utils.printException()
        utils.log(data)
        return []

    entries  = data["feed"]["entry"]
    entities = []

    if isinstance(entries, dict):
        entries = [entries]

    def _parse_entry(entities, entry):
        try:
            # We skip pre-orders because we can't actually look them up by ID. This is actually a pretty fatal
            # error because right now we don't even use the data in the feed itself; we immediately re-crawl based
            # on the iTunes ID. Anyway, there doesn't seem to be any big advantage to having preorders, so this is
            # hardly the most pressing problem.
            if "id" in entry and "label" in entry["id"]:
                if "/preorder/" in entry["id"]["label"]:
                    logs.info("Skipping preorder!")
                    return
            else:
                logs.info("WARNING: Missing id.label!")

            entity = self._parse_entity(entry)
            if entity is not None:
                entities.append(entity)
        except:
            utils.printException()

    pool = Pool(16)

    for entry in entries:
        pool.spawn(_parse_entry, entities, entry)

    pool.join()

    return entities
EXAMENS_ID = 10925  # TODO REMOVE

if len(sys.argv) > 1:
    try:
        EXAMENS_ID = sys.argv[1]
        conf.STATIC_MEDECIN = "/data/medecins.csv"
        conf.STATIC_TYPE_INTERVENTION = "/data/types_intervention.csv"
        with open("php_call.txt", "a") as f:
            f.writelines(
                f"System called at {datetime.datetime.now().strftime('%Y/%m/%d')} : argv : {sys.argv}\n")
    except:
        logger.critical(
            f"[+] Can't find the EXAMEN_ID in the arguments passed : {sys.argv} [+]")

try:
    # Read the CSV files
    EVOLUCARE_MEDECIN = utils.getFile(conf.STATIC_MEDECIN)
    EVOLUCARE_TYPES_INTERVENTION = utils.getFile(conf.STATIC_TYPE_INTERVENTION)
except Exception as e:
    logger.critical(
        "[-] Cannot init EVOLUCARE_MEDECIN and EVOLUCARE_TYPES_INTERVENTION [-]")
    logger.critical(f"[-] ERROR : {e} [-]")

EXAMEN_ROW = None   # Will contain the examen row
PATIENT_ROW = None  # Will contain the patient row
# Will contain the medecins rows (intervenant + prescripteur)
MEDECIN_ROWS = None
# Will contain the types_intervention row
TYPES_INTERVENTION_ROW = None
ORM_MSG = None  # Will contain the orm message in STR
def exists(self, path):
    files = getFile(self.book_trees, self.getTruePaths(path))
    #logging.info(len(files)!=0)
    return len(files) != 0
def downloadExternalExercise(self):
    root = self.root
    links = self.download_queue_assignment

    if not links:
        print("Empty Links")
        return False

    error_list = []
    total_count = 0
    skipped_count = 0
    processed_count = 0

    self.setGuiFileDownloaderInfo(week="Loading",
                                  topic="External Exercise",
                                  filename="",
                                  url="",
                                  output="",
                                  eta="",
                                  speed="",
                                  dl_size="",
                                  file_size="",
                                  progress=0,
                                  current_no=0,
                                  total_files=0)

    total_links = len(links)

    for link_idx, item in enumerate(links):
        path = item["path"]
        tmp = path.split("\\")
        week = tmp[0]
        topic = tmp[1]
        prefix = week.replace("Week ", "0") + topic[:2]

        base_link = item["url"]
        html = utils.getFile(base_link)
        soup = BeautifulSoup(html, 'html.parser')
        # print(soup.get_text)

        title_tag = soup.find('title')
        link_tags = soup.find_all('link')
        script_tags = soup.find_all('script')
        img_tags = soup.find_all('img')

        title = title_tag.text
        folder_name = prefix + "_" + utils.getFormattedFileName(title.lower().replace(" ", "_"))
        resource_path = os.path.join(root, "Resources", 'html', folder_name)
        media_path = os.path.join(root, "Resources", 'html', "media")
        # index_file_name = utils.getFormattedFileName(title) + ".html"
        index_file_name = item['filename']

        # print(folder_name)
        print(len(link_tags), "links(s) found")
        print(len(script_tags), "script(s) found")
        print(len(img_tags), "image(s) found")
        # print(link_tags)

        link_total_count = len(link_tags) + len(script_tags)
        total_count += link_total_count

        # print(script_tags)
        for idx, link_tag in enumerate(link_tags):
            src = link_tag.get("href")
            url = utils.getFullUrl(base_link, src)
            # print(url)

            # Update GUI Progress
            progress = (idx + 1) / link_total_count * 100
            dl_size = "{} of {}".format(idx + 1, link_total_count)
            self.setGuiFileDownloaderInfo(week=week,
                                          topic=topic,
                                          filename=index_file_name,
                                          url=url,
                                          output=resource_path,
                                          dl_size=dl_size,
                                          file_size="",
                                          progress=progress,
                                          current_no=link_idx + 1,
                                          total_files=total_links)

            print("Link {}/{}:".format(idx + 1, len(link_tags)), end=" ")

            if src == "":
                error_list.append({"error": "blank href", "path": path})
                print("Error: Blank href")
                continue

            try:
                link_filename = utils.downloadFile(url, resource_path)
                processed_count += 1
                link_tag['href'] = "../../Resources/html/" + folder_name + "/" + link_filename
            except Exception as e:
                print("Error:", e)
                error_list.append({"error": "url", "url": url, "path": path})
                continue

        for idx, script_tag in enumerate(script_tags):
            progress = (len(link_tags) + idx + 1) / link_total_count * 100
            dl_size = "{} of {}".format(len(link_tags) + idx + 1, link_total_count)

            # Update GUI Progress
            self.setGuiFileDownloaderInfo(week=week,
                                          topic=topic,
                                          filename=index_file_name,
                                          output=resource_path,
                                          dl_size=dl_size,
                                          file_size="",
                                          progress=progress,
                                          current_no=link_idx + 1,
                                          total_files=total_links)

            src = script_tag.get("src")
            if src is None:
                print("External src not found. Maybe internal script. Skipping...")
                skipped_count += 1
                continue

            url = utils.getFullUrl(base_link, src)

            # Update GUI Progress
            self.setGuiFileDownloaderInfo(week=week,
                                          topic=topic,
                                          filename=index_file_name,
                                          url=url,
                                          output=resource_path,
                                          dl_size=dl_size,
                                          file_size="",
                                          progress=progress,
                                          current_no=link_idx + 1,
                                          total_files=total_links)

            print("Script {}/{}:".format(idx + 1, len(link_tags)), end=" ")

            if src == "":
                error_list.append({"error": "blank src", "path": path})
                print("Error: Blank src")
                continue

            try:
                if src.find("main") >= 0:
                    js_file = utils.getFile(url).decode("utf-8")
                    count_static = js_file.count("static")
                    external_links = re.findall("(static[/a-zA-Z._0-9-@]*)", js_file)
                    external_links_count = len(external_links)
                    print("Found {} external links in main.js, now downloading".format(external_links_count))

                    for ext_idx, external_link in enumerate(external_links):
                        external_link_url = urljoin(base_link, external_link)

                        # Update GUI Progress
                        curr_progress = (ext_idx + 1) / len(external_links)
                        prev_progress = (len(link_tags) + idx) / link_total_count * 100
                        progress = prev_progress + (100 * curr_progress / link_total_count)
                        # progress = (len(link_tags) + idx + 1 + ext_idx + 1) / (link_total_count + len(external_links)) * 100
                        # dl_size = "{} of {}".format(len(link_tags) + idx + 1 + ext_idx + 1, link_total_count + len(external_links))
                        dl_size = "{} of {}".format(len(link_tags) + idx + 1, link_total_count)
                        self.setGuiFileDownloaderInfo(week=week,
                                                      topic=topic,
                                                      filename=index_file_name,
                                                      url=external_link_url,
                                                      output=resource_path,
                                                      dl_size=dl_size,
                                                      file_size="",
                                                      progress=progress,
                                                      current_no=link_idx + 1,
                                                      total_files=total_links)

                        print("External Link {}/{}:".format(ext_idx + 1, external_links_count), end=" ")
                        utils.downloadFile(external_link_url, media_path)

                    if count_static != external_links_count:
                        print("WARNING: Downloaded {} external links but found {}".format(external_links_count, count_static))

                    js_file = js_file.replace("static/", "../../Resources/html/")
                    js_file_path = os.path.join(root, "Resources", 'html', folder_name, "main.js")
                    link_filename = utils.savePlainFile(js_file_path, js_file)
                else:
                    link_filename = utils.downloadFile(url, resource_path)

                processed_count += 1
                script_tag['src'] = "../../Resources/html/" + folder_name + "/" + link_filename
            except Exception as e:
                print("Error:", e)
                error_list.append({"error": "url", "url": url, "path": path})
                continue

        save_path = os.path.join(root, path, index_file_name)
        utils.savePlainFile(save_path, str(soup))

    print()
    print("Total:", total_count, "file(s)")
    print("Processed:", processed_count, "file(s)")
    print("Skipped:", skipped_count, "file(s)")
    print("Errors:", len(error_list))
    print(error_list)