def downloadThumbnail(self):
    thumbUrl = self.list[self.idx][3]
    if not thumbUrl.startswith("http://"):
        thumbUrl = "%s%s" % (MAIN_PAGE, thumbUrl)
    try:
        req = urllib2.Request(thumbUrl)
        url_handle = urllib2.urlopen(req)
        headers = url_handle.info()
        contentType = headers.getheader("content-type")
    except:
        contentType = None
    if contentType:
        if 'image/jpeg' in contentType:
            self.thumb = "/tmp/zdf.jpg"
        elif 'image/gif' in contentType:
            self.thumb = "/tmp/zdf.gif"
        elif 'image/png' in contentType:
            self.thumb = "/tmp/zdf.png"
        else:
            print "[ZDF Mediathek] Unknown thumbnail content-type:", contentType
            self.thumb = None
    else:
        self.thumb = None
    if self.thumb:
        downloadPage(thumbUrl, self.thumb).addCallback(self.downloadThumbnailCallback).addErrback(self.downloadThumbnailError)
    else:
        self.buildEntry(None)
def getOFDB(self):
    self.resetLabels()
    if self.eventName == "":
        s = self.session.nav.getCurrentService()
        info = s and s.info()
        event = info and info.getEvent(0)  # 0 = now, 1 = next
        if event:
            self.eventName = event.getEventName()
    if self.eventName != "":
        try:
            pos = self.eventName.index(" (")
            self.eventName = self.eventName[0:pos]
        except ValueError:
            pass
        if self.eventName[-3:] == "...":
            self.eventName = self.eventName[:-3]
        for article in ["The", "Der", "Die", "Das"]:
            if self.eventName[:4].capitalize() == article + " ":
                self.eventName = self.eventName[4:] + ", " + article
        self["statusbar"].setText(_("Query OFDb: %s...") % (self.eventName))
        try:
            self.eventName = urllib.quote(self.eventName)
        except:
            self.eventName = urllib.quote(self.eventName.decode('utf8').encode('ascii', 'ignore'))
        localfile = "/tmp/ofdbquery.html"
        fetchurl = "http://www.ofdb.de/view.php?page=suchergebnis&Kat=DTitel&SText=" + self.eventName
        print "[OFDb] Downloading Query " + fetchurl + " to " + localfile
        downloadPage(fetchurl, localfile).addCallback(self.OFDBquery).addErrback(self.fetchFailed)
    else:
        self["statusbar"].setText(_("Couldn't get Eventname"))
def upgradeCB(self, result=None):
    print "[UMMeteo] - upgrade:", result
    if result:
        print "\n[UMMeteo] - upgrade yes\n"
        downloadPage('http://e2.areq.eu.org/ummeteo/update.py', PluginPath + '/update.py').addCallback(self.goupCB).addErrback(self.errorUpdate)
    else:
        print "[UMMeteo] upgrade cancel\n"
def getCover(self, url):
    self.getCoverTimerStart()
    print "getCover:", url
    if url:
        downloadPage(url, self.COVER_PIC_PATH).addCallback(self.showCover).addErrback(self.dataErrorP)
    else:
        self.showCoverNone()
def downloadThumbnail(self, thumbUrl):
    if thumbUrl is not None:
        thumbID = thumbUrl.rsplit("/", 1)[1]
        thumbFile = None
        if not thumbUrl.startswith("http://"):
            thumbUrl = "%s%s" % (MAIN_PAGE, thumbUrl)
        try:
            req = urllib2.Request(thumbUrl)
            url_handle = urllib2.urlopen(req)
            headers = url_handle.info()
            contentType = headers.getheader("content-type")
        except:
            contentType = None
        if contentType:
            if 'image/jpeg' in contentType:
                thumbFile = "/tmp/" + thumbID + ".jpg"
            elif 'image/gif' in contentType:
                thumbID = None
                # thumbFile = "/tmp/" + thumbID + ".gif"
            elif 'image/png' in contentType:
                thumbFile = "/tmp/" + thumbID + ".png"
            else:
                print "[ZDF Mediathek] Unknown thumbnail content-type:", contentType
        if thumbFile is not None:
            if os_path.exists(thumbFile) == True:
                # already downloaded
                self.downloadThumbnailCallback(None, thumbFile, thumbID)
            else:
                if self.png_cache.get(thumbID, None) is None:
                    downloadPage(thumbUrl, thumbFile).addCallback(self.downloadThumbnailCallback, thumbFile, thumbID).addErrback(self.downloadThumbnailError, thumbID)
                else:
                    self.updateEntry(thumbID, thumbFile)
def GoogleImageCallback(self, result):
    global coverfiles
    if self.nextGoogle:
        self.currentGoogle = self.nextGoogle
        self.nextGoogle = None
        sendUrlCommand(self.currentGoogle, None, 10).addCallback(self.GoogleImageCallback).addErrback(self.Error)
        return
    self.currentGoogle = None
    foundPos = result.find("unescapedUrl\":\"")
    foundPos2 = result.find("\",\"url\":\"")
    if foundPos != -1 and foundPos2 != -1:
        url = result[foundPos + 15:foundPos2]
        if len(url) > 15:
            url = url.replace(" ", "%20")
            print "download url: %s " % url
            validurl = True
        else:
            validurl = False
            print "[SHOUTcast] invalid cover url or pictureformat!"
            if config.plugins.shoutcast.showcover.value:
                self["cover"].doHide()
        if validurl:
            self.currentcoverfile = (self.currentcoverfile + 1) % len(coverfiles)
            try:
                os.unlink(coverfiles[self.currentcoverfile - 1])
            except:
                pass
            coverfile = coverfiles[self.currentcoverfile]
            print "[SHOUTcast] downloading cover from %s to %s" % (url, coverfile)
            downloadPage(url, coverfile).addCallback(self.coverDownloadFinished, coverfile).addErrback(self.coverDownloadFailed)
def buildList(self):
    if len(self.movies):
        movie = self.movies[0]
        thumbUrl = movie[4]
        try:
            req = urllib2.Request(thumbUrl)
            url_handle = urllib2.urlopen(req)
            headers = url_handle.info()
            contentType = headers.getheader("content-type")
        except:
            contentType = None
        if contentType:
            if 'image/jpeg' in contentType:
                self.thumb = "/tmp/ard.jpg"
            elif 'image/gif' in contentType:
                self.thumb = "/tmp/ard.gif"
            elif 'image/png' in contentType:
                self.thumb = "/tmp/ard.png"
            else:
                print "[ARD Mediathek] Unknown thumbnail content-type:", contentType
                self.thumb = None
        else:
            self.thumb = None
        if self.thumb:
            downloadPage(thumbUrl, self.thumb).addCallback(self.downloadThumbnailCallback).addErrback(self.downloadThumbnailError)
        else:
            self.buildEntry(None)
    else:
        self["list"].setList(self.listMovies)
        self.deactivateCacheDialog()
def IMDBquery(self, string):
    print "[IMDBquery]"
    self["statusbar"].setText(_("IMDb Download completed"))
    self.html2utf8(open("/tmp/imdbquery.html", "r").read())
    self.generalinfos = self.generalinfomask.search(self.inhtml)
    if self.generalinfos:
        self.IMDBparse()
    else:
        if re.search("<title>(?:IMDb.{0,9}Search|IMDb Titelsuche)</title>", self.inhtml):
            searchresultmask = re.compile("<tr> <td.*?img src.*?>.*?<a href=\".*?/title/(tt\d{7,7})/\".*?>(.*?)</td>", re.DOTALL)
            searchresults = searchresultmask.finditer(self.inhtml)
            self.resultlist = [(self.htmltags.sub('', x.group(2)), x.group(1)) for x in searchresults]
            self["menu"].l.setList(self.resultlist)
            if len(self.resultlist) > 1:
                self.Page = 1
                self.showMenu()
            else:
                self["detailslabel"].setText(_("No IMDb match."))
                self["statusbar"].setText(_("No IMDb match."))
        else:
            splitpos = self.eventName.find('(')
            if splitpos > 0 and self.eventName.endswith(')'):
                self.eventName = self.eventName[splitpos + 1:-1]
                self["statusbar"].setText(_("Re-Query IMDb: %s...") % (self.eventName))
                event_quoted = urllib.quote(self.eventName.decode('utf8').encode('latin-1', 'ignore'))
                localfile = "/tmp/imdbquery.html"
                fetchurl = "http://" + self.IMDBlanguage + "imdb.com/find?q=" + event_quoted + "&s=tt&site=aka"
                print "[IMDB] Downloading Query " + fetchurl + " to " + localfile
                downloadPage(fetchurl, localfile).addCallback(self.IMDBquery).addErrback(self.fetchFailed)
            else:
                self["detailslabel"].setText(_("IMDb query failed!"))
def mvidown(self, stadt):
    downlink = "http://www.meinestadt.de/" + stadt + "/bilder"
    downname = "/tmp/.stadtindex"
    stadd = stadt
    if fileExists(downname):
        os.system("rm -rf " + downname)
    downloadPage(downlink, downname).addCallback(self.jpgdown, stadd).addErrback(self.error)
def get_xmlfile(self): if self.isServerOnline(): xmlfile = "http://xml.weather.yahoo.com/forecastrss?w=%s&d=10&u=c" % config.plugins.yweather.weather_city.value downloadPage(xmlfile, "/tmp/yweather.xml").addCallback(self.downloadFinished).addErrback(self.downloadFailed) else: self["text_now"].text = _('weatherserver not respond') self.notdata = True
def getResults(self, data):
    data = data.replace('\/', '')
    if config.plugins.tmdb.firsthit.value:
        # list = re.findall('"id":(.*?),.*?original_title":"(.*?)".*?"poster_path":"(.*?)".*?title":"(.*?)"', data, re.S)
        list = re.findall('.*?"poster_path":"(.*?)".*?"id":(.*?),.*?"original_title":"(.*?)".*?"title":"(.*?)"', data, re.S)
        if list:
            for coverPath, id, otitle, title in list:
                url_cover = "http://image.tmdb.org/t/p/%s/%s" % (config.plugins.tmdb.themoviedb_coversize.value, coverPath)
                url = "http://api.themoviedb.org/3/movie/%s?api_key=8789cfd3fbab7dccf1269c3d7d867aff&append_to_response=releases,trailers,casts&language=%s" % (id, config.plugins.tmdb.lang.value)
                cover = self.tempDir + id + ".jpg"
                downloadPage(url_cover, cover).addCallback(self.openMovie, title, url, cover, id).addErrback(self.dataError)
                break
        else:
            print "[TMDb] no movie found."
            self['searchinfo'].setText(_("No Movie information found for %s") % self.text)
    else:
        urls = []
        # list = re.findall('"id":(.*?),.*?original_title":"(.*?)".*?"poster_path":"(.*?)".*?title":"(.*?)"', data, re.S)
        list = re.findall('.*?"poster_path":"(.*?)".*?"id":(.*?),.*?"original_title":"(.*?)".*?"title":"(.*?)"', data, re.S)
        if list:
            for coverPath, id, otitle, title in list:
                url_cover = "http://image.tmdb.org/t/p/%s/%s" % (config.plugins.tmdb.themoviedb_coversize.value, coverPath)
                url = "http://api.themoviedb.org/3/movie/%s?api_key=8789cfd3fbab7dccf1269c3d7d867aff&append_to_response=releases,trailers,casts&language=%s" % (id, config.plugins.tmdb.lang.value)
                # print "[tmbd] " + title, url_cover, "\n", url
                urls.append(((title, url_cover, url, id),))
            self['list'].setList(urls)
            self.getInfo()
        else:
            print "[TMDb] no movie found."
            self['searchinfo'].setText(_("No Movie information found for %s") % self.text)
def showDetails(self): self["ratinglabel"].show() self["castlabel"].show() self["detailslabel"].show() if self.resultlist and self.Page == 0: link = self["menu"].getCurrent()[1] title = self["menu"].getCurrent()[0] self["statusbar"].setText(_("Re-Query OFDb: %s...") % (title)) localfile = "/tmp/ofdbquery2.html" fetchurl = "http://www.ofdb.de/film/" + link print "[OFDb] downloading query " + fetchurl + " to " + localfile downloadPage(fetchurl,localfile).addCallback(self.OFDBquery2).addErrback(self.fetchFailed) self["menu"].hide() self.resetLabels() self.Page = 1 if self.Page == 2: self["extralabel"].hide() self["poster"].show() if self.ratingstars > 0: self["starsbg"].show() self["stars"].show() self["stars"].setValue(self.ratingstars) self.Page = 1
def get_xmlfile(self): # xmlfile = "http://weather.service.msn.com/data.aspx?weadegreetype=C&culture=ru-RU&weasearchstr=Moscow,Moscow-City,Russia&src=outlook" xmlfile = "http://weather.service.msn.com/data.aspx?weadegreetype=%s&culture=%s&weasearchstr=%s&src=outlook" % (self.degreetype, self.language, self.city) xmlfile1 = "http://weather.service.msn.com/data.aspx?weadegreetype=%s&culture=en-US&weasearchstr=%s&src=outlook" % (self.degreetype, self.city) downloadPage(xmlfile, "/tmp/weathermsn.xml").addCallback(self.downloadFinished).addErrback(self.downloadFailed) downloadPage(xmlfile1, "/tmp/weathermsn.xml").addCallback(self.downloadFinished).addErrback(self.downloadFailed)
def loadThumbnail(self, index, callback):
    print "[YTB] YouTubeEntry::loadThumbnail()"
    thumbnailUrl = self.getThumbnailUrl(index)
    if thumbnailUrl is not None and self.getYouTubeId() is not None:
        thumbnailFile = "/tmp/" + self.getYouTubeId() + "_" + str(index) + ".jpg"
        self.thumbnail[str(index)] = None
        cookie = {"entry": self, "file": thumbnailFile, "callback": callback, "index": index}
        downloadPage(thumbnailUrl, thumbnailFile).addCallback(fetchFinished, cookie).addErrback(fetchFailed, cookie)
def getTempCover(posterUrl):
    if posterUrl is not None and config.EMC.movieinfo.coversave.value:
        try:
            coverpath = "/tmp/previewCover.jpg"
            url = "http://image.tmdb.org/t/p/%s%s" % (config.EMC.movieinfo.coversize.value, posterUrl)
            downloadPage(url, coverpath).addErrback(dataError)
        except Exception, e:
            print('[EMC] MovieInfo getTempCover exception failure: ', str(e))
def getInfo(self):
    url_cover = self['list'].getCurrent()[1]
    id = self['list'].getCurrent()[3]
    if not fileExists(self.tempDir + id + ".jpg"):
        downloadPage(url_cover, self.tempDir + id + ".jpg").addCallback(self.getData, self.tempDir + id + ".jpg").addErrback(self.dataError)
    else:
        self.showCover(self.tempDir + id + ".jpg")
def __init__(self, session, image, captchaCB, dest='/tmp/captcha.png'):
    self.session = session
    self.captchaCB = captchaCB
    self.dest = dest
    if os.path.isfile(image):
        self.openCaptchaDialog(image)
    else:
        downloadPage(image, dest).addCallback(self.downloadCaptchaCB).addErrback(self.downloadCaptchaError)
def do_download(self, sourcefile, afterDownload, downloadFail):
    path = bigStorage(9000000, '/tmp', '/media/cf', '/media/usb', '/media/hdd')
    filename = os.path.join(path, 'epgimport')
    if sourcefile.endswith('.gz'):
        filename += '.gz'
    sourcefile = sourcefile.encode('utf-8')
    print>>log, "[EPGImport] Downloading: " + sourcefile + " to local path: " + filename
    downloadPage(sourcefile, filename).addCallbacks(afterDownload, downloadFail, callbackArgs=(filename, True))
    return filename
def _download(base, ongoing):
    path = os.path.join(base, ongoing['path'])
    _ensure_dir(path)
    fullname = os.path.join(path, ongoing['name']).encode('utf-8')
    url = ongoing['url'].encode('utf-8')
    downloadPage(url, fullname).addCallbacks(
        cbDownloaded, cbErrorDownload,
        callbackArgs=[base, ongoing],
        errbackArgs=[base, ongoing])
def downloadFile(self, sourcefile, afterDownload, vdownloadFail):
    path = bigStorage(2000000, '/tmp', '/media/cf', '/media/usb', '/media/hdd')
    s = sourcefile.split("/")
    filename = os.path.join(path, s[len(s) - 1])
    #self.status = "Download: " + str(self.download_active) + "," + filename + " from: " + sourcefile
    self.filename = filename
    #downloadPage(sourcefile, filename).addCallbacks(afterDownload, vdownloadFail, callbackArgs=(filename,True))
    downloadPage(sourcefile, filename).addCallbacks(afterDownload, vdownloadFail)
    return filename
def createPictures(self, content):
    for entry in content:
        if entry[1]:
            image = entry[1].rsplit('/', 1)[1]
            if image not in self.pictures:
                image = os.path.join('/tmp/', image)
                downloadPage(entry[1], image)\
                    .addCallback(boundFunction(self.downloadFinished, image))\
                    .addErrback(boundFunction(self.downloadFailed, image))
def __init__(self, session, captchaCB, imagePath, destPath='/tmp/captcha.png'):
    self.session = session
    self.captchaCB = captchaCB
    self.destPath = destPath.encode('utf-8')
    imagePath = imagePath.encode('utf-8')
    if os.path.isfile(imagePath):
        self.openCaptchaDialog(imagePath)
    else:
        downloadPage(imagePath, destPath).addCallback(self.downloadCaptchaSuccess).addErrback(self.downloadCaptchaError)
def fillEpisodeList(self, mediaList):
    for x in mediaList:
        if x[5]:
            tmp_icon = self.getThumbnailName(x[5])
            thumbnailFile = self.imagedir + tmp_icon
            if not os_path.exists(thumbnailFile):
                client.downloadPage(x[5], thumbnailFile)
    self.l.setList(mediaList)
    self.selectionChanged()
def main():
    doc_num = 1103
    doc_max = 1264
    for no in range(doc_num, doc_max + 1):
        xls_url = "http://web3.moeaboe.gov.tw/ECW/populace/content/wHandStatistics_File.ashx?statistics_id={}&serial_no=2".format(no)
        dl_path = "/tmp/{}.xls".format(no)
        downloadPage(xls_url, dl_path).addBoth(stop)
        reactor.run()
        excel_to_csv(dl_path)
def get_xmlfile(self): if self.isServerOnline(): # xmlfile = "http://weather.yahooapis.com/forecastrss?w=%s&d=10&u=c" % config.plugins.yweather.weather_city.value xmlfile = 'http://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20weather.forecast%20where%20(woeid='+config.plugins.yweather.weather_city.value+'%20and%20u=%27C%27)&format=xml' # print "[YWeather] link: %s" % xmlfile downloadPage(xmlfile, "/tmp/yweather.xml").addCallback(self.downloadFinished).addErrback(self.downloadFailed) # reactor.run() else: self["text_now"].text = _('weatherserver not respond') self.notdata = True
def selectionChanged(self, direction):
    if len(self.movies) > 0:
        self.working = True
        self.selectedEntry += direction
        if self.selectedEntry < 0:
            self.selectedEntry = len(self.movies) - 1
        elif self.selectedEntry > len(self.movies) - 1:
            self.selectedEntry = 0
        downloadPage(self.pics[self.selectedEntry], self.pic).addCallback(self.downloadPicCallback).addErrback(self.downloadPicError)
    else:
        self.downloadListError()
def downloadPoster(self):
    '''
    '''
    printl("", self, "S")
    download_url = getTranscodeUrl("ArtPoster", self.selection, str(182), str(268))
    #download_url = self.getPosterUrl()
    printl("download url " + download_url, self, "D")
    downloadPage(str(download_url), getPictureData(self.selection, self.poster_postfix)).addCallback(lambda _: self.downloadCallback())
    printl("", self, "C")
def startDownload(self):
    self["info"].setText(_("Downloading..."))
    try:
        # experimenters can put their URL here
        url = open('/tmp/buienradar.url', 'rb').readline().strip()
    except:
        # This looks okay though:
        # url = "http://buienradar.mobi/image.gif?k=1&l=0"
        url = "http://www.buienradar.nl/images.aspx?jaar=-3"
    print "[BR] URL='%s'" % url
    downloadPage(url, '/tmp/radar.gif').addCallbacks(self.afterDownload, self.downloadFail)
def downloadBackdrop(self):
    '''
    '''
    printl("", self, "S")
    download_url = getTranscodeUrl("ArtBackdrop", self.selection, str(450), str(260))
    #download_url = self.getBackdropUrl()
    printl("download url " + download_url, self, "D")
    downloadPage(download_url, getPictureData(self.selection, self.backdrop_postfix)).addCallback(lambda _: self.downloadCallback())
    printl("", self, "C")
def parseWebpage(self, data, which, type, filename, title, url, season, episode):
    self.counting += 1
    if not self.background:
        self.callback_infos("Cover(s): %s / %s - Scan: %s" % (str(self.counting), str(self.count), title))
    if type == "movie":
        list = []
        try:
            list = re.search('poster_path":"(.+?)".*?"original_title":"(.+?)"', str(data), re.S).groups(1)
        except:
            list = re.search('original_title":"(.+?)".*?"poster_path":"(.+?)"', str(data), re.S).groups(1)
        if list:
            self.guilist.append(((title, True, filename),))
            purl = "http://image.tmdb.org/t/p/%s%s" % (str(config.movielist.cover.themoviedb_coversize.value), str(list[0].replace('\\', '')))
            downloadPage(purl, filename).addCallback(self.countFound).addErrback(self.dataErrorDownload)
        else:
            self.guilist.append(((title, False, filename),))
            self.notfound += 1
            if not self.background:
                self.callback_notfound(self.notfound)
        # get description
        if config.movielist.cover.getdescription.value:
            idx = []
            idx = re.findall('"id":(.*?),', data, re.S)
            if idx:
                iurl = "http://api.themoviedb.org/3/movie/%s?api_key=8789cfd3fbab7dccf1269c3d7d867aff&language=de" % idx[0]
                getPage(iurl, headers={'Content-Type': 'application/x-www-form-urlencoded'}).addCallback(self.getInfos, id, type, filename).addErrback(self.dataError)
    elif type == "serie":
        list = []
        list = re.findall('<seriesid>(.*?)</seriesid>', data, re.S)
        if list:
            self.guilist.append(((title, True, filename),))
            purl = "http://www.thetvdb.com/banners/_cache/posters/%s-1.jpg" % list[0]
            downloadPage(purl, filename).addCallback(self.countFound).addErrback(self.dataErrorDownload)
        else:
            self.notfound += 1
            self.guilist.append(((title, False, filename),))
            if not self.background:
                self.callback_notfound(self.notfound)
        # get description
        if config.movielist.cover.getdescription.value:
            if season and episode:
                iurl = "http://www.thetvdb.com/api/2AAF0562E31BCEEC/series/%s/default/%s/%s/de.xml" % (list[0], str(int(season)), str(int(episode)))
                getPage(iurl, headers={'Content-Type': 'application/x-www-form-urlencoded'}).addCallback(self.getInfos, id, type, filename).addErrback(self.dataError)
    else:
        self.notfound += 1
        if not self.background:
            self.callback_notfound(self.notfound)
    if not self.background:
        self.callback_menulist(self.guilist)
    self.checkDone()
def getIMDB(self): self.resetLabels() if self.eventName is "": s = self.session.nav.getCurrentService() info = s.info() event = info.getEvent(0) # 0 = now, 1 = next if event: self.eventName = event.getEventName() if self.eventName is not "": self["statusbar"].setText( _("Query IMDb: %s...") % (self.eventName)) event_quoted = urllib.quote( self.eventName.decode('utf8').encode('latin-1', 'ignore')) localfile = "/tmp/imdbquery.html" fetchurl = "http://" + self.IMDBlanguage + "imdb.com/find?q=" + event_quoted + "&s=tt&site=aka" print "[IMDB] Downloading Query " + fetchurl + " to " + localfile downloadPage(fetchurl, localfile).addCallback( self.IMDBquery).addErrback(self.fetchFailed) else: self["statusbar"].setText(_("Could't get Eventname"))
def testDownloadPageError3(self):
    # make sure failures in open() are caught too. This is tricky.
    # Might only work on posix.
    tmpfile = open("unwritable", "wb")
    tmpfile.close()
    os.chmod("unwritable", 0)  # make it unwritable (to us)
    d = self.assertFailure(
        client.downloadPage(self.getURL("file"), "unwritable"),
        IOError)
    d.addBoth(self._cleanupDownloadPageError3)
    return d
def downloadToTempFile(url):
    '''
    Take a URL and return a Deferred that fires with the temp filename
    once the download has completed.
    '''
    filenum, tempfilename = tempfile.mkstemp()
    # example return value: (6, '/var/folders/6y/kjgmpy6n1kl93r4tykvrcj1h0000gn/T/tmpkb_srsl5')
    # print(tempfilename)
    os.close(filenum)
    return client.downloadPage(url, tempfilename).addCallback(
        returnFilename, tempfilename)
def downloadBackdrop(self): ''' ''' printl("", self, "S") download_url = self.extraData["fanart_image"] printl("download url " + download_url, self, "D") if download_url == "" or download_url == "/usr/lib/enigma2/python/Plugins/Extensions/DreamPlex/resources/plex.png": printl("no pic data available", self, "D") else: printl("starting download", self, "D") downloadPage( download_url, getPictureData(self.details, self.image_prefix, self.backdrop_postfix)).addCallback( lambda _: self.showBackdrop()) printl("", self, "C")
def testDownloadPageError2(self):
    class errorfile:
        def write(self, data):
            pass
        def close(self):
            raise IOError("badness happened during close")
    ef = errorfile()
    return self.assertFailure(client.downloadPage(self.getURL("file"), ef), IOError)
def download(self, cover):
    from twisted.web import client
    from .SerienRecorderHelpers import toBinary
    path = self._tempDir + str(cover['id']) + '.jpg'
    print("[SerienRecorder] Temp cover path = " + path)
    if not fileExists(path):
        print("[SerienRecorder] Downloading cover %s => %s" % (cover['url'], path))
        return client.downloadPage(toBinary(cover['url']), path)
    else:
        return True
def do_download(self, sourcefile, afterDownload, downloadFail):
    path = bigStorage(9000000, '/tmp', '/media/cf', '/media/mmc', '/media/usb', '/media/hdd')
    filename = os.path.join(path, 'epgimport')
    ext = os.path.splitext(sourcefile)[1]
    # Keep sensible extension, in particular the compression type
    if ext and len(ext) < 6:
        filename += ext
    sourcefile = sourcefile.encode('utf-8')
    sslcf = SNIFactory(sourcefile) if sourcefile.startswith('https:') else None
    print("[EPGImport] Downloading: " + sourcefile + " to local path: " + filename, file=log)
    if self.source.nocheck == 1:
        print("[EPGImport] Not checking the server since nocheck is set for it: " + sourcefile, file=log)
        downloadPage(sourcefile, filename, contextFactory=sslcf).addCallbacks(afterDownload, downloadFail, callbackArgs=(filename, True))
        return filename
    else:
        if self.checkValidServer(sourcefile) == 1:
            downloadPage(sourcefile, filename, contextFactory=sslcf).addCallbacks(afterDownload, downloadFail, callbackArgs=(filename, True))
            return filename
        else:
            self.downloadFail("checkValidServer rejected the server")
def downloadCover(self):
    try:
        os.remove(dir_tmp + 'original.jpg')
    except:
        pass
    size = []
    stream_url = ''
    desc_image = ''
    if glob.currentchannelist:
        stream_url = glob.currentchannelist[glob.currentchannelistindex][3]
        desc_image = glob.currentchannelist[glob.currentchannelistindex][5]
    if stream_url != 'None':
        imagetype = "cover"
        size = [147, 220]
        if screenwidth.width() > 1280:
            size = [220, 330]
        if desc_image and desc_image != "n/A" and desc_image != "":
            temp = dir_tmp + 'temp.jpg'
            desc_image = desc_image.encode()
            try:
                downloadPage(desc_image, temp, timeout=3).addCallback(self.checkdownloaded, size, imagetype, temp)
            except:
                if desc_image.startswith('https'):
                    desc_image = desc_image.replace('https', 'http')
                    try:
                        downloadPage(desc_image, temp, timeout=3).addCallback(
                            self.checkdownloaded, size, imagetype, temp)
                    except:
                        pass
                self.loadDefaultImage()
        else:
            self.loadDefaultImage()
    else:
        self.loadDefaultImage()
def testDownloadPageError3(self):
    if not os.geteuid():
        raise unittest.SkipTest('this does not work as root')
    # make sure failures in open() are caught too. This is tricky.
    # Might only work on posix.
    tmpfile = open("unwritable", "wb")
    tmpfile.close()
    os.chmod("unwritable", 0)  # make it unwritable (to us)
    d = self.assertFailure(
        client.downloadPage(self.getURL("file"), "unwritable"),
        IOError)
    d.addBoth(self._cleanupDownloadPageError3)
    return d
def download_file(self, data):
    """
    Helper function to download the module as a zip file.
    """
    logger.debug("download_file data: {data}", data=data)
    download_uri = data['download_uri']
    zip_file = data['zip_file']
    logger.debug("getting uri: {download_uri} saving to:{zip_file}",
                 download_uri=download_uri, zip_file=zip_file)
    d = downloadPage(data['download_uri'], data['zip_file'])
    return d
def updateMenu(self):
    self.tmplist = []
    if len(self.mediaList) > 0:
        pos = 0
        for x in self.mediaList:
            self.tmplist.append(MPanelEntryComponent(channel=x[self.UG_CHANNELNAME], text=(x[self.UG_PROGNAME] + '\n' + x[self.UG_PROGDATE] + '\n' + x[self.UG_SHORT_DESCR]), png=self.png))
            tmp_icon = self.getThumbnailName(x)
            thumbnailFile = self.imagedir + tmp_icon
            self.pixmaps_to_load.append(tmp_icon)
            if not self.Details.has_key(tmp_icon):
                self.Details[tmp_icon] = {'thumbnail': None}
            if x[self.UG_ICON] != '':
                if os_path.exists(thumbnailFile) == True:
                    self.fetchFinished(True, picture_id=tmp_icon, failed=False)
                else:
                    if config.plugins.OpenUitzendingGemist.showpictures.value:
                        client.downloadPage(x[self.UG_ICON], thumbnailFile).addCallback(self.fetchFinished, tmp_icon).addErrback(self.fetchFailed, tmp_icon)
            pos += 1
    self["list"].setList(self.tmplist)
def executeFetch(self, uuid, modality):
    """Send HTTPS GET request for downloading trace file from a database service."""
    try:
        global cachepath
        self.url = modality.datasource.url + "?" + urllib.urlencode(modality.datasource.queryparameters)
        self.extension = modality.datasource.extension
        self.filename = cachepath + '/' + str(uuid) + '.' + self.extension
        self.deferred = downloadPage(self.url, self.filename, contextFactory=self.contextFactory).addCallback(self.success, uuid, modality)
        tracelayer.log("HTTPSFileDownloadRequestHandler-executeFetch-timestamp:", str(modality.name) + " " + str("%0.20f" % time.time()) + " " + str(uuid))
        return defer.gatherResults([self.deferred])
    except:
        traceback.print_exc()
def launch_webapp(self, request, handler, package, url, icon_url):
    def download_callback(result):
        log.msg('Icon saved: ~/.local/share/icons/%s.png' % package)
    icon_base_path = '%s/.local/share/icons' % os.getenv('HOME')
    if not os.path.exists(icon_base_path):
        os.makedirs(icon_base_path)
    downloadPage(str(icon_url), os.path.join(icon_base_path, '%s.png' % package), timeout=30).addCallback(download_callback)
    args = ''
    if self._is_guest() or self._is_live():
        args = '--incognito'
    if os.path.exists('/usr/bin/jolicloud-webapps-engine'):
        self.launch(
            request, handler,
            'jolicloud-webapps-engine %s --app=%s --icon-id=%s' % (args, str(url), str(package)))
    else:
        self.launch(request, handler, 'google-chrome %s --app=%s' % (args, str(url)))
def download_page_to_tmp(self, key):
    """"""
    url = benchmark_url_dict[key]
    tmpfd, tmpname = tempfile.mkstemp()
    os.close(tmpfd)
    self.benchmark_bandwidth[key]['start_time'] = time.time()
    defer_ = client.downloadPage(url, tmpname)
    defer_.addCallback(self.finish_page_download, key, tmpname)
    defer_.addErrback(self.handler_err)
    return defer_
def do_download(self, sourcefile, afterDownload, downloadFail):
    path = bigStorage(9000000, '/tmp', '/media/DOMExtender', '/media/cf', '/media/mmc', '/media/usb', '/media/hdd')
    filename = os.path.join(path, 'epgimport')
    ext = os.path.splitext(sourcefile)[1]
    # Keep sensible extension, in particular the compression type
    if ext and len(ext) < 6:
        filename += ext
    # sourcefile = sourcefile.encode('utf-8')
    sourcefile = str(sourcefile)
    print("[EPGImport] Downloading: " + str(sourcefile) + " to local path: " + str(filename), file=log)
    ip6 = sourcefile6 = None
    if has_ipv6 and version_info >= (2, 7, 11) and ((version.major == 15 and version.minor >= 5) or version.major >= 16):
        host = sourcefile.split('/')[2]
        # getaddrinfo throws exception on literal IPv4 addresses
        try:
            ip6 = getaddrinfo(host, 0, AF_INET6)
            sourcefile6 = sourcefile.replace(host, '[' + list(ip6)[0][4][0] + ']')
        except:
            pass
    if ip6:
        print("[EPGImport] Trying IPv6 first: " + str(sourcefile6), file=log)
        if sourcefile.startswith("https") and sslverify:
            parsed_uri = urlparse(sourcefile)
            domain = parsed_uri.hostname
            sniFactory = SNIFactory(domain)
            if pythonVer == 3:
                sourcefile6 = sourcefile6.encode()
            downloadPage(sourcefile6, filename, sniFactory, headers={'host': host}).addCallback(afterDownload, filename, True).addErrback(self.legacyDownload, afterDownload, downloadFail, sourcefile, filename, True)
        else:
            if pythonVer == 3:
                sourcefile6 = sourcefile6.encode()
            downloadPage(sourcefile6, filename, headers={'host': host}).addCallback(afterDownload, filename, True).addErrback(self.legacyDownload, afterDownload, downloadFail, sourcefile, filename, True)
    else:
        print("[EPGImport] No IPv6, using IPv4 directly: " + str(sourcefile), file=log)
        if sourcefile.startswith("https") and sslverify:
            parsed_uri = urlparse(sourcefile)
            domain = parsed_uri.hostname
            sniFactory = SNIFactory(domain)
            if pythonVer == 3:
                sourcefile = sourcefile.encode()
            downloadPage(sourcefile, filename, sniFactory).addCallbacks(afterDownload, downloadFail, callbackArgs=(filename, True))
        else:
            if pythonVer == 3:
                sourcefile = sourcefile.encode()
            downloadPage(sourcefile, filename).addCallbacks(afterDownload, downloadFail, callbackArgs=(filename, True))
    return filename
def downloadCover(self):
    try:
        os.remove(dir_tmp + 'original.jpg')
    except:
        pass
    size = [147, 220]
    if screenwidth.width() > 1280:
        size = [220, 330]
    if glob.currentchannelist:
        stream_url = glob.currentchannelist[glob.currentchannelistindex][3]
        desc_image = glob.currentchannelist[glob.currentchannelistindex][5]
        if stream_url != 'None':
            imagetype = "cover"
            if desc_image and desc_image != "n/A":
                temp = dir_tmp + 'temp.jpg'
                try:
                    if desc_image.startswith("https") and sslverify:
                        parsed_uri = urlparse(desc_image)
                        domain = parsed_uri.hostname
                        sniFactory = SNIFactory(domain)
                        if pythonVer == 3:
                            desc_image = desc_image.encode()
                        downloadPage(desc_image, temp, sniFactory, timeout=3).addCallback(
                            self.checkdownloaded, size, imagetype, temp)
                    else:
                        if pythonVer == 3:
                            desc_image = desc_image.encode()
                        downloadPage(desc_image, temp, timeout=3).addCallback(
                            self.checkdownloaded, size, imagetype, temp)
                except:
                    self.loadDefaultImage()
            else:
                self.loadDefaultImage()
def load_poster(self):
    global tmp_image
    if self.index == 'cat':
        descriptionX = self['menulist'].getCurrent()[0][4]
        print('description: ', descriptionX)
        self['text'].setText(descriptionX)
    else:
        self['text'].setText('')
    jp_link = self['menulist'].getCurrent()[0][2]
    tmp_image = jpg_store = '/tmp/filmon/poster.png'
    if tmp_image != None or idx != -1:
        pixmaps = six.ensure_binary(jp_link)
        print("debug: pixmaps:", pixmaps)
        print("debug: pixmaps:", type(pixmaps))
        path = urlparse(pixmaps).path
        ext = splitext(path)[1]
        tmp_image = b'/tmp/posterx' + ext
        if file_exists(tmp_image):
            tmp_image = b'/tmp/posterx' + ext
        else:
            m = hashlib.md5()
            m.update(pixmaps)
            tmp_image = m.hexdigest()
        try:
            if pixmaps.startswith(b"https") and sslverify:
                parsed_uri = urlparse(pixmaps)
                domain = parsed_uri.hostname
                sniFactory = SNIFactory(domain)
                if PY3 == 3:
                    pixmaps = pixmaps.encode()
                print('uurrll: ', pixmaps)
                downloadPage(pixmaps, tmp_image, sniFactory, timeout=5).addCallback(self.downloadPic, tmp_image).addErrback(self.downloadError)
            else:
                downloadPage(pixmaps, tmp_image).addCallback(self.downloadPic, tmp_image).addErrback(self.downloadError)
        except Exception as e:
            print(str(e))
            print("Error: can't find file or read data")
    return
def load(self, url=None):
    if url is None:
        self.instance.setPixmap(None)
        return
    url = isinstance(url, unicode) and url.encode('utf-8') or url
    if os_isfile(url):
        self.tmpfile = url
        self.onLoadFinished(None)
        return
    tmpfile = ''.join((self.cachedir, quote_plus(url), '.jpg'))
    if os_path_isdir(self.cachedir) is False:
        print "cachedir not existing, creating it"
        os_mkdir(self.cachedir)
    if os_isfile(tmpfile):
        self.tmpfile = tmpfile
        self.onLoadFinished(None)
    elif url is not None:
        self.tmpfile = tmpfile
        agt = "Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.0.2) Gecko/2008091620 Firefox/3.0.2"
        downloadPage(url, self.tmpfile, agent=agt).addCallback(self.onLoadFinished).addErrback(self.onLoadFailed)
    elif self.default:
        self.picload.startDecode(self.default)
def downloadPhoto(self, photo, thumbnail=False):
    if not photo:
        return
    cache = os.path.join(self.cache, 'thumb', photo.albumid.text) if thumbnail else os.path.join(self.cache, photo.albumid.text)
    try:
        os.makedirs(cache)
    except OSError:
        pass
    url = photo.media.thumbnail[0].url if thumbnail else photo.media.content[0].url
    filename = url.split('/')[-1]
    fullname = os.path.join(cache, filename)
    d = Deferred()
    # file exists, assume it's valid...
    if os.path.exists(fullname):
        reactor.callLater(0, d.callback, (fullname, photo))
    else:
        downloadPage(six.ensure_binary(url), fullname).addCallbacks(
            lambda value: d.callback((fullname, photo)),
            lambda error: d.errback((error, photo)))
    return d
def maybe_download_ca_cert(self, ignored):
    """
    :rtype: deferred
    """
    path = self._get_ca_cert_path()
    if is_file(path):
        return defer.succeed('ca_cert_path_already_exists')
    uri = self._get_ca_cert_uri()
    mkdir_p(os.path.split(path)[0])
    d = downloadPage(uri, path)
    d.addErrback(log.err)
    return d
def got_response(self, result):
    convert_from = convert_to = ''
    result = etree.fromstring(result)
    image_tag = result.find(
        './/{%s}%s' % (aws_ns, aws_image_size.get(self.image_size, 'large')))
    if image_tag is not None:
        image_url = image_tag.findtext('{%s}URL' % aws_ns)
        if self.filename is None:
            d = client.getPage(image_url)
        else:
            _, file_ext = os.path.splitext(self.filename)
            if file_ext == '':
                _, image_ext = os.path.splitext(image_url)
                if image_ext != '':
                    self.filename = ''.join((self.filename, image_ext))
            else:
                _, image_ext = os.path.splitext(image_url)
                if image_ext != '' and file_ext != image_ext:
                    #print "hmm, we need a conversion..."
                    convert_from = image_ext
                    convert_to = file_ext
            if len(convert_to):
                d = client.getPage(image_url)
            else:
                d = client.downloadPage(image_url, self.filename)
        d.addCallback(self.got_image, convert_from=convert_from, convert_to=convert_to)
        d.addErrback(self.got_error, image_url)
    else:
        if self._errcall is not None:
            if isinstance(self._errcall, tuple):
                if len(self._errcall) == 3:
                    c, a, kw = self._errcall
                    if not isinstance(a, tuple):
                        a = (a, )
                    c(*a, **kw)
                if len(self._errcall) == 2:
                    c, a = self._errcall
                    if isinstance(a, dict):
                        c(**a)
                    else:
                        if not isinstance(a, tuple):
                            a = (a, )
                        c(*a)
                if len(self._errcall) == 1:
                    c = self._errcall
                    c()
            else:
                self._errcall()
def showDetails(self):
    try:
        self["ratinglabel"].show()
        self["detailslabel"].show()
        self["baseFilmInfo"].show()
        self["poster"].show()
        self["statusbar"].hide()
        self["menu"].hide()
        if self.resultlist and self.Page == 0:
            if not self.unikatni:
                self.link = self["menu"].getCurrent()[1]
                self.nazevkomplet = self["menu"].getCurrent()[0]
            self.unikatni = False
            self["statusbar"].setText("Downloading movie information: '%s'" % (self.link))
            localfile = os.path.join(config.plugins.archivCZSK.tmpPath.value, "archivCSFDquery2.html")
            fetchurl = "https://www.csfd.cz/film/" + self.link + "/komentare/?all=1" + str(randint(1000, 9999))
            downloadPage(fetchurl, localfile).addCallback(self.CSFDquery2).addErrback(self.fetchFailed)
            self["menu"].hide()
            self.resetLabels()
            self.setTitle(self.nazevkomplet)
            #self["titlelabel"].show()
            self.Page = 1
        if self.Page == 2:
            #self["titlelabel"].show()
            self["extralabel"].hide()
            self["poster"].show()
            if self.ratingstars > 0:
                self["starsbg"].show()
                self["stars"].show()
                self["stars"].setValue(self.ratingstars)
            self.Page = 1
    except:
        self["statusbar"].show()
        self["statusbar"].setText("Fatal ERROR")
        log.logError("Action showDetails failed.\n%s" % traceback.format_exc())
def downloadThumbnail(self, thumbUrl): if thumbUrl is not None: thumbID = thumbUrl.rsplit("/", 1)[1] thumbFile = None if not thumbUrl.startswith("http://"): thumbUrl = "%s%s" % (MAIN_PAGE, thumbUrl) try: req = Request(thumbUrl) url_handle = urlopen2(req) headers = url_handle.info() contentType = headers.getheader("content-type") except: contentType = None if contentType: if 'image/jpeg' in contentType: thumbFile = "/tmp/" + thumbID + ".jpg" elif 'image/gif' in contentType: thumbID = None # thumbFile = "/tmp/" + thumbID + ".gif" elif 'image/png' in contentType: thumbFile = "/tmp/" + thumbID + ".png" else: print("[ZDF Mediathek] Unknown thumbnail content-type:", contentType) if thumbFile is not None: if (os_path.exists(thumbFile) == True): #already downloaded self.downloadThumbnailCallback(None, thumbFile, thumbID) else: if self.png_cache.get(thumbID, None) is None: downloadPage(six.ensure_binary(thumbUrl), thumbFile).addCallback( self.downloadThumbnailCallback, thumbFile, thumbID).addErrback( self.downloadThumbnailError, thumbID) else: self.updateEntry(thumbID, thumbFile)
def loadPic(self, sTitle=None):
    try:
        self.stimer.stop()
        self.stimer.callback.remove(self.slideshow)
    except:
        pass
    if sTitle is None or sTitle.strip() == '':
        try:
            streamPic = self.playlist[self.playIdx][2]
        except:
            streamPic = None
        if streamPic is None or streamPic == '':
            if os.path.exists(PLUGIN_PATH + '/skin/micons/TuneinRadio.png'):
                cover = PLUGIN_PATH + '/skin/micons/TuneinRadio.png'
                os.system('cp ' + cover + ' /tmp/cover.jpg')
                copyfile(cover, '/tmp/cover.jpg')
            self.ShowCover(streamPic)
        else:
            downloadPage(streamPic, '/tmp/cover.jpg').addCallback(
                self.ShowCover).addErrback(self.showerror)
    else:
        sTitle = sTitle.replace('\n', ' ')
        sTitle = sTitle.replace('-', ' ')
        sTitle = sTitle[:50]
        sTitle = sTitle.replace(' ', '+')
        gimage_url = 'https://www.google.co.in/search?q=' + sTitle + '&source=lnms&tbm=isch'
        print 'gimage_urlxxx', gimage_url, sTitle
        self.gimage_url = gimage_url
        self.all_images = []
        link, self.all_images = getfirst_image(gimage_url, self.imageindex)
        if link is None:
            return
        self.ShowCover2(link)
        self.startslideshow()
    return
def downloadBackdrop(self): ''' ''' printl("", self, "S") download_url = self.extraData["fanart_image"] download_url = download_url.replace('&width=560&height=315', '&width=1280&height=720') #http://192.168.45.190:32400/photo/:/transcode?url=http%3A%2F%2Flocalhost%3A32400%2Flibrary%2Fmetadata%2F6209%2Fart%2F1354571799&width=560&height=315' printl("download url " + download_url, self, "D") if download_url == "" or download_url == "/usr/lib/enigma2/python/Plugins/Extensions/DreamPlex/resources/plex.png": printl("no pic data available", self, "D") else: printl("starting download", self, "D") downloadPage( download_url, getPictureData(self.details, self.image_prefix, self.backdrop_postfix)).addCallback( lambda _: self.showBackdrop()) printl("", self, "C")
def getIMDB(self):
    self.resetLabels()
    if not self.eventName:
        s = self.session.nav.getCurrentService()
        info = s and s.info()
        event = info and info.getEvent(0)  # 0 = now, 1 = next
        if event:
            self.eventName = event.getEventName()
    if self.eventName:
        self["statusbar"].setText(_("Query IMDb: %s...") % (self.eventName))
        event_quoted = quoteEventName(self.eventName)
        localfile = "/tmp/imdbquery.html"
        fetchurl = "http://imdb.com/find?q=" + event_quoted + "&s=tt&site=aka"
        #if self.IMDBlanguage:
        #    fetchurl = "http://" + self.IMDBlanguage + "imdb.com/find?q=" + event_quoted + "&s=tt&site=aka"
        #else:
        #    fetchurl = "http://akas.imdb.com/find?s=tt;mx=20;q=" + event_quoted
        print("[IMDB] Downloading Query " + fetchurl + " to " + localfile)
        downloadPage(fetchurl, localfile).addCallback(
            self.IMDBquery).addErrback(self.fetchFailed)
    else:
        self["statusbar"].setText(_("Couldn't get Eventname"))
def do_download(self, sourcefile, afterDownload, downloadFail):
    path = bigStorage(9000000, '/tmp', '/media/DOMExtender', '/media/cf', '/media/mmc', '/media/usb', '/media/hdd')
    filename = os.path.join(path, 'epgimport')
    ext = os.path.splitext(sourcefile)[1]
    # Keep sensible extension, in particular the compression type
    if ext and len(ext) < 6:
        filename += ext
    sourcefile = sourcefile.encode('utf-8')
    print>>log, "[EPGImport] Downloading: " + sourcefile + " to local path: " + filename
    host = sourcefile.split("/")[2]
    ip6 = None
    try:
        ip6 = getaddrinfo(host, 0, AF_INET6)
    except:
        pass
    if ip6 and has_ipv6 and version_info >= (2, 7, 11) and ((version.major == 15 and version.minor >= 5) or version.major >= 16):
        sourcefile6 = sourcefile.replace(host, "[" + list(ip6)[0][4][0] + "]")
        print>>log, "[EPGImport] Trying IPv6 first: " + sourcefile6
        downloadPage(sourcefile6, filename, headers={'host': host}).addCallback(afterDownload, filename, True).addErrback(self.legacyDownload, afterDownload, downloadFail, sourcefile, filename, True)
    else:
        print>>log, "[EPGImport] No IPv6, using IPv4 directly: " + sourcefile
        downloadPage(sourcefile, filename).addCallbacks(afterDownload, downloadFail, callbackArgs=(filename, True))
    return filename
def loadFromUrl(self, url, destPath):
    def loadSuccess(callback=None):
        self.__currentUrl = destPath
        self.picload.startDecode(destPath)

    def loadFailed(failure):
        failure.printException()
        if self.instance:
            self.load(self.default)

    agent = "Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.0.2) Gecko/2008091620 Firefox/3.0.2"
    d = downloadPage(url, destPath, agent=agent)
    d.addCallback(loadSuccess)
    d.addErrback(loadFailed)
def test_downloadTimeout(self):
    """
    If the timeout indicated by the C{timeout} parameter to
    L{client.HTTPDownloader.__init__} elapses without the complete response
    being received, the L{defer.Deferred} returned by L{client.downloadPage}
    fires with a L{Failure} wrapping a L{defer.TimeoutError}.
    """
    self.cleanupServerConnections = 2

    # Verify the behavior if no bytes are ever written.
    first = client.downloadPage(
        self.getURL("wait"), self.mktemp(), timeout=0.01)

    # Verify the behavior if some bytes are written but then the request
    # never completes.
    second = client.downloadPage(
        self.getURL("write-then-wait"), self.mktemp(), timeout=0.01)

    return defer.gatherResults([
        self.assertFailure(first, defer.TimeoutError),
        self.assertFailure(second, defer.TimeoutError)])
def _download(self, charm_url, cache_path):
    url = "%s/charm/%s" % (self.url_base, urllib.quote(charm_url.path))
    downloads = os.path.join(self.cache_path, "downloads")
    _makedirs(downloads)
    f = tempfile.NamedTemporaryFile(prefix=_cache_key(charm_url), suffix=".part",
                                    dir=downloads, delete=False)
    f.close()
    downloading_path = f.name
    try:
        yield downloadPage(url, downloading_path)
    except Error:
        raise CharmNotFound(self.url_base, charm_url)
    os.rename(downloading_path, cache_path)