def get_reader_updater_pkg_url(self, major_version):
    '''Returns download URL for Adobe Reader Updater pkg'''
    try:
        url_handle = urlopen(
            AR_UPDATER_BASE_URL + AR_MANIFEST_TEMPLATE % major_version)
        version_string = url_handle.read()
        url_handle.close()
    except Exception as err:
        raise ProcessorError("Can't open manifest template: %s" % (err))
    os_maj, os_min = self.env["os_version"].split(".")
    version_string = version_string.replace(AR_MAJREV_IDENTIFIER, major_version)
    version_string = version_string.replace(OSX_MAJREV_IDENTIFIER, os_maj)
    version_string = version_string.replace(OSX_MINREV_IDENTIFIER, os_min)
    version_string = version_string.replace(AR_PROD_IDENTIFIER, AR_PROD)
    version_string = version_string.replace(AR_PROD_ARCH_IDENTIFIER, AR_PROD_ARCH)
    try:
        url_handle = urlopen(AR_UPDATER_BASE_URL + version_string)
        plist = plistlib.readPlistFromString(url_handle.read())
        url_handle.close()
    except Exception as err:
        raise ProcessorError("Can't get or read manifest: %s" % (err))
    url = AR_UPDATER_DOWNLOAD_URL2 + plist['PatchURL']
    return url
def get_reader_updater_dmg_url(self, major_version):
    '''Returns download URL for Adobe Reader Updater DMG'''
    try:
        url_handle = urlopen(
            AR_UPDATER_BASE_URL + AR_URL_TEMPLATE % major_version)
        version_string = url_handle.read()
        url_handle.close()
    except Exception as err:
        raise ProcessorError("Can't open URL template: %s" % (err))
    os_maj, os_min = self.env["os_version"].split(".")
    version_string = version_string.replace(AR_MAJREV_IDENTIFIER, major_version)
    version_string = version_string.replace(OSX_MAJREV_IDENTIFIER, os_maj)
    version_string = version_string.replace(OSX_MINREV_IDENTIFIER, os_min)
    try:
        url_handle = urlopen(AR_UPDATER_BASE_URL + version_string)
        version = url_handle.read()
        url_handle.close()
    except Exception as err:
        raise ProcessorError("Can't get version string: %s" % (err))
    versioncode = version.replace('.', '')
    url = AR_UPDATER_DOWNLOAD_URL % (major_version, version, versioncode)
    return (url, version)
def get_lightroom_dmg_url(self, check_url, major_version, platform, lang):
    """Get the .dmg url.

    Go to the product page, which provides a link to the download page.
    Then the download page has to be scraped for the dmg link.
    """
    product_url = LIGHTROOM_PRODUCT_URL % (major_version, platform, lang)
    try:
        url_handle = urlopen(product_url)
        html_response = url_handle.read()
        url_handle.close()
    except Exception as e:
        raise ProcessorError(
            "Can't get Lightroom product page using url %s" % product_url)
    # Match the link to the download page
    download_url_match = re.compile(RE_HTML_DOWNLOAD_URL).search(
        html_response)
    if download_url_match:
        download_url = download_url_match.group(1)
    else:
        raise ProcessorError(
            "Can't get Lightroom download URL from product site.")
    download_url_fqdn = ADOBE_DOWNLOAD_PREFIX + '/' + unquote(
        download_url).replace('&amp;', '&')
    # Now get the href of the actual dmg from the thank-you page,
    # so request the download page.
    self.output('Request download page: %s' % download_url_fqdn)
    try:
        url_handle = urlopen(download_url_fqdn)
        html_response = url_handle.read()
        url_handle.close()
    except Exception as e:
        raise ProcessorError(
            "Can't get Lightroom download page using url %s"
            % download_url_fqdn)
    dmg_url_match = re.compile(RE_HTML_DOWNLOAD_URL).search(html_response)
    if dmg_url_match:
        dmg_url = dmg_url_match.group(1)
    else:
        raise ProcessorError("Can't get Lightroom dmg link")
    return dmg_url
def get_xmind_url(self, base_url):
    try:
        url = urlopen(base_url).read()
        return url
    except BaseException as err:
        raise Exception("Can't read %s: %s" % (base_url, err))
def get_video_info(self, video_url):
    html_doc = ''
    video_info = {}
    try:
        logging.info(video_url)
        html_doc = urllib.urlopen(video_url).read()
    except:
        logging.error('Unable to connect to server. Please try again after using a proxy.')
    video_info = self.parser_video_info_html(html_doc)
    video_info['URL'] = video_url
    return video_info
def get_url_response(self, url):
    '''Get data from a url'''
    # pylint: disable=no-self-use
    try:
        url_handle = urlopen(url)
        response = url_handle.read()
        url_handle.close()
    except Exception as err:
        raise ProcessorError(
            "Can't read response from URL %s: %s" % (url, err))
    return response
def get_version(self, FEED_URL):
    """Parse the macadmins.software/versions.xml feed for the latest O365 version number"""
    try:
        raw_xml = urlopen(FEED_URL)
        xml = raw_xml.read()
    except Exception as e:
        raise ProcessorError("Can't download %s: %s" % (FEED_URL, e))
    root = ET.fromstring(xml)
    latest = root.find('latest')
    for vers in root.iter('latest'):
        version = vers.find('o365').text
    return version
def xhr_release_info(self, product_code):
    """Request release information from JetBrains XHR endpoint"""
    url = RELEASE_XHR_ENDPOINT.format(product_code, "123123123")
    try:
        handle = urlopen(url)
        response = handle.read()
        handle.close()
    except Exception as e:
        raise ProcessorError(
            "Cannot retrieve product information from JetBrains")
    product_info = json.loads(response)
    return product_info[product_code][0]
def get_video_url_list(self):
    logging.info('Start getting video list.')
    self.is_last_page = False
    html_doc = ''
    video_url_list = []
    while not self.is_last_page:
        try:
            logging.info(self.next_page_url)
            html_doc = urllib.urlopen(self.next_page_url).read()
        except:
            logging.error('Unable to connect to server. Please try again after using a proxy.')
        video_url_list += self.parser_video_url_list_html(html_doc)
    logging.info('Total ' + str(len(video_url_list)) + ' video urls.')
    return video_url_list
def getLatestFilemakerInstaller(self):
    version_str = self.env.get("major_version")
    try:
        f = urlopen(UPDATE_FEED)
        data = f.read()
        f.close()
    except Exception as e:
        raise ProcessorError("Can't get to Filemaker Updater feed: %s" % e)
    metadata = json.loads(data)
    # extract all the Mac updates
    mac_updates = self.extractMacUpdates(metadata)
    mac_updates = self.filterOutServerUpdates(mac_updates)
    mac_updates = self.extractMajorUpdates(mac_updates)
    update = self.findLatestUpdate(mac_updates)
    return update
def get_reader_updater_dmg_url(self, major_version):
    '''Returns download URL for Adobe Reader Updater DMG'''
    try:
        url_handle = urlopen(
            AR_UPDATER_BASE_URL + AR_URL_TEMPLATE % major_version)
        version_string = url_handle.read()
        url_handle.close()
    except Exception as err:
        raise ProcessorError("Can't open URL template: %s" % (err))
    version_string = version_string.replace(AR_MAJREV_IDENTIFIER, major_version)
    versioncode = version_string.replace('.', '')
    readerversion = "AcroRdrDC"
    url = AR_UPDATER_DOWNLOAD_URL % (
        major_version, versioncode, readerversion, versioncode)
    return (url, version_string)
def main(self):
    """Return a download URL for a PuppetLabs item"""
    download_url = DL_INDEX
    prod = self.env["product_name"]
    if prod == 'agent':
        os_version = self.env.get("get_os_version", OS_VERSION)
        version_re = r"\d+\.\d+\.\d+"
        # e.g.: 10.10/PC1/x86_64/puppet-agent-1.2.5-1.osx10.10.dmg
        download_url += str('/' + os_version + "/PC1/x86_64")
        re_download = ("href=\"(puppet-agent-(%s)-1.osx(%s).dmg)\""
                       % (version_re, os_version))
    else:
        # look for "product-1.2.3.dmg"
        # skip anything with a '-' following the version no. ('rc', etc.)
        version_re = self.env.get("get_version")
        if not version_re or version_re == DEFAULT_VERSION:
            version_re = r"\d+[\.\d]+"
        re_download = ("href=\"(%s-(%s)+.dmg)\""
                       % (self.env["product_name"].lower(), version_re))
    try:
        data = urlopen(download_url).read()
    except Exception as err:
        raise ProcessorError(
            "Unexpected error retrieving download index: '%s'" % err)
    # (dmg, version)
    candidates = re.findall(re_download, data)
    if not candidates:
        raise ProcessorError(
            "Unable to parse any products from download index.")
    # sort to get the highest version
    highest = candidates[0]
    if len(candidates) > 1:
        for prod in candidates:
            if LooseVersion(prod[1]) > LooseVersion(highest[1]):
                highest = prod
    ver, url = highest[1], "%s/%s" % (download_url, highest[0])
    self.env["version"] = ver
    self.env["url"] = url
    self.output("Found URL %s" % self.env["url"])
def main(self):
    '''Find the download URL'''

    def compare_version(this, that):
        '''compare LooseVersions'''
        return cmp(LooseVersion(this), LooseVersion(that))

    prod = self.env.get("product_name")
    if prod not in URLS:
        raise ProcessorError(
            "product_name %s is invalid; it must be one of: %s"
            % (prod, ', '.join(URLS)))
    url = URLS[prod]
    try:
        manifest_str = urlopen(url).read()
    except Exception as err:
        raise ProcessorError(
            "Unexpected error retrieving product manifest: '%s'" % err)
    try:
        plist = plistlib.readPlistFromString(manifest_str)
    except Exception as err:
        raise ProcessorError(
            "Unexpected error parsing manifest as a plist: '%s'" % err)
    entries = plist.get("SUFeedEntries")
    if not entries:
        raise ProcessorError(
            "Expected 'SUFeedEntries' manifest key wasn't found.")
    sorted_entries = sorted(
        entries,
        key=itemgetter("SUFeedEntryShortVersionString"),
        cmp=compare_version)
    metadata = sorted_entries[-1]
    url = metadata["SUFeedEntryDownloadURL"]
    min_os_version = metadata["SUFeedEntryMinimumSystemVersion"]
    version = metadata["SUFeedEntryShortVersionString"]
    self.env["version"] = version
    self.env["minimum_os_version"] = min_os_version
    self.env["url"] = url
    self.output("Found URL %s" % self.env["url"])
def get_praat_dmg_url(self, base_url):
    """Find the download URL in the HTML returned from the base_url"""
    arch = self.env.get('arch_edition', '32')
    re_praat_dmg = re.compile(PRAAT_DMG_RE.format(arch))
    # Read HTML index.
    try:
        fref = urlopen(base_url)
        html = fref.read()
        fref.close()
    except Exception as err:
        raise ProcessorError("Can't download %s: %s" % (base_url, err))
    # Search for download link.
    match = re_praat_dmg.search(html)
    if not match:
        raise ProcessorError("Couldn't find Praat download URL in %s"
                             % base_url)
    # Return URL.
    url = PRAAT_BASE_URL.rsplit("/", 1)[0] + "/" + match.group("url")
    return url
def get_lightroom_update_info(self, check_url, major_version, lang):
    """Request the Lightroom updates LUA script, which contains the
    download URL and localised update string.

    Currently unused.
    """
    check_url_version = check_url % major_version
    try:
        url_handle = urlopen(check_url_version)
        lua_response = url_handle.read()
        url_handle.close()
    except Exception as e:
        raise ProcessorError(
            "Can't get Lightroom update information for version %s, "
            "using check url %s" % (major_version, check_url_version))
    v_match = re.compile(RE_VERSION, re.MULTILINE).search(lua_response)
    if v_match:
        version = v_match.group(1)
        self.output("Got version %s" % version)
    else:
        self.output("Could not find version string on update url")
    product_url_match = re.compile(RE_PRODUCT_URL).search(lua_response)
    if product_url_match:
        product_url = product_url_match.group(1)
        self.output("Got product url %s" % product_url)
    else:
        raise ProcessorError(
            "Can't get Lightroom product URL from update text.")
    description_match = re.compile(
        RE_LOCAL_DESCRIPTION % lang, re.MULTILINE).search(lua_response)
    if description_match:
        update_description = description_match.group(1)
        self.output("Got update description %s" % update_description)
    else:
        self.output("Could not find update description")
def get_meta_plist(self, product_name):
    url = BASE_URL + product_name + ".plist"
    try:
        urlfd = urlopen(url)
        plist_data = urlfd.read()
        urlfd.close()
    except Exception as e:
        raise ProcessorError(
            "Could not download HairerSoft metadata plist, error: %s" % e)
    httpcode = urlfd.getcode()
    if httpcode != 200:
        raise ProcessorError(
            "Got HTTP error %s trying to download URL %s" % (httpcode, url))
    try:
        plist = plistlib.readPlistFromString(plist_data)
    except Exception as e:
        raise ProcessorError("Error parsing metadata plist! Error: %s" % e)
    return plist
def get_url(self, version_series):
    try:
        fusion_url = FUSION_URL_BASE + '/fusion.xml'
        f = urlopen(fusion_url)
        fusion_xml = f.read()
        f.close()
    except Exception as e:
        raise ProcessorError('Could not retrieve XML feed %s' % fusion_url)
    build_re = re.compile(r'^fusion\/([\d\.]+)\/(\d+)\/')
    last_build_no = 0
    last_url_part = None
    fusion_feed = parseString(fusion_xml)
    for i in fusion_feed.getElementsByTagName('metadata'):
        productId = i.getElementsByTagName(
            'productId')[0].firstChild.nodeValue
        version = i.getElementsByTagName('version')[0].firstChild.nodeValue
        url = i.getElementsByTagName('url')[0].firstChild.nodeValue
        if productId == 'fusion' and version == version_series:
            match = build_re.search(url)
            if match:
                build_ver = match.group(1)
                # compare build numbers numerically, not as strings
                build_no = int(match.group(2))
                url_part = match.group(0)
                if build_no > last_build_no:
                    last_build_no = build_no
                    last_url_part = url_part
    if last_url_part:
        return FUSION_URL_BASE + last_url_part + DARWIN_TOOLS_URL_APPEND
    else:
        raise ProcessorError('Could not find suitable version/build')
def main(self):
    prod = self.env.get("product_name")
    if prod not in URLS:
        raise ProcessorError(
            "product_name %s is invalid; it must be one of: %s"
            % (prod, ', '.join(URLS)))
    url = URLS[prod]
    valid_plats = PLATS
    plat = self.env.get("platform_name")
    if plat not in valid_plats:
        raise ProcessorError(
            "platform_name %s is invalid; it must be one of: %s"
            % (plat, valid_plats))
    try:
        self.env["object"] = urlopen(url).read()
    except BaseException as err:
        raise ProcessorError(
            "Unexpected error retrieving product manifest: '%s'" % err)
    substring_version = '<li>Version: ([0-9]+[.]?[0-9]*)'
    substring_buildid = 'Build id: <a href="/build_history.html#[0-9]{10}">([0-9]{10})</a>'
    version = re.search(substring_version, self.env["object"])
    buildid = re.search(substring_buildid, self.env["object"])
    self.env["version"] = version.group(1)
    self.env["buildid"] = buildid.group(1)
    if plat == 'Eclipse':
        download_url = (
            "https://www.oxygenxml.com/InstData/%s/%s/com.oxygenxml.%s_%s.0.v%s.zip"
            % (prod, plat, prod.lower(), self.env["version"],
               self.env["buildid"]))
    elif prod == 'web-author':
        download_url = "http://mirror.oxygenxml.com/InstData/WebAuthor/All/oxygenxml-web-author-all-platforms.zip"
    elif prod == 'WebHelp':
        download_url = "https://www.oxygenxml.com/InstData/Editor/Webhelp/oxygen-webhelp.zip"
    elif prod == 'Editor':
        if plat == 'Windows64':
            download_url = (
                "http://mirror.oxygenxml.com/InstData/%s/%s/VM/oxygen-64bit.exe"
                % (prod, plat))
        elif plat == 'Linux64':
            download_url = (
                "https://www.oxygenxml.com/InstData/%s/%s/VM/oxygen-64bit.sh"
                % (prod, plat))
        elif plat == 'MacOSX':
            download_url = (
                "https://www.oxygenxml.com/InstData/%s/%s/VM/oxygen.dmg"
                % (prod, plat))
        else:
            download_url = (
                "http://mirror.oxygenxml.com/InstData/%s/%s/VM/oxygen.tar.gz"
                % (prod, plat))
    else:
        if plat == 'Windows64':
            download_url = (
                "http://mirror.oxygenxml.com/InstData/%s/%s/VM/oxygen%s-64bit.exe"
                % (prod, plat, prod))
        elif plat == 'Linux64':
            download_url = (
                "https://www.oxygenxml.com/InstData/%s/%s/VM/oxygen%s-64bit.sh"
                % (prod, plat, prod))
        elif plat == 'MacOSX':
            download_url = (
                "https://www.oxygenxml.com/InstData/%s/%s/VM/oxygen%s.dmg"
                % (prod, plat, prod))
        else:
            download_url = (
                "http://mirror.oxygenxml.com/InstData/%s/%s/VM/oxygen%s.tar.gz"
                % (prod, plat, prod))
    self.env["url"] = download_url
    self.env["filename"] = re.search(r'/([0-9a-zA-Z-_.]*$)',
                                     download_url).group(1)
    self.output("Found Version %s" % self.env["version"])
    self.output("Found Build id %s" % self.env["buildid"])
    self.output("Use URL %s" % self.env["url"])
    self.output("Use filename %s" % self.env["filename"])
def internet_on():
    # Probe a known Google IP address; report True unless the request fails.
    response = urllib.urlopen('http://74.125.113.99')
    if response.getcode() == 404:
        return False
    else:
        return True