def get_fedora_releases():
    releases = []
    html = urlread(FEDORA_RELEASES)
    for release in re.findall(r'<a href="(\d+)/">', html)[-2:][::-1]:
        for arch in ARCHES:
            arch_url = FEDORA_RELEASES + '%s/Live/%s/' % (release, arch)
            try:
                files = urlread(arch_url)
            except URLGrabError:
                continue
            for link in re.findall(r'<a href="(.*)">', files):
                if link.endswith('-CHECKSUM'):
                    checksum = urlread(arch_url + link)
                    for line in checksum.split('\n'):
                        try:
                            sha256, filename = line.split()
                            if filename[0] != '*':
                                continue
                            filename = filename[1:]
                            chunks = filename[:-4].split('-')
                            chunks.remove('Live')
                            name = ' '.join(chunks)
                            releases.append(dict(
                                name=name,
                                url=arch_url + filename,
                                sha256=sha256,
                            ))
                        except ValueError:
                            pass
    return releases
def getfips(self):
    # Skip the header row, go through the rest of the data.
    # This block of code attaches FIPS codes to addresses.
    for rownum in range(1, self.sh.nrows):
        address = self.sh.row_values(rownum)[self.addressfield] + ","
        # Hard coding in Massachusetts!
        city = self.sh.row_values(rownum)[self.cityfield] + ", Ma"
        zipcode = self.sh.row_values(rownum)[self.zipfield]
        buildurl = 'http://rpc.geocoder.us/service/csv?address=' + address + '+' + city + '+' + zipcode
        # get rid of unicode nonbreaking spaces and all spaces
        buildurl = buildurl.replace(u'\xa0', u'').replace(' ', '+')
        # switch type to string
        burlstr = buildurl.encode('ascii', 'ignore')
        print burlstr
        outp = urlgrabber.urlread(burlstr)
        # If the address is not resolved, assign the '99999999999' placeholder tract code:
        if outp != "2: couldn't find this address! sorry":
            lat = outp.split(",")[0]
            lon = outp.split(",")[1]
            buildcensurl = 'http://data.fcc.gov/api/block/2010/find?latitude=' + lat + '&longitude=' + lon
            outblock = urlgrabber.urlread(buildcensurl)
            e = ET.fromstring(outblock)
            block = e.find('{http://data.fcc.gov/api}Block')
            fipstract = block.attrib['FIPS'][0:11]
        else:
            fipstract = '99999999999'
        self.fipslist.append(fipstract)
def _weather_get(self, city, raw=False):
    url = 'http://api.openweathermap.org/data/2.5/weather?{}={}'
    if all(c in '0123456789' for c in city):
        try:
            resp = urlgrabber.urlread(url.format('zip', city), size=2097152*10)
        except urlgrabber.grabber.URLGrabError:
            resp = 'Failed to fetch weather for {}'.format(repr(city))
    else:
        try:
            resp = urlgrabber.urlread(url.format('q', self._weather_parse_city(city)), size=2097152*10)
        except urlgrabber.grabber.URLGrabError:
            resp = 'Failed to fetch weather for {}'.format(repr(city))
    if raw:
        return resp
    try:
        json_data = json.loads(resp)
        # Note: the labels and placeholders were mismatched in the original
        # ("high" showed the current temp and "currently" showed the max).
        return 'Current weather for {city}: {desc}, low:{low:.1f} high:{high:.1f} currently:{cur:.1f}'.format(
            city=json_data['name'],
            desc=json_data['weather'][0]['description'],
            low=self._weather_convert(json_data['main']['temp_min']),
            cur=self._weather_convert(json_data['main']['temp']),
            high=self._weather_convert(json_data['main']['temp_max']),
        )
    except (KeyError, ValueError):
        return 'API error for {}: {}'.format(repr(city), resp)
def create_profile(self, distro_name):
    """
    Create a test profile with a random name, associated with the given
    distro.

    Returns a tuple of profile ID and name.
    """
    profile_name = "%s%s" % (TEST_PROFILE_PREFIX, random.randint(1, 1000000))
    profile_id = self.api.new_profile(self.token)
    self.api.modify_profile(profile_id, "name", profile_name, self.token)
    self.api.modify_profile(profile_id, "distro", distro_name, self.token)
    self.api.modify_profile(profile_id, "kickstart", FAKE_KICKSTART, self.token)
    self.api.modify_profile(profile_id, "kopts", {"dog": "fido", "cat": "fluffy"}, self.token)
    self.api.modify_profile(profile_id, "kopts-post", {"phil": "collins", "steve": "hackett"}, self.token)
    self.api.modify_profile(profile_id, "ksmeta", "good=sg1 evil=gould", self.token)
    self.api.modify_profile(profile_id, "breed", "redhat", self.token)
    self.api.modify_profile(profile_id, "owners", "sam dave", self.token)
    self.api.modify_profile(profile_id, "mgmt-classes", "blip", self.token)
    self.api.modify_profile(profile_id, "comment", "test profile", self.token)
    self.api.modify_profile(profile_id, "redhat_management_key", "1-ABC123", self.token)
    self.api.modify_profile(profile_id, "redhat_management_server", "mysatellite.example.com", self.token)
    self.api.modify_profile(profile_id, "virt_bridge", "virbr0", self.token)
    self.api.modify_profile(profile_id, "virt_cpus", "2", self.token)
    self.api.modify_profile(profile_id, "virt_file_size", "3", self.token)
    self.api.modify_profile(profile_id, "virt_path", "/opt/qemu/%s" % profile_name, self.token)
    self.api.modify_profile(profile_id, "virt_ram", "1024", self.token)
    self.api.modify_profile(profile_id, "virt_type", "qemu", self.token)
    self.api.save_profile(profile_id, self.token)
    self.cleanup_profiles.append(profile_name)

    # Check cobbler services URLs:
    url = "http://%s/cblr/svc/op/ks/profile/%s" % (cfg['cobbler_server'], profile_name)
    data = urlgrabber.urlread(url)
    self.assertEquals(FAKE_KS_CONTENTS, data)

    url = "http://%s/cblr/svc/op/list/what/profiles" % cfg['cobbler_server']
    data = urlgrabber.urlread(url)
    self.assertNotEquals(-1, data.find(profile_name))

    return (profile_id, profile_name)
def get_fedora_releases():
    global releases
    fedora_releases = []
    try:
        html = urlread(PUB_URL)
        versions = re.findall(r'<a href="(\d+)/">', html)
        latest = sorted([int(v) for v in versions], reverse=True)[0:2]
        for release in latest:
            if release >= 21:
                products = ('Workstation', 'Server', 'Cloud', 'Live', 'Spins')
            else:
                products = ('Live', 'Spins')
            for product in products:
                for arch in ARCHES:
                    baseurl = PUB_URL
                    if product == 'Live':
                        isodir = '/'
                    elif product == 'Spins':
                        baseurl = ALT_URL
                        isodir = '/'
                    else:
                        isodir = '/iso/'
                    arch_url = baseurl + '%s/%s/%s%s' % (release, product, arch, isodir)
                    print(arch_url)
                    try:
                        files = urlread(arch_url)
                    except URLGrabError:
                        continue
                    for link in re.findall(r'<a href="(.*)">', files):
                        if link.endswith('-CHECKSUM'):
                            print('Reading %s' % arch_url + link)
                            checksum = urlread(arch_url + link)
                            for line in checksum.split('\n'):
                                try:
                                    sha256, filename = line.split()
                                    if filename[0] != '*':
                                        continue
                                    filename = filename[1:]
                                    name = filename.replace('.iso', '')
                                    fedora_releases.append(dict(
                                        name=name,
                                        url=arch_url + filename,
                                        sha256=sha256,
                                    ))
                                except ValueError:
                                    pass
        releases = fedora_releases
    except:
        traceback.print_exc()
    return releases
def GET(self):
    web.header("Pragma", "no-cache")
    web.header("Cache-Control", "no-cache")
    self.restrict_access()
    input = web.input(href='')
    if input.href:
        if debug:
            web.debug('opening ' + input.href)
        if input.href.find('url=file') < 0:
            webbrowser.open_new_tab(input.href)
        else:
            urlgrabber.urlread(input.href)
def parse_aur(self):
    '''
    Reads the aur file and creates initial tasks for the defined packages.
    '''
    self.gen_repo_data()
    notify = False
    aur = 'http://aur.archlinux.org/rpc.php?type=info&arg='
    for line in open(self.opts['aur_file'], 'r').readlines():
        line = line.strip()
        if line.startswith('#'):
            continue
        data = eval(urlgrabber.urlread(aur + line))
        if data['type'] == 'error':
            # log something
            continue
        if self.aur.has_key(line):
            ver = data['results']['Version']
            # Compare the stored version against the one AUR reports.
            # (The original indexed the `aur` URL string here, which would
            # raise a TypeError; `self.aur` is clearly what was meant.)
            if self.aur[line] < ver:
                notify = True
        else:
            notify = True
        if notify:
            notp = {'type': 'aur_pkg',
                    'action': 'build_aur_pkg',
                    'name': line}
            notn = str(int(time.time())) + str(random.randint(1000, 9999))
            path = os.path.join(self.opts['not_dir'], notn + 'p')
            pickle.dump(notp, open(path, 'w'))
def upgradeLink(link, user_agent, graball=False):
    link = link.encode('utf-8')
    # TODO: handle other exceptions
    # XXX: also, a better way to check file types would be content-type
    # headers, and don't mess with anything that isn't a webpage.
    if not (link.startswith("http://news.ycombinator.com") or link.endswith(".pdf")):
        linkFile = "upgraded/" + re.sub(PUNCTUATION, "_", link)
        if linkFile in CACHE:
            return CACHE[linkFile]
        else:
            content = u""
            try:
                html = urlgrabber.urlread(link, keepalive=0, user_agent=user_agent)
                content = grabContent(link, html)
                CACHE[linkFile] = content
            except IOError:
                pass
            return content
    else:
        return u""
def readKickstart(self, f, reset=True):
    """Process a kickstart file, given by the filename f."""
    if reset:
        self._reset()

    # An %include might not specify a full path.  If we don't try to figure
    # out what the path should have been, then we're unable to find it.
    # Requiring full path specification, though, sucks.  So let's make the
    # reading "smart" by keeping track of what the path is at each include
    # depth.
    if not os.path.exists(f):
        if self.currentdir.has_key(self._includeDepth - 1):
            if os.path.exists(os.path.join(self.currentdir[self._includeDepth - 1], f)):
                f = os.path.join(self.currentdir[self._includeDepth - 1], f)

    cd = os.path.dirname(f)
    if not cd.startswith("/"):
        cd = os.path.abspath(cd)
    self.currentdir[self._includeDepth] = cd

    try:
        s = urlread(f)
    except grabber.URLGrabError, e:
        raise KickstartError, formatErrorMsg(0, msg=_("Unable to open input kickstart file: %s") % e.strerror)
def _retrievePublicKey(self, keyurl, repo=None):
    """
    Retrieve a key file

    @param keyurl: url to the key to retrieve

    Returns a list of dicts with all the keyinfo
    """
    key_installed = False

    # Go get the GPG key from the given URL
    try:
        url = yum.misc.to_utf8(keyurl)
        if repo is None:
            rawkey = urlgrabber.urlread(url, limit=9999)
        else:
            # If we have a repo, use the proxy etc. configuration for it.
            # In theory we have a global proxy config too, but meh...
            # external callers should just update.
            ug = URLGrabber(bandwidth=repo.bandwidth,
                            retry=repo.retries,
                            throttle=repo.throttle,
                            progress_obj=repo.callback,
                            proxies=repo.proxy_dict)
            ug.opts.user_agent = default_grabber.opts.user_agent
            rawkey = ug.urlread(url, text=repo.id + "/gpgkey")
    except urlgrabber.grabber.URLGrabError, e:
        raise ChannelException('GPG key retrieval failed: ' +
                               yum.i18n.to_unicode(str(e)))
def srpm_from_ticket(self):
    '''Retrieve the latest srpmURL from the bugzilla URL.'''
    try:
        bugzillaURL = self.checklist.properties['ticketURL'].value
    except KeyError:
        # No ticket URL was given, set nothing
        return
    if not bugzillaURL:
        # No ticket URL was given, set nothing
        return

    data = urlgrabber.urlread(bugzillaURL)
    srpmList = re.compile('"((ht|f)tp(s)?://.*?\.src\.rpm)"', re.IGNORECASE).findall(data)
    if srpmList == []:
        # No SRPM was found.  Just decide not to set anything.
        return
    # Set the srpm to the last SRPM listed on the page
    srpmURL = srpmList[-1][0]
    if not srpmURL:
        # No srpm found.  Just decide not to set anything.
        return
    # Download the srpm to the temporary directory.
    urlgrabber.urlgrab(srpmURL, self.tmpDir)
    # Fill the SRPMfile property with the srpm in the temp directory
    self.checklist.properties['SRPMfile'].value = (
        self.tmpDir + os.path.basename(srpmURL))
def create_distro(self):
    """
    Create a test distro with a random name, store it for cleanup during
    teardown.

    Returns a tuple of the object's ID and name.
    """
    distro_name = "%s%s" % (TEST_DISTRO_PREFIX, random.randint(1, 1000000))
    did = self.api.new_distro(self.token)
    self.api.modify_distro(did, "name", distro_name, self.token)
    self.api.modify_distro(did, "kernel", FAKE_KERNEL, self.token)
    self.api.modify_distro(did, "initrd", FAKE_INITRD, self.token)
    self.api.modify_distro(did, "kopts", {"dog": "fido", "cat": "fluffy"}, self.token)
    self.api.modify_distro(did, "ksmeta", "good=sg1 evil=gould", self.token)
    self.api.modify_distro(did, "breed", "redhat", self.token)
    self.api.modify_distro(did, "os-version", "rhel5", self.token)
    self.api.modify_distro(did, "owners", "sam dave", self.token)
    self.api.modify_distro(did, "mgmt-classes", "blip", self.token)
    self.api.modify_distro(did, "comment", "test distro", self.token)
    self.api.modify_distro(did, "redhat_management_key", "1-ABC123", self.token)
    self.api.modify_distro(did, "redhat_management_server", "mysatellite.example.com", self.token)
    self.api.save_distro(did, self.token)
    self.cleanup_distros.append(distro_name)

    url = "http://%s/cblr/svc/op/list/what/distros" % cfg['cobbler_server']
    data = urlgrabber.urlread(url)
    self.assertNotEquals(-1, data.find(distro_name))

    return (did, distro_name)
def __init__(self):
    data = StringIO.StringIO(urlgrabber.urlread("http://itunes.com/version"))
    stream = gzip.GzipFile(fileobj=data)
    data = stream.read()
    updates = plistlib.readPlistFromString(data)
    devs = self.findPods()
    for (dev, name, family, firmware) in devs:
        if not family:
            family, firmware = self.getIPodData(dev)
        print "Found %s with family %s and firmware %s" % (name, family, firmware)
        if updates["iPodSoftwareVersions"].has_key(unicode(family)):
            uri = updates["iPodSoftwareVersions"][unicode(family)]["FirmwareURL"]
            print "Latest firmware: %s" % uri
            print "Fetching firmware..."
            path = urlgrabber.urlgrab(
                uri,
                progress_obj=urlgrabber.progress.text_progress_meter(),
                reget="check_timestamp"
            )
            print "Extracting firmware..."
            zf = zipfile.ZipFile(path)
            for name in zf.namelist():
                if name[:8] == "Firmware":
                    print "Firmware found."
                    outfile = open("Firmware", "wb")
                    outfile.write(zf.read(name))
                    outfile.close()
                    infile = open("Firmware", "rb")
                    outfile = open(dev, "wb")
                    # FIXME: do the following in pure python?
                    print "Making backup..."
                    commands.getoutput("dd if=%s of=Backup" % dev)
                    print "Uploading firmware..."
                    commands.getoutput("dd if=Firmware of=%s" % dev)
                    print "Done."
def daModule(line):
    url = buildUrl(line)
    try:
        st = urlread(url)
    except:
        # Turkish in the original: "yok böyle bi şii" ("no such thing")
        return '[' + line + '] no such thing'
    return parsit(st)
def get_html_url(vals, url):
    if vals['debug'] > 1:
        print("processing %s" % (url))

    # download url
    try:
        html_code = urlgrabber.urlread(url)
    except urlgrabber.grabber.URLGrabError:
        # 404 error
        error_str = "URL down: %s" % (url)
        return (-1, error_str)

    return (0, html_code)
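For reference, a hypothetical caller of get_html_url() might unpack its (status, payload) tuple like this; the vals dict and the URL below are assumptions, not part of the original snippet:

# Hypothetical usage of get_html_url(); `vals` and the URL are made up.
vals = {'debug': 2}
status, payload = get_html_url(vals, 'http://example.com/index.html')
if status == 0:
    print("fetched %d bytes" % len(payload))
else:
    print("fetch failed: %s" % payload)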
def _retrievePublicKey(self, keyurl, repo=None):
    """
    Retrieve a key file

    @param keyurl: url to the key to retrieve

    Returns a list of dicts with all the keyinfo
    """
    key_installed = False

    # Go get the GPG key from the given URL
    try:
        url = yum.misc.to_utf8(keyurl)
        if repo is None:
            rawkey = urlgrabber.urlread(url, limit=9999)
        else:
            # If we have a repo, use the proxy etc. configuration for it.
            # In theory we have a global proxy config too, but meh...
            # external callers should just update.
            ug = URLGrabber(bandwidth=repo.bandwidth,
                            retry=repo.retries,
                            throttle=repo.throttle,
                            progress_obj=repo.callback,
                            proxies=repo.proxy_dict)
            ug.opts.user_agent = default_grabber.opts.user_agent
            rawkey = ug.urlread(url, text=repo.id + "/gpgkey")
    except urlgrabber.grabber.URLGrabError as e:
        raise ChannelException('GPG key retrieval failed: ' +
                               yum.i18n.to_unicode(str(e)))

    # Parse the key
    try:
        keys_info = yum.misc.getgpgkeyinfo(rawkey, multiple=True)
    except ValueError as err:
        raise ChannelException(
            'GPG key information retrieval failed: {}'.format(err))
    except Exception as err:
        raise ChannelException(
            'Unhandled GPG key failure occurred: {}'.format(err))

    keys = []
    for keyinfo in keys_info:
        thiskey = {}
        for info in ('keyid', 'timestamp', 'userid', 'fingerprint', 'raw_key'):
            if not keyinfo.has_key(info):
                raise ChannelException(
                    'GPG key parsing failed: key does not have value %s' % info)
            thiskey[info] = keyinfo[info]
        thiskey['keyid'] = str(
            "%016x" % (thiskey['keyid'] & 0xffffffffffffffff)).upper()
        thiskey['hexkeyid'] = yum.misc.keyIdToRPMVer(keyinfo['keyid']).upper()
        keys.append(thiskey)

    return keys
def prereposetup_hook(conduit):
    skipped = []
    modded = []
    blocked = []
    blocklist = []
    if os.path.isfile(os.path.join(confdir, 'disabled')):
        blocklist = readconfig(os.path.join(confdir, 'disabled'))
    repos = conduit.getRepos()
    for repo in repos.listEnabled():
        for pat in blocklist:
            if fnmatch.fnmatch(repo.id, pat):
                repo.enabled = False
                blocked.append(repo.id)
        if not repo.enabled:
            continue
        # No longer needed?
        # if not repo.skip_if_unavailable:
        #     repo.skip_if_unavailable = True
        # Mod the mirrorlist if the local alternative exists
        if os.path.isfile(os.path.join(confdir, repo.id)):
            repo.mirrorlist = 'file://' + os.path.join(confdir, repo.id)
            modded.append(repo.id)
        if repo.metalink:
            repo.metalink = None
        # Get the mirrorlist url or, if it's empty, the baseurl
        if repo.mirrorlist is not None:
            url = repo.mirrorlist
        elif repo.baseurl:
            url = repo.baseurl[0]
        # If the url is not http(s), do nothing
        if str(url).startswith('http'):
            # Try to get the url; if that fails, disable the repo.
            # Also set mirrorlist and urls to file:/// so that the
            # fastestmirror plugin will ignore it.
            try:
                data = urlgrabber.urlread(url)
            except:
                repo.mirrorlist = 'file:///'
                repo.urls = ['file:///']
                skipped.append(repo.id)
                repo.enabled = False
    # report which repos we have messed with
    if skipped:
        conduit.info(2, "* Autoskipped: " + ", ".join(skipped))
    if modded:
        conduit.info(2, "* Automodded: " + ", ".join(modded))
    if blocked:
        conduit.info(2, "* Autoblocked: " + ", ".join(blocked))
def compare_sha256(d, filename, graburl):
    """ looks for a FileDetails object that matches the given URL """
    found = False
    s = urlgrabber.urlread(graburl)
    sha256 = hashlib.sha256(s).hexdigest()
    for fd in list(d.fileDetails):
        if fd.filename == filename and fd.sha256 is not None:
            if fd.sha256 == sha256:
                found = True
                break
    return found
def _url(self, args):
    """Usage: `{cmd_prefix}url *urls`"""
    if not args:
        return None
    output = []
    for url in args:
        if not any(url.startswith(i) for i in ('https://', 'http://')):
            url = 'http://{}'.format(url)
        bs = BeautifulSoup.BeautifulSoup(urlgrabber.urlread(url, size=2097152*10))
        output.append(bs.title.string)
    return '\n'.join(output)
def _reddit(self, args):
    """Usage: `{cmd_prefix}reddit [*subreddits]`"""
    output = []
    args = args if args else ['']
    for arg in args:
        if arg:
            site = 'http://www.reddit.com/r/{}'.format(arg)
            logger.log((site, ), (None, ))
        else:
            site = 'http://www.reddit.com/'
        bs = BeautifulSoup.BeautifulSOAP(urlgrabber.urlread(site, size=2097152*10))
        output.extend(bs.findAll('a', 'title'))
    return '\n'.join('{}: {} {}'.format(i + 1, o.string, o.get('href'))
                     for i, o in enumerate(output[:5]))
def get_fedora_releases():
    global releases
    fedora_releases = []
    try:
        html = urlread(FEDORA_RELEASES)
        for release in re.findall(r'<a href="(\d+)/">', html)[-2:][::-1]:
            for arch in ARCHES:
                arch_url = FEDORA_RELEASES + '%s/Live/%s/' % (release, arch)
                try:
                    files = urlread(arch_url)
                except URLGrabError:
                    continue
                for link in re.findall(r'<a href="(.*)">', files):
                    if link.endswith('-CHECKSUM'):
                        checksum = urlread(arch_url + link)
                        for line in checksum.split('\n'):
                            try:
                                sha256, filename = line.split()
                                if filename[0] != '*':
                                    continue
                                filename = filename[1:]
                                chunks = filename[:-6].split('-')
                                chunks.remove('Live')
                                release = chunks.pop()
                                chunks.insert(1, release)
                                name = ' '.join(chunks)
                                fedora_releases.append(dict(
                                    name=name,
                                    url=arch_url + filename,
                                    sha256=sha256,
                                ))
                            except ValueError:
                                pass
        releases = fedora_releases + other_releases
    except:
        # Can't fetch releases from the internet.
        releases += other_releases
    return releases
def fetch_url(url):
    # Slow this pig down a little
    time.sleep(1)
    logging.debug("Fetching {0}".format(url))
    page_data = None
    attempts = 1
    while attempts <= MAX_ATTEMPTS:
        try:
            page_data = urlread(url)
            break
        except Exception as e:
            logging.error("Error: {0}".format(e))
            print "Error: {0}".format(e)
            attempts = attempts + 1
            time.sleep(5)
    return page_data
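The manual retry loop above can also be expressed with urlgrabber's built-in retry option, the same option the repo snippets elsewhere in this collection pass to URLGrabber. A minimal sketch, reusing the MAX_ATTEMPTS constant from fetch_url; the timeout value is an arbitrary assumption:

import logging
from urlgrabber.grabber import URLGrabber, URLGrabError

def fetch_url_builtin_retry(url):
    # Let urlgrabber retry transient failures itself; the retry count
    # comes from MAX_ATTEMPTS above, the timeout is an assumption.
    grabber = URLGrabber(retry=MAX_ATTEMPTS, timeout=30)
    try:
        return grabber.urlread(url)
    except URLGrabError as e:
        logging.error("Error: {0}".format(e))
        return None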
def sparqlQuery(query, baseURL, format="application/json", logger=None):
    params = {
        "default-graph": "",
        "query": query,
        "debug": "on",
        "timeout": "",
        "format": format,
        "save": "display",
        "fname": ""
    }
    querypart = urllib.urlencode(params)
    # response = urllib.urlopen(baseURL, querypart).read()
    # return json.loads(response)
    response = urlgrabber.urlread(baseURL + "?" + querypart)
    return json.loads(response)
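A hypothetical call for context: the endpoint URL below is an assumption (a local Virtuoso-style endpoint), and the traversal follows the standard SPARQL 1.1 JSON results layout:

# Hypothetical usage; the endpoint URL is an assumption.
results = sparqlQuery("SELECT * WHERE { ?s ?p ?o } LIMIT 5",
                      "http://localhost:8890/sparql")
for binding in results['results']['bindings']:
    print binding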
def verify_cert():
    """
    Check that the user cert is valid.

    Things to check/return:
        - not revoked
        - expiry time (warn if less than 21 days)
    """
    my_cert = _open_cert()
    serial_no = my_cert.get_serial_number()
    valid_until = my_cert.get_notAfter()[:8]
    crl = urlgrabber.urlread("https://admin.fedoraproject.org/ca/crl.pem")
    dateFmt = '%Y%m%d'
    delta = datetime.datetime.now() + datetime.timedelta(days=21)
    warn = datetime.datetime.strftime(delta, dateFmt)

    print 'cert expires: %s-%s-%s' % (valid_until[:4], valid_until[4:6], valid_until[6:8])
    if valid_until < warn:
        print 'WARNING: Your cert expires soon.'
def find_autoinstall(self, system=None, profile=None, **rest):
    self.__xmlrpc_setup()
    serverseg = "http://%s" % self.collection_mgr._settings.server
    name = "?"
    if system is not None:
        url = "%s/cblr/svc/op/autoinstall/system/%s" % (serverseg, name)
    elif profile is not None:
        url = "%s/cblr/svc/op/autoinstall/profile/%s" % (serverseg, name)
    else:
        name = self.autodetect(**rest)
        if name.startswith("FAILED"):
            return "# autodetection %s" % name
        url = "%s/cblr/svc/op/autoinstall/system/%s" % (serverseg, name)
    try:
        return urlgrabber.urlread(url)
    except:
        return "# automatic installation file retrieval failed (%s)" % url
def findks(self, system=None, profile=None, **rest):
    self.__xmlrpc_setup()
    serverseg = "http://%s" % self.config._settings.server
    name = "?"
    type = "system"
    if system is not None:
        url = "%s/cblr/svc/op/ks/system/%s" % (serverseg, name)
    elif profile is not None:
        url = "%s/cblr/svc/op/ks/profile/%s" % (serverseg, name)
    else:
        name = self.autodetect(**rest)
        if name.startswith("FAILED"):
            return "# autodetection %s" % name
        url = "%s/cblr/svc/op/ks/system/%s" % (serverseg, name)
    try:
        return urlgrabber.urlread(url)
    except:
        return "# kickstart retrieval failed (%s)" % url
def translation(phrase, input="en", output="", usecache=True):
    if not output:
        output = default_lang()

    cache_file = 'translate_cache/%s-%s' % (output, phrase)
    if usecache and os.path.isfile(cache_file):
        fp = codecs.open(cache_file, 'r', 'utf-8')
        data = fp.read()
        fp.close()
        return data

    #phrase = 'hello world'
    lang = "&langpair=" + input + "%7C" + output
    try:
        requrl = url % web.urlquote(phrase) + lang
        #web.debug('translate url: %s' % requrl)
        """
        req = urllib2.Request(requrl)
        req.add_header('Referer', 'http://www.andrewtrusty.com/')
        r = urllib2.urlopen(req)
        content = r.read()
        """
        content = urlgrabber.urlread(requrl).decode('utf-8')
        data = cjson.decode(content)
        translation = data['responseData']['translatedText']

        if usecache:
            fp = codecs.open(cache_file, 'w', 'utf-8')
            fp.write(translation)  # .encode('utf-8')
            fp.close()
        return translation
    except Exception, e:
        import traceback
        traceback.print_exc(e)
        return ""
def get_images(active=True, outdir='player_images', outlist='player_names.csv'):
    import bs4, urlgrabber, httplib

    if active:
        list = 'http://stats.nba.com/frags/stats-site-page-players-directory-active.html'
    else:
        list = 'http://stats.nba.com/players.html'

    # prepare player list
    flist = open(outlist, 'w')
    flist.write('# name\n')

    # fetch and parse the NBA player list
    player_page = urlgrabber.urlread(list)
    soup = bs4.BeautifulSoup(player_page)

    # loop through the player list
    for p in soup('a', 'playerlink'):
        phref = str(p['href'])
        ## exclude "historical" players
        #if (len(phref.split('HISTADD')) == 1):
        # verify that player pages exist
        pname = phref.split('/')[-1]
        conn = httplib.HTTPConnection('i.cdn.turner.com')
        conn.request('HEAD', '/nba/nba/.element/img/2.0/sect/statscube/players/large/' + pname + '.png')
        if conn.getresponse().status != 404:
            # download and save player images
            img_link = 'http://i.cdn.turner.com/nba/nba/.element/img/2.0/sect/statscube/players/large/' + pname + '.png'
            urlgrabber.urlgrab(img_link, filename=outdir + '/' + pname + '.png')
            # write player names to list
            flist.write(pname + '\n')

    # close name list
    flist.close()
    return
def get_file(self, path, local_base=None):
    try:
        try:
            temp_file = ""
            if local_base is not None:
                target_file = os.path.join(local_base, path)
                target_dir = os.path.dirname(target_file)
                if not os.path.exists(target_dir):
                    os.makedirs(target_dir, int('0755', 8))
                temp_file = target_file + '..download'
                if os.path.exists(temp_file):
                    os.unlink(temp_file)
                downloaded = urlgrabber.urlgrab(path, temp_file)
                os.rename(downloaded, target_file)
                return target_file
            else:
                return urlgrabber.urlread(path)
        except urlgrabber.URLGrabError:
            return
    finally:
        if os.path.exists(temp_file):
            os.unlink(temp_file)
def run(self):
    """
    Start worker.

    :return: Dictionary of the hosts in the worker scope.
    """
    self.log.debug("Fetching %s", self.url)
    if not urlparse.urlsplit(self.url).scheme:
        self.url = "file://%s" % self.url
    try:
        output = json.loads(urlgrabber.urlread(str(self.url), timeout=300))
    except Exception as exc:
        self.log.error("Unable to fetch '{0}': {1}".format(str(self.url), exc))
        return None

    # pylint: disable=W1622
    first = output.itervalues().next()
    if "vms" not in first:
        # run() should return a dict of host entries, but here the first
        # value is a virtual host manager and not a host entry
        return first
    return output
def get_file(self, path, local_base=None):
    try:
        try:
            temp_file = ""
            try:
                if not urlparse(path).scheme:
                    (s, b, p, q, f, o) = urlparse(self.url)
                    if p[-1] != '/':
                        p = p + '/'
                    p = p + path
                    path = urlunparse((s, b, p, q, f, o))
            except (ValueError, IndexError, KeyError) as e:
                return None
            if local_base is not None:
                target_file = os.path.join(local_base, path)
                target_dir = os.path.dirname(target_file)
                if not os.path.exists(target_dir):
                    os.makedirs(target_dir, int('0755', 8))
                temp_file = target_file + '..download'
                if os.path.exists(temp_file):
                    os.unlink(temp_file)
                urlgrabber_opts = {}
                self.set_download_parameters(urlgrabber_opts, path, temp_file)
                downloaded = urlgrabber.urlgrab(path, temp_file, **urlgrabber_opts)
                os.rename(downloaded, target_file)
                return target_file
            else:
                urlgrabber_opts = {}
                self.set_download_parameters(urlgrabber_opts, path)
                return urlgrabber.urlread(path, **urlgrabber_opts)
        except urlgrabber.grabber.URLGrabError:
            return
    finally:
        if os.path.exists(temp_file):
            os.unlink(temp_file)
import urlgrabber
import xml.etree.ElementTree as ET

file = open('a.csv', 'r')
for line in file:
    buildurl = 'http://rpc.geocoder.us/service/csv?address=' + line
    buildurl = buildurl.replace(' ', '+')
    out = urlgrabber.urlread(buildurl)
    lat = out.split(",")[0]
    lon = out.split(",")[1]
    buildcensurl = 'http://data.fcc.gov/api/block/2010/find?latitude=' + lat + '&longitude=' + lon
    outblock = urlgrabber.urlread(buildcensurl)
    e = ET.fromstring(outblock)
    block = e.find('{http://data.fcc.gov/api}Block')
    print block.attrib['FIPS']
def getRemoteSum(self):
    # Takes the first whitespace-separated field of the remote checksum
    # file (the original was missing the `self` parameter it uses).
    try:
        return urlgrabber.urlread(self.remote_db_sum).split()[0]
    except urlgrabber.grabber.URLGrabError:
        return ''
def test_services_access():
    import remote
    remote._test_setup_settings(pxe_once=1)
    remote._test_bootstrap_restart()
    remote._test_remove_objects()
    __test_setup()
    time.sleep(5)
    api = cobbler_api.BootAPI()

    # test mod_python service URLs -- more to be added here
    templates = ["sample.ks", "sample_end.ks", "legacy.ks"]

    for template in templates:
        ks = "/var/lib/cobbler/kickstarts/%s" % template
        p = api.find_profile("profile0")
        assert p is not None
        p.set_kickstart(ks)
        api.add_profile(p)

        url = "http://127.0.0.1/cblr/svc/op/ks/profile/profile0"
        data = urlgrabber.urlread(url)
        assert data.find("look_for_this1") != -1

        url = "http://127.0.0.1/cblr/svc/op/ks/system/system0"
        data = urlgrabber.urlread(url)
        assert data.find("look_for_this2") != -1

    # see if we can pull up the yum configs
    url = "http://127.0.0.1/cblr/svc/op/yum/profile/profile0"
    data = urlgrabber.urlread(url)
    print "D1=%s" % data
    assert data.find("repo0") != -1

    url = "http://127.0.0.1/cblr/svc/op/yum/system/system0"
    data = urlgrabber.urlread(url)
    print "D2=%s" % data
    assert data.find("repo0") != -1

    for a in ["pre", "post"]:
        filename = "/var/lib/cobbler/triggers/install/%s/unit_testing" % a
        fd = open(filename, "w+")
        fd.write("#!/bin/bash\n")
        fd.write("echo \"TESTING %s type ($1) name ($2) ip ($3)\" >> /var/log/cobbler/kicklog/cobbler_trigger_test\n" % a)
        fd.write("exit 0\n")
        fd.close()
        utils.os_system("chmod +x %s" % filename)

    urls = [
        "http://127.0.0.1/cblr/svc/op/trig/mode/pre/profile/profile0",
        "http://127.0.0.1/cblr/svc/op/trig/mode/post/profile/profile0",
        "http://127.0.0.1/cblr/svc/op/trig/mode/pre/system/system0",
        "http://127.0.0.1/cblr/svc/op/trig/mode/post/system/system0",
    ]
    for x in urls:
        print "reading: %s" % x
        data = urlgrabber.urlread(x)
        print "read: %s" % data
        time.sleep(5)
        assert os.path.exists("/var/log/cobbler/kicklog/cobbler_trigger_test")
        os.unlink("/var/log/cobbler/kicklog/cobbler_trigger_test")

    os.unlink("/var/lib/cobbler/triggers/install/pre/unit_testing")
    os.unlink("/var/lib/cobbler/triggers/install/post/unit_testing")

    # trigger testing complete

    # now let's test the nopxe URL (boot loop prevention)
    sys = api.find_system("system0")
    sys.set_netboot_enabled(True)
    api.add_system(sys)  # save the system to ensure it's set True

    url = "http://127.0.0.1/cblr/svc/op/nopxe/system/system0"
    data = urlgrabber.urlread(url)
    time.sleep(2)

    sys = api.find_system("system0")
    assert str(sys.netboot_enabled).lower() not in ["1", "true", "yes"]

    # now let's test the listing URLs, since we document them, even though
    # I don't know of anything relying on them.
    url = "http://127.0.0.1/cblr/svc/op/list/what/distros"
    assert urlgrabber.urlread(url).find("distro0") != -1

    url = "http://127.0.0.1/cblr/svc/op/list/what/profiles"
    assert urlgrabber.urlread(url).find("profile0") != -1

    url = "http://127.0.0.1/cblr/svc/op/list/what/systems"
    assert urlgrabber.urlread(url).find("system0") != -1

    url = "http://127.0.0.1/cblr/svc/op/list/what/repos"
    assert urlgrabber.urlread(url).find("repo0") != -1

    url = "http://127.0.0.1/cblr/svc/op/list/what/images"
    assert urlgrabber.urlread(url).find("image0") != -1

    # the following modes are implemented by external apps and are not
    # considered part of cobbler's core, so testing them is less of a
    # priority:
    #    autodetect
    #    findks
    # these features may be removed in a later release of cobbler but
    # really aren't hurting anything, so there is no pressing need.
    # now let's test the puppet external nodes support and just see if we
    # get valid YAML back without doing much more
    url = "http://127.0.0.1/cblr/svc/op/puppet/hostname/hostname0"
    data = urlgrabber.urlread(url)

    assert data.find("alpha") != -1
    assert data.find("beta") != -1
    assert data.find("gamma") != -1
    assert data.find("3") != -1

    data = yaml.load(data)
    assert data.has_key("classes")
    assert data.has_key("parameters")

    # now let's test the template file serving, which is used by the
    # snippet download_config_files and also by koan's --update-files
    url = "http://127.0.0.1/cblr/svc/op/template/profile/profile0/path/_tmp_t1-rendered"
    data = urlgrabber.urlread(url)
    assert data.find("profile0") != -1
    assert data.find("$profile_name") == -1

    url = "http://127.0.0.1/cblr/svc/op/template/system/system0/path/_tmp_t2-rendered"
    data = urlgrabber.urlread(url)
    assert data.find("system0") != -1
    assert data.find("$system_name") == -1

    os.unlink("/tmp/cobbler_t1")
    os.unlink("/tmp/cobbler_t2")

    remote._test_remove_objects()
def test_urlread(self):
    "module-level urlread() function"
    s = urlgrabber.urlread('http://www.python.org')
CACHEFILE = '/tmp/apache-snmp'

# check for cache file newer than CACHETIME seconds ago
import os
import time
if os.path.isfile(CACHEFILE) \
        and (time.time() - os.stat(CACHEFILE)[8]) < CACHETIME:
    # use cached data
    f = open(CACHEFILE, 'r')
    data = f.read()
    f.close()
else:
    # grab the status URL (fresh data)
    # need debian package python-urlgrabber
    from urlgrabber import urlread
    data = urlread('http://localhost/server-status?auto',
                   user_agent='SNMP Apache Stats')
    # write file
    f = open(CACHEFILE + '.TMP.' + `os.getpid()`, 'w')
    f.write(data)
    f.close()
    os.rename(CACHEFILE + '.TMP.' + `os.getpid()`, CACHEFILE)

# dice up the data
scoreboardkey = ['_', 'S', 'R', 'W', 'K', 'D', 'C', 'L', 'G', 'I', '.']
params = {}
for line in data.splitlines():
    fields = line.split(': ')
    if len(fields) <= 1:
        continue  # "localhost" as first line causes an out-of-index error
    elif fields[0] == 'Scoreboard':
        # count up the scoreboard into states
#!/usr/bin/env python
from urlgrabber import urlread
import re
from BeautifulSoup import BeautifulSoup, SoupStrainer
import datetime
import shutil
import MySQLdb

now = datetime.datetime.now()
nowstring = str(now)

urlgrab = urlread('https://www.facebase.org/node/215')

output = """{
title: "axel genes to enhancers",
chr: "http://localhost",
start: 1111,
end: 2222,
items: ["""

conn = MySQLdb.connect(host='genome-mysql.cse.ucsc.edu', user='******', db='mm9')
curs = conn.cursor()

for link in BeautifulSoup(urlgrab, parseOnlyThese=SoupStrainer('a')):
    if link.has_key('href'):
        if 'transgenic' in link['href']:
            url2 = 'https://www.facebase.org' + link['href']
            url2 = str(url2)
            urlgrab2 = urlread(url2)
#!/usr/bin/python
from urlgrabber import urlread
from xml.dom import minidom

def text(n, x):
    return n.getElementsByTagName(x)[0].firstChild.nodeValue.strip()

URL = "https://acadinfo.wustl.edu/sis_ws_courses/SIScourses.asmx/GetCoursesByCourseNumbyASemester?ApplicationToken=c09fbe82-7375-4df6-9659-b4f2bf21e4b9&ApplicationPwd=876KKmp*cR478Q&SortSemester=201502&DeptCd=E81&SchoolCd=E&CrsNum="

for course in ['365S', '550S']:
    xml = urlread(URL + course)
    xmldoc = minidom.parseString(xml)
    title = text(xmldoc, 'CourseTitle')
    print title
    for tag in xmldoc.getElementsByTagName('CourseSection'):
        section = text(tag, 'Section')
        enrolled = text(tag, 'EnrollCnt')
        limit = text(tag, 'EnrollLimit')
        wait = text(tag, 'WaitCnt')
        print '\t%3s %03d/%03d Waiting: %d' % (section, int(enrolled), int(limit), int(wait))
# check for cache file newer than CACHETIME seconds ago
import os
import time
if os.path.isfile(CACHEFILE) \
        and (time.time() - os.stat(CACHEFILE)[8]) < CACHETIME:
    # use cached data
    f = open(CACHEFILE, 'r')
    data = f.read()
    f.close()
else:
    # grab the status URL (fresh data)
    # need debian package python-urlgrabber
    from urlgrabber import urlread
    data = urlread('https://<SERVER>/server-status?auto',
                   user_agent='SNMP Apache Stats')
    # write file
    f = open(CACHEFILE + '.TMP.' + `os.getpid()`, 'w')
    f.write(data)
    f.close()
    os.rename(CACHEFILE + '.TMP.' + `os.getpid()`, CACHEFILE)

# dice up the data
scoreboardkey = ['_', 'S', 'R', 'W', 'K', 'D', 'C', 'L', 'G', 'I', '.']
params = {}
for line in data.splitlines():
    if line == '<SERVER>':
        continue
    if line == 'TLSSessionCacheStatus':
        continue
    fields = line.split(': ')
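Both Apache status snippets above truncate right at the scoreboard-handling branch. A sketch of how that counting presumably continues, based only on the scoreboardkey and params names they define; this is an assumption, not recovered code:

    # Assumed continuation: fields[1] is the scoreboard string, e.g. "_W_K...";
    # tally each worker state, otherwise keep the key/value pair as a parameter.
    if fields[0] == 'Scoreboard':
        states = {}
        for key in scoreboardkey:
            states[key] = fields[1].count(key)
    else:
        params[fields[0]] = fields[1]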