def initStats(self):
    """Query the ActiveMQ REST management API and parse its response.

    Fills self._statusDict with the broker's attributes, then dereferences
    every {'objectName': ...} entry found inside list-valued attributes by
    fetching that MBean and inlining its attribute set.
    """
    broker_url = ("%s://%s:%d/%s/read/org.apache.activemq:type=Broker,"
                  "brokerName=%s"
                  % (self._proto, self._host, self._port,
                     self._statuspath, self._brokername))
    body = util.get_url(broker_url, self._user, self._password)
    self._statusDict = json.loads(body)['value']
    # Resolve object-name references embedded in list attributes.
    for attr in self._statusDict.keys():
        value = self._statusDict[attr]
        if not isinstance(value, list):
            continue
        for idx, entry in enumerate(value):
            try:
                object_name = entry['objectName']
            except KeyError:
                continue  # plain value, nothing to dereference
            ref_url = "%s://%s:%d/%s/read/%s" % (
                self._proto, self._host, self._port,
                self._statuspath, object_name)
            ref_body = util.get_url(ref_url, self._user, self._password)
            value[idx] = json.loads(ref_body)['value']
def analyze(self):
    """Scrape conference-talk links out of the page HTML in self._data.

    Walks every <td>, and for each paragraph collects the anchors inside it,
    building an Item(url, filename, descp) per link and adding it to
    self.results.  NOTE(review): title/value/url/filename/descp assigned in
    the 2016/keynote branch are overwritten by the per-paragraph loop below;
    looks like leftover code -- confirm before relying on them.
    """
    self._soup = BeautifulSoup(self._data)
    index = 0
    for td in self._soup.find_all('td'):
        paras = td.find_all('p')
        # print "td index : ",index
        index += 1
        if paras != []:
            if '2016' in self._url:
                # Keynote: the first paragraph is the keynote entry; the
                # remaining paragraphs are the regular talks.
                title = paras[0].b.text + paras[0].a.text
                value = paras[0].a['href']
                url = util.get_url(self._url, value)
                filename = title + urlsplit(value)[-1]
                descp = paras[1].b.text + paras[1].text
                paras = paras[1:]
            tindex = 0
            for p in paras:
                # print "\ttindex: ",tindex
                tindex += 1
                # Bold text is the talk title; each anchor is a downloadable file.
                title = p.b.text
                hrefs = p.find_all('a')
                for h in hrefs:
                    value = h['href']
                    url = util.get_url(self._url, value)
                    filename = title + util.name_file(url)
                    # Second assignment wins: link text + regularised name.
                    filename = h.text + util.regulate_filename(filename)
                    descp = ''
                    self.results.add(Item(url, filename, descp))
def login(driver, user, pwd):
    """Log into moonbbs through the forum login form with the given credentials."""
    util.get_url(driver, "http://www.moonbbs.com/member.php?mod=logging&action=login")
    driver.find_element_by_name("username").send_keys(user)
    driver.find_element_by_name("password").send_keys(pwd)
    driver.find_element_by_name("loginsubmit").click()
def getIds(driver, post):
    """Extract a fresh [thread_id, uid] pair from a post URL.

    Raises RuntimeError when the thread or the chosen user was already seen,
    or when the post page exposes fewer than two private-message links.
    """
    # The thread id sits between the first and second '-' of the post URL.
    start = post.index('-') + 1
    length = post[start:].index('-')
    thread_id = post[start:start + length]
    if thread_id in tidSet:
        raise RuntimeError("pick another one")
    tidSet.add(thread_id)
    util.get_url(driver, post)
    # Private-message links ("op=send" in the href) carry user ids.
    uidList = [link.get_attribute("href")
               for link in driver.find_elements_by_class_name("xi2")
               if link.get_attribute("href").find("op=send") != -1]
    if len(uidList) < 2:
        raise RuntimeError("pick another post")
    chosen = uidList[random.randrange(0, len(uidList))]
    uid = chosen[chosen.find('uid') + 4:]
    if uid in uidSet:
        raise RuntimeError("pick another one")
    uidSet.add(uid)
    return [thread_id, uid]
def liu_yan(driver, uid):
    """Leave a random emoticon comment (LiuYan) on a user's profile page."""
    util.get_url(driver, "http://www.moonbbs.com/space-uid-" + uid + ".html")
    emote = random.randrange(1, 6)
    driver.find_element_by_id("comment_message").send_keys("[em:" + str(emote) + ":]")
    driver.find_element_by_id("commentsubmit_btn").click()
    time.sleep(1)
def qian_dao(driver, donenum):
    """Visit each remaining forum page and click its QianDao (check-in) button.

    The first `donenum` forums are assumed already checked in and are skipped.
    """
    print("number of qiandao to be execute: " + str(15 - donenum))
    # Forum ids that carry a daily check-in button.
    forum_ids = ['46', '41', '52', '58', '69', '48', '65', '45', '57',
                 '68', '47', '56', '54', '76', '44']
    for forum in forum_ids[donenum:]:
        util.get_url(driver, "http://www.moonbbs.com/forum-" + forum + "-1.html")
        driver.find_element_by_id("qiandao").click()
        time.sleep(1)
def get_numbers(driver, name, date):
    """Return the credit number logged for rule `name` on `date`.

    NOTE: returns the cell text (a string) on a match, the int 0 otherwise.
    """
    numbers = 0
    url = ("http://www.moonbbs.com/home.php?mod=spacecp&ac=credit"
           "&op=log&suboperation=creditrulelog")
    util.get_url(driver, url)
    row = driver.find_element_by_link_text(name) \
                .find_element_by_xpath(".//ancestor::tr")
    try:
        cells = row.find_elements_by_tag_name("td")
        # Column 8 holds the timestamp, column 2 the number.
        if cells[8].text.startswith(date):
            numbers = cells[2].text
    except:  # best-effort: rows with an unexpected shape are ignored
        pass
    return numbers
def get_qiandao_num(driver, date):
    """Count the QianDao entries in the credit log dated `date`."""
    util.get_url(driver, "http://www.moonbbs.com/home.php?mod=spacecp&ac=credit&op=log")
    label = u"每日签到积分"  # "daily check-in credit"
    entries = driver.find_elements_by_link_text(label.encode("utf-8"))
    qiandao = 0
    for entry in entries:
        row = entry.find_element_by_xpath(".//ancestor::tr")
        try:
            cells = row.find_elements_by_tag_name("td")
            # Column 3 holds the timestamp of the log entry.
            if cells[3].text.startswith(date):
                qiandao += 1
        except:  # best-effort: skip rows without the expected cells
            pass
    return qiandao
def QiangGC(driver):
    """Try to join the GC giveaway thread if any slots remain.

    Reads the remaining-slot counter from the thread page, bails out when it
    is zero, otherwise clicks the join checkbox and the confirm button.
    """
    util.get_url(driver, "http://www.moonbbs.com/thread-161629-1-1.html")
    counters = driver.find_elements_by_xpath("//div/dl/li/em")
    # The remaining-slot count is the third character of the fifth <em>.
    cnt = int(counters[4].text[2])
    if cnt <= 0:
        # Fixed typo in the original message ("avaliable").
        print("No GC available: %d" % cnt)
        return
    driver.find_element_by_name("ijoin").click()
    time.sleep(1)
    button = driver.find_element_by_tag_name("button")
    print(button.text)
    button.click()
    print("get GC, please check if success")
def display_path(pathid):
    """Pretty-print the flow-analysis path identified by `pathid`."""
    path_data = get_url("flow-analysis/%s" % pathid)['response']
    print(json.dumps(path_data, indent=2))
    for element in path_data['networkElementsInfo']:
        kind = element['type']
        if kind in ("wired", "wireless"):
            # Host endpoints: only ip and type are shown.
            print("{ip}:{element_type}".format(ip=element["ip"], element_type=kind))
            continue

        def _iface_name(direction):
            # Physical interface name, or None when the key path is absent.
            try:
                return element[direction]["physicalInterface"]["name"]
            except KeyError:
                return None

        ingress = _iface_name("ingressInterface")
        egress = _iface_name("egressInterface")
        if ingress is not None:
            print(ingress)
        link_info = element.get('linkInformationSource', "")
        print("{name:>18}:{ip:12} {link_info}".format(
            name=element['name'], ip=element['ip'], link_info=link_info))
        if egress is not None:
            print(egress)
        print("")
def get_story_data(story):
    """Fetch (and cache) word-count data for a fimfiction story.

    story -- url to story, assumed relative to FIMFICTION.

    Returns a dict with keys:
      'name'           -- story title
      'story_wc'       -- word count reported for the whole story
      'per_chapter_wc' -- list of word counts, one per chapter
      'chapter_wc_sum' -- sum of the per-chapter counts
    """
    if story in url_cache:
        return url_cache[story]
    soup = bs4.BeautifulSoup(get_url(FIMFICTION + story), 'lxml')
    name = str(soup(class_="story_name")[0].string)
    counts = soup(class_="word_count")
    # The last word_count node is the story total; the rest are chapters.
    story_wc = int(deprettify(counts[-1].b.string))
    per_chapter = [int(deprettify(node.get_text())) for node in counts[:-1]]
    total = sum(per_chapter)
    if story_wc != total:
        print('WARNING: chapter word count ({}) did not match story word count ({}) for story {}'
              .format(story_wc, total, name))
    url_cache[story] = {
        'name': name,
        'story_wc': story_wc,
        'per_chapter_wc': per_chapter,
        'chapter_wc_sum': total
    }
    return url_cache[story]
def index():
    """Serve the landing page; on POST resolve the submitted URL for download.

    Removed the unreachable `return "at your service"` that sat after the
    POST branch's return, and the unused local `f = request.form`.
    """
    if request.method == "POST":
        url = get_url(request.form["url"])
        return render_template("dl.html", url=url)
    return render_template("index.html")
def get_date(driver):
    """Read the forum's current date from the profile page as YYYY-MM-DD."""
    util.get_url(driver, "http://www.moonbbs.com/home.php?mod=spacecp&ac=profile&op=info")
    stamps = driver.find_elements_by_class_name("mtn")
    # The last .mtn element holds the current time; chars 0-6 are a label.
    datetime_text = stamps[-1].text[7:]
    raw_date = datetime_text[:datetime_text.find(" ")]  # drop h:m:s part
    parts = raw_date.split('-', 3)
    year, month, day = parts[0], parts[1], parts[2]
    # Zero-pad single-digit month/day: 2015-9-1 -> 2015-09-01.
    if len(month) == 1:
        month = '0' + month
    if len(day) == 1:
        day = '0' + day
    return year + '-' + month + '-' + day
def initStats(self, extras=None):
    """Query the Web Server Status Page and parse its JSON response.

    @param extras: Include extra metrics, which can be computationally
                   more expensive. (Not used by this implementation.)
    """
    url = "%s://%s:%d/%s" % (self._proto, self._host, self._port, self._monpath)
    payload = util.get_url(url, self._user, self._password)
    self._statusDict = json.loads(payload)
def initStats(self):
    """Query and parse the Apache Web Server Status Page (?auto format).

    Populates self._statusDict with the key/value pairs from the status
    page and derives MaxWorkers from the Scoreboard length when present.
    """
    url = "%s://%s:%d/%s?auto" % (self._proto, self._host,
                                  self._port, self._statuspath)
    response = util.get_url(url, self._user, self._password)
    self._statusDict = {}
    for line in response.splitlines():
        mobj = re.match(r'(\S.*\S)\s*:\s*(\S+)\s*$', line)
        if mobj:
            self._statusDict[mobj.group(1)] = util.parse_value(mobj.group(2))
    # dict.has_key() is deprecated (removed in Python 3); use `in` instead.
    if 'Scoreboard' in self._statusDict:
        # One scoreboard slot per worker.
        self._statusDict['MaxWorkers'] = len(self._statusDict['Scoreboard'])
def test():
    """Render the AWS debug page with request/environment diagnostics."""
    # Prefer the proxy-supplied client address when present.
    client_ip = (flask.request.headers.get('X-Forwarded-For')
                 or flask.request.remote_addr)
    page = flask.render_template(
        'aws/debug.html',
        title='Test',
        agent=util.user_agent(),
        timestamp=datetime.utcnow(),
        get_url=util.get_url(),
        host=flask.request.host,
        country=util.geoip_country(),
        ip=client_ip,
    )
    return flask.make_response(page)
def prepare_post_list(driver):
    """Collect candidate post URLs from three forums and return 20 at random.

    Replaces three copy-pasted fetch blocks with one data-driven loop.  The
    index lists select specific rows of each forum's thread listing; the
    second list is shared by forums 58 and 65, matching the original code.
    """
    index_52 = [18, 22, 28, 29, 31, 33, 24, 34, 32, 36, 38, 40, 41, 42, 43, 44, 23]
    index_58_65 = [15, 22, 28, 29, 31, 20, 24, 18]
    pages = [
        ("http://www.moonbbs.com/forum-52-1.html", index_52),
        ("http://www.moonbbs.com/forum-58-1.html", index_58_65),
        ("http://www.moonbbs.com/forum-65-1.html", index_58_65),
    ]
    PostList = []
    for url, indices in pages:
        util.get_url(driver, url)
        postlist = driver.find_elements_by_class_name("xst")
        for i in indices:
            PostList.append(postlist[i].get_attribute("href"))
    random.shuffle(PostList)
    return PostList[:20]
def getStats(self):
    """Query the Web Server Status Page and return its metrics as a dict."""
    url = "%s://%s:%d/%s" % (self._proto, self._host, self._port, self._monpath)
    response = util.get_url(url, self._user, self._password)
    stats = {}
    for line in response.splitlines():
        match = re.match(r'([\w\s]+):\s+(\w+)$', line)
        if match is not None:
            stats[match.group(1)] = util.parse_value(match.group(2))
    return stats
def get_user_shelves(username, password):
    """Return the bookshelf ids of the user's non-empty library shelves."""
    (username, password) = possibly_req_auth(username, password)
    page = get_url(FIMFICTION + ('/user/{}/library'.format(username)))
    cards = bs4.BeautifulSoup(page)(class_='bookshelf-card')
    shelves = []
    for card in cards:
        anchor = card('a')[0]
        # The shelf size is shown in bold inside the first link.
        if int(anchor.b.string) == 0:
            continue
        match = patterns['bookshelfid'].match(str(anchor['href']))
        shelves.append(int(match.group(1)))
    return shelves
def initStats(self):
    """Query and parse the Web Server Status Page.

    Each response line has the form "section:key:value"; the result is
    stored as self._statusDict[section][key] = parsed value.
    """
    url = "%s://%s:%d/%s" % (self._proto, self._host, self._port, self._monpath)
    response = util.get_url(url, self._user, self._password)
    self._statusDict = {}
    for line in response.splitlines():
        cols = line.split(':')
        # dict.has_key() is deprecated (removed in Python 3); setdefault
        # creates the per-section dict on first sight.
        self._statusDict.setdefault(cols[0], {})[cols[1]] = util.parse_value(cols[2])
def _retrieve(self):
    """Fetch the Apache Tomcat Server Status Page in XML format.

    @return: ElementTree object of the Status Page XML.
    """
    url = "%s://%s:%d/manager/status" % (self._proto, self._host, self._port)
    params = {'XML': 'true'}
    response = util.get_url(url, self._user, self._password, params)
    return ElementTree.XML(response)
def initStats(self):
    """Query and parse the Nginx stub_status page."""
    url = "%s://%s:%d/%s" % (self._proto, self._host, self._port, self._statuspath)
    response = util.get_url(url, self._user, self._password)
    self._statusDict = {}
    for line in response.splitlines():
        counters = re.match(r'\s*(\d+)\s+(\d+)\s+(\d+)\s*$', line)
        if counters:
            # Bare three-number line: server accepts / handled / requests.
            for pos, name in enumerate(('accepts', 'handled', 'requests'), start=1):
                self._statusDict[name] = util.parse_value(counters.group(pos))
        else:
            # "key: value" pairs (e.g. Reading, Writing, Waiting).
            for (name, value) in re.findall(r'(\w+):\s*(\d+)', line):
                self._statusDict[name.lower()] = util.parse_value(value)
def initStats(self, extras=None):
    """Query and parse the Web Server Status Page.

    @param extras: Include extra metrics, which can be computationally more
                   expensive; when given it also updates self._extras, and
                   controls the `detail` level requested from the server.
    """
    if extras is not None:
        self._extras = extras
    detail = 1 if self._extras else 0
    url = "%s://%s:%d/%s?detail=%s" % (self._proto, self._host, self._port,
                                       self._monpath, detail)
    response = util.get_url(url, self._user, self._password)
    self._statusDict = {}
    for line in response.splitlines():
        cols = line.split(':')
        # dict.has_key() is deprecated (removed in Python 3); setdefault
        # creates the per-section dict on first sight.
        self._statusDict.setdefault(cols[0], {})[cols[1]] = util.parse_value(cols[2])
def list_single_device(ip):
    """Look up a single network device by its management IP address."""
    endpoint = "network-device/ip-address/%s" % ip
    return get_url(endpoint)
def get_hosts(ip):
    """List the hosts connected to the device with the given IP address."""
    endpoint = "host?connectedDeviceIp=%s" % ip
    return get_url(endpoint)
def delete_group(group_id):
    """Delete a canvas group by id and return the raw HTTP response."""
    endpoint = util.get_url() + "/api/v2/canvas/group/" + str(group_id)
    return requests.delete(endpoint, verify=util.get_verify(), auth=util.get_auth())
def generate_script(header_dict, details_dict, searchString=None):
    """Generate the ruby script for the passed request.

    :param dict header_dict: Header dictionary containing fields like
        'Host','User-Agent'.
    :param dict details_dict: Request specific details like body and method
        for the request.
    :param str searchString: String to search for in the response to the
        request. By default remains None.
    :return: None (the generated ruby source is printed to stdout)
    """
    method = details_dict['method'].strip()
    url = get_url(header_dict['Host'])
    path = details_dict['path']
    if path != "":
        url += path
    # GET-like methods carry their data URL-encoded on the request line.
    encoding_list = ['HEAD', 'OPTIONS', 'GET']
    if details_dict['data'] and (details_dict['method'].strip() in encoding_list):
        encoded_data = quote(details_dict['data'], '')
        url = url + encoded_data
    if not check_valid_url(url):
        print("Please enter a valid URL with correct domain name and try again ")
        sys.exit(-1)
    # NOTE(review): the exact line layout inside these ruby templates was
    # reconstructed from a whitespace-collapsed source -- confirm output
    # formatting against the original file.
    skeleton_code = """
require 'net/http'
require 'uri'
uri = URI('%s')""" % (url)
    if method == "GET":
        skeleton_code += """
req = Net::HTTP::Get.new(uri.request_uri)
%s
%s
%s
response = http.request(req)""" % (generate_request_headers(header_dict),
                                   is_proxy(details_dict), is_https(url))
    elif method == "POST":
        body = details_dict['data']
        skeleton_code += """
req = Net::HTTP::Post.new(uri.request_uri)
%s
req.body = %s\n
%s
%s
response = http.request(req)
""" % (generate_request_headers(header_dict), str(body),
       is_proxy(details_dict), is_https(url))
    if searchString:
        # Highlight matches of searchString in the response body (requires
        # the `colorize` gem on the ruby side; degrades with a hint if absent).
        skeleton_code += """
puts 'Response #{response.code} #{response.message}:'
begin
require 'colorize'
rescue LoadError
puts "search option will need colorize to work properly"
puts "You can install it by gem install colorize"
end
matched = response.body.match /%s/
original = response.body
if matched then
for i in 0..matched.length
original.gsub! /#{matched[i]}/, "#{matched[i]}".green
end
end
puts original
""" % (searchString)
    else:
        skeleton_code += """
puts "Response #{response.code} #{response.message}: #{response.body}"
"""
    print(skeleton_code)
def get_groupdevice(group_device_id):
    """Fetch a canvas groupdevice by id and return its decoded JSON body."""
    endpoint = util.get_url() + "/api/v2/canvas/groupdevice/" + str(group_device_id)
    response = requests.get(endpoint, verify=util.get_verify(), auth=util.get_auth())
    return response.json()
def da_zhao_hu(driver, uid):
    """Send a poke (DaZhaoHu) to the given user."""
    url = "http://www.moonbbs.com/home.php?mod=spacecp&ac=poke&op=send&uid=" + uid
    util.get_url(driver, url)
    driver.find_element_by_id("pokesubmit_btn").click()
    time.sleep(1)
def delete_toolboxitem(toolbox_item_id):
    """Delete a canvas toolbox item by id and return the raw HTTP response."""
    endpoint = util.get_url() + "/api/v2/canvas/toolboxitem/" + str(toolbox_item_id)
    return requests.delete(endpoint, verify=util.get_verify(), auth=util.get_auth())
def list_device(**kwargs):
    """List canvas devices, following pagination; kwargs become query filters.

    NOTE(review): the path argument is '' here while sibling list_* helpers
    pass '/api/v2/canvas/...'; confirm this is intended.
    """
    return util.unpaginate(util.get_url(), '', util.get_verify(),
                           util.get_auth(), kwargs)
def download_errata():
    """Download the latest CentOS errata archive from cefs.steve-meier.de."""
    response = get_url('https://cefs.steve-meier.de/errata.latest.xml.bz2')
    return download_url(response, 'Downloading CentOS Errata:')
def download_errata_checksum():
    """Download the CentOS errata SHA1 checksum from cefs.steve-meier.de."""
    response = get_url('https://cefs.steve-meier.de/errata.latest.sha1')
    return download_url(response, 'Downloading Errata Checksum:')
def get_host(ip=None, mac=None):
    """Look up a host by IP address or MAC address.

    Exactly one of ip/mac should be supplied; ip wins when both are given.

    Raises ValueError when neither identifier is provided — the original
    code crashed with UnboundLocalError in that case.
    """
    if ip is not None:
        url = "host?hostIp=%s" % ip
    elif mac is not None:
        url = "host?hostMac=%s" % mac
    else:
        raise ValueError("get_host() requires either ip or mac")
    return get_url(url)
def get_wlc(id):
    """Fetch a network device (e.g. a wireless LAN controller) by its id."""
    endpoint = "network-device/%s" % id
    return get_url(endpoint)
def show_video_list(handle, videos):
    """Add one directory item per video to the Kodi plugin listing."""
    for video in videos:
        item = create_video_list_item(video)
        target = get_url(action="video", id=video["_id"])
        xbmcplugin.addDirectoryItem(handle, target, item, False)
def get_json():
    """Resolve the URL from the posted JSON body and return the result as JSON."""
    payload = request.json
    return jsonify(get_url(payload["url"]))
def delete_device(device_id):
    """Delete a canvas device by id and return the raw HTTP response."""
    endpoint = util.get_url() + "/api/v2/canvas/device/" + str(device_id)
    return requests.delete(endpoint, verify=util.get_verify(), auth=util.get_auth())
def list_toolboxitem(**kwargs):
    """List canvas toolbox items, following pagination; kwargs filter the query."""
    return util.unpaginate(util.get_url(), '/api/v2/canvas/toolboxitem/',
                           util.get_verify(), util.get_auth(), kwargs)
def delete_stream(stream_id):
    """Delete a canvas stream by id and return the raw HTTP response."""
    endpoint = util.get_url() + "/api/v2/canvas/stream/" + str(stream_id)
    return requests.delete(endpoint, verify=util.get_verify(), auth=util.get_auth())
def get_toolbox(toolbox_id):
    """Fetch a canvas toolbox by id and return its decoded JSON body."""
    endpoint = util.get_url() + "/api/v2/canvas/toolbox/" + str(toolbox_id)
    response = requests.get(endpoint, verify=util.get_verify(), auth=util.get_auth())
    return response.json()
def fen_xiang(driver, thread_id):
    """Share (FenXiang) the given thread from the user's space page."""
    url = ("http://www.moonbbs.com/home.php?mod=spacecp&ac=share&type=thread&id="
           + thread_id)
    util.get_url(driver, url)
    driver.find_element_by_id("sharesubmit_btn").click()
    time.sleep(1)
def voting_done(servers):
    """Notify every server that voting finished by hitting its /add endpoint."""
    for host in servers:
        util.get_url(host + '/add')
def list_groupdevice(**kwargs):
    """List canvas groupdevices, following pagination; kwargs filter the query."""
    return util.unpaginate(util.get_url(), '/api/v2/canvas/groupdevice/',
                           util.get_verify(), util.get_auth(), kwargs)
def calculate_vote_result(servers):
    """Ask every server to compute its voting result."""
    for host in servers:
        util.get_url(host + '/compute_result')
def get_interface(interface_id):
    """Fetch a canvas interface by id and return its decoded JSON body."""
    endpoint = util.get_url() + "/api/v2/canvas/interface/" + str(interface_id)
    response = requests.get(endpoint, verify=util.get_verify(), auth=util.get_auth())
    return response.json()
def delete_topology(topology_id):
    """Delete a canvas topology by id and return the raw HTTP response."""
    endpoint = util.get_url() + "/api/v2/canvas/topology/" + str(topology_id)
    return requests.delete(endpoint, verify=util.get_verify(), auth=util.get_auth())
def ip_to_id(ip):
    """Resolve a device's management IP address to its network-device id."""
    device = get_url("network-device/ip-address/%s" % ip)
    return device['response']['id']
def get_link(link_id):
    """Fetch a canvas link by id and return its decoded JSON body."""
    endpoint = util.get_url() + "/api/v2/canvas/link/" + str(link_id)
    response = requests.get(endpoint, verify=util.get_verify(), auth=util.get_auth())
    return response.json()
def list_network_devices():
    """List all network devices known to the controller."""
    return get_url("network-device")
def get_interfaces(id):
    """List the interfaces of the network device with the given id."""
    endpoint = "interface/network-device/%s" % id
    return get_url(endpoint)
# http://www.pythonchallenge.com/pc/def/peak.html import pickle import sys import util soup = util.get_url_soup("http://www.pythonchallenge.com/pc/def/peak.html") data_file = soup.find("peakhell").attrs["src"] data = util.get_url("http://www.pythonchallenge.com/pc/def/" + data_file) obj = pickle.loads(data.encode("utf8")) for y in obj: for x in y: sys.stdout.write(x[0] * x[1]) sys.stdout.write("\n") util.print_url("channel")
def list_link(**kwargs):
    """List canvas links, following pagination; kwargs filter the query."""
    return util.unpaginate(util.get_url(), '/api/v2/canvas/link/',
                           util.get_verify(), util.get_auth(), kwargs)
def delete_process(process_id):
    """Delete a canvas process by id and return the raw HTTP response."""
    endpoint = util.get_url() + "/api/v2/canvas/process/" + str(process_id)
    return requests.delete(endpoint, verify=util.get_verify(), auth=util.get_auth())
def get_titanic_data():
    """Load every passenger row from the titanic_db database as a DataFrame."""
    connection = util.get_url("titanic_db")
    return pd.read_sql("SELECT * FROM passengers", connection)
def show_wlan():
    """Print all enterprise SSIDs configured on the DNA Center controller."""
    ssids = get_url('dna/intent/api/v1/enterprise-ssid')
    print(json.dumps(ssids, indent=2))
def get_data(query, db):
    """Run `query` against database `db` and return the result as a DataFrame."""
    connection = util.get_url(db)
    return pd.read_sql(query, connection)
def list_topologyinventory(**kwargs):
    """List canvas topology inventory entries, following pagination; kwargs filter the query."""
    return util.unpaginate(util.get_url(), '/api/v2/canvas/topologyinventory/',
                           util.get_verify(), util.get_auth(), kwargs)