def __init__(self, channel):
    # Per-channel question state: the question in play, the previous one,
    # a running count, an optional tag, and an API client bound to the channel.
    self.channel = channel
    self.current_q = None
    self.last_q = None
    self.qcount = 0
    self.tag = None
    self.api = ApiClient(channel.id)
def put_product():
    RESOURCE = '/Products/'
    try:
        guid = str(uuid.uuid4())
        product = Product(None)
        product.Description = 'New product from API'
        product.SearchName = 'New API product'
        product.Comment = 'This product is created by the Python API client with id: ' + guid
        product.Price = 12.50
        client = ApiClient(URI, HEADERS, USERNAME, PASSWORD)
        response = client.PUT(RESOURCE + guid, product.json())
        if response.status_code == 200 and response.is_json:
            pprint.pprint(response.json)
            product = Product(response.json)
            print("{0:38} {1:40} {2:20} {3}".format(
                product.id, product.Description[:40],
                product.SearchName[:20], product.Price))
        else:
            print("response error: %d - %s" % (response.status_code, response.text))
    except ValueError:
        print("Unexpected data: ", response.text)
    except Exception:
        print("Unexpected error:", sys.exc_info()[0])
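# The put_product/get_products snippets assume a small Product wrapper that
# maps attributes to JSON. A minimal sketch of what such a wrapper might look
# like; the field list and the json()/constructor contract are assumptions
# inferred from how the snippets use it, not the actual client library.
import json

class Product(object):
    FIELDS = ('id', 'Description', 'SearchName', 'Comment', 'Price')

    def __init__(self, data):
        # Accept either None (a blank product) or a decoded JSON dict.
        data = data or {}
        for field in self.FIELDS:
            setattr(self, field, data.get(field))

    def json(self):
        # Serialize only the fields that have been set.
        return json.dumps({f: getattr(self, f) for f in self.FIELDS
                           if getattr(self, f) is not None})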
def do_login(config=None):
    client = ApiClient()
    if not config:
        config = get_config()
    try:
        client.application_id = config.get('application', 'id')
        client.application_token = config.get('application', 'token')
    except (NoSectionError, NoOptionError):
        raise PoodledoError(
            "Application ID or token not specified in %s.\n"
            "Generate such at 'https://api.toodledo.com/2/account/doc_register.php?si=1'. Dying." % CONFIGFILE)
    try:
        client._key = config.get('session', 'key')
        client.getAccountInfo()
    except (NoSectionError, NoOptionError, ToodledoError):
        # cached session key either wasn't there or wasn't good; get a new one and cache it
        client._key = None
        (username, password) = read_or_get_creds(config)
        try:
            client.authenticate(username, password)
        except ToodledoError:
            print("No login credentials were successful; please try again.")
            raise
        if not config.has_section('session'):
            config.add_section('session')
        config.set('session', 'key', client.key)
        store_config(config)
    return client
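# Usage sketch for do_login (hypothetical, but it uses only calls that appear
# above): the first run authenticates and caches the session key in the
# config file; later runs reuse the cached key transparently.
client = do_login()
print(client.getAccountInfo())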
def get_products():
    resource = '/Products'
    try:
        client = ApiClient(URI, HEADERS, USERNAME, PASSWORD)
        while True:
            response = client.GET(resource)
            if response.status_code == 200 and response.is_json:
                products = Products(response.json)
                for product in products:
                    print("{0:38} {1:40} {2:20} {3}".format(
                        product.id,
                        product.Description[:40] if product.Description is not None else '',
                        product.SearchName[:20] if product.SearchName is not None else '',
                        product.Price))
            else:
                print("response error: %d - %s" % (response.status_code, response.text))
            # paging: follow the next-page link until it runs out
            resource = response.next_link
            if resource is None or response.status_code != 200:
                break
    except ValueError:
        print("Unexpected data: ", response.text)
    except Exception:
        print("Unexpected error:", sys.exc_info()[0])
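# The manual while/next_link loop above is a recurring pattern that could be
# factored into a generator. A hedged sketch, assuming the same response
# object shape used above (status_code, is_json, json, next_link):
def iter_pages(client, resource):
    # Yield the decoded JSON of each successful page, following next_link
    # until the API stops providing one.
    while resource is not None:
        response = client.GET(resource)
        if response.status_code != 200:
            break
        if response.is_json:
            yield response.json
        resource = response.next_link

# e.g.: for page in iter_pages(ApiClient(URI, HEADERS, USERNAME, PASSWORD), '/Products'): ...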
def __init__(self, settings: Settings):
    self.api_key = settings['botKey']
    self.logger = logging.getLogger("bot")
    self.helper = BotHelper()
    self.api = ApiClient('ilya', settings['phone'], settings['api_id'], settings['api_hash'])
    self.api.reconnect()
    self.updater = Updater(self.api_key)
    # Get the dispatcher to register handlers
    self.dispatcher = self.updater.dispatcher
    # Add conversation handler
    conv_handler = ConversationHandler(
        entry_points=[CommandHandler('start', self.start)],
        states={self.CODE: [MessageHandler(Filters.forwarded, self.code)]},
        fallbacks=[CommandHandler('cancel', self.cancel)])
    self.dispatcher.add_handler(CommandHandler('stats', self.stats))
    self.dispatcher.add_handler(conv_handler)
    # log all errors
    self.dispatcher.add_error_handler(self.error_callback)
    # Start the Bot
    self.updater.start_polling()
    self.logger.info("BotStarted")
def __init__(self):
    self.showWindows = None
    self.parseArguments()
    self.settings = SettingsManager()
    self.motiondetector = MotionDetector(threshold=0.2, showWindows=self.showWindows)
    self.apiClient = ApiClient()
    self.motion = False
    self.timer = None
    self.onTimer = Timer(7.0, self.resetOnTimer)
    self.cameraId = None
    self.motionScore = 0.0
    self.initializeApp()
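# Note that threading.Timer is single-shot: once it fires or is cancelled it
# cannot be restarted. A hypothetical sketch of the resetOnTimer callback and
# a restart helper; only the names onTimer/resetOnTimer appear in the
# original, the bodies below are assumptions.
def resetOnTimer(self):
    # Fired 7 s after the last motion event: clear the motion state.
    self.motion = False
    self.motionScore = 0.0

def restartOnTimer(self):
    # To "reset" the countdown, cancel the old timer and create a fresh one.
    self.onTimer.cancel()
    self.onTimer = Timer(7.0, self.resetOnTimer)
    self.onTimer.start()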
def get_userinfo():
    RESOURCE = '/UserInfo?$expand=*'
    try:
        client = ApiClient(URI, HEADERS, USERNAME, PASSWORD)
        response = client.GET(RESOURCE)
        if response.status_code == 200 and response.is_json:
            pprint.pprint(response.json)
        else:
            print("response error: %d - %s" % (response.status_code, response.text))
    except ValueError:
        print("Unexpected data: ", response.text)
    except Exception:
        print("Unexpected error:", sys.exc_info()[0])
def getJobList(listCollectionName):
    print(" --- get daily job by language and top cities ---")
    lang_names = jobaly.utils.loadArrayFromFile("lang_list.txt")
    cities = jobaly.utils.loadArrayFromFile("state_list.txt")
    # lang_names = jobaly.utils.loadArrayFromFile("test_lang_list.txt")
    # cities = jobaly.utils.loadArrayFromFile("test_loc_list.txt")
    indeedClient = ApiClient({"fromage": "1"})
    # client.getPage(0)
    dbClient = DbClient('localhost', 27017, "jobaly_daily")
    collection = dbClient.getCollection(listCollectionName)
    for city in cities:
        indeedClient.setLocation(city)
        for lang in lang_names:
            q = indeedClient.buildQuery(lang)
            print("----- process location %s with language %s -----" % (city, lang))
            indeedClient.processQuery(collection, "q", q)
def getByLang():
    print(" --- get job by language and companies ---")
    collectionName = "job_lang_top_corps"
    param = {"q": "software engineer", "fromage": "30"}
    lang_names = utils.loadArrayFromFile("pro_langs.txt")
    corps_names = utils.loadArrayFromFile("topcorps.txt")
    indeedClient = ApiClient(param)
    # client.getPage(0)
    dbClient = DbClient('localhost', 27017, "jobaly")
    collection = dbClient.getCollection(collectionName)
    for corp in corps_names:
        for lang in lang_names:
            q = indeedClient.buildQuery(lang, {"company": corp})
            print("----- process corp %s with language %s -----" % (corp, lang))
            indeedClient.processQuery(collection, "q", q)
def scrape(self, chamber, term):
    client = ApiClient(self)
    t = next((item for item in self.metadata["terms"] if item["name"] == term), None)
    session = max(t["sessions"])
    base_url = "http://iga.in.gov/legislative"
    api_base_url = "https://api.iga.in.gov"
    chamber_name = "Senate" if chamber == "upper" else "House"
    r = client.get("chamber_legislators", session=session, chamber=chamber_name)
    all_pages = client.unpaginate(r)
    for leg in all_pages:
        firstname = leg["firstName"]
        lastname = leg["lastName"]
        party = leg["party"]
        link = leg["link"]
        api_link = api_base_url + link
        html_link = base_url + link.replace("legislators/", "legislators/legislator_")
        try:
            html = get_with_increasing_timeout(self, html_link, fail=True, kwargs={"verify": False})
        except scrapelib.HTTPError:
            self.logger.warning("Legislator's page is not available.")
            continue
        doc = lxml.html.fromstring(html.text)
        doc.make_links_absolute(html_link)
        address, phone = doc.xpath("//address")
        address = address.text_content().strip()
        address = "\n".join([l.strip() for l in address.split("\n")])
        phone = phone.text_content().strip()
        district = doc.xpath("//span[@class='district-heading']")[0].text.lower().replace("district", "").strip()
        image_link = base_url + link.replace("legislators/", "portraits/legislator_")
        legislator = Legislator(term, chamber, district,
                                " ".join([firstname, lastname]),
                                party=party, photo_url=image_link)
        legislator.add_office('capitol', 'Capitol Office', address=address, phone=phone)
        legislator.add_source(html_link)
        legislator.add_source(api_link)
        self.save_legislator(legislator)
def scrape(self, term, chambers):
    t = next((item for item in self.metadata["terms"] if item["name"] == term), None)
    session = max(t["sessions"])
    subcomms = self.get_subcommittee_info(session)
    api_base_url = "https://api.iga.in.gov"
    html_base_url = "http://iga.in.gov/legislative/{}/committees/".format(session)
    client = ApiClient(self)
    r = client.get("committees", session=session)
    all_pages = client.unpaginate(r)
    for comm_info in all_pages:
        # this is kind of roundabout, but needed in order
        # to take advantage of all of our machinery to make
        # sure we're not overloading their api
        comm_link = comm_info["link"]
        comm_name = comm_link.split("/")[-1]
        if "withdrawn" in comm_name or "conference" in comm_name:
            continue
        comm_json = client.get("committee", committee_link=comm_link[1:])
        try:
            chamber = comm_json["chamber"]["name"]
        except KeyError:
            chamber = 'joint'
        else:
            if chamber == "Senate":
                chamber = "upper"
            elif chamber == "House":
                chamber = "lower"
            else:
                raise AssertionError("Unknown committee chamber {}".format(chamber))
        name = comm_json["name"]
        try:
            owning_comm = subcomms[name]
        except KeyError:
            name = name.replace("Statutory Committee on", "").strip()
            comm = Committee(chamber, name)
        else:
            name = name.replace("Statutory Committee on", "").replace("Subcommittee", "").strip()
            comm = Committee(chamber, owning_comm, subcommittee=name)
        chair = self.process_special_members(comm, comm_json, "chair")
        vicechair = self.process_special_members(comm, comm_json, "viceChair")
        ranking = self.process_special_members(comm, comm_json, "rankingMinMember")
        # leadership is also listed in membership
        # so we have to make sure we haven't seen them yet
        comm_members = [m for m in [chair, vicechair, ranking] if m]
        for mem in comm_json["members"]:
            mem_name = mem["firstName"] + " " + mem["lastName"]
            if mem_name not in comm_members:
                comm_members.append(mem_name)
                comm.add_member(mem_name)
        api_source = api_base_url + comm_link
        if comm_name[:10] == "committee_":
            html_source = html_base_url + comm_name[10:]
        comm.add_source(html_source)
        comm.add_source(api_source)
        self.save_committee(comm)
# https://github.com/Rapptz/discord.py/blob/async/examples/reply.py

# import requests, json
# def checkanswer(q, text):
#     payload = {"ans": text}
#     target = "http://triviastorm.net/api/checkanswer/%s/" % (q)
#     r = requests.get(target, params=payload)
#     data = r.json()
#     return data['correct']
# print(checkanswer('17444', 'lich king'))

from apiclient import ApiClient

c = ApiClient("test", token="59d6e8551296d27034a65a05c44c82a2ddb0fc05")
# print(c.getq())
# print(c.getq("movies"))
# print(c.checkanswer(5344, "groundhog day"))
# print(c.checkanswer(5344, "ghostbusters"))
# print(c.getanswer(5344))
q = c.askq("test4")
print(q)
print(c.submitanswer(q["id"], "simon pegg", "myusername22"))
print(c.endq())
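# The link at the top points at discord.py's async-branch reply example.
# Wiring the trivia client into a bot could look roughly like the sketch
# below, written against that old async-branch API; the command name and
# message formatting are assumptions.
import discord

client = discord.Client()
trivia = ApiClient("test", token="59d6e8551296d27034a65a05c44c82a2ddb0fc05")

@client.event
async def on_message(message):
    # Start a question in this channel when someone types "!ask".
    if message.content.startswith('!ask'):
        q = trivia.askq(message.channel.name)
        await client.send_message(message.channel, str(q))

client.run('bot-token')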
def scrape(self, session, chambers):
    api_base_url = "https://api.iga.in.gov"
    proxy = {"url": "http://in.proxy.openstates.org"}
    # ah, indiana. it's really, really hard to find
    # pdfs in their web interface. Super easy with
    # the api, but a key needs to be passed
    # in the headers. To make these documents
    # viewable to the public and our scrapers,
    # sunlight's put up a proxy service at this link
    # using our api key for pdf document access.
    client = ApiClient(self)
    r = client.get("bills", session=session)
    all_pages = client.unpaginate(r)
    for b in all_pages:
        bill_id = b["billName"]
        for idx, char in enumerate(bill_id):
            try:
                int(char)
            except ValueError:
                continue
            disp_bill_id = bill_id[:idx] + " " + str(int(bill_id[idx:]))
            break
        bill_link = b["link"]
        api_source = api_base_url + bill_link
        bill_json = client.get("bill", session=session, bill_id=bill_id.lower())
        title = bill_json["title"]
        if title == "NoneNone":
            title = None
        # sometimes title is blank
        # if that's the case, we can check to see if
        # the latest version has a short description
        if not title:
            title = bill_json["latestVersion"]["shortDescription"]
        # and if that doesn't work, use the bill_id but throw a warning
        if not title:
            title = bill_id
            self.logger.warning("Bill is missing a title, using bill id instead.")
        original_chamber = "lower" if bill_json["originChamber"].lower() == "house" else "upper"
        bill = Bill(session, original_chamber, disp_bill_id, title)
        bill.add_source(self.make_html_source(session, bill_id))
        bill.add_source(api_source)

        # sponsors
        positions = {"Representative": "lower", "Senator": "upper"}
        for s in bill_json["authors"]:
            bill.add_sponsor("primary", self.get_name(s),
                             chamber=positions[s["position_title"]],
                             official_type="author")
        for s in bill_json["coauthors"]:
            bill.add_sponsor("cosponsor", self.get_name(s),
                             chamber=positions[s["position_title"]],
                             official_type="coauthor")
        for s in bill_json["sponsors"]:
            bill.add_sponsor("primary", self.get_name(s),
                             chamber=positions[s["position_title"]],
                             official_type="sponsor")
        for s in bill_json["cosponsors"]:
            bill.add_sponsor("cosponsor", self.get_name(s),
                             chamber=positions[s["position_title"]],
                             official_type="cosponsor")

        # actions
        action_link = bill_json["actions"]["link"]
        api_source = api_base_url + action_link
        try:
            actions = client.get("bill_actions", session=session, bill_id=bill_id.lower())
        except scrapelib.HTTPError:
            self.logger.warning("Could not find bill actions page")
            actions = {"items": []}
        for a in actions["items"]:
            action_desc = a["description"]
            if "governor" in action_desc.lower():
                action_chamber = "executive"
            elif a["chamber"]["name"].lower() == "house":
                action_chamber = "lower"
            else:
                action_chamber = "upper"
            date = a["date"]
            if not date:
                self.logger.warning("Action has no date, skipping")
                continue
            date = datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%S")
            action_type = []
            d = action_desc.lower()
            committee = None
            reading = False
            if "first reading" in d:
                action_type.append("bill:reading:1")
                reading = True
            if "second reading" in d or "reread second time" in d:
                action_type.append("bill:reading:2")
                reading = True
            if "third reading" in d or "reread third time" in d:
                action_type.append("bill:reading:3")
                if "passed" in d:
                    action_type.append("bill:passed")
                if "failed" in d:
                    action_type.append("bill:failed")
                reading = True
            if "adopted" in d and reading:
                action_type.append("bill:passed")
            if (("referred" in d and "committee on" in d)
                    or ("reassigned" in d and "committee on" in d)):
                committee = d.split("committee on")[-1].strip()
                action_type.append("committee:referred")
            if "committee report" in d:
                if "pass" in d:
                    action_type.append("committee:passed")
                if "fail" in d:
                    action_type.append("committee:failed")
            if "amendment" in d and "without amendment" not in d:
                if "pass" in d or "prevail" in d or "adopted" in d:
                    action_type.append("amendment:passed")
                if "fail" in d or "out of order" in d:
                    action_type.append("amendment:failed")
                if "withdraw" in d:
                    action_type.append("amendment:withdrawn")
            if "signed by the governor" in d:
                action_type.append("governor:signed")
            if ("not substituted for majority report" in d
                    or "returned to the house" in d
                    or "referred to the senate" in d
                    or "referred to the house" in d
                    or "technical corrections" in d
                    or "signed by the president" in d
                    or "signed by the speaker" in d
                    or "authored" in d
                    or "sponsor" in d
                    or "coauthor" in d
                    or ("rule" in d and "suspended" in d)
                    or "removed as author" in d
                    or ("added as" in d and "author" in d)
                    or "public law" in d):
                if len(action_type) == 0:
                    action_type.append("other")
            if len(action_type) == 0:
                # calling it other and moving on with a warning
                self.logger.warning("Could not recognize an action in '{}'".format(action_desc))
                action_type = ["other"]
            elif committee:
                bill.add_action(action_chamber, action_desc, date,
                                type=action_type, committees=committee)
            else:
                bill.add_action(action_chamber, action_desc, date, type=action_type)

        # subjects
        subjects = [s["entry"] for s in bill_json["latestVersion"]["subjects"]]
        bill["subjects"] = subjects

        # versions and votes
        for version in bill_json["versions"][::-1]:
            version_json = client.get("bill_version", session=session,
                                      bill_id=version["billName"],
                                      version_id=version["printVersionName"])
            self.deal_with_version(version_json, bill, proxy)
        self.save_bill(bill)