def calculate_lunar_date(self):
    """Convert the lunar date typed in the UI (YYYYMMDD) to solar dates.

    Queries the Korean public-data LrsrCldInfoService for the entered lunar
    month/day across consecutive lunar years (as many repetitions as the
    user requested, capped at lunar year 2050) and appends one formatted
    result line per year to the result console list widget.
    """
    if len(self.user_date_input.text()) != 8:
        # FIX: the dialog's return value was stored in an unused variable
        # ('reply'); the call is made only for its side effect.
        QMessageBox.question(self, 'Error', "YYYYMMDD로 정확한 날짜를 입력해 주십시오.", QMessageBox.Close)
        return
    url = 'http://apis.data.go.kr/B090041/openapi/service/LrsrCldInfoService/getSolCalInfo'
    # Service key is already percent-encoded, so it is inserted verbatim.
    personal_key = 'UrEdHGIOCJr600RsRQ%2BnoL2wma3HT9JXmYn0uhHwmxImsnE2GHKYfn1RUfiEK9RIHXPJ3FBRnONxXMjplU3%2F7g%3D%3D'
    lunar_input = self.user_date_input.text()
    lunar_month = lunar_input[4:6]
    lunar_day = lunar_input[6:]
    repeat = int(self.user_repetition.text()) + 1
    # FIX: reuse one session for all requests; the original constructed a
    # fresh XMLSession on every loop iteration.
    session = XMLSession()
    for i in range(repeat):
        lunar_year = int(lunar_input[:4]) + i  # i is already an int
        # Limit search at lunar year 2050.
        if lunar_year > 2050:
            break
        query_params = '?lunYear={}&lunMonth={}&lunDay={}&ServiceKey={}'.format(
            lunar_year, lunar_month, lunar_day, personal_key)
        r = session.get(url + query_params)
        solar_day = r.xml.xpath('//solDay', first=True).text
        solar_month = r.xml.xpath('//solMonth', first=True).text
        solar_year = r.xml.xpath('//solYear', first=True).text
        solar_week = r.xml.xpath('//solWeek', first=True).text
        solar_date_text = ' {}년 {}월 {}일 {}요일 '.format(
            solar_year, solar_month, solar_day, solar_week)
        QListWidgetItem(solar_date_text, self.result_console)
def _retrieve_jenkins_jobs(jenkins_url):
    """
    Query the Jenkins server and return all jenkins jobs and the last run id
    :return: Array of JenkinsJobs (empty on HTTP error)
    """
    session = XMLSession()
    try:
        r = session.get(url=jenkins_url + JENKINS_ALL_RUNS_API)
        r.raise_for_status()
    except requests.exceptions.HTTPError as err:
        logging.error(err)
        # FIX: the original fell through after logging and attempted to
        # parse the error response body; return an empty job list instead.
        return []

    # Example payload entries:
    # <Project activity="Sleeping" lastBuildStatus="Success" lastBuildLabel="756"
    # webUrl="http://jenkins.mxnet-ci.amazon-ml.com/job/Broken_Link_Checker_Pipeline/"
    # name="Broken_Link_Checker_Pipeline" lastBuildTime="2018-11-30T01:12:59Z"/>
    #
    # <Project activity="Sleeping" lastBuildStatus="Success" lastBuildLabel="1"
    # webUrl="http://jenkins.mxnet-ci.amazon-ml.com/job/incubator-mxnet/job/PR-10008/"
    # name="incubator-mxnet » PR-10008" lastBuildTime="2018-03-06T18:19:44Z"/>
    return [
        JenkinsJob(jenkins_url=jenkins_url,
                   last_run_id=int(run.attrs['lastBuildLabel']),
                   job_url=run.attrs['webUrl'],
                   full_job_name=run.attrs['name'],
                   last_build_time=run.attrs['lastBuildTime'])
        for run in r.xml.xpath('//Project')
    ]
async def main():
    """Download every blob of an Azure Storage container concurrently.

    Command-line args: account, container, SAS token, destination directory.
    """
    # get command line args
    account = sys.argv[1]
    container = sys.argv[2]
    sas = sys.argv[3]
    destDir = sys.argv[4]

    # list files in container
    try:
        session = XMLSession()
        requestUrl = 'https://{}.blob.core.windows.net/{}?restype=container&comp=list&{}'.format(account, container, sas)
        r = session.get(requestUrl)
        blobNames = [e.text for e in r.xml.xpath('Blobs//Blob/Name')]
    except Exception as e:
        print(e, file=sys.stderr)
        # FIX: blobNames is undefined when the listing request fails; the
        # original continued and crashed with a NameError. Bail out instead.
        return

    print('Blobs: {}'.format(blobNames))

    # download the blobs
    # https://myaccount.blob.core.windows.net/mycontainer/myblob
    try:
        tasks = []
        async with ClientSession(connector=TCPConnector(ssl=False)) as session:
            for blob in blobNames:
                filePath = os.path.join(destDir, blob)
                requestUrl = 'https://{}.blob.core.windows.net/{}/{}?{}'.format(account, container, blob, sas)
                tasks.append(asyncio.ensure_future(grabber(requestUrl, filePath, session)))
            await asyncio.gather(*tasks)
    except Exception as e:
        print("oops")
        print(e, file=sys.stderr)
    except BaseException:
        # Equivalent to the original bare 'except:': only non-Exception
        # errors (KeyboardInterrupt, SystemExit, ...) reach this handler.
        print("Unexpected error:", sys.exc_info()[0])
def get_shelter_xml():
    """Fetch Taiwan's public shelter feed and return it as a list of dicts.

    Each dict carries name, shelterCode, address, capacity, lat, lng and an
    'indoors' flag encoded as the string 'true' or 'false'.
    """
    session = XMLSession()
    taiwan_shelter_url = "http://portal.emic.gov.tw/pub/DSP/OpenData/EEA/Shelter.xml"
    r = session.get(taiwan_shelter_url)
    shelters = r.xml.xpath('//shelterInfo')

    def _indoor_flag(node):
        # '是' means "yes" in the feed; callers expect literal string flags.
        return 'true' if node.attrs['isIndoor'] == '是' else 'false'

    return [
        {
            "name": node.attrs['name'],
            "shelterCode": node.attrs['shelterCode'],
            "address": node.attrs['address'],
            "capacity": node.attrs['peopleno'],
            "lat": node.attrs['lat'],
            "lng": node.attrs['lon'],
            "indoors": _indoor_flag(node),
        }
        for node in shelters
    ]
def test_xml_assertion():
    """Exercise XPath access against NASA's image-of-the-day RSS feed."""
    response = XMLSession().get('https://www.nasa.gov/rss/dyn/lg_image_of_the_day.rss')
    # Links discovered in the XML document.
    print(response.xml.links)
    # First <item> node, selected via XPath, used as the assertion target.
    first_item = response.xml.xpath('//item', first=True)
    print(first_item.text)
def get_latest_episode_download_url(xmlUrl):
    """Return a one-element list holding the enclosure URL of the feed's
    first <item> (the latest episode).

    Raises IndexError if the feed contains no items (same as the original).
    """
    session = XMLSession()
    r = session.get(xmlUrl)
    items = r.xml.xpath('//item')
    # FIX: the original looped over every item, built a full list of URLs,
    # and then discarded all but the first; only the first item is needed.
    latest = items[0]
    return [latest.xpath('//enclosure', first=True).attrs['url']]
def get_latest_episode_title(xmlUrl):
    """Return a one-element list holding the title of the feed's first <item>.

    Raises IndexError if the feed contains no items (same as the original).
    """
    session = XMLSession()
    r = session.get(xmlUrl)
    items = r.xml.xpath('//item')
    # FIX: the original collected a title for every item and then returned
    # only the first; skip the needless full traversal.
    latest = items[0]
    return [latest.xpath('//title', first=True).text]
def get_show_title(xmlUrl):
    """Return a one-element list holding the feed's channel title.

    Raises IndexError if the feed contains no <channel> (same as original).
    """
    session = XMLSession()
    r = session.get(xmlUrl)
    channels = r.xml.xpath('//channel')
    # FIX: the original iterated over all channels building a list only to
    # return its first element; use the first channel directly.
    first_channel = channels[0]
    return [first_channel.xpath('//title', first=True).text]
def _request_relays_names(self):
    '''
    Requests all 56 relays custom names configured in IPX800 UI.

    Returns False (without issuing a request) when no relay configuration
    is loaded; otherwise updates self.relays['R<n>'].name in place for
    every enabled relay. Non-200 responses are silently ignored, as before.
    '''
    # FIX: compare against None with 'is', not '==' (PEP 8).
    if self._relays_config is None:
        return False
    session = XMLSession()
    res = session.get(self._names_xml_url)
    if res.status_code == 200:
        for r in self._relays_config.enabled_relays:
            relay_name = res.xml.xpath('//response/output%d' % r, first=True).text
            self.relays['R%d' % r].name = relay_name
def download_text(url):
    """Yield (link, description) pairs for each <item> in the feed at *url*.

    Yields nothing when *url* is falsy or the response status is an error
    (requests' truthiness: a Response is falsy for status >= 400).
    """
    if not url:
        return
    session = XMLSession()
    try:
        response = session.get(url)
    finally:
        # FIX: the original guarded with 'if session:', but the session is
        # always created by this point; close it unconditionally.
        session.close()
    if response:
        for item in response.xml.xpath('//item'):
            # FIX: local variable typo 'descritpion' corrected.
            description = item.xpath('//description')[0].text
            link = item.xpath('//link')[0].text
            yield link, description
def get_xml(url):
    """GET *url* through the configured proxies and return the parsed XML.

    Returns an xml.etree.ElementTree.Element parsed from the raw response
    body. Retries connection failures up to 3 times with backoff.
    """
    session = XMLSession()
    session.proxies = {'http': HTTP_PROXY, 'https': HTTPS_PROXY}
    retry = Retry(connect=3, backoff_factor=1)
    adapter = HTTPAdapter(max_retries=retry)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) '
                      'AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8',
        'Accept-Encoding': 'gzip, deflate',
        'Accept': '*/*',
        'Connection': 'keep-alive'}
    # FIX: the retry adapter was mounted only for 'http://', so https
    # requests (for which a proxy is also configured) got no retries.
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    try:
        # NOTE(review): verify=False disables TLS certificate validation;
        # kept from the original, but worth revisiting.
        r = session.get(url, headers=headers, verify=False)
    finally:
        # FIX: close the session even when the request raises.
        session.close()
    return ET.fromstring(r.content)
def retrieve_names(self):
    '''
    Requests custom names configured in IPX800 UI and stores each name on
    the matching device object (relays, inputs, analogs, virtual analogs,
    counters, virtual inputs, virtual outputs).
    '''
    session = XMLSession()
    res = session.get(self._names_xml_url)
    if res.status_code != 200:
        print("No response when asking for names")
        return
    # Each device list is keyed in the response XML by a per-type tag plus
    # the device's 1-based id; the seven copy-paste loops of the original
    # collapse into one data-driven loop (same order, same behavior).
    groups = (
        (self.relays, 'output'),
        (self.inputs, 'input'),
        (self.analogs, 'analog'),
        (self.virt_analogs, 'analogVirt'),
        (self.counters, 'compt'),
        (self.virt_inputs, 'inputVirt'),
        (self.virt_outputs, 'outputVirt'),
    )
    for devices, tag in groups:
        for dev in devices:
            idx = int(dev.id)
            _name = res.xml.xpath('//response/%s%d' % (tag, idx), first=True).text
            devices[idx - 1].name = _name
def test_xml(self):
    """The first category name returned by the ceshiren API is the expected one."""
    response = XMLSession().get("https://ceshiren.com/categories.json")
    names = response.xml.xpath('..//name')
    assert names[0] == "开源项目"
def test_xpath(self):
    """Fetch the categories page and print its raw text body."""
    response = XMLSession().get('http://home.testing-studio.com/categories')
    body = response.text
    print(body)
def assistant(command):
    """Dispatch a spoken/typed *command* string to the matching action.

    Handles: opening websites, moodle, greetings, shutdown, jokes, news
    headlines, current weather, time, Wikipedia summaries and launching
    macOS applications. Each branch responds via logicResponse or print.
    """
    # FIX: dropped the vacuous 'and 1' from the original condition and the
    # dead 'else: pass'.
    if 'open' in command and 'moodle' not in command:
        regex = re.search('open (.+)', command)
        if regex:
            domain = regex.group(1)
            url = 'https://www.' + domain + '.com'
            webbrowser.open(url)
            logicResponse("Opening " + domain)
    elif 'open moodle' in command:
        webbrowser.open('https://moodle.iiit.ac.in/my/')
        logicResponse("Opening moodle")
    elif 'hello' in command or 'hey' in command:
        day_time = int(strftime('%H'))
        if day_time < 12:
            logicResponse('Hello Sudhansh. Good morning!')
        elif 12 <= day_time < 18:
            logicResponse('Hello Sudhansh. Good afternoon')
        else:
            logicResponse('Hello Sudhansh. Good evening')
    elif 'shutdown' in command or 'bye' in command:
        logicResponse('Bye bye. Have a nice day')
        sys.exit()
    elif 'joke' in command:
        try:
            url = requests.get("https://icanhazdadjoke.com")
            html = url.text
            url.close()
            soup = BeautifulSoup(html, 'html.parser')
            joke = soup.find("p", {'class': 'subtitle'}).string.strip()
            logicResponse(joke)
        except Exception:
            print("Oops! Out of jokes!")
    elif 'news' in command or "headlines" in command:
        try:
            news_url = "https://news.google.com/news/rss"
            session = XMLSession()
            r = session.get(news_url)
            news_list = r.xml.xpath("//item/title")
            for news in news_list[:15]:
                print(news.text)
        except Exception as e:
            print(e)
    elif 'current weather' in command:
        reg_ex = re.search('current weather in (.*)', command)
        if reg_ex:
            city = reg_ex.group(1)
            owm = OWM(API_key='ab0d5e80e8dafb2cb81fa9e82431c1fa')
            obs = owm.weather_at_place(city)
            w = obs.get_weather()
            k = w.get_status()
            x = w.get_temperature(unit='celsius')
            # FIX: the response string literal was broken across physical
            # lines in the original (a syntax error as written); rebuilt as
            # one valid literal via implicit concatenation.
            logicResponse(
                'Current weather in %s is %s. The maximum temperature is '
                '%0.2f and the minimum temperature is %0.2f degree celcius'
                % (city, k, x['temp_max'], x['temp_min']))
    elif 'time' in command:
        import datetime
        now = datetime.datetime.now()
        logicResponse('Current time is %d hours %d minutes' % (now.hour, now.minute))
    elif 'tell me about' in command:
        reg_ex = re.search('tell me about (.*)', command)
        try:
            if reg_ex:
                print(wikipedia.summary(reg_ex.group(1)))
        except Exception as e:
            logicResponse(e)
    elif 'launch' in command:
        regex = re.search('launch (.*)', command)
        if regex:
            appname = regex.group(1)
            app = appname + '.app'
            subprocess.Popen(["open", "/Applications/" + app], stdout=subprocess.PIPE)
            logicResponse("Launching " + appname)
        else:
            logicResponse("App not found")
#add async BASE_URL = 'https://ia800907.us.archive.org/24/items/video_annotations_test/' #'https://ia800901.us.archive.org/13/items/Jopik_YT_Annotation_Collection/' #update this to the actual item to be used LAST_UPDATE_TIME = -1 if BASE_URL.endswith('/'): BASE_URL = BASE_URL[:-1] import requests dlsession = requests.session() from requests_xml import XMLSession session = XMLSession() retrieval_url = BASE_URL + '/' + BASE_URL.split('/')[-1] + '_files.xml' r = session.get(retrieval_url) item = r.xml.xpath('//files//file', first=False) iteminfo = [] MTIME_LIST = [] from filehash import FileHash for element in item: try: mtime = int(element.xpath('//mtime', first=True).text) except: mtime = 0 try: crc32 = str(element.xpath('//crc32', first=True).text).upper().lstrip( '0') #try to match the format returned by FileHash
def test_xlm():
    """Smoke test: fetch NASA's image-of-the-day feed and print its first item."""
    session = XMLSession()
    # FIX: the URL contained literal spaces ('lgimage_of the_ day.rss') and
    # could never resolve; use the feed's real path, matching
    # test_xml_assertion elsewhere in this file.
    r = session.get('https://www.nasa.gov/rss/dyn/lg_image_of_the_day.rss')
    r.xml.links  # NOTE(review): no-op attribute access, kept from original
    item = r.xml.xpath('//item', first=True)
    print(item.text)