def main(wf):
    parser = argparse.ArgumentParser()
    parser.add_argument('--setkey', dest='apikey', nargs='?', default=None)
    parser.add_argument('query', nargs='?', default=None)
    args = parser.parse_args(wf.args)

    if args.apikey:
        wf.save_password('slack_api_key', args.apikey)
        return 0

    try:
        api_key = wf.get_password('slack_api_key')
    except PasswordNotFound:
        wf.add_item('No API key set.', 'Please run slt', valid=False)
        wf.send_feedback()
        return 0

    if len(wf.args):
        query = wf.args[0]
    else:
        query = None

    # Guard against a missing query; the original concatenated `query`
    # unconditionally, which raises TypeError when it is None.
    if query:
        web.get('https://slack.com/api/presence.set?token=' + api_key +
                '&presence=' + query + '&pretty=1')
    wf.send_feedback()
def slack_list(keys):
    wf = Workflow()
    slack_search = []
    for key in keys:
        api_key = str(key)
        slack_auth = web.get('https://slack.com/api/auth.test?token=' +
                             api_key + '&pretty=1').json()
        if slack_auth['ok'] is False:
            wf.add_item(title='Authentication failed. Check your API key',
                        valid=False)
            wf.send_feedback()
            break
        else:
            slack_channels = web.get('https://slack.com/api/channels.list?token=' +
                                     api_key + '&exclude_archived=1&pretty=1').json()
            slack_users = web.get('https://slack.com/api/users.list?token=' +
                                  api_key + '&pretty=1').json()
            slack_groups = web.get('https://slack.com/api/groups.list?token=' +
                                   api_key + '&pretty=1').json()
            for channels in slack_channels['channels']:
                slack_search.append({'name': channels['name'],
                                     'team': slack_auth['team'],
                                     'team_id': slack_auth['team_id'],
                                     'id': channels['id'],
                                     'type': 'channel',
                                     'api_key': api_key})
            for users in slack_users['members']:
                slack_search.append({'name': users['name'],
                                     'team': slack_auth['team'],
                                     'team_id': slack_auth['team_id'],
                                     'id': users['id'],
                                     'type': 'user',
                                     'api_key': api_key})
                slack_search.append({'name': users['profile']['real_name'],
                                     'team': slack_auth['team'],
                                     'team_id': slack_auth['team_id'],
                                     'id': users['id'],
                                     'type': 'user',
                                     'api_key': api_key})
            for groups in slack_groups['groups']:
                if 'name' in groups:
                    slack_search.append({'name': groups['name'],
                                         'team': slack_auth['team'],
                                         'team_id': slack_auth['team_id'],
                                         'id': groups['id'],
                                         'type': 'group',
                                         'api_key': api_key})
    return slack_search
def slack_files(keys):
    wf = Workflow()  # `wf` was undefined in the original function
    files_list = []
    for key in keys:
        api_key = str(key)
        slack_auth = web.get('https://slack.com/api/auth.test?token=' +
                             api_key + '&pretty=1').json()
        if slack_auth['ok'] is False:
            # The original concatenated these literals without a space
            wf.add_item('Authentication failed. Try saving your API key again',
                        valid=False)
            wf.send_feedback()
            break
        else:
            files = web.get('https://slack.com/api/files.list?token=' +
                            api_key + '&count=20&pretty=1').json()
            for file in files['files']:
                if 'initial_comment' in file:
                    files_list.append({'name': file['name'],
                                       'id': file['id'],
                                       'url': file['url'],
                                       'title': file['title'],
                                       'filetype': file['filetype'],
                                       'initial_comment': file['initial_comment'],
                                       'comment': file['initial_comment']['comment']})
                else:
                    files_list.append({'name': file['name'],
                                       'id': file['id'],
                                       'url': file['url'],
                                       'title': file['title'],
                                       'filetype': file['filetype']})
    return files_list
def join_channel(keys, query):
    for key in keys:
        api_key = str(key)
        channels_list = web.get('https://slack.com/api/channels.list?token=' +
                                api_key + '&pretty=1').json()
        for channels in channels_list['channels']:
            if query == channels['name']:
                web.get('https://slack.com/api/channels.join?token=' +
                        api_key + '&name=' + query + '&pretty=1')
def main(wf):
    url = 'https://translate.yandex.net/api/v1.5/tr.json/translate'

    # RU
    params = dict(key=API_KEY, lang='ru', text=wf.args[0])
    r = web.get(url, params)
    r.raise_for_status()
    result = r.json()
    value = result['text'][0]
    wf.add_item(title=value, arg=value, subtitle='-> RU', valid=True)

    # EN
    params = dict(key=API_KEY, lang='en', text=wf.args[0])
    r = web.get(url, params)
    r.raise_for_status()
    result = r.json()
    value = result['text'][0]
    wf.add_item(title=value, arg=value, subtitle='-> EN', valid=True)

    wf.send_feedback()
def main(wf):
    parser = argparse.ArgumentParser()
    parser.add_argument('query', nargs='?', default=None)
    args = parser.parse_args(wf.args)

    try:
        slack_keys = wf.get_password('slack_api_key')
    except PasswordNotFound:
        wf.add_item(title='No API key set. Please run slt', valid=False)
        wf.send_feedback()
        return 0

    keys = slack_keys.split(",")

    if len(wf.args):
        query = wf.args[0]
    else:
        query = None

    for key in keys:
        api_key = str(key)
        slack_auth = web.get('https://slack.com/api/auth.test?token=' +
                             api_key + '&pretty=1').json()
        if slack_auth['ok'] is False:
            wf.add_item(title='Authentication failed. Check your API key',
                        valid=False)
            wf.send_feedback()
        elif query:  # the original concatenated `query` even when it was None
            web.get('https://slack.com/api/users.setPresence?token=' +
                    api_key + '&presence=' + query + '&pretty=1')
    wf.send_feedback()
def parse(url):
    """Parse a URL for OpenSearch specification."""
    log.info('[opensearch] fetching "%s" ...', url)
    defurl = iconurl = None

    # Fetch and parse URL
    r = web.get(url)
    r.raise_for_status()
    s = r.text

    if not _is_xml(s):  # find URL of OpenSearch definition
        defurl, iconurl = _parse_html(s, url)
        if not defurl:
            log.error('[opensearch] no OpenSearch link found')
            raise NotFound(url)

        r = web.get(defurl)
        r.raise_for_status()
        s = r.text

    # Parse OpenSearch definition
    search = _parse_definition(s)
    search.validate()
    search.uid = _url2uid(url)
    search.icon_url = iconurl
    return search
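# parse() above relies on an _is_xml() helper that is not shown here. A
# minimal sketch, assuming it only needs to distinguish a raw XML document
# from an HTML page that links to one (a guess, not the original helper):
def _is_xml(s):
    """Return True if `s` looks like an XML document."""
    return s.lstrip().startswith('<?xml')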
def test_gzipped_content(httpserver):
    """Gzip encoding"""
    httpserver.serve_content(gifbytes, headers={'Content-Type': 'image/gif'})
    r = web.get(httpserver.url)
    assert r.status_code == 200
    assert r.content == gifbytes

    httpserver.serve_content(
        gifbytes_gzip,
        headers={
            'Content-Type': 'image/gif',
            'Content-Encoding': 'gzip',
        })

    # Full response
    r = web.get(httpserver.url)
    assert r.status_code == 200
    assert r.content == gifbytes

    # Streamed response
    r = web.get(httpserver.url, stream=True)
    assert r.status_code == 200
    content = b''
    for chunk in r.iter_content():
        content += chunk
    assert content == gifbytes
def do_search(query, page=None):
    """Search for the query on the given page (or the first page if None).

    :rtype : dict
    """
    log.debug('Starting search for [%s]', query)

    if not page:
        # Get the first page
        r = web.get(search_url + query, timeout=10)
    else:
        # Get the specified page
        r = web.get("{}{}/page/{}".format(search_url, query, page), timeout=10)

    r.raise_for_status()
    json = r.json()
    if json['Error'] != u'0':
        wf.add_item('Error on searching', str(json['Error']), icon=ICON_ERROR)
        wf.send_feedback()
        raise SearchException('Error on searching')

    log.debug('No errors on search')
    return json
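# do_search() raises a SearchException that is defined elsewhere. A minimal
# sketch, assuming it carries nothing beyond the message:
class SearchException(Exception):
    """Raised when the search API reports an error (hypothetical definition)."""
    pass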
def main(wf):
    if len(wf.args):
        query = wf.args[0]
    else:
        query = None

    if not query or len(query) < 2:
        wf.add_item(title='Enter a movie title',
                    subtitle='Please enter at least 2 characters.')
        wf.send_feedback()
        return

    imdbURL = 'http://www.imdb.com/title/'
    moviesData = web.get('http://www.omdbapi.com/?s=' + urllib.quote(query) +
                         '&r=json').json()

    # OMDb always includes a 'Response' field, so the original
    # `'Response' in moviesData` test matched every reply; check for the
    # absence of results instead.
    if 'Search' not in moviesData:
        wf.add_item(title='Nothing was found.')
    else:
        for movie in moviesData['Search']:
            extendedMovieData = web.get('http://www.omdbapi.com/?tomatoes=true&i=' +
                                        movie['imdbID'] + '&r=json').json()
            wf.add_item(title='%s (%s)' % (movie['Title'], movie['Year']),
                        subtitle='IMDb: %s RT: %s%s Metacritic: %s' % (
                            extendedMovieData['imdbRating'],
                            extendedMovieData['tomatoMeter'],
                            '' if extendedMovieData['tomatoMeter'] == 'N/A' else '%',
                            extendedMovieData['Metascore']),
                        arg=imdbURL + movie['imdbID'],
                        valid=True)
    wf.send_feedback()
def get_data(arg):
    url = arg_switch(arg)
    data_filename = datetime.date.today().strftime('%Y_%m_%d') + '_%s.json' % arg
    if not os.path.exists(data_filename):
        web.get(url).save_to_path(data_filename)
    with open(data_filename, 'r') as data_fd:
        data = json.load(data_fd)
    return data
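# get_data() calls an arg_switch() helper that is not included. A sketch of
# the assumed shape; the URLs are placeholders, not the real endpoints:
def arg_switch(arg):
    """Map a CLI argument to the JSON feed to download (hypothetical URLs)."""
    urls = {
        'today': 'https://example.com/today.json',  # placeholder
        'week': 'https://example.com/week.json',    # placeholder
    }
    return urls[arg]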
def test_download(travis=None):
    url1 = 'http://unicode.org/emoji/charts-beta/full-emoji-list.html'
    url2 = 'http://unicode.org/emoji/charts/full-emoji-list.html'
    if travis:
        with travis.folding_output():
            r = web.get(url1, timeout=6000)
    else:
        r = web.get(url1, timeout=6000)
    print r.status_code
def _get_story_icon_file_path(wf, dir, img_url):
    regex = r'\w+\.\w+$'
    match = re.search(regex, img_url)
    img_name = match.group(0)
    img_cache_full_path = wf.cachedir + '/thumbnail_cache/' + dir + '/' + img_name

    if not os.path.exists(img_cache_full_path):
        web.get(img_url).save_to_path(img_cache_full_path)

    if not os.path.exists(img_cache_full_path):
        return default_thumsnail
    else:
        return img_cache_full_path
def downloadAudio(wf, word):
    BriURL = "http://dict.youdao.com/dictvoice?audio=" + word + "&type=1"
    AmeURL = "http://dict.youdao.com/dictvoice?audio=" + word + "&type=2"
    dirname = wf.cachedir + "/"

    res = web.get(BriURL)
    if res.status_code == 200:
        res.save_to_path(dirname + word + ".bri")

    res = web.get(AmeURL)
    if res.status_code == 200:
        res.save_to_path(dirname + word + ".ame")
def slackList(api_key):
    slackChannels = web.get('https://slack.com/api/channels.list?token=' +
                            api_key + '&pretty=1').json()
    slackUsers = web.get('https://slack.com/api/users.list?token=' +
                         api_key + '&pretty=1').json()
    slackSearch = []
    for channels in slackChannels['channels']:
        slackSearch.append('#' + channels['name'])
    for users in slackUsers['members']:
        slackSearch.append('@' + users['name'])
    return slackSearch
def main(wf):
    parser = argparse.ArgumentParser()
    parser.add_argument('--message', nargs='?')
    parser.add_argument('query', nargs='?', default=None)
    args = parser.parse_args(wf.args)

    if args.message:
        query = args.message
        carrot = query.find('>')
        colon = query.find(':')
        team = query[:(colon - 1)]
        channel_name = query[(colon + 2):(carrot - 1)]
        message = query[(carrot + 2):].split()
        if len(message) >= 2:
            message = '%20'.join(message)
        else:
            message = message[0]
        for key in slack_keys():
            api_key = str(key)
            slack_auth = web.get('https://slack.com/api/auth.test?token=' +
                                 api_key + '&pretty=1').json()
            if slack_auth['ok'] is True and slack_auth['team'] == team:
                message_url = ('https://slack.com/api/chat.postMessage?token=' +
                               api_key + '&channel=%23' + channel_name +
                               '&text=' + message + '&as_user=true&pretty=1')
                web.get(message_url)

    def wrapper():
        return slack_channels(keys=slack_keys())

    channels_list = wf.cached_data('channels', wrapper, max_age=120)

    query = args.query
    if query:
        channels_list = wf.filter(query, channels_list, key=search_slack_channels)

    if len(channels_list) == 0:
        wf.add_item(title="Enter your message", arg=query, valid=True)
    else:
        for channels in channels_list:
            if channels['member'] == True:
                wf.add_item(title=channels['name'] + ' - ' + channels['team'],
                            subtitle='Member',
                            autocomplete='{0} : {1} > '.format(channels['team'],
                                                               channels['name']),
                            arg=query,
                            valid=True)
    wf.send_feedback()
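# The slack_keys() helper used above is not shown. A sketch based on the
# convention in the other snippets here, where several API keys are stored
# comma-separated under the 'slack_api_key' Keychain entry:
def slack_keys():
    """Return the list of stored Slack API keys (assumed comma-separated)."""
    wf = Workflow()
    return wf.get_password('slack_api_key').split(',')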
def login_create_cookie(wf):
    """Use account to login and return cookie information."""
    url = "http://passport.garmin.com.tw/passport/login.aspx?Page=http://biz.garmin.com.tw/introduction/index.asp&Qs="
    pwd = wf.get_password('employ_password')
    r = web.get(url=url, auth=(USER_NAME, pwd))
    r.raise_for_status()

    soup = BeautifulSoup(r.text, "html5lib")
    cookie = cookielib.MozillaCookieJar(COOKIE_NAME)
    result = web.get(soup.body.a['href'], cookies=cookie)
    result.raise_for_status()
    cookie.save(ignore_discard=True, ignore_expires=True)
    return cookie
def main(wf):
    parser = argparse.ArgumentParser()
    parser.add_argument('--join', nargs='?')
    parser.add_argument('--leave', dest='leave', nargs='?')
    parser.add_argument('query', nargs='?', default=None)
    args = parser.parse_args(wf.args)

    try:
        api_key = wf.get_password('slack_api_key')
    except PasswordNotFound:
        # The original concatenated these literals without a space
        wf.add_item('No API key set. Please run slt', valid=False)
        wf.send_feedback()
        return 0

    if args.leave:
        query = args.leave
        web.get('https://slack.com/api/channels.leave?token=' + api_key +
                '&channel=' + query + '&pretty=1')
    elif args.join:
        query = args.join
        channelName = getChannelName(api_key, query)
        web.get('https://slack.com/api/channels.join?token=' + api_key +
                '&name=' + channelName + '&pretty=1')

    def wrapper():
        return slackChannels(api_key)

    channelsList = wf.cached_data('channels', wrapper, max_age=60)

    query = args.query
    if query:
        channelsList = wf.filter(query, channelsList, key=searchSlackChannels)

    for channels in channelsList:
        if channels['is_member'] == True:
            wf.add_item(title=channels['name'], subtitle='Member',
                        arg=channels['id'], valid=True)
        elif channels['is_member'] == False:
            wf.add_item(title=channels['name'], subtitle='Not a member',
                        arg=channels['id'], valid=True)
    wf.send_feedback()
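# getChannelName() is referenced above but not defined in this collection.
# A plausible sketch, assuming it resolves a channel id to its name by
# walking channels.list the same way join_channel() does:
def getChannelName(api_key, channel_id):
    """Look up a channel's name by id via channels.list (assumed behaviour)."""
    channels_list = web.get('https://slack.com/api/channels.list?token=' +
                            api_key + '&pretty=1').json()
    for channel in channels_list['channels']:
        if channel['id'] == channel_id:
            return channel['name']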
def main(wf):
    url = 'https://api.digitalocean.com/v2/droplets'
    # NB: a real API token should not be hard-coded like this
    header = {'Authorization': 'Bearer cf2b9348d846867bbafb71da8877bf1fa2d77771be7bb35e9cc3a7b78f1cf1b7'}
    r = web.get(url, headers=header)
    data = r.json()
    droplet_array = data['droplets']

    for droplet in droplet_array:
        if droplet['size']['memory'] == 512:
            wf.add_item(title='%s is %s on %s' % (
                            droplet['name'], droplet['status'],
                            droplet['region']['name']),
                        subtitle='CPU(s): %s || Memory: %sMB || Size: %sGB' % (
                            droplet['size']['vcpus'],
                            droplet['size']['memory'],
                            droplet['size']['disk']))
        else:
            memory_string = str(droplet['size']['memory'])
            wf.add_item(title='%s is %s on %s' % (
                            droplet['name'], droplet['status'],
                            droplet['region']['name']),
                        subtitle='CPU(s): %s || Memory: %sGB || Size: %sGB' % (
                            droplet['size']['vcpus'], memory_string[0],
                            droplet['size']['disk']))
    wf.send_feedback()
def _get_html(url):
    # html_text = urllib.urlopen(volume_url).read()
    html_text = web.get(url)
    try:
        return html_text.text
    except AttributeError:
        return None
def main(wf):
    url = 'https://www.inoreader.com/reader/api/0/stream/contents'
    r = web.get(
        url,
        headers={
            'AppId': wf.settings['appid'],
            'AppKey': wf.settings['appkey'],
            'Authorization': 'GoogleLogin auth={}'.format(wf.settings['token'])
        }
    )

    # throw an error if request failed
    # Workflow will catch this and show it to the user
    r.raise_for_status()

    # Parse the JSON returned by Inoreader and extract the posts
    # (the original comment said "pinboard", a copy-paste leftover)
    result = r.json()
    posts = result['items']

    # Loop through the returned posts and add an item for each to
    # the list of results for Alfred
    for post in posts:
        wf.add_item(
            title=post['title'],
            subtitle=post['canonical'][0]['href'],
            valid=True,
            arg=post['canonical'][0]['href'],
            icon=workflow.ICON_WEB
        )

    # Send the results to Alfred as XML
    wf.send_feedback()
    return 0
def test_iter_content_fails_if_content_read(httpserver):
    """iter_content fails if `content` has been read"""
    httpserver.serve_content(utf8html_bytes)
    r = web.get(httpserver.url, stream=True)
    r.content
    with pytest.raises(RuntimeError):
        r.iter_content(decode_unicode=True)
def get_rates(src, dst):
    """Gets the current exchange rates from src to dst.

    :type src: str
    :type dst: str
    :rtype : float
    """
    if not dst:
        dst = ''
    request = '{}{}'.format(api_url,
                            rates_query.replace(':?', '"{}{}"'.format(src, dst)))
    response = web.get(urllib.quote(request, ':/?&=*'))
    response.raise_for_status()

    rates = response.json()
    rate_resp = rates['query']['results']['rate']
    if rate_resp['Rate'] == 'N/A':
        return -1
    return float(rate_resp['Rate'])
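# The api_url and rates_query constants above are defined elsewhere. Given
# the ':?' placeholder and the query.results.rate response shape, a plausible
# but unverified sketch against Yahoo's now-retired YQL service:
api_url = 'https://query.yahooapis.com/v1/public/yql?q='
rates_query = ('select * from yahoo.finance.xchange where pair in (:?)'
               '&format=json&env=store://datatables.org/alltableswithkeys')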
def test_no_follow_redirect(self):
    """Redirects are not followed"""
    url = self.httpbin.url + '/redirect-to?url=' + self.httpbin.url
    r = web.get(url, allow_redirects=False)
    self.assertNotEquals(r.url, self.httpbin.url)
    self.assertRaises(urllib2.HTTPError, r.raise_for_status)
    self.assertEqual(r.status_code, 302)
def test_no_encoding(self):
    """No encoding"""
    # Is an image
    url = self.httpbin.url + '/bytes/100'
    r = web.get(url)
    self.assertEqual(r.encoding, None)
    self.assert_(isinstance(r.text, str))
def getDroplrInfo(input):
    output = []
    r = web.get(input)
    # Check for errors!
    r.raise_for_status()
    # Parse the result!
    result = r.text
    if u'Droplr.dropProps' in result:
        # We have some JS from the page! Let's read it...
        # We'll start with the title!
        tS = result.find('title: ', result.find('Droplr.dropProps')) + 8
        tE = result.find('\'', tS)
        if tS > 0 and tE > 0:
            title = result[tS:tE]
            output.append(title)
            log.debug(title)
        # Now we'll get the file type
        fS = result.find('type: ', result.find('Droplr.dropProps')) + 7
        fE = result.find('\'', fS)
        if fS > 0 and fE > 0:
            fileType = result[fS:fE].lower()
            output.append(fileType)
            log.debug(fileType)
    return output
def main(wf):
    args = len(sys.argv)
    emulator = 'mame'
    query = ''
    if args >= 2:
        query = ' '.join(sys.argv[1:])

    url = raw_url % (query, emulator)
    r = web.get(url)

    # throw an error if request failed
    # Workflow will catch this and show it to the user
    r.raise_for_status()

    # Extract the ROM entries from the HTML response
    # print r.content
    matcher = re.findall(pattern, r.content)
    if matcher:
        for i, sub in enumerate(matcher):
            rom = Rom(sub[1], sub[2], sub[3])
            wf.add_item(title=rom.name,
                        subtitle=rom.size + ' - MAME .158 ROMs',
                        icon='zipfile.png',
                        arg=rom.downloadurl,
                        copytext=rom.downloadurl,
                        valid=True)
            if i == 50:
                break
    else:
        wf.add_item(title='No roms found.', icon=ICON_INFO)
    wf.send_feedback()
def test_no_encoding(self):
    """No encoding"""
    # Is an image
    url = 'https://avatars.githubusercontent.com/u/747913'
    r = web.get(url)
    self.assertEqual(r.encoding, None)
    self.assert_(isinstance(r.text, str))
def test_gzipped_content(self):
    """Gzipped content decoded"""
    url = self.httpbin.url + '/gzip'
    r = web.get(url)
    self.assertEqual(r.status_code, 200)
    data = r.json()
    self.assertTrue(data.get('gzipped'))
def test_json_encoding(self):
    """JSON decoded correctly"""
    url = 'https://suggestqueries.google.com/complete/search?client=firefox&q=münchen'
    r = web.get(url)
    self.assertEqual(r.status_code, 200)
    data = r.json()
    self.assertEqual(data[0], 'münchen')
def get_recent_currencies_info():
    """Retrieve recent currencies from http://coinmarketcap.com/

    Returns a list of currencies' information
    """
    # Get the information of the top 100 most popular currencies from the
    # website
    url = "https://api.coinmarketcap.com/v1/ticker/"
    r = web.get(url)

    # Throw an error if request failed
    # Workflow will catch this and show it to the user
    r.raise_for_status()

    # Parse the JSON returned by coinmarketcap.com
    result = r.json()
    return result
def get_from_api(self, route, params):
    url = self.to_url(route)
    self.logger.info(
        "api_request %s (%s) - %s",
        url,
        route,
        params,
    )
    resp = web.get(url, params=params,
                   headers=dict(Authorization=self.basic_auth_header))
    resp.raise_for_status()
    self.logger.debug(resp.text)
    return resp
def get_recipes():
    url = ('https://raw.githubusercontent.com/gchq/CyberChef' +
           '/master/src/core/config/Categories.json')
    r = web.get(url)
    r.raise_for_status()
    categories = r.json()

    recipes = dict()
    for category in categories:
        for op in category['ops']:
            key = op.replace(' ', '_')
            recipes[key] = dict(category=category['name'], title=op)
    return recipes
def display_repo(user_input):
    req = web.get('%s/repos/%s' % (API_BASE_URL, user_input))
    try:
        repo = req.json()
    except ValueError:  # not a single repo; fall back to a listing
        parts = user_input.split('/')
        return display_repos(parts[0], parts[1])

    status = get_status(repo['last_build_result'])
    title = 'Build #%s (%s)' % (repo['last_build_number'], status)
    subtitle = None
    if repo['last_build_duration']:
        timedelta = datetime.timedelta(seconds=repo['last_build_duration'])
        subtitle = 'Duration: %s' % humanize.naturaldelta(timedelta)
    wf.add_item(title, subtitle, arg=user_input, autocomplete=user_input,
                valid=True)
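# get_status() is not shown. A sketch assuming Travis CI's convention that a
# last_build_result of 0 means the build passed (anything else failed):
def get_status(last_build_result):
    """Translate a numeric build result into a label (assumed mapping)."""
    return 'passed' if last_build_result == 0 else 'failed'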
def get_recent_snippets(api_key):
    """Fetch the newest snippets from the internal pages.

    Returns a list of snippets.
    """
    url = 'https://intern.orakel.ntnu.no/snippets/api/'
    params = dict(count=100, format='json')
    headers = dict(Authorization=api_key)
    # `headers` must be passed by keyword; passed positionally it lands in
    # the `data` slot and turns the request into a POST
    r = web.get(url, params, headers=headers)

    # throw an error if request failed
    # Workflow will catch this and show it to the user
    r.raise_for_status()

    # Parse the JSON response and extract the snippets
    snippets = r.json()
    return snippets
def search_user_logs(username):
    """Search current user logs/content."""
    url = "https://www.analogue.app/api/logs"
    params = dict(username=username, tag="", limit=10, offset=0,
                  collection=True)
    r = web.get(url, params)

    # Workflow will catch this and show it to the user
    r.raise_for_status()

    result = r.json()["data"]
    return result
def get_project_page(api_key, url, page, list):
    log.info("Calling API page {page}".format(page=page))
    params = dict(per_page=100, page=page, membership='true')
    tokenHeader = {"PRIVATE-TOKEN": api_key}
    r = web.get(url, params, headers=tokenHeader)

    # throw an error if request failed
    # Workflow will catch this and show it to the user
    r.raise_for_status()

    # Parse the JSON returned by GitLab and extract the projects
    result = list + r.json()

    nextpage = r.headers.get('X-Next-Page')
    if nextpage:
        result = get_project_page(api_key, url, nextpage, result)
    return result
def search_any():
    url = 'http://search.maven.org/solrsearch/select'
    q = fix_query(wf.args[0].strip())
    if q is None or len(q) <= 0:
        return []
    # Can't use web.get's `params` form here: the official Maven API only
    # URL-encodes the double quotes
    params = '?q=%s&rows=20&wt=json' % q.replace('"', '%22')
    if q.find('"+AND+a:"') > -1:
        params = params + '&core=gav'
    r = web.get(url + params)
    r.raise_for_status()
    result = r.json()
    items = result['response']['docs']
    items.sort(key=lambda it: long(it['timestamp']), reverse=True)
    return items
def get_avatars():
    projects = get_projects()
    directory = 'avatars'
    if not os.path.exists(directory):
        os.makedirs(directory)

    for project in projects:
        key = project['key']
        path = '{}/{}.png'.format(directory, key)
        avatarExists = os.path.isfile(path)
        log.debug('{}: {}'.format(key, avatarExists))
        if not avatarExists:
            auth = base64.b64encode('{}:{}'.format(USERNAME, PASSWORD))
            url = '{}/rest/api/1.0/projects/{}/avatar.png'.format(BASE_URL, key)
            headers = {'Authorization': 'Basic {}'.format(auth)}
            response = web.get(url, headers=headers)
            response.raise_for_status()
            response.save_to_path(path)
def _arg_option_flags():
    """Get short and long form of all `pandoc` argument options."""
    # Soupify the HTML of the pandoc README
    req = web.get('http://johnmacfarlane.net/pandoc/README.html')
    req.raise_for_status()
    soup = BeautifulSoup(req.text)

    cli_arg_options = []
    # Get all the sub-sections under "Options"
    option_types = soup.find_all('dl')
    for option_set in option_types:
        # Get all the options under that sub-section
        options = option_set.find_all('dt')
        for opt in options:
            if '=' in opt.text:
                cli_arg_options.append(opt.text)
    return cli_arg_options
def get_recent_posts(api_key):
    """Retrieve recent posts from Pinboard.in

    Returns a list of post dictionaries.
    """
    url = 'https://api.pinboard.in/v1/posts/recent'
    params = dict(auth_token=api_key, count=100, format='json')
    r = web.get(url, params)

    # throw an error if request failed
    # Workflow will catch this and show it to the user
    r.raise_for_status()

    # Parse the JSON returned by pinboard and extract the posts
    result = r.json()
    posts = result['posts']
    return posts
def display_repos(user_input, repo_prefix=None):
    req = web.get('%s/repos/%s' % (API_BASE_URL, user_input))
    if req.status_code != 200:
        return wf.add_item('User invalid.', arg='')

    repos = req.json()
    if len(repos) > 0:
        for repo in repos:
            if repo_prefix:
                repo_name = repo['slug'].split('/')[1]
                if not repo_name.startswith(repo_prefix):
                    continue
            wf.add_item(repo['slug'], repo['description'], arg=repo['slug'],
                        autocomplete=repo['slug'], valid=True)
    else:
        wf.add_item('No repositories found for this user.', arg='')
def curl(key):
    url = ('http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/'
           'Market_Center.getHQFuturesData'
           '?page={page}&num={num}&sort={sort}&asc={asc}'
           '&node={node}&base={base}').format(
        page=1, num=5, sort='position', asc=0, node=the_contract(key),
        base='futures')
    keys = [
        'ask', 'askvol', 'bid', 'bidvol', 'changepercent', 'close',
        'contract', 'currentvol', 'high', 'low', 'market', 'name', 'open',
        'position', 'prevsettlement', 'settlement', 'symbol', 'ticktime',
        'trade', 'tradedate', 'volume'
    ]
    resp = web.get(url).content.decode('gbk')
    return demjson.decode(resp)
def fetch_results(query):
    """Fetches query search results from Jisho.org API.

    Args:
        query: A string representing the search query for Jisho.org.

    Returns:
        An array of JSON results from Jisho.org based on search query.
    """
    params = dict(keyword=query)
    request = web.get(API_URL, params)

    # Throw an error if request failed.
    request.raise_for_status()

    # Parse response as JSON and extract results.
    response = request.json()
    return response['data']
def get_repos():
    auth = base64.b64encode('{}:{}'.format(USERNAME, PASSWORD))
    limit = 1000
    isLastPage = False
    start = 0
    repos = []

    while not isLastPage:
        url = '{}/rest/api/1.0/repos'.format(BASE_URL)
        params = {'limit': limit, 'start': start}
        headers = {'Authorization': 'Basic {}'.format(auth)}
        response = web.get(url, params=params, headers=headers)
        response.raise_for_status()
        result = response.json()
        isLastPage = result['isLastPage']
        if not isLastPage:
            start = result['nextPageStart']
        repos.extend(result['values'])
    return repos
def _suggest(self):
    response = web.get(self.suggest_url, {
        'topsuggest': 'true',
        'q': self.options['query']
    })
    response.raise_for_status()
    raw_results = response.json()

    results = collections.OrderedDict()
    for d in raw_results:
        if (d['year'] != "" and d['year'] != "0"):
            pretty_query = (d['rus'].replace(" ", "\u00a0") + " (" +
                            d['year'].replace("–", "\u2013") + ")")
        else:
            pretty_query = d['rus'].replace(" ", "\u00a0")
        results[pretty_query] = d['link']
    return results
def main(wf):
    if len(wf.args[1]) < 2:
        wf.add_item("At least two letters", subtitle="test",
                    arg="At least two letters", valid=True)
        wf.send_feedback()
        return

    cmd = ''.join(wf.args[0])
    user_input = ''.join(wf.args)
    args = wf.args[1]
    args = ''.join(args).replace(" ", "")

    if wf.update_available:
        wf.add_item("An update is available!",
                    autocomplete='workflow:update',
                    valid=False)

    results = wf.cached_data('uib_' + cmd + '_' + args,
                             max_age=60 * 60 * 24 * 7 * 52)  # a year
    if results is None:
        url = ''
        if 'stav' in cmd:
            url = 'https://ordbok.uib.no/perl/lage_ordliste_liten_nr2000.cgi?spr=begge&query=' + args
        elif 'nn' in cmd:
            url = 'https://ordbok.uib.no/perl/lage_ordliste_liten_nr2000.cgi?spr=nynorsk&query=' + args
        elif 'bm' in cmd:
            url = 'https://ordbok.uib.no/perl/lage_ordliste_liten_nr2000.cgi?spr=bokmaal&query=' + args  # begge

        r = web.get(url)
        if r.status_code != 200:
            wf.add_item("No matches", arg="no matches", valid=True)
            wf.send_feedback()
            return
        # The original shuffled the suggestions through several redundant
        # reassignments; cache and use them directly
        results = toJson(r.text)['suggestions']
        wf.cache_data('uib_' + cmd + '_' + args, results)

    if results:
        for unit in results:
            title = wf.decode(unit)
            wf.add_item(title, arg=title, valid=True)
    wf.send_feedback()
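# toJson() is supplied elsewhere. A defensive sketch, assuming the endpoint
# returns a JSON object that may arrive wrapped in non-JSON padding:
def toJson(text):
    """Parse the suggestion response into a dict (a guess at the missing helper)."""
    text = text.strip()
    # Strip any wrapper around the JSON object, e.g. a JSONP-style callback
    if not text.startswith('{'):
        text = text[text.index('{'):text.rindex('}') + 1]
    return json.loads(text)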
def main(wf):
    if wf.update_available:
        wf.add_item("An update is available!",
                    autocomplete='workflow:update',
                    valid=False)

    # The Workflow instance will be passed to the function you call from
    # `Workflow.run`. Not so useful, as the `wf` object created in
    # `if __name__ ...` below is global.
    #
    # Your imports go here if you want to catch import errors (not a bad
    # idea) or if the modules/packages are in a directory added via
    # `Workflow(libraries=...)`

    # Get args from Workflow, already in normalized Unicode
    # Get query from Alfred
    if len(wf.args):
        query = wf.args[0]
    else:
        query = None

    params = dict(movieId=query, attributes='1', mediaType='streaming')
    url = 'http://www.canistream.it/services/query'

    r = web.get(url, params)
    r.raise_for_status()
    results = r.json()

    if len(results) > 0:
        for key, value in results.iteritems():
            wf.add_item(title=value['friendlyName'],
                        subtitle=str('View on ' + value['friendlyName']),
                        uid=value['external_id'],
                        arg=value['direct_url'],
                        valid=True,
                        icon='images/' + key + '.png')
    else:
        wf.add_item('No streaming options available.')

    # Send output to Alfred. You can only call this once.
    # Well, you *can* call it multiple times, but Alfred won't be listening
    # any more...
    wf.send_feedback()
def get_chart_request(self, query=None):
    params = dict(q=query)
    r = web.get(self.CHART_SERVICE_SEARCH_URL, params)
    r.raise_for_status()
    result = r.json()
    charts = [
        Chart(
            chart['id'],
            chart['attributes']['description'],
            chart['relationships']['latestChartVersion']['data']['version'],
            self.get_chart_url(chart['id']),
            self.ICON_PATH,
        )
        for chart in result['data']
    ]
    return charts
def _suggest(self):
    response = web.get(
        self.suggest_url, {
            'v': '2',
            'cp': '999',
            'json': 'b',
            'q': self.options['query'],
            'hl': self.options['lang'],
            'gl': self.options['lang']
        })
    response.raise_for_status()
    results = response.json().get('suggestion', [])
    if not results:
        return []
    results = [d['query'] for d in results if 'query' in d]
    return results
def get_artwork(podcast_or_episode, cache_directory, size=60):
    '''Takes a podcast or episode dictionary (with an artworkUrl100 key)
    and downloads the associated image
    '''
    # What are the available artwork sizes?
    # artwork_sizes = [int(key.replace('artworkUrl', ''))
    #                  for key in podcast_or_episode.keys()
    #                  if key.startswith('artworkUrl')]
    artwork_url = podcast_or_episode.get('artworkUrl{}'.format(size), None)
    artwork = web.get(url=artwork_url)
    artwork.raise_for_status()

    # This works because both podcasts and episodes have a trackId value
    artwork_path = '{}/{trackId}.jpg'.format(cache_directory,
                                             **podcast_or_episode)
    artwork.save_to_path(artwork_path)
    return artwork_path
def main(wf):
    if not apiKey:
        wf.add_item(u'Do you have your API key set?',
                    u'Press Enter to open GitHub for a setup tutorial.',
                    arg='https://github.com/gabrielrios/alfred2-hearthstone/',
                    valid=True,
                    icon='./icon.png')
    else:
        data = web.get('https://omgvamp-hearthstone-v1.p.mashape.com/cards?collectible=1',
                       headers={'X-Mashape-Key': apiKey})
        if data.status_code != 200:
            wf.add_item(u'Bad Request. Is your API Key set?',
                        u'Think something should be here that isn\'t? Hit Enter to open an issue on GitHub',
                        arg='https://github.com/gabrielrios/alfred2-hearthstone/issues/new',
                        valid=True,
                        icon='./icon.png')
        else:
            data = data.json()
            fullCardSet = []
            for cardSet in data:
                fullCardSet += data[cardSet]
            with open('./cards.json', 'w') as outfile:
                json.dump(fullCardSet, outfile)
            wf.add_item(u'Your card list has been updated successfully',
                        u'You can now search using hs Card Name',
                        valid=True,
                        icon='./icon.png')
    wf.send_feedback()
def translate_by_dict(query, language='eng'):
    url = DICT_URL % (language, urllib.quote(str(query)))
    # These are HTTP request headers; the original passed them as `params`,
    # which appended them to the query string instead of sending them as
    # headers (and the dict listed 'Connection' twice)
    headers = {
        'Connection': 'keep-alive',
        'Content-Encoding': 'gzip',
        'Content-Language': 'zh-CN',
        'Content-Type': 'text/html; charset=utf-8',
        'User-Agent': user_agent,
        'Vary': 'Accept-Encoding',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Host': 'dict.youdao.com'
    }
    res = web.get(url, headers=headers)
    return res
def search_analogue(query):
    """Search all of analogue for a specific piece of content."""
    url = "https://pysrv.now.sh/analogue/search"
    # TODO: Get username via the env config or using auth
    params = dict(q=query)
    r = web.get(url, params)

    # throw an error if request failed
    # Workflow will catch this and show it to the user
    r.raise_for_status()

    # Parse the JSON returned by the search service and extract the hits
    result = r.json()["hits"]
    return result
def get_chart_request(self, query=None):
    params = dict(name_fragment=query)
    r = web.get(self.API_SEARCH_URL, params)
    r.raise_for_status()
    result = r.json()
    charts = [
        Chart(
            '{0}/{1}'.format(chart['namespace'], chart['name']),
            chart['description'],
            chart['latest_chart_version'],
            self.get_chart_url('{0}/{1}'.format(chart['namespace'],
                                                chart['name'])),
            self.ICON_PATH,
        )
        for chart in result['chart']
    ]
    return charts
def get_trace_saved_views(url, shard, auth):
    # log.debug("Calling trace searches API with auth={auth} shard={shard}".format(auth=auth, shard=shard))
    headers = {
        'Cookie': "dogweb={auth}; DD-PSHARD={shard}".format(auth=auth,
                                                            shard=shard)
    }
    params = dict(type='trace')
    r = web.get('https://' + url + '/api/v1/logs/views', params=params,
                headers=headers)

    # throw an error if request failed
    # Workflow will catch this and show it to the user
    r.raise_for_status()

    # Parse the JSON response and extract the saved views
    result = r.json()['logs_views']
    return result
def load_cryptocurrency_rates(symbols):
    """Return dict of exchange rates from CryptoCompare.com.

    Args:
        symbols (sequence): Abbreviations of currencies to fetch
            exchange rates for, e.g. 'BTC' or 'DOGE'.

    Returns:
        dict: `{symbol: rate}` mapping of exchange rates.
    """
    url = CRYPTO_COMPARE_BASE_URL.format(REFERENCE_CURRENCY,
                                         ','.join(symbols))
    log.debug('fetching %s ...', url)  # originally logged after the fetch
    r = web.get(url)
    r.raise_for_status()
    data = r.json()
    return data
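# The module-level constants used above are not shown. A plausible sketch
# against CryptoCompare's public price endpoint; treat the URL as an
# assumption and verify it before relying on it:
REFERENCE_CURRENCY = 'USD'
CRYPTO_COMPARE_BASE_URL = ('https://min-api.cryptocompare.com/data/price'
                           '?fsym={}&tsyms={}')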
def test_save_to_path(httpserver):
    """Save directly to file"""
    filepath = os.path.join(tempdir, 'fubar.txt')
    assert not os.path.exists(tempdir)
    assert not os.path.exists(filepath)
    httpserver.serve_content(fubar_bytes,
                             headers={'Content-Type': 'text/plain'})
    try:
        r = web.get(httpserver.url)
        assert r.status_code == 200
        r.save_to_path(filepath)
        assert os.path.exists(filepath)
        data = open(filepath).read()
        assert data == fubar_bytes
    finally:
        if os.path.exists(tempdir):
            shutil.rmtree(tempdir)
def search(query, search_limit):
    """Search GIFs for 'query'

    Search results number is limited by the 'search_limit' parameter
    """
    # Send search request to Giphy API
    url = GIPHY_API_HOST + GIPHY_SEARCH_ENDPOINT
    params = dict(q=query, limit=search_limit, api_key=GIPHY_API_KEY)
    response = web.get(url, params)

    # Throw error if request failed
    response.raise_for_status()

    # Parse response JSON
    result = response.json()
    search_results = result['data']
    return search_results
def search_analogue_users(query):
    """Search Analogue users matching the query."""
    # TODO: host this under a proper microservice
    url = "https://pysrv.now.sh/analogue/users"
    # TODO: Get username via the env config or using auth
    params = dict(q=query)
    r = web.get(url, params)

    # throw an error if request failed
    # Workflow will catch this and show it to the user
    r.raise_for_status()

    result = r.json()["hits"]
    return result