def get_tx():
    """Fetch a transaction and the chain tip; return Alfred result items.

    Reads the module-level ``url_tx``, ``url_tip``, ``query``,
    ``url_subtitle`` and ``subtitle`` values.
    """
    # transaction details and current main-chain tip height
    tx_resp = web.get(url_tx)
    tip_resp = web.get(url_tip)

    # surface HTTP errors to Alfred
    tx_resp.raise_for_status()
    tip_resp.raise_for_status()

    tip_height = tip_resp.json()
    tx = tx_resp.json()

    status = tx['status']
    size = str(tx['size'])
    weight = str(tx['weight'])

    # Virtual size (vsize / vbytes): one vbyte equals four weight
    # units, so the maximum block size measured in vsize is 1 million
    # vbytes.
    v_size = tx['weight'] / 4
    sat_per_byte = str(tx['fee'] / v_size)

    # satoshis -> BTC, rendered with a fixed 8 decimal places
    fee_in_btc = tx['fee'] * .00000001
    format_fee = "{:.8f}".format(float(fee_in_btc))
    fee = str(format_fee) + ' BTC' + ' (' + sat_per_byte + ' sat/B)'

    # Confirmations = tip height - block height of the confirming
    # block + 1 (the confirming block itself counts as one).
    if status['confirmed']:
        conf_count = str(tip_height - status['block_height'] + 1)
    else:
        conf_count = '0'

    # one Alfred row per attribute
    return [
        {'title': 'TXID: ' + query,
         'subtitle': url_subtitle, 'arg': query},
        {'title': 'Confirmations: ' + conf_count,
         'subtitle': subtitle, 'arg': conf_count},
        {'title': 'Fee: ' + fee,
         'subtitle': subtitle, 'arg': fee},
        {'title': 'Size: ' + size + 'b',
         'subtitle': subtitle, 'arg': size},
        {'title': 'Weight: ' + weight + 'wu',
         'subtitle': subtitle, 'arg': weight},
    ]
def get_address():
    """Fetch address stats and return Alfred result items (address, balance).

    Reads the module-level ``url_address``, ``query``, ``url_subtitle``
    and ``subtitle`` values.
    """
    # get address info
    r = web.get(url_address)
    # alert alfred if result error
    r.raise_for_status()

    # confirmed (on-chain) totals only
    data = r.json()['chain_stats']

    # balance in satoshis = total ever received - total ever spent
    bal_in_sat = data['funded_txo_sum'] - data['spent_txo_sum']
    # satoshis -> BTC; a fixed 8-decimal format avoids float repr
    # artifacts (e.g. '1e-08') and matches the fee format in get_tx
    balance = "{:.8f}".format(bal_in_sat * .00000001)

    # titles (space before 'BTC' for consistency with get_tx's fee line)
    address_title = 'Address: ' + query
    balance_title = 'Balance: ' + balance + ' BTC'

    return [
        {'title': address_title, 'subtitle': url_subtitle, 'arg': query},
        {'title': balance_title, 'subtitle': subtitle, 'arg': balance},
    ]
def checkConfig(config):
    """Verify the Confluence credentials in *config* actually work.

    Performs an authenticated GET against ``/rest/api/user/current``.
    On failure, pushes an error item to Alfred, sends feedback and
    returns 0; on success returns None.

    NOTE(review): the original username/password log lines were masked
    in the source (`'******'`); reconstructed here, with the password
    deliberately redacted rather than logged in clear text.
    """
    log.info('~~ Checking config..')
    log.info('~~ baseUrl: ' + config['baseUrl'])
    log.info('~~ username: ' + config['username'])
    # never write the real password to the log
    log.info('~~ password: ' + '*' * len(config['password']))
    r = web.get(
        config['baseUrl'] + '/rest/api/user/current',
        headers=dict(Authorization='Basic ' + b64encode(
            config['username'] + ':' + config['password'])))
    # a valid session returns HTTP 200 with a 'known' user type
    if not ((r.status_code == 200) and (r.json()['type'] == 'known')):
        log.info('~~ Status code: %d', r.status_code)
        log.info('~~ ... Failed.')
        wf.add_item(
            title='Authentication failed.',
            subtitle=
            'CAPTCHA issues? Try to logout and login again in your browser.',
            valid=False)
        wf.send_feedback()
        return 0
    else:
        log.info('~~ ... Ok.')
def main(wf):
    """Alfred entry point: persist a setting, or run a quick-nav search.

    With a --baseUrl / --username / --password flag the value is stored
    and the run ends; otherwise the args are treated as a search query
    against the Confluence quick navigation endpoint.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--baseUrl', dest='baseUrl', nargs='?', default=None)
    parser.add_argument('--username', dest='username', nargs='?', default=None)
    parser.add_argument('--password', dest='password', nargs='?', default=None)
    parser.add_argument('query', nargs='?', default=None)
    args = parser.parse_args(wf.args)

    # configuration mode: store the given setting and exit
    if args.baseUrl:
        wf.settings[PROP_BASEURL] = args.baseUrl
        return 0
    if args.username:
        wf.settings[PROP_USERNAME] = args.username
        return 0
    if args.password:
        # passwords belong in the keychain, not the settings file
        wf.save_password(PROP_PASSWORD, args.password)
        return 0

    try:
        # first word may select a named system config
        args = wf.args[0].split()
        config = findConfig(args)
        if config.get('isFallback') is None:
            query = ' '.join(args[1:])
        else:
            query = ' '.join(args)
    except Exception:
        # no named config matched: use the whole input as the query
        # against the single default configuration
        query = wf.args[0]
        config = dict(baseUrl=getConfluenceBaseUrl(),
                      prefix='',
                      username=getConfluenceUsername(),
                      password=getConfluencePassword())

    # checkConfig() already sent failure feedback to Alfred; without
    # this guard we would query anyway and report the error twice
    if checkConfig(config) == 0:
        return 0

    # query Confluence
    r = web.get(
        config['baseUrl'] + '/rest/quicknav/1/search',
        params=dict(query=query),
        headers=dict(Authorization='Basic ' + b64encode(
            config['username'] + ':' + config['password'])))
    # throw an error if the request failed;
    # Workflow will catch this and show it to the user
    r.raise_for_status()

    # parse the returned JSON and extract the matched content
    result = r.json()
    contentGroups = result['contentNameMatches']

    # Loop through the returned groups and add an item for each page,
    # blog post, or search-site link to the list of results for Alfred
    for contentGroup in contentGroups:
        for content in contentGroup:
            if content['className'] in [
                    'content-type-page', 'content-type-blogpost', 'search-for'
            ]:
                if content.get('spaceName'):
                    subtitle = content['spaceName']
                else:
                    subtitle = 'Use full Confluence Search'
                wf.add_item(title=htmlParser.unescape(content['name']),
                            subtitle=config['prefix'] + subtitle,
                            arg=getBaseUrlWithoutPath(config['baseUrl']) +
                            content['href'],
                            valid=True,
                            icon='assets/' + content['className'] + '.png')

    # Send the results to Alfred as XML
    wf.send_feedback()
def main(wf):
    """Alfred entry point: persist a setting, or run a Confluence CQL search.

    With a --baseUrl / --username / --password flag the value is stored
    and the run ends; otherwise the args become a CQL page search.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--baseUrl', dest='baseUrl', nargs='?', default=None)
    parser.add_argument('--username', dest='username', nargs='?', default=None)
    parser.add_argument('--password', dest='password', nargs='?', default=None)
    parser.add_argument('query', nargs='?', default=None)
    args = parser.parse_args(wf.args)

    # configuration mode: store the given setting and exit
    if args.baseUrl:
        wf.settings[PROP_BASEURL] = args.baseUrl
        return 0
    if args.username:
        wf.settings[PROP_USERNAME] = args.username
        return 0
    if args.password:
        # passwords belong in the keychain, not the settings file
        wf.save_password(PROP_PASSWORD, args.password)
        return 0

    try:
        # first word may select a named system config
        args = wf.args[0].split()
        config = findConfig(args)
        if config.get('isFallback') is None:
            query = ' '.join(args[1:])
        else:
            query = ' '.join(args)
    except Exception:
        # no named config matched: use the whole input as the query
        # against the single default configuration
        query = wf.args[0]
        config = dict(baseUrl=getConfluenceBaseUrl(),
                      prefix='',
                      username=getConfluenceUsername(),
                      password=getConfluencePassword())

    # query Confluence
    url = config['baseUrl'] + "/rest/api/search"
    log.debug('Quick Search URL: ' + url)

    # Build the CQL match clause; 'title' configs search titles only,
    # everything else uses full site search. .get() avoids a KeyError
    # when the fallback config (built above) carries no 'type' key.
    if config.get('type') == 'title':
        match = "title~\"*" + query + "*\""
    else:
        match = "siteSearch~\"" + query + "\""
    cql = ("space=" + config['space'] + " and type=page and " + match +
           " order by lastModified desc")

    auth = "Basic " + (config['username'] + ":" +
                       config['password']).encode("base64")[:-1]
    r = web.get(url,
                params=dict(cql=cql),
                headers=dict(Accept='application/json', authorization=auth))
    # throw an error if the request failed;
    # Workflow will catch this and show it to the user
    r.raise_for_status()

    # parse the returned JSON and extract the results
    result = r.json()
    contentGroups = result['results']

    # Loop through the returned pages and add an item for each to
    # the list of results for Alfred
    for content in contentGroups:
        wf.add_item(title=htmlParser.unescape(content['title']),
                    arg=getBaseUrlWithoutPath(config['baseUrl']) + "/wiki" +
                    content['url'],
                    subtitle=htmlParser.unescape(content['excerpt']),
                    valid=True,
                    icon='assets/content-type-page.png')

    # Send the results to Alfred as XML
    wf.send_feedback()