def paste_tapped(sender):
    # Get the root view:
    v = sender.superview
    if webbrowser.can_open(clipboard.get()):
        v['urlfield'].text = clipboard.get()
    else:
        console.hud_alert('Invalid URL')
def clone_action(self, sender):
    import clipboard
    remote = 'https://github.com/'
    local = ''
    if clipboard.get().startswith('http'):
        remote = clipboard.get()
        local = os.path.split(urlparse.urlparse(remote).path)[-1]
        local = local.split('.git')[0]
    d = UIDialog(root=self.view, title='Clone repo',
                 items={'remote url': remote, 'local path': local},
                 ok_action=self.clone)
def main():
    if appex.is_running_extension():
        url = appex.get_url()
    else:
        url = clipboard.get().strip()
    if not RE_URL.match(url):
        try:
            url = console.input_alert("Enter gamefaqs URL", "", "https://www.gamefaqs.com/")
        except KeyboardInterrupt:
            sys.exit(0)
    newurl = "{0}?print=1".format(url)
    # baseurl = http://www.gamefaqs.com/ps3/959558-fallout-new-vegas/faqs/61226
    if RE_URL.match(url):
        h = html2text.HTML2Text()
        r = requests.get(
            url=newurl,
            headers={"User-agent": "Mozilla/5.0{0:06}".format(random.randrange(999999))}
        )
        html_content = r.text  # requests already returns decoded text; no .decode() needed
        rendered_content = html2text.html2text(html_content)
        filename = url.partition("gamefaqs.com/")[-1].partition("/")[-1].partition("/faqs")[0] + ".txt"
        filepath = os.path.join(os.path.expanduser("~/Documents"), filename)
        with open(filepath, "w") as fo:
            fo.write(rendered_content)
        console.hud_alert('Success! Saved {0}'.format(filename), "success")
def preptext():
    # Prepare content in clipboard
    # Get Clipboard
    clip = clipboard.get()
    print('\n(1) Preparing Text\n -Retrieved Clipboard: ' + clip)
    # Collect / validate desc and URL w/ user
    url = console.input_alert("URL?", "", clip, "OK")
    desc = console.input_alert("Commentary?", "", "", "OK")
    # Prepare URL
    if len(url) < 1:
        # Set the URL / title to empty
        finalurl = shorturl = title = ''
    else:
        # Follow URL to final destination
        req = urllib2.Request(url, None, hdr)
        try:
            res = urllib2.urlopen(req)
            finalurl = res.geturl()
        except urllib2.HTTPError, e:
            print 'HTTPError = ' + str(e.code)
        except urllib2.URLError, e:
            print 'URLError = ' + str(e.reason)
def fetch_link_from_clipboard():
    text = clipboard.get()
    if re.match(regex, text):
        print('Tweet link found in clipboard...')
        like_tweet(text)
    else:
        print('Clipboard does not contain a link to a tweet.')
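# Neither `regex` nor like_tweet() is defined in the snippet above. A plausible
# pattern for matching a tweet URL (an assumption, not the original value) would be:
regex = r'https?://(?:mobile\.)?twitter\.com/\w+/status/\d+'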
def main(args):
    ap = argparse.ArgumentParser()
    ap.add_argument('url', nargs='?', help='the url to read (default to clipboard)')
    ap.add_argument('-o', '--output-file', help='write output to file instead of stdout')
    ap.add_argument('-X', '--request-method', default='GET', choices=['GET', 'POST', 'HEAD'],
                    help='specify request method to use (default to GET)')
    ap.add_argument('-H', '--header', help='Custom header to pass to server (H)')
    ap.add_argument('-d', '--data', help='HTTP POST data (H)')
    ns = ap.parse_args(args)
    url = ns.url or clipboard.get()
    headers = {}
    if ns.header:
        for h in ns.header.split(';'):
            name, value = h.split(':')
            headers[name.strip()] = value.strip()
    if ns.request_method == 'GET':
        r = requests.get(url, headers=headers)
    elif ns.request_method == 'POST':
        r = requests.post(url, data=ns.data, headers=headers)
    elif ns.request_method == 'HEAD':
        r = requests.head(url, headers=headers)
    else:
        print('unknown request method: {}'.format(ns.request_method))
        return
    if ns.output_file:
        with open(ns.output_file, 'w') as outs:
            outs.write(r.text)
    else:
        print(r.text)
def main():
    if appex.is_running_extension():
        if appex.get_url():
            text = appex.get_url()
        else:
            text = appex.get_text()
    else:
        text = clipboard.get()
    if not text:
        text = console.input_alert('Jira ID')
    if text:
        ids = JIRA_PAT.findall(text)
        if len(ids) == 0:
            text = console.input_alert('Jira ID')
            ids = JIRA_PAT.findall(text)
        if len(ids) > 0:
            id = ids[0]
            base_url, username = get_conf_info()
            url = '%s/browse/%s' % (base_url, id)
            console.hud_alert('Jira ID: %s' % id)
            app = UIApplication.sharedApplication()
            url = nsurl(url)
            app.openURL_(url)
        else:
            console.hud_alert('No Jira ID found.')
    else:
        console.hud_alert('No input text found.')
    if appex.is_running_extension():
        appex.finish()
def get_args(argv):
    args = {
        'action': 'set_password',
        'user': None,
        'text': clipboard.get(),
        'date': date.today().strftime('%Y%m%d'),
        'overwrite': "0"
    }
    for a in argv:
        pairs = a.split(':')
        for p in pairs:
            (k, v) = p.split('=', 1)  # split on the first '=' only, so values may contain '='
            if k not in args:
                raise ValueError("Invalid argument '%s'" % (k))
            args[k] = v
    if args['user'] is None:
        args['user'] = console.input_alert("Tradervue Username")
    if not re.match(r'^\d{8}$', args['date']):
        raise ValueError("Invalid date format '%s'. Must be YYYYMMDD" % (args['date']))
    if int(args['overwrite']) == 0:
        args['overwrite'] = False
    else:
        args['overwrite'] = True
    args['date'] = datetime.strptime(args['date'], '%Y%m%d')
    return args
def main():
    text = None
    if len(sys.argv) > 1:
        text = sys.argv[1]
    else:
        try:
            import appex
            import clipboard
            if appex.is_running_extension():
                if appex.get_url():
                    text = appex.get_url()
                else:
                    text = appex.get_text()
            else:
                text = clipboard.get()
        except ImportError:
            pass
    if text:
        keys = JIRA_PAT.findall(text)
        if len(keys) > 0:
            key = keys[0]
            print('Found Jira ID: %s' % key)
        else:
            key = raw_input('Jira ID:')
        base_url, username, jsessionid = get_conf_info()
        if check_jsessionid(base_url, jsessionid):
            get_issue_info(base_url, jsessionid, key)
        else:
            jsessionid = get_new_cookie(base_url, username)
            get_issue_info(base_url, jsessionid, key)
    else:
        print('No input text found.')
def webopen(url=None):
    base_url = "safari-{0}"
    cburl = clipboard.get()  # clipboard.get() already returns text; no .encode() needed for startswith()
    if not cburl.startswith("http"):
        cburl = dialogs.input_alert("URL?")
    url = base_url.format(cburl) if not url else base_url.format(url)
    webbrowser.open(url)
def main():
    xParser = argparse.ArgumentParser()
    xParser.add_argument("-f", "--file", help="parse file (UTF-8 required!) [on Windows, -f is similar to -ff]", type=str)
    xParser.add_argument("-ff", "--file_to_file", help="parse file (UTF-8 required!) and create a result file (*.res.txt)", type=str)
    xParser.add_argument("-j", "--json", help="generate list of errors in JSON", action="store_true")
    xParser.add_argument("-w", "--width", help="width in characters (40 < width < 200; default: 100)", type=int, choices=range(40, 201, 10), default=100)
    xParser.add_argument("-tf", "--textformatter", help="auto-format text according to typographical rules", action="store_true")
    xParser.add_argument("-tfo", "--textformatteronly", help="auto-format text and disable grammar checking (only with option 'file' or 'file_to_file')", action="store_true")
    xArgs = xParser.parse_args()
    gce.load()
    gce.setOptions({"html": True})
    oDict = gce.getDictionary()
    oTokenizer = tkz.Tokenizer("fr")
    oLexGraphe = lxg.Lexicographe(oDict)
    if xArgs.textformatter or xArgs.textformatteronly:
        oTF = tf.TextFormatter()
    sText = clipboard.get()
    bDebug = False
    for sParagraph in txt.getParagraph(sText):
        if xArgs.textformatter:
            sText = oTF.formatText(sText)
        sRes = generateText(0, sText, oTokenizer, oDict, xArgs.json, nWidth=xArgs.width, bDebug=bDebug, bEmptyIfNoErrors=True)
        if sRes:
            clipboard.set(sRes)
        else:
            clipboard.set("No errors found.")
        print(sRes)
def update_database_from_clipboard(db):
    try:
        import clipboard
    except ImportError:
        return
    plist_string = clipboard.get()
    update_database(db, plist_string)
def main():
    description = 'formd: A (for)matting (M)ark(d)own tool.'
    md = clipboard.get()
    if md == '':
        print 'Nothing on clipboard'
    else:
        text = ForMd(md)
        [clipboard.set(t) for t in text.ref_md()]
def main():
    text = clipboard.get()
    assert text, 'No text on the clipboard!'
    filename = sys.argv[1]
    console.clear()
    print('Wait a Moment Please!')
    filename = save(filename, text)
    console.set_font('Futura', 16)
    print('Done!\nFile Saved as:\n' + filename)
def addMagnet():
    magnetUrl = clipboard.get()
    # Not a complete syntax check of the magnet URL - will need to improve in the future
    # Especially considering that this will be run in a shell on the remote host
    if not re.match(r"^magnet\:\?.*", magnetUrl):
        console.alert("Not a magnet URL", magnetUrl, "OK", hide_cancel_button=True)
        exit()
    commands = [
        "transmission-remote -n '" + transmissionRemoteUsername + ":" + transmissionRemotePassword +
        "' --torrent-done-script /home/torrentbot/torrentbot/bin/remove-the-torrent.sh -a '" + magnetUrl + "'"
    ]
    executeSshCommand(commands)
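# executeSshCommand() is not shown above. A minimal sketch of what it might look
# like using paramiko; the host and credential names (transmissionRemoteHost is an
# assumption, the username/password variables appear in the snippet above):
import paramiko

def executeSshCommand(commands):
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(transmissionRemoteHost,
                   username=transmissionRemoteUsername,
                   password=transmissionRemotePassword)
    try:
        for command in commands:
            stdin, stdout, stderr = client.exec_command(command)
            print(stdout.read().decode())
    finally:
        client.close()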
def github_download(*args):
    branch = None
    if len(args) == 0:
        return github_download(clipboard.get())
    elif len(args) == 1 and is_github_url(args[0]):
        user, repo, branch = _decode_github_url(args[0])
    elif (len(args) == 1 and re.match(r'^[0-9a-zA-Z]*/[0-9a-zA-Z]*$', str(args[0]))):
        user, repo = args[0].split("/", 1)
        branch = "master"
def make_text_view(self):
    text_view = ui.TextView(frame=self.bounds, name='text_view')
    text_view.y += 100
    text_view.height -= 100
    text_view.delegate = self
    text_view.text = clipboard.get()
    text_view.autocapitalization_type = ui.AUTOCAPITALIZE_NONE
    text_view.autocorrection_type = False
    text_view.spellchecking_type = False
    return text_view
def makefile(slug, url, title):
    f = codecs.open(slug + '.md', 'w', 'utf-8')
    f.write(title + '\n')
    f.write('======\n')
    f.write('Link: ' + url + '\n')
    f.write('publish-not-yet\n\n')
    isLink = console.alert('', '', 'Blockquote', 'No Blockquote')
    if isLink == 1:
        f.write('> ' + clipboard.get())
    f.close()
def main():
    # get text from app share or clipboard
    if appex.is_running_extension():
        text = appex.get_url()
    else:
        text = clipboard.get().strip()
    # get url
    url = ''
    try:
        url = [mgroups[0] for mgroups in GRUBER_URLINTEXT_PAT.findall(text)][0]
    except:
        url = console.input_alert("URL", "", url)
    if url:
        if 'http' not in url:
            url = 'http://' + url
    else:
        console.hud_alert('No URL found.')
        sys.exit()
    sel = console.alert('Save: %s ?' % url, button1='File', button2='Clipboard')
    # get url info
    url_items = url.split("?")[0].split("/")
    # if url ends with /, last item is an empty string
    file_name = url_items[-1] if url_items[-1] else url_items[-2]
    try:
        content = urllib2.urlopen(url).read()
    except Exception as e:
        console.alert(e.message)
        sys.exit()
    if sel == 1:
        # get file save info
        save_dir_name = get_save_dir()
        save_dir = os.path.join(BASE_DIR, save_dir_name)
        file_path = os.path.join(save_dir, file_name)
        try:
            # check dirs and save
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)
            with open(file_path, 'w') as f:
                f.write(content)
            # wrapup
            console.alert('Saved to: %s' % file_path, hide_cancel_button=True, button1='OK')
        except Exception as e:
            console.alert(str(e), button1='OK', hide_cancel_button=True)
    elif sel == 2:
        clipboard.set(content)
    if appex.is_running_extension():
        appex.finish()
def main():
    choice = console.alert('Markdown Conversion', '', 'Demo', 'Convert Clipboard')
    md = DEMO if choice == 1 else clipboard.get()
    html = markdown(md, extras=['smarty-pants'])
    tempdir = tempfile.gettempdir()
    html_path = os.path.join(tempdir, 'temp.html')
    html = TEMPLATE.replace('{{CONTENT}}', html)
    with codecs.open(html_path, 'w', 'utf-8') as f:
        f.write(html)
    file_url = 'file://' + html_path
    webbrowser.open(file_url)
def pasteboardChanged_(_self, _cmd, _n):
    import clipboard
    from datetime import datetime
    import editor
    if editor.get_path().endswith('/Scrapbook.txt'):
        # Don't add to ScrapBook when copying from it...
        return
    timestamp = datetime.strftime(datetime.now(), '%Y-%m-%d - %H:%M')
    text = clipboard.get()
    with open('Scrapbook.txt', 'a') as f:
        f.write('\n\n=== %s\n%s' % (timestamp, text))
def main(args):
    ap = argparse.ArgumentParser()
    ap.add_argument('file', nargs='?', help='the file to be pasted')
    ns = ap.parse_args(args)
    status = 0
    if ns.file:
        if os.path.exists(ns.file):
            print("pbpaste: {}: file exists".format(ns.file), file=sys.stderr)
            status = 1
        else:
            try:
                with open(ns.file, 'w') as f:
                    f.write(clipboard.get())
            except Exception as err:
                print("pbpaste: {}: {!s}".format(type(err).__name__, err), file=sys.stderr)
    else:
        print(clipboard.get())
    sys.exit(status)
def main():
    url = appex.get_url() if use_appex else clipboard.get()
    if 'omz-forums' not in url or len(url.splitlines()) > 1:
        #print 'No forum URL'
        return ['-1']  # i know this is crappy.
    import requests
    import bs4
    html = requests.get(url).text
    soup = bs4.BeautifulSoup(html)
    pre_tags = soup.find_all('pre')
    return [tag.get_text() for tag in pre_tags]
def main():
    post_file = clipboard.get()
    markdownified = markdown.markdown(post_file)
    links_fixed = fix_links(markdownified)
    code_fixed = fix_code(links_fixed)
    final_text = code_fixed
    clipboard.set(unicode(final_text))
    webbrowser.open('wordpress://')
def main():
    resp = console.alert('Alert!', 'Choose File Extension', '.py', '.pyui', 'other', hide_cancel_button=False)
    if resp == 1:
        ext = '.py'
    elif resp == 2:
        ext = '.pyui'
    elif resp == 3:
        ext = console.input_alert('Specify file extension')
    text = clipboard.get()
    assert text, 'No text on the clipboard!'
    filename = sys.argv[1]
    console.clear()
    filename = save(filename, text, ext)
    editor.open_file(filename)
def main():
    resp = console.alert('Alert!', 'Choose File Extension', '.py', '.pyui', hide_cancel_button=False)
    if resp == 1:
        ext = '.py'
    elif resp == 2:
        ext = '.pyui'
    text = clipboard.get()
    assert text, 'No text on the clipboard!'
    filename = sys.argv[1]
    console.clear()
    print('Wait a Moment Please!')
    filename = save(filename, text, ext)
    console.set_font('Futura', 16)
    print('Done!\nFile Saved as:\n' + filename)
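# The save() helper called in the snippets above is not shown. A minimal sketch,
# assuming it simply writes the clipboard text into a new file in the Pythonista
# Documents folder and returns the filename it used (the extension handling is
# an assumption, not the original behaviour):
import os

def save(filename, text, ext='.py'):
    # Append the chosen extension if it is missing (assumed behaviour).
    if not filename.endswith(ext):
        filename += ext
    path = os.path.join(os.path.expanduser('~/Documents'), filename)
    with open(path, 'w') as f:
        f.write(text)
    return filename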
def main():
    if appex.is_widget():
        console.clear()
        v = appex.get_widget_view()
        # Check if the clipboard view already exists, if not, create it,
        # and set it as the widget's view.
        if not v or v.name != 'Clipboard':
            v = make_widget_view()
            appex.set_widget_view(v)
    else:
        v = make_widget_view()
        v.background_color = '#333'
        v.name = 'Widget Preview'
        v.present('sheet')
    v['text_label'].text = 'Clipboard: ' + (clipboard.get() or '(Empty)')
def btn_Download(self, sender):
    url = clipboard.get()
    pos = url.find('://')  # ftp://, http://, https:// >> 3-5
    self.view_po = ui.load_view('popover')
    self.view_po.name = 'Download'
    self.view_po.present('popover', popover_location=(self.view.width/2, self.view.height/2))
    self.view_po['label1'].hidden = True
    self.view_po['label2'].text = 'Url:'
    self.view_po['label3'].hidden = True
    if pos < 3 or pos > 5:
        self.view_po['textfield1'].text = 'http://www.'
    else:
        self.view_po['textfield1'].text = url
    self.view_po['btn_Okay'].action = self.btn_Download_Okay
    self.view_po['btn_Cancel'].action = self.btn_Cancel
def main(command, *args):
    if command == 'build':
        build_site()
    elif command == 'clean':
        clean_site()
    elif command == 'add':
        post = clipboard.get()
        if post:
            # It's already unicode, so no need to decode
            success = create_post(post)
            webbrowser.open('editorial://workflow-callback/?success={}'.format(success))
        else:
            print 'Nothing on clipboard'
            webbrowser.open('editorial://workflow-callback/?success=False')
    elif command == 'help':
        print """Blok is a small static site generator.
def github_download(*args):
    branch = None
    if len(args) == 0:
        return github_download(clipboard.get())
    elif len(args) == 1 and is_github_url(args[0]):
        user, repo, branch = _decode_github_url(args[0])
    elif (len(args) == 1 and re.match(r'^[0-9a-zA-Z]*/[0-9a-zA-Z]*$', str(args[0]))):
        user, repo = args[0].split("/", 1)
        branch = "master"
    elif len(args) == 2:
        user, repo = args
        branch = "master"
    elif len(args) == 3:
        user, repo, branch = args
    else:
        return
    branch = "master" if not branch else branch
    _change_dir("Documents")
    print(('Downloading {0}...'.format(repo)))
    base_url = 'https://github.com/{0}/{1}/archive/{2}.zip'
    url = base_url.format(user, repo, branch)
    zipname = '{0}.zip'.format(repo)
    urlretrieve(url, zipname)
    with zipfile.ZipFile(open(zipname, "rb")) as zip_file:
        print('Extracting...')
        print('\n'.join(name for name in zip_file.namelist()))
        zip_file.extractall()
    dst = os.path.join(DOCUMENTS, zipname[:-len(".zip")])
    src = "{dir}-{branch}".format(dir=dst, branch=branch)
    os.remove(zipname)
    try:
        os.rename(src, dst)
    except OSError:
        os.rename(dst, "{}.BAK".format(dst))
        os.rename(src, dst)
    print('Done.')
    # If branch is a version tag the directory
    # is slightly different
    #if re.match('^v[0-9.]*$', branch):
    #    dirname = repo + '-' + branch[1:]
    #else:
    #    dirname = repo + '-' + branch
    return os.path.basename(dst)
# I got help from here: http://twolivesleft.com/Codea/Talk/discussion/1652/what-others-do%3A-pythonista/p1
# I got help from here: http://www.macdrifter.com/2012/09/pythonista-trick-url-to-markdown.html
import clipboard
import urllib2
import editor
import os

url = clipboard.get()
scriptName = os.path.basename(url)
contents = urllib2.urlopen(url).read()
editor.make_new_file(scriptName[:-3], contents)
import json
import yaml
import urllib
import appex
import clipboard

uri = clipboard.get()
if appex.get_url():
    uri = appex.get_url()
webf = urllib.urlopen(uri)
txt = webf.read()
y = yaml.dump(yaml.load(json.dumps(json.loads(txt))), default_flow_style=False)
clipboard.set(y)
def download_from_args(args):
    if len(args) == 2:
        url = args[1]
    else:
        url = clipboard.get()
    download(url)
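# download() itself is not part of the snippet above. A minimal sketch, assuming
# it just fetches the URL with requests and writes the body to a file named
# after the last path component:
import os
import requests

def download(url):
    filename = os.path.basename(url.rstrip('/')) or 'download'
    r = requests.get(url)
    r.raise_for_status()
    with open(filename, 'wb') as f:
        f.write(r.content)
    print('Saved {}'.format(filename))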
def ref_md(self):
    """generate referenced markdown"""
    self._format()
    ref_nums = iter([_[0].rstrip(" :") for _ in self.data])
    formd_text = self.match_links.sub(lambda _: next(ref_nums), self.text)
    formd_refs = self.match_refs.sub('', formd_text).strip()
    references = (i[1] for i in self.data)
    formd_md = '\n'.join((formd_refs, '\n', '\n'.join(i for i in references)))
    yield formd_md


def flip(self):
    """convert markdown to the opposite style of the first text link"""
    first_match = re.search(self.match_links, self.text).group(0)
    if '(' in first_match and ')' in first_match:
        formd_md = self.ref_md()
    else:
        formd_md = self.inline_md()
    return formd_md


if __name__ == '__main__':
    import clipboard
    import console

    md = clipboard.get()
    if md:
        console.clear()
        text = ForMd(md)
        [clipboard.set(t) for t in text.flip()]
        final = clipboard.get()
        print(final)
# Utility functions to build the UUID
def hexstring(length):
    result = ''
    for i in xrange(length):
        # randrange's upper bound is exclusive, so use 16 to include 'f'
        result += hex(random.randrange(0, 16))[-1]
    return result


def uuid():
    # standard UUID layout is 8-4-4-4-12 hex digits
    return hexstring(8) + '-' + hexstring(4) + '-' + hexstring(4) + '-' + hexstring(4) + '-' + hexstring(12)


# Gets title of the page
page = urllib2.urlopen(clipboard.get())
content = page.read()
title = re.search('<title>.*</title>', content).group(0)[7:-8]

# Builds the note
output = '* TODO Check [[' + clipboard.get() + '][' + title + ']]\n'
today = datetime.datetime.now().strftime('%Y-%m-%d %a %H:%M')
output += '[' + today + ']\n'
output += '** Note ID: ' + uuid()
output = urllib.quote(output)

# Sends the note back to Drafts and invokes the Dropbox action
webbrowser.open('drafts://x-callback-url/create?text=' + output + '&action=AppendToOrg')
import re
import urllib
import clipboard
import requests


def getRealUrl(url):
    headers = {
        'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1',
    }
    api_url = 'http://gxqu.top/douyin/ajax.php?act=dy&url=' + url
    api_response = requests.get(api_url, headers=headers, verify=False).json()
    #print(api_response)
    urls = api_response['url']
    #print(urls)
    tite = api_response['name']
    #print(tite)
    urllib.urlretrieve(urls, tite + ".mp4")
    print('*' * 84)
    print('\t\t\t\tDouyin watermark-free download')
    print('\t\t\t\tAuthor: honzou')
    print('*' * 84)
    print('Video (' + tite + '.mp4) has been downloaded')


if __name__ == '__main__':
    urls = clipboard.get()
    url1 = re.compile('(http.*)')
    url2 = url1.search(urls)
    url = url2.group(1)
    getRealUrl(url)
    # Test input: a Douyin share text containing a short link, e.g.
    # "Real stories, truly inspiring! https://v.douyin.com/3rUc81/ Copy this link and open Douyin to watch the video!"
import clipboard
import console
import webbrowser
import string
from glob import glob
from urllib.parse import urlparse
import sys

twitter_name = 'jayhickey'

try:
    mytext = sys.argv[1]
    print(clipboard.get())
except IndexError:
    mytext = clipboard.get()

u = urlparse(mytext)
mytext = mytext.replace('https://twitter.com/', 'tweetbot://')
mytext = mytext.replace('statuses', 'status')
mytext = mytext.replace('http://twitter.com/', 'tweetbot://')
mytext = mytext.replace('http://mobile.twitter.com/', 'tweetbot://')
mytext = mytext.replace('https://mobile.twitter.com/', 'tweetbot://')

if mytext.count('/') < 3 or mytext.find('tweets') != -1:
    mytext = 'tweetbot://' + twitter_name + '/user_profile' + u.path

# console.clear()
print(mytext)
webbrowser.open(mytext)
def main():
    image_url = clipboard.get()
    if is_url(image_url):
        search_url = create_url(image_url)
        webbrowser.open('safari-' + search_url)
        print 'Done.'
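# is_url() and create_url() are not shown above. A minimal sketch, assuming
# create_url() builds a Google reverse-image-search query for the copied image URL
# (both helpers here are assumptions, not the original implementations):
import urllib

def is_url(text):
    return text.startswith('http://') or text.startswith('https://')

def create_url(image_url):
    return 'https://www.google.com/searchbyimage?image_url=' + urllib.quote(image_url, safe='')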
from __future__ import print_function
# Uses direct link to image in clipboard to generate HTML code suitable for MacStories
# Should work just about anywhere else though.
# Please note: script will ask for image Title and Alt attributes using an input_alert from console.
import clipboard
import console

image = clipboard.get()
alts = console.input_alert("Image Alt", "Type alt below")
title = console.input_alert("Image Title", "Type title below")
final = "<img src=" + '"' + image + '"' + " " + "alt=" + '"' + alts + '"' + " " + "title=" + '"' + title + '"' + " " + "class=\"aligncenter\" />"
print(final)
clipboard.set(final)
import markdown
import clipboard

input_file = clipboard.get()
s = input_file
md = markdown.Markdown()
html = md.convert(s)
print html
clipboard.set(html)
# coding: utf-8
# http://plobo.net/send-workflow-to-another-device-with-pythonista-command-c/
import re
import urllib.request, urllib.error, urllib.parse
import clipboard
import webbrowser

source = clipboard.get()
# decode the response so the str regex below can search it
mystring = urllib.request.urlopen(source).read().decode('utf-8')
clipboard.set(re.search("workflow://.*\\b", mystring, re.M).group(0))
webbrowser.open('workflow://')
#Summary: Takes copied text and creates new task in OmniFocus
#By: Jason Verly
#Rev: 2013-02-04
#Rev Note: Added Page Title & URL to clipped txt

import webbrowser
import clipboard
import urllib
import console
import sys

title = sys.argv[1]
url = sys.argv[2]

task = console.input_alert('Task', 'Enter task description')
task = urllib.quote(task)

note = clipboard.get()
full_note = ''.join([title, '\n\n', url, '\n\n', note])
full_note = urllib.quote(full_note.encode('utf-8'))

webbrowser.open('omnifocus:///add?name=' + task + '&note=' + full_note)
def inputbutton_tapped(sender):
    text = clipboard.get()
    sender.superview['inputbox'].text = text
import urllib.request, urllib.parse, urllib.error
import clipboard
import bs4
import webbrowser
import console

link = clipboard.get()
soup = bs4.BeautifulSoup(urllib.request.urlopen(link))
clipboard.set(soup.title.string + ' ' + link)
text = clipboard.get()
console.clear()
print(text)
#Convert clipboard to uppercase/lowercase
import clipboard

text = clipboard.get()
if text == '':
    print 'No text in clipboard'
else:
    uppercase = text.upper()
    if uppercase != text:
        new_clip = uppercase
    else:
        #already uppercase, convert to lowercase
        new_clip = text.lower()
    clipboard.set(new_clip)
    print new_clip
"""generate referenced markdown""" self._format() ref_nums = iter([_[0].rstrip(" :") for _ in self.data]) formd_text = self.match_links.sub(lambda _: next(ref_nums), md) formd_refs = self.match_refs.sub('', formd_text).strip() references = (i[1] for i in self.data) formd_md = '\n'.join( (formd_refs, '\n', '\n'.join(i for i in references))) yield formd_md def flip(self): """convert markdown to the opposite style of the first text link""" first_match = re.search(self.match_links, self.text).group(0) if '(' and ')' in first_match: formd_md = self.ref_md() else: formd_md = self.inline_md() return formd_md if __name__ == '__main__': description = 'formd: A (for)matting (M)ark(d)own tool.' md = clipboard.get() if md == '': print 'No text in clipboard' else: text = ForMd(md) new_clip = text.ref_md() # clipboard.set(new_clip) [clipboard.set(t) for t in new_clip]
user_input = ''  # input is a built-in function so use a different name
if appex.is_running_extension():
    LKEvernoteApi.log_progress('load url provided to app extension')
    user_input = appex.get_url()
else:
    LKEvernoteApi.log_progress('not running from extension, checking arguments')
    if len(sys.argv) > 1:
        LKEvernoteApi.log_progress('argument found, use that')
        user_input = sys.argv[1]
    else:
        LKEvernoteApi.log_progress('no arguments found, will use clipboard text')
        user_input = clipboard.get()

if not user_input:
    sys.exit('Clipboard is empty, no arguments passed to script')

LKEvernoteApi.log_progress('Loading title of passed url')
url_title = title_of_url(user_input)
if url_title:
    url_title = ' ({}) '.format(url_title.replace('&', 'and'))

LKEvernoteApi.log_progress('create ENML string')
fmt = '<en-todo checked="false"></en-todo> {}{}(@ {:%d %b %Y %H:%M})'
en_todo_text = fmt.format(user_input, url_title, datetime.datetime.now())
print(en_todo_text)

LKEvernoteApi.log_progress('call `appendNote` function')
LKEvernoteApi.append_to_note(guid=guid,
# coding: utf-8
# https://gist.github.com/omz/4177224
# Original script by Federico Viticci:
# http://www.macstories.net/reviews/fantastical-for-iphone-review/
# Modified to work better with international characters

import webbrowser
import clipboard
import urllib
import console

when = clipboard.get()
fant = 'fantastical://parse?sentence='
newtask = console.input_alert('What is this?', 'Type your event below')
loc = console.alert('Location', 'Does the event have a location?', 'Yes', 'No')
if loc == 1:
    place = console.input_alert('Where', 'Type your location below')
    event = newtask.decode('utf-8') + ' ' + when + ' at ' + place.decode('utf-8')
    encoded = urllib.quote(event.encode('utf-8'), safe='')
elif loc == 2:
    event = newtask.decode('utf-8') + ' ' + when
def get_content_type(filename):
    return mimetypes.guess_type(filename)[0] or 'application/octet-stream'

## end of http://code.activestate.com/recipes/146306/ }}}

## Now let's DO this. First put some markdown text on your clipboard.

## Put clipboard contents in a file to send to Docverter.
#input_text = clipboard.get()
#f = open("docverterin.txt", "w").write(input_text)

## support markdown documents with non-ASCII characters
input_text = clipboard.get()
input_text = input_text.encode('ascii', 'xmlcharrefreplace')
f = open("docverterin.txt", "w").write(input_text)

## Use CSS to style your output. I've included some sensible defaults.
css = """
body {margin: 4em; }
p {line-height: 1.2em; text-align: justify;}
h1, h2, h3, h4 {font-weight: normal;}
sup {line-height: 0;}
hr {border: 1px #eee solid; margin-top: 2em; margin-bottom: 2em; width: 70%;}
pre {white-space: pre-wrap; word-wrap: break-word;}
"""

# Use CSS3 Paged Media Module to number pages of PDF, set margins.
page_info = "@page {margin: 1in; @bottom-center{content: counter(page)}}"
c = open("docverter.css", "w").write(css + page_info)
    if v['swexit'].value:
        v.close()
    # Download Button Enabled
    sender.enabled = True
    webbrowser.open('workflow://run-workflow?name=DownTube&input=' + audio + titulo)


def slider_action(sender):
    # Get the root view:
    v = sender.superview
    # Update Quality
    if sender.name == 'quality':
        if v['quality'].value <= 0.33:
            v['qualityopt'].text = 'Low'
        elif v['quality'].value > 0.33 and v['quality'].value <= 0.66:
            v['qualityopt'].text = 'Medium'
        elif v['quality'].value > 0.66:
            v['qualityopt'].text = 'Best'


v = ui.load_view('YoutubeNow2')
urlfield = v['urlfield']
if webbrowser.can_open(clipboard.get()):
    urlfield.text = clipboard.get()
if ui.get_screen_size()[1] >= 768:
    # iPad
    v.present('popover')
else:
    # iPhone
    v.present(orientations=['portrait'])
def accessed():
    return ' on ' + the_date


def blockquote():
    return bq + the_clip


# clip is from the system clipboard
# if you highlighted text but did not also copy it
# what will show up is the last item that was
# copied or cut in any application in which you
# might have been working before
the_clip = clipboard.get()

# sys.argv[0] = this appname
# sys.argv[1] = the webpage title
# sys.argv[2] = the webpage url
the_title = sys.argv[1]
the_url = sys.argv[2]

# layout and format in markdown
first_line = 'Clipped from ' + bracket(the_title) + accessed()
link = bracket(the_title) + colonspace + the_url
payload = first_line + spacer + blockquote() + spacer + link + spacer + ruler + spacer
    if d['status'] == 'finished':
        console.hide_output()
        chosen = console.alert(
            'Download Finished',
            "Video is already in Pythonista.\nWhat else do you want to do with it?",
            'Quick Look', 'Open in', 'Save to Album')
        if chosen == 1:
            console.quicklook(d['filename'])
        elif chosen == 2:
            console.open_in(d['filename'])
        elif chosen == 3:
            save_video(d['filename'].encode('utf-8'))


if appex.is_running_extension() and re.search(
        r'https*:\/\/[^\s]+', appex.get_attachments()[0]) is not None:
    url = appex.get_attachments()[0]
else:
    clip = re.search(r'https*:\/\/[^\s]+', clipboard.get())
    if clip is None:
        url = console.input_alert('URL Input')
    else:
        url = clipboard.get()

console.clear()
config = input('input config: ')
sys.argv = ['you-get', '%s' % config, '%s' % url]
you_get.main()
import clipboard

filename = "pyui_from_clipboard.pyui"  # edit this line BEFORE running
assert clipboard.get(), "There is no text on the clipboard!"
with open(filename, "w") as out_file:
    out_file.write(clipboard.get())
print("The contents of the clipboard are now on file {}.".format(filename))
# http://www.leancrew.com/all-this/2014/08/automatic-shortened-urls-via-google/
import requests
import json
import clipboard

# Build the request.
shortener = "https://www.googleapis.com/urlshortener/v1/url"
longURL = clipboard.get()
headers = {'content-type': 'application/json'}
payload = {'longUrl': longURL}

# Get the shortened URL and put it on the clipboard.
r = requests.post(shortener, headers=headers, data=json.dumps(payload))
clipboard.set(r.json()['id'])
# startTimer.py - writes epoch time to text file for work log
# by: Jason Verly
# rev date: 2014-07-25

import time
import console
import os
import os.path
import clipboard
import webbrowser
import urllib

if os.path.isfile('timer.txt'):
    console.clear()
    console.hud_alert('File exists', 'error')
    webbrowser.open('drafts://')
else:
    console.clear()
    curDate = time.time()
    f = open('timer.txt', 'w')
    f.write(str(curDate))
    f.close()
    console.hud_alert('Timer started', 'success')
    worklogtext = clipboard.get()
    encodetxt = urllib.quote(worklogtext, safe='')
    draft_url = 'drafts4://x-callback-url/create?text='
    action = '&action%3DWorkLog_Entry&afterSuccess%3DDelete'
    webbrowser.open(draft_url + encodetxt + action)
print("[2] lowercase") print("[3] UPPERCASE") print("[4] Capital case") print("[5] Strip Leading") print("[6] Strip Trailing") print("[7] Strip All") print("[8] URL Quote") print("[x] Exit") formatType = input("Select Conversion: ") if formatType == "x": print("Exited") else: #userInput = getClipboardData() userInput = clipboard.get() #userInput = raw_input("Input String: ") print("\n\n") if formatType == "1": outString = titleCase(userInput) elif formatType == "2": outString = userInput.lower() elif formatType == "3": outString = userInput.upper() elif formatType == "4": outString = userInput.capitalize() elif formatType == "5": outString = userInput.lstrip() elif formatType == "7":
table.flex = "WH" table.data_source = ui.ListDataSource(data) table.data_source.delete_enabled = False table.delegate = Delegate() rview.add_subview(table) return rview def segchange(sender): index = sender.selected_index if index == 2: # Gist view["repolabel"].text = "Gist ID:" view["bbutton"].title = "Browse gists" else: view["repolabel"].text = "Repo:" view["bbutton"].title = "Browse repos" view = ui.load_view('gitrepo') for name in 'username reponame'.split(): view[name].autocapitalization_type = ui.AUTOCAPITALIZE_NONE #parse = urllib.parse(clipboard.get().strip()) parse = urllib.parse.urlparse(clipboard.get().strip()) if parse.netloc in "www.github.com github.com".split(): path = [i for i in parse.path.split("/") if i] if len(path) >= 2: view["username"].text, view["reponame"].text = path[:2] if len(path) >= 4: view["reponame"].text += '/' + path[3] view["sgcontrol"].action = segchange view.present('popover')
import clipboard
import console
import webbrowser
import string
from glob import glob
from urlparse import urlparse
import sys

twitter_name = 'jayhickey'

try:
    mytext = sys.argv[1]
    print clipboard.get()
except IndexError:
    mytext = clipboard.get()

u = urlparse(mytext)
mytext = mytext.replace('https://twitter.com/', 'tweetbot://')
mytext = mytext.replace('statuses', 'status')
mytext = mytext.replace('http://twitter.com/', 'tweetbot://')
mytext = mytext.replace('http://mobile.twitter.com/', 'tweetbot://')
mytext = mytext.replace('https://mobile.twitter.com/', 'tweetbot://')

if mytext.count('/') < 3 or mytext.find('tweets') != -1:
    mytext = 'tweetbot://' + twitter_name + '/user_profile' + u.path

# console.clear()
print mytext
webbrowser.open(mytext)
# coding: utf-8
import clipboard
import urllib2
import webbrowser

clipString = clipboard.get()
marky = 'http://heckyesmarkdown.com/go/?u='
queryString = marky + clipString
reqMD = urllib2.Request(queryString)
openMD = urllib2.urlopen(reqMD)
content = (openMD.read().decode('utf-8'))
clipboard.set(content)
webbrowser.open(queryString)
# coding: utf-8
# https://forum.omz-software.com/topic/2974/need-python-script-to-decode-or-encode-in-base64
from __future__ import print_function
import base64

encsting = raw_input('Encoded Value:')
decstring = base64.b64decode(encsting)
print(decstring)

# ...

import base64
import clipboard

encsting = clipboard.get()
decstring = base64.b64decode(encsting)
clipboard.set(decstring)