def main():
	arg = workflow.get_input().encode('utf-8')
	tag_pattern = re.compile(r"((?!.*@done).*" + TAG + r".*(\n*\t*(?!\w.*:$)\w.*)*)", re.M)
	task_pattern = re.compile(r'\s{1}@\w+(\(\d{4}-\d{2}-\d{2}\s*((|\d{2}:\d{2}))\)|)', re.M)
	notes_pattern = re.compile(r"^-.*\n*|\t")
	
	# Isolate tasks with @remind tag
	tp_remind = re.findall(tag_pattern, arg)

	# Iterate over every task tagged and process it.
	url_str = ''
	for match in tp_remind:
		
		# Process Task
		src_task = re.sub("- ","",re.search("^-.*", match[0].strip()).group())
		task = re.sub(task_pattern,'',src_task)
			
		# Process task time
		dt = taskTime(src_task)
		
		# Prepare notes if ADD_NOTES is True, else omit notes from the Reminder.
		if ADD_NOTES:
			# Get the task's tags, minus the remind tag
			tag_str = ' '.join([tag for tag in stripTags(src_task) if tag])
			
			# Prepare Notes
			notes = re.sub(notes_pattern, "", match[0].strip()).strip()
			
			# Create body of task
			if tag_str and notes:
				notes_str = notes.strip()+'\n'+tag_str.strip()
			elif tag_str and not notes:
				notes_str = tag_str.strip()
			elif notes and not tag_str:
				notes_str = notes.strip()
			else:
				notes_str = ''
				
		else:
			notes_str = ''

		# Add the task to Reminders.
		task_str = task + ' on ' + str(dt[0]) + ' at ' + str(dt[1]) + ' /' + DEFAULT_LIST

		if url_str == '':
			url_str = 'fantastical2://x-callback-url/parse?sentence=' + urllib2.quote(task_str, '') + '&notes=' + urllib2.quote(notes_str, '') + '&reminder=1'
		else:
			url_str = 'fantastical2://x-callback-url/parse?sentence=' + urllib2.quote(task_str, '') + '&notes=' + urllib2.quote(notes_str, '') + '&reminder=1&x-success=' + urllib2.quote(url_str, '')

	workflow.set_output(url_str)
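taskTime(), stripTags(), TAG, ADD_NOTES and DEFAULT_LIST come from outside this excerpt. Purely as an assumption about their shape (not the original implementation), minimal sketches might look like this:

import re

TAG = '@remind'            # hypothetical values for the module-level settings
ADD_NOTES = True
DEFAULT_LIST = 'Reminders'

def taskTime(task):
	# Assumed to return (date, time) taken from a tag like @remind(2016-03-01 17:30)
	m = re.search(r'\((\d{4}-\d{2}-\d{2})\s*(\d{2}:\d{2})?\)', task)
	if m:
		return (m.group(1), m.group(2) or '09:00')
	return ('today', '09:00')

def stripTags(task):
	# Assumed to return the task's @tags, minus the remind tag itself
	tags = re.findall(r'@\w+(?:\([^)]*\))?', task)
	return [t for t in tags if not t.startswith(TAG)]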
Example #2
def save_action(self, sender):
    if self.image_view.image:
        # We draw a new image here, so that it has the current
        # orientation (the canvas is quadratic).
        filename = time.strftime('%Y%m%d%H%M%S') + '.png'
        with ui.ImageContext(self.width, self.height) as ctx:
            self.image_view.image.draw()
            img = ctx.get_image()
            img = ui2pil(img)
            bbox = img.getbbox()
            img = img.crop(bbox)
            editor.set_file_contents(filename, img._repr_png_(), 'dropbox')
            editor.insert_text('![test](../' + filename + ')')
            self.close()
    else:
        console.hud_alert('No Image', 'error')
    workflow.set_output('test')
Example #3
def editorial_save(tlist):
    """
    **This will fail horribly if used outside Editorial.**
    It doesn't really save anything; it only prepares the data for saving.
    It is meant for use only in Editorial.app, as a workaround for [this bug](http://omz-forums.appspot.com/editorial/post/5925732018552832),
    which prevents a simple call to editor.set_file_contents from working; a Set File Contents block has to be used instead.

    It's annoying.
    """
    import workflow
    import pickle

    paths = []
    path_to_content = {}
    for item in tlist.items:
        if hasattr(item, "source"):
            item.sublist.dedent()
            text = PlainPrinter().pformat(item.sublist)
            path = item.source.replace(path_to_folder_synced_in_editorial, "")
            paths.append(path)
            path_to_content[path] = text.decode("utf-8")
    with real_open("content-temp.pickle", "w") as f:
        pickle.dump(path_to_content, f)
    workflow.set_output("\n".join(paths))
#coding: utf-8
import workflow
import editor
import clipboard
import datetime


entry_file = workflow.get_variable('entry_filename')
file_contents = editor.get_file_contents(entry_file)

if file_contents:
    entry = file_contents
else:
    entry = 'empty'

daily_stats = workflow.get_variable('journal_stats')
print('daily_stats: ' + daily_stats)

# Write the stats back into the entry file and pass the combined text along.
content = entry + '\n\n' + daily_stats
editor.set_file_contents(entry_file, content)

workflow.set_variable('entry_text', content)
workflow.set_variable('entry_filename', entry_file)

clipboard.set(content)
workflow.set_output(content)
Example #5
import datetime
import re
import workflow
import dialogs
import reminders

action_in = workflow.get_input()
for line in action_in.split('\n'):
    for name, s_time in re.findall(r'(.*)@alarm\((.*)\)', line):
        date, time = s_time.split(', ')
        d_yyyy, d_mm, d_dd = [int(x) for x in date.split('-')]
        t_hh, t_mm = [int(x) for x in time.split(':')]
        rem = reminders.Reminder()
        rem.title = name
        due = datetime.datetime(d_yyyy, d_mm, d_dd, t_hh, t_mm)
        rem.due_date = due
        a = reminders.Alarm()
        a.date = due
        rem.alarms = [a]
        try:
            res = dialogs.alert(
                'The Reminder Was Set',
                'Name: {name}\n{date} {time}'.format(name=name,
                                                     date=date,
                                                     time=time), 'Ok')
            rem.save()
        except KeyboardInterrupt:
            print("User Cancled Input")

action_out = action_in

workflow.set_output(action_out)
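For reference, a quick check of what the @alarm pattern above captures, on a made-up input line:

import re

sample = 'Buy milk @alarm(2016-03-01, 17:30)'
print(re.findall(r'(.*)@alarm\((.*)\)', sample))
# -> [('Buy milk ', '2016-03-01, 17:30')], i.e. the name plus 'date, time'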
Example #6
title = re.sub(r"^[^A-z]*([A-z].*)", "\\1", input)
title_tagged = linguistictagger.tag_string(title, linguistictagger.SCHEME_LEXICAL_CLASS)
last_word = len(title_tagged) - 1
output = []

while True:
	if re.search(r"[ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0-9]", title_tagged[last_word][1]): # `r"[A-z]"`/`r"\w"` matches `_`!?
		break
	last_word -= 1

for i, v in enumerate(title_tagged):
	w = re.sub(r"^[*_]*([^*_]*)", "\\1", v[1])
	w_prefix = re.sub(r"^([*_]*)[^*_]*", "\\1", v[1])
	if i == 0 or i == last_word:
		output.append(w_prefix + capitalize_(w))
	elif w.lower() in general_exceptions:
		# output.append(w_prefix + w.lower())
		output.append(v[1])
	elif v[0] == "Preposition":
		if len(w) > float("inf"):
			output.append(w_prefix + w.capitalize())
		else:
			# output.append(w_prefix + w.lower())
			output.append(v[1])
	else:
		output.append(w_prefix + capitalize_(w))

output = input_prefix + "".join(output)

workflow.set_output(output)
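capitalize_(), general_exceptions and input_prefix are defined elsewhere; input_prefix would be whatever leading non-letter characters were stripped from the title on the first line. As a rough assumption about the other two (not the original code):

# Hypothetical stand-ins for names used above but not shown in this excerpt.
general_exceptions = {'a', 'an', 'and', 'as', 'at', 'but', 'by', 'for',
                      'in', 'nor', 'of', 'on', 'or', 'the', 'to', 'with'}

def capitalize_(word):
    # Assumed to upper-case only the first letter and leave the rest alone
    # (str.capitalize() would also lower-case the rest, e.g. 'iOS' -> 'Ios').
    return word[:1].upper() + word[1:]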
Example #7
import workflow
import re

# Get the URL from the previous action
url = workflow.get_input()
match = re.search(r"youtube\.com/.*v=([^&]*)", url)
if match:
  action_out = match.group(1)
else:
  action_out = 'error'

#print action_out

workflow.set_output(action_out)
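A quick sanity check of the pattern, on a made-up URL:

import re

url = 'https://www.youtube.com/watch?v=abc123XYZ&t=42s'
print(re.search(r"youtube\.com/.*v=([^&]*)", url).group(1))
# -> abc123XYZ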
    elif exif[orientation] == 8:
        img = image.rotate(90, expand=True)
# case: the image doesn't have getexif
except (AttributeError, KeyError, IndexError):
    pass

doc_path = editor.get_path()
doc_dir, fn = os.path.split(doc_path)
default_name = '%s_image' % timestr

i = 1
while True:
    if not os.path.exists(os.path.join(doc_dir, default_name + '.jpg')):
        break
    default_name = '%s_image_%d' % (timestr, i)
    i += 1

root, rel_doc_path = editor.to_relative_path(editor.get_path())
filename = default_name + '.jpg'

if not filename:
    workflow.stop()

img_data = io.BytesIO()
img.save(img_data, 'jpeg')

rel_doc_dir, fn = os.path.split(rel_doc_path)
dest_path = os.path.join(rel_doc_dir, filename)
editor.set_file_contents(dest_path, img_data.getvalue(), root)
workflow.set_output(filename)
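The image fragments above and further down both start mid-way through the EXIF orientation handling; a typical complete version of that part might look roughly like this (a sketch, not the original code):

def exif_rotated(image):
    # image: a PIL/Pillow Image. Undo the camera's EXIF orientation tag (274)
    # so the pixels match the intended display; the image is returned
    # unchanged if it has no usable EXIF data.
    try:
        orientation = (image._getexif() or {}).get(274)
        if orientation == 3:
            return image.rotate(180, expand=True)
        elif orientation == 6:
            return image.rotate(270, expand=True)
        elif orientation == 8:
            return image.rotate(90, expand=True)
    except (AttributeError, KeyError, IndexError):
        pass
    return image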
def main():
    arg = workflow.get_input().encode('utf-8')
    tag_pattern = re.compile(
        r"((?!.*@done).*" + TAG + r".*(\n*\t*(?!\w.*:$)\w.*)*)", re.M)
    task_pattern = re.compile(
        r'\s{1}@\w+(\(\d{4}-\d{2}-\d{2}\s*((|\d{2}:\d{2}))\)|)', re.M)
    notes_pattern = re.compile(r"^-.*\n*|\t")

    # Isolate tasks with @remind tag
    tp_remind = re.findall(tag_pattern, arg)

    # Iterate over every task tagged and process it.
    url_str = ''
    for match in tp_remind:

        # Process Task
        src_task = re.sub("- ", "",
                          re.search("^-.*", match[0].strip()).group())
        task = re.sub(task_pattern, '', src_task)

        # Process task time
        dt = taskTime(src_task)

        # Prepare notes if ADD_NOTES is True, else omit notes from the Reminder.
        if ADD_NOTES:
            # Get the task's tags, minus the remind tag
            tag_str = ' '.join([tag for tag in stripTags(src_task) if tag])

            # Prepare Notes
            notes = (re.sub(notes_pattern, "", match[0].strip())).strip()

            # Create body of task
            if tag_str and notes:
                notes_str = notes.strip() + '\n' + tag_str.strip()
            elif tag_str and not notes:
                notes_str = tag_str.strip()
            elif notes and not tag_str:
                notes_str = notes.strip()
            else:
                notes_str = ''

        else:
            notes_str = ''

        # Add the task to Reminders.
        task_str = task + ' on ' + str(dt[0]) + ' at ' + str(
            dt[1]) + ' /' + DEFAULT_LIST

        if url_str == '':
            url_str = 'fantastical2://x-callback-url/parse?sentence=' + urllib2.quote(
                task_str,
                '') + '&notes=' + urllib2.quote(notes_str, '') + '&reminder=1'

        else:
            url_str = 'fantastical2://x-callback-url/parse?sentence=' + urllib2.quote(
                task_str, '') + '&notes=' + urllib2.quote(
                    notes_str, '') + '&reminder=1&x-success=' + urllib2.quote(
                        url_str, '')

    workflow.set_output(url_str)
Example #10
last_word = len(title_tagged) - 1
output = []

while True:
    if re.search(
            r"[ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0-9]",
            title_tagged[last_word][1]):  # `r"[A-z]"`/`r"\w"` matches `_`!?
        break
    last_word -= 1

for i, v in enumerate(title_tagged):
    w = re.sub(r"^[*_]*([^*_]*)", "\\1", v[1])
    w_prefix = re.sub(r"^([*_]*)[^*_]*", "\\1", v[1])
    if i == 0 or i == last_word:
        output.append(w_prefix + capitalize_(w))
    elif w.lower() in general_exceptions:
        # output.append(w_prefix + w.lower())
        output.append(v[1])
    elif v[0] == "Preposition":
        if len(w) > float("inf"):
            output.append(w_prefix + w.capitalize())
        else:
            # output.append(w_prefix + w.lower())
            output.append(v[1])
    else:
        output.append(w_prefix + capitalize_(w))

output = input_prefix + "".join(output)

workflow.set_output(output)
    response = requests.put(URL, headers=header, data=json.dumps(data))

    if response.status_code == 201:
        console.hud_alert("Blog post created successfully.", 'success', 2)
    else:
        console.alert("Commit failed.")
elif response.status_code == 200:  # File exists, update it.
    data = {
        'path': post_filename,
        'content': base64.b64encode(post_content),
        'message': "Updated %s" % commit_msg,
        'branch': BRANCH,
        'committer': COMMITTER,
        'sha': response_json['sha']
    }

    response = requests.put(URL, headers=header, data=json.dumps(data))

    if response.status_code == 200:
        console.hud_alert("Blog post updated successfully.", 'success', 2)
    else:
        console.hud_alert("Commit failed.", 'error')
        workflow.stop()
else:  # Something went wrong!
    console.hud_alert("There was a problem with the server.", 'error')
    workflow.stop()

# If we get this far, all is great, so let's pass on the determined destination URL
workflow.set_output(destination_url)
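The commit fragment above begins after the file-existence check; a sketch of the setup it appears to assume (GitHub's contents API; USER, REPO, GITHUB_TOKEN and the sample values below are hypothetical):

import requests

USER, REPO, BRANCH = 'someuser', 'someuser.github.io', 'master'
GITHUB_TOKEN = 'xxxxxxxx'
post_filename = '_posts/2016-03-01-example.md'

URL = 'https://api.github.com/repos/%s/%s/contents/%s' % (USER, REPO, post_filename)
header = {'Authorization': 'token %s' % GITHUB_TOKEN}

# 200: the file already exists (update it, passing its 'sha'); 404: create it with a PUT.
response = requests.get(URL, headers=header, params={'ref': BRANCH})
response_json = response.json()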
Example #12
    journalline = re.search(journalTitleMatch, thefile)
    journal = journalline.group(1)
    volumeline = re.search(volumeMatch, thefile)
    volume = volumeline.group(1)
    numberline = re.search(issueNumberMatch, thefile)
    number = numberline.group(1)
    pagesline = re.search(pagesMatch, thefile)
    pages = pagesline.group(1)
    reference = {'Type': reftype, 'Authors': authors, 'Title': title,
                 'Year': year, 'Journal': journal, 'Volume': volume,
                 'Number': number, 'Pages': pages}

if reftype == 'chapter':
    titleline = re.search(chapterTitleMatch, thefile)
    title = titleline.group(1)
    publisherline = re.search(bookPublisherMatch, thefile)
    publisher = publisherline.group(1)
    cityline = re.search(bookPublisherCityMatch, thefile)
    city = cityline.group(1)
    pagesline = re.search(pagesMatch, thefile)
    pages = pagesline.group(1)
    bookeditorline = re.search(editedBookEditorMatch, thefile)
    editor = bookeditorline.group(1)
    booktitleline = re.search(editedBookTitleMatch, thefile)
    booktitle = booktitleline.group(1)
    reference = {'Type': reftype, 'Authors': authors, 'Title': title,
                 'Year': year, 'Editors': editor, 'Book-title': booktitle,
                 'Pages': pages, 'Publisher': publisher, 'City': city}


# SEND TO JSON:

refline = json.dumps(reference)
workflow.set_output(refline)
Example #13
            pass

        if len(found_snippets) > 0:
            match_count += 1
            root, rel_path = editor.to_relative_path(full_path)
            if filename_match == 0:
                ed_url = 'editorial://open/' + quote(
                    rel_path.encode('utf-8')) + '?root=' + root
                html.write('<h2><a href="' + ed_url + '">' + name +
                           '</a></h2>')
            for snippet in found_snippets:
                start = snippet[3]
                end = snippet[4]
                select_url = 'editorial://open/' + quote(
                    rel_path.encode('utf-8')) + '?root=' + root
                select_url += '&selection=' + str(start) + '-' + str(end)
                html.write('<a class="result-box" href="' + select_url + '">' +
                           snippet[0] + '<span class="highlight">' +
                           snippet[1] + '</span>' + snippet[2] + '</a>')

if match_count == 0:
    html.write('<p>No matches found.</p>')

workflow.set_output(html.getvalue())

#           html.write('<h2><a href="' + ed_url + '">' + name + '</a></h2>')
# Output no match found
#if match_count == 0:
#   html.write('<p>No matches found.</p>' + 'term var = ' + str(term_array) )
#workflow.set_output(html.getvalue())
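found_snippets itself isn't built in this excerpt; judging from the indices used above, each entry is (text_before, matched_text, text_after, start, end). A hypothetical builder:

def make_snippet(text, start, end, context=40):
    # The shape the HTML writer above expects: some leading context, the match,
    # some trailing context, plus the absolute start/end offsets for the selection URL.
    return (text[max(0, start - context):start],
            text[start:end],
            text[end:end + context],
            start,
            end)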
Example #14
			pass


		if len(found_snippets) > 0:
			match_count += 1
			root, rel_path = editor.to_relative_path(full_path)
			if filename_match == 0:
				ed_url = 'editorial://open/' + quote(rel_path.encode('utf-8')) + '?root=' + root
				html.write('<h2><a href="' + ed_url + '">' + name + '</a></h2>')
			for snippet in found_snippets:
				start = snippet[3]
				end = snippet[4]
				select_url = 'editorial://open/' + quote(rel_path.encode('utf-8')) + '?root=' + root
				select_url += '&selection=' + str(start) + '-' + str(end)
				html.write('<a class="result-box" href="' + select_url + '">' + snippet[0] + '<span class="highlight">' + snippet[1] + '</span>' + snippet[2] + '</a>')

if match_count == 0:
	html.write('<p>No matches found.</p>')

workflow.set_output(html.getvalue())

#           html.write('<h2><a href="' + ed_url + '">' + name + '</a></h2>')
# Output no match found
#if match_count == 0:
#   html.write('<p>No matches found.</p>' + 'term var = ' + str(term_array) )
#workflow.set_output(html.getvalue())
	if rot_degrees:
		img = img.rotate(rot_degrees, expand=True)
# case: the image doesn't have getexif
except (AttributeError, KeyError, IndexError):
	pass

doc_path = editor.get_path()
doc_dir, fn = os.path.split(doc_path)
default_name = '{}_image'.format(timestr)

i = 1
while True:
	if not os.path.exists(os.path.join(doc_dir, default_name + '.jpg')):
		break
	default_name = '{}_image_{}'.format(timestr, i)
	i += 1

root, rel_doc_path = editor.to_relative_path(editor.get_path())
filename = default_name + '.jpg'

if not filename:
	workflow.stop()

img_data = io.BytesIO()
img.save(img_data, 'jpeg')

rel_doc_dir, fn = os.path.split(rel_doc_path)
dest_path = os.path.join(rel_doc_dir, filename)
editor.set_file_contents(dest_path, img_data.getvalue(), root)
workflow.set_output(filename)
Line 10: TypeError: 'builtin_function_or_method' object is not iterable
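That error usually means a bound method was iterated over instead of its result. Assuming the failing line looked something like "for line in action_in.splitlines:" (a guess; the original line isn't shown), calling the method fixes it:

#coding: utf-8
import workflow

action_in = workflow.get_input()
# for line in action_in.splitlines:   # wrong: the method object itself is not iterable
for line in action_in.splitlines():   # right: iterate over the returned list of lines
    print(line)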

###==============================

#coding: utf-8
import workflow

action_in = workflow.get_input()
my_list = action_in.splitlines()

print('\n'.join(my_list))
print('=' * 5)
print('\n'.join(sorted(my_list)))
print('=' * 5)
print('\n'.join(sorted(my_list, key=len)))
print('=' * 5)
print('\n'.join(reversed(sorted(my_list, key=len))))
print('=' * 5)

#: Generate the output...
action_out = '\n'.join(reversed(sorted(my_list, key=len)))
workflow.set_output(action_out)

###==============================

#coding: utf-8
import workflow
workflow.set_output('\n'.join(reversed(sorted(
    workflow.get_input().splitlines(), key=len))))