Example #1
def create_block_command(message, block_path):
    block, block_hash = create_block(message, block_path)
    with open(f'{block_path}/{block_hash}', 'w') as block_file:
        print(frontmatter.dumps(block), file=block_file)
        print(frontmatter.dumps(block))

    with open('index.html.jinja2') as file_:
        template = Template(file_.read())
        with open('index.html', 'w') as index:
            print(template.render(block_hash=block_hash), file=index)
Example #2
    def insert(self):
        if self.validate():
            extensions.set_max_id(extensions.get_max_id() + 1)
            self.id = extensions.get_max_id()
            data = {
                "type": self.type,
                "desc": self.desc,
                "title": str(self.title),
                "date": self.date.strftime("%x").replace("/", "-"),
                "tags": self.tags,
                "id": self.id,
                "path": self.path
            }
            if self.type == "bookmarks" or self.type == "pocket_bookmarks":
                data["url"] = self.url

            # convert to markdown
            dataobj = frontmatter.Post(self.content)
            dataobj.metadata = data
            self.fullpath = create(frontmatter.dumps(dataobj),
                                   str(self.id) + "-" + dataobj["date"] + "-" +
                                   dataobj["title"],
                                   path=self.path,
                                   needs_to_open=self.type == "note")

            add_to_index(current_app.config['INDEX_NAME'], self)
            return self.id
        return False
Example #3
def update_item_frontmatter(dataobj_id, new_frontmatter):
    """
    Given an object id, this method overwrites the front matter
    of the post with `new_frontmatter`.

    ---
    date: Str
    id: Str
    path: Str
    tags: List[Str]
    title: Str
    type: note/bookmark
    ---
    """

    from archivy.models import DataObj

    filename = get_by_id(dataobj_id)
    dataobj = frontmatter.load(filename)
    for key in list(new_frontmatter):
        dataobj[key] = new_frontmatter[key]
    md = frontmatter.dumps(dataobj)
    with open(filename, "w", encoding="utf-8") as f:
        f.write(md)

    converted_dataobj = DataObj.from_md(md)
    converted_dataobj.fullpath = str(
        filename.relative_to(current_app.config["USER_DIR"])
    )
    converted_dataobj.index()
    load_hooks().on_edit(converted_dataobj)
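The pattern in this example (load a post, overwrite selected front matter keys, serialize with frontmatter.dumps, and write the file back) recurs throughout these snippets. A minimal standalone sketch of it, with a hypothetical path and keys:

import frontmatter

def overwrite_frontmatter(path, new_frontmatter):
    # Load the post, replace the given keys, then write the serialized result back.
    post = frontmatter.load(path)
    for key, value in new_frontmatter.items():
        post[key] = value
    with open(path, "w", encoding="utf-8") as f:
        f.write(frontmatter.dumps(post))

# hypothetical usage
overwrite_frontmatter("notes/example.md", {"title": "New title", "tags": ["demo"]})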
Example #4
def dump_recursive_comments(rpc,
                            post_author,
                            post_permlink,
                            depth=0,
                            format="markdown"):
    global currentThreadDepth
    postWrapper = TextWrapper()
    postWrapper.width = 120
    postWrapper.initial_indent = "  " * (depth + currentThreadDepth)
    postWrapper.subsequent_indent = "  " * (depth + currentThreadDepth)

    depth = int(depth)

    posts = rpc.get_content_replies(post_author, post_permlink)
    for post in posts:
        meta = {}
        for key in ["author", "permlink"]:
            meta[key] = post[key]
        meta["reply"] = "@{author}/{permlink}".format(**post)
        if format == "markdown":
            body = markdownify(post["body"])
        else:
            body = post["body"]
        yaml = frontmatter.Post(body, **meta)
        print(frontmatter.dumps(yaml))
        reply = rpc.get_content_replies(post["author"], post["permlink"])
        if len(reply):
            dump_recursive_comments(rpc, post["author"], post["permlink"],
                                    depth + 1)
Example #5
    def insert(self):
        if self.validate():
            self.id = app.config["max_id"]
            data = {
                "type": self.type,
                "desc": self.desc,
                "title": str(self.title),
                "date": self.date.strftime("%x").replace("/", "-"),
                "tags": self.tags,
                "id": self.id,
                "path": self.path
            }
            if self.type == "bookmarks" or self.type == "pocket_bookmarks":
                data["url"] = self.url
            app.config["max_id"] += 1

            # convert to markdown
            dataobj = frontmatter.Post(self.content)
            dataobj.metadata = data
            create(frontmatter.dumps(dataobj),
                   str(self.id) + "-" + dataobj["date"] + "-" +
                   dataobj["title"],
                   path=self.path)
            print(add_to_index("dataobj", self))
            return self.id
        return False
Example #6
def cleanup_excerpt(filename):
    print("Processing " + filename + "...", end='')
    post = frontmatter.load(filename)

    if 'excerpt' not in post.keys():
        return

    soup = BeautifulSoup(post['excerpt'], 'html.parser')

    image = soup.find('img')
    if image:
        post['featured-image'] = image.get('src')

    post['excerpt'] = soup.get_text()
    post['excerpt'] = cleanwidget(post['excerpt'])

    soup = BeautifulSoup(post.content, 'html.parser')

    for image in soup.find_all('img'):
        src_old = image['src']
        image['src'] = "{{ site.baseurl }}/" + src_old

    post.content = soup.prettify()

    with open(filename, mode='w', encoding="utf-8") as f:
        text = frontmatter.dumps(post)
        #print (text)
        f.write(text)
        print("DONE")
Example #7
def main():
    parser = argparse.ArgumentParser(description="Create a new book entry")
    options = parser.parse_args()

    query = input("Search: ")
    selected = openlibrary.interactive_search(query=query)
    metadata = selected.metadata

    status, _ = pick.pick(
        ["read", "to-read", "abandoned", "currently-reading"], "Status")
    metadata["status"] = status

    # Determine where to put the date based on the status.
    date = datetime.datetime.now().replace(
        tzinfo=dateutil.tz.tzlocal()).isoformat()
    if status == "currently-reading":
        metadata["date"] = date
    elif status == "read":
        metadata["end_date"] = date

    destination = os.path.expanduser(BOOKS_DIRECTORY)
    with open(os.path.join(destination, f"{selected.basename}.markdown"),
              "w") as fh:
        fh.write(frontmatter.dumps(Document(content="", metadata=metadata)))
        fh.write("\n")
Example #8
def sync(
    all_sheets: bool = False,
    output_folder: str = "_players",
    sheet_app_id: str = typer.Option(envvar="GOOGLE_SHEET_APP_ID", default=""),
    sheet_name: str = typer.Option(envvar="GOOGLE_SHEET_NAME",
                                   default="Sheet1"),
):
    typer.secho("sync-players", fg="yellow")
    try:
        sa = SpreadsheetApp(from_env=True)
    except AttributeError:
        print_expected_env_variables()
        raise typer.Exit()

    try:
        spreadsheet = sa.open_by_id(sheet_app_id)
    except Exception:
        typer.echo(
            f"We can't find that 'sheet_app_id'.\n"
            f"Please double check that 'GOOGLE_SHEET_APP_ID' is set. (Currently set to: '{sheet_app_id}')"
        )
        raise typer.Exit()

    if all_sheets:
        sheets = spreadsheet.get_sheets()
    else:
        try:
            sheets = [spreadsheet.get_sheet_by_name(sheet_name)]
        except Exception:
            typer.echo(
                f"We can't find that 'sheet_name' aka the tab.\n"
                f"Please double check that 'SHEET_NAME' is set. (Currently set to: '{sheet_name}')"
            )
            raise typer.Exit()

    for sheet in sheets:
        data_range = sheet.get_data_range()

        table = Table(data_range, backgrounds=True)

        metadata = {}
        for item in table:
            for key in item.header:
                value = item.get_field_value(key)
                metadata[key] = value

            player = Player(**metadata)

            if not Path(output_folder).exists():
                Path(output_folder).mkdir()

            player_filename = Path(output_folder, f"{player.slug}.md")
            if player_filename.exists():
                post = frontmatter.loads(player_filename.read_text())
            else:
                post = frontmatter.loads("")

            post.metadata.update(player.dict(by_alias=True))

            player_filename.write_text(frontmatter.dumps(post))
Example #9
def process(logohandle, logodir):

    srcfile = os.path.join(logodir, "index.md")

    if not os.path.isfile(srcfile):
        print("WARNING: no index file for '%s' (%s)" % (logohandle, srcfile))
        return

    print("INFO: processing %s" % (logohandle))

    fmsrc = frontmatter.load(srcfile)
    if "images" not in fmsrc.metadata:
        print("WARNING: no images, skipping")
        return

    fmdst = frontmatter.loads("---\nlayout: amp\nnoindex: true\n---")
    fmdst[
        "redirect_to"] = "https://www.vectorlogo.zone/logos/%s/index.html" % logohandle

    dstfile = os.path.join(logodir, "index.amp.html")

    f = open(dstfile, 'w')
    f.write(frontmatter.dumps(fmdst))
    f.write('\n')
    f.close()
Example #10
    def update_existing_posts(self, sessions, users):
        """Take the latest export of sessions and users and update any information that has changed"""
        if self._post_location:
            count = 0
            blog_posts = self.get_blog_posts(self._post_location)
            # Loop through all the markdown posts in the directory
            for post in blog_posts:
                changed = False
                front_matter = frontmatter.loads(open(post,"r").read())
                for session in sessions:
                    if session['session_id'] == front_matter['session_id']:
                        # Gather speaker information
                        emails = session["speakers"].split(",")
                        names = []
                        for speaker_email in emails:
                            for attendee in users:
                                if attendee["speaker_email"] == speaker_email:
                                    name = attendee["first_name"] + " " + attendee["second_name"]
                                    bio_formatted =  f'"{attendee["bio"]}"'
                                    speaker_dict = {
                                            "name":name,
                                            "biography": bio_formatted,
                                            "job-title": attendee["job-title"],
                                            "company": attendee["company"],
                                            "speaker-image": attendee["image-name"]
                                            }
                                    names.append(speaker_dict)
                        # Check if there are changes to speakers
                        if front_matter['speakers'] != names:
                            front_matter['speakers'] = names
                            changed = True

                        # Check if session tracks have changed.
                        tracks = session["tracks"].replace(";",", ")
                        if front_matter["session_track"] != tracks:
                            front_matter["session_track"] = tracks
                            changed = True
                        
                        # Check if title has changed
                        title = re.sub('[^A-Za-z0-9-!: ()]+', '', session["title"])
                        if front_matter['title'] != title:
                            front_matter['title'] = title
                            changed = True
                        
                        # Check if post content has changed
                        content = session['blurb']
                        if front_matter.content != content:
                            front_matter.content = content
                            changed = True
                        
                        if changed:
                            # Write the changed frontmatter to the file.
                            with open(post,"w") as changed_file:
                                changed_file.writelines(frontmatter.dumps(front_matter))
                                print("{0} post updated!".format(session['session_id']))
                                count += 1
            print("{0} posts updated!".format(count))
                        
        else:
            return False
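The change-detection idiom above (rewrite a post only when one of its front matter fields actually differs) can be isolated into a small helper. A minimal sketch, with an illustrative field name and path:

import frontmatter

def update_field(path, key, value):
    # Rewrite the post only if the front matter value actually changed.
    with open(path, "r") as f:
        post = frontmatter.loads(f.read())
    if post.get(key) == value:
        return False
    post[key] = value
    with open(path, "w") as f:
        f.write(frontmatter.dumps(post))
    return True

# hypothetical usage
update_field("_posts/session.md", "session_track", "Cloud, Security")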
Example #11
    def write(self, path: pathlib.Path) -> None:

        if not path.is_dir():
            raise NotADirectoryError(f'{str(path)} is not a directory')

        if not self._sync_notes:
            print('sync locked for', self._title)
            return

        print('updating', self._title)

        mod_date = max([anno.modified_date for anno in self._annotations])
        mod_date_str = mod_date.isoformat()

        fmpost = frontmatter.Post(self.content,
                                  asset_id=self._asset_id,
                                  title=self.title,
                                  author=self.author,
                                  modified_date=mod_date_str)

        fn = path / self._filename

        with open(fn, 'w') as f:
            s = frontmatter.dumps(fmpost)
            f.write(s)
Example #12
def add_agent_information(search_path, file_component):
    """
    Will check to see if the file needs to be processed before updating the values.  Will then populate the new agent
    information with what should be used in the future.
    :param search_path:
    :param file_component:
    :return:
    """
    if file_component.suffix != '.md':
        return

    entry = frontmatter.load(file_component)

    write_file = False

    # Check to see if the agent information has been defined in the log file
    if fm_keys.AGENT_DEFINITION not in entry.metadata:
        entry.metadata[fm_keys.AGENT_DEFINITION] = dict(
            DEFAULT_AGENT_DEFINTION)
        write_file = True
    else:
        agent_definition = entry.metadata[fm_keys.AGENT_DEFINITION]
        try:
            for key in (fm_keys.AGENT_NAME, fm_keys.AGENT_VERSION,
                        fm_keys.FILE_VERSION):
                write_file = write_file or _check_set_value(
                    agent_definition, key)

        except (AttributeError, TypeError):
            entry.metadata[fm_keys.AGENT_DEFINITION] = DEFAULT_AGENT_DEFINTION
            write_file = True

    if write_file:
        with open(file_component, "w") as output_file:
            output_file.write(frontmatter.dumps(entry))
Example #13
 def getYAMLProjects(self):
     """ Get projects from projects.yml and appends to self.projects"""
     with open("projects.yml", 'r') as stream:
         try:
             yaml_data = yaml.safe_load(stream)
             for each in yaml_data:
                 md_file = frontmatter.loads(open("template.md","r").read())
                 # Add front matter attributes
                 md_file['title'] = each["name"]
                 md_file['image'] = "/assets/images/projects/" + each["image"]
                 # Prepend the output folder and the new_post_name
                 file_name = each["name"].replace(" ", "-").lower() +  ".md"
                 if each["url"]:
                     md_file["url"] = each["url"]
                     # response = requests.get(each["url"])
                     # soup = BeautifulSoup(response.text, features="html.parser")
                     # metas = soup.find_all('meta')
                     # description = [ meta.attrs['content'] for meta in metas if 'name' in meta.attrs and meta.attrs['name'] == 'description' ]
                     # if len(description) > 0:
                     #     md_file["description"] = '"{0}"'.format(description[0])
                 output_object = "projects/" + file_name
                 # Create the new post object and write the front matter to post.
                 with open(output_object,"w") as new_post_file:
                     new_post_file.writelines(frontmatter.dumps(md_file))
                     print("Jekyll project created for {0} at {1}".format(each["name"], output_object))
         except yaml.YAMLError as exc:
             print(exc)
Example #14
def main():
    parser = argparse.ArgumentParser("python analyze.py")
    parser.add_argument(
        "post_file",
        nargs=1,
        type=str,
        help="post file",
        default=None,
    )
    args = parser.parse_args()
    old_filename = args.post_file[0]
    reg = r"^(\d{4}-\d{2}-\d{2})-(.*)$"
    m = re.match(reg, old_filename)
    if not m:
        print("No date in filename")
        return
    dt = m[1]
    new_filename = m[2]
    cmd_args = ["git", "mv", old_filename, new_filename]
    cmd = " ".join(cmd_args)
    print(f"running {cmd}")
    subprocess.run(cmd_args)

    print(f"reading from {new_filename}")
    with open(new_filename) as f:
        post = frontmatter.load(f)
        post.metadata["date"] = dt
        new_content = frontmatter.dumps(post)

    print(f"writing to {new_filename}")
    with open(new_filename, "w") as w:
        w.write(new_content)
Example #15
def show_dataobj(dataobj_id):
    dataobj = data.get_item(dataobj_id)

    if not dataobj:
        flash("Data could not be found!", "error")
        return redirect("/")

    if request.args.get("raw") == "1":
        return frontmatter.dumps(dataobj)

    backlinks = []
    if app.config["SEARCH_CONF"]["enabled"]:
        incoming_links = search(f"/{dataobj_id}\)]]")
        if incoming_links:
            for hit in incoming_links:
                if hit["id"] != dataobj_id:
                    backlinks.append({"title": hit["title"], "id": hit["id"]})

    return render_template(
        "dataobjs/show.html",
        title=dataobj["title"],
        dataobj=dataobj,
        backlinks=backlinks,
        current_path=dataobj["dir"],
        form=forms.DeleteDataForm(),
        view_only=0,
        search_enabled=app.config["SEARCH_CONF"]["enabled"],
    )
Example #16
def dump_recursive_parents(rpc,
                           post_author,
                           post_permlink,
                           limit=1,
                           format="markdown"):
    global currentThreadDepth

    limit = int(limit)

    postWrapper = TextWrapper()
    postWrapper.width = 120
    postWrapper.initial_indent = "  " * (limit)
    postWrapper.subsequent_indent = "  " * (limit)

    if limit > currentThreadDepth:
        currentThreadDepth = limit + 1

    post = rpc.get_content(post_author, post_permlink)

    if limit and post["parent_author"]:
        parent = rpc.get_content_replies(post["parent_author"], post["parent_permlink"])
        if len(parent):
            dump_recursive_parents(rpc, post["parent_author"], post["parent_permlink"], limit - 1)

    meta = {}
    for key in ["author", "permlink"]:
        meta[key] = post[key]
    meta["reply"] = "@{author}/{permlink}".format(**post)
    if format == "markdown":
        body = markdownify(post["body"])
    else:
        body = post["body"]
    yaml = frontmatter.Post(body, **meta)
    print(frontmatter.dumps(yaml))
Example #17
    def insert(self):
        """Creates a new file with the object's attributes"""
        if self.validate():
            helpers.set_max_id(helpers.get_max_id() + 1)
            self.id = helpers.get_max_id()
            self.date = datetime.now()
            data = {
                "type": self.type,
                "desc": self.desc,
                "title": str(self.title),
                "date": self.date.strftime("%x").replace("/", "-"),
                "tags": self.tags,
                "id": self.id,
                "path": self.path
            }
            if self.type == "bookmark" or self.type == "pocket_bookmark":
                data["url"] = self.url

            # convert to markdown file
            dataobj = frontmatter.Post(self.content)
            dataobj.metadata = data
            self.fullpath = create(
                frontmatter.dumps(dataobj),
                str(self.id) + "-" + dataobj["date"] + "-" + dataobj["title"],
                path=self.path,
            )

            add_to_index(current_app.config['INDEX_NAME'], self)
            return self.id
        return False
Example #18
def cli(entry):
    if not entry:
        click.echo('sup', err=True)
        sys.exit(1)

    post = create_post(' '.join(entry))

    dir = CONFIG['github'].get('dir', '_posts')
    filename = post['date'].strftime('%Y-%m-%d-%H%M%S')
    post['date'] = post['date'].strftime('%Y-%m-%d %H:%M:%S')
    path = f'{dir}/{filename}.md'
    message = 'sup'
    content = frontmatter.dumps(post)

    params = {}

    if CONFIG['github'].get('branch'):
        params['branch'] = CONFIG['github']['branch']

    github = Github(CONFIG['github']['token'])
    repo = github.get_user().get_repo(CONFIG['github']['repo'])

    response = repo.create_file(path, message, content, **params)

    click.echo(response['content'].html_url)
Example #19
    def insert(self):
        """Creates a new file with the object's attributes"""
        if self.validate():

            helpers.set_max_id(helpers.get_max_id() + 1)
            self.id = helpers.get_max_id()
            self.date = datetime.now()

            hooks = helpers.load_hooks()

            hooks.before_dataobj_create(self)
            data = {
                "type": self.type,
                "title": str(self.title),
                "date": self.date.strftime("%x").replace("/", "-"),
                "tags": self.tags,
                "id": self.id,
                "path": self.path
            }
            if self.type == "bookmark" or self.type == "pocket_bookmark":
                data["url"] = self.url

            # convert to markdown file
            dataobj = frontmatter.Post(self.content)
            dataobj.metadata = data
            self.fullpath = create(
                frontmatter.dumps(dataobj),
                f"{self.id}-{dataobj['title']}",
                path=self.path,
            )

            hooks.on_dataobj_create(self)
            self.index()
            return self.id
        return False
Example #20
def dump_recursive_comments(rpc,
                            post_author,
                            post_permlink,
                            depth=0,
                            format="markdown"):
    global currentThreadDepth
    postWrapper = TextWrapper()
    postWrapper.width = 120
    postWrapper.initial_indent = "  " * (depth + currentThreadDepth)
    postWrapper.subsequent_indent = "  " * (depth + currentThreadDepth)

    depth = int(depth)

    posts = rpc.get_content_replies(post_author, post_permlink)
    for post in posts:
        meta = {}
        for key in ["author", "permlink"]:
            meta[key] = post[key]
        meta["reply"] = "@{author}/{permlink}".format(**post)
        if format == "markdown":
            body = markdownify(post["body"])
        else:
            body = post["body"]
        yaml = frontmatter.Post(body, **meta)
        print(frontmatter.dumps(yaml))
        reply = rpc.get_content_replies(post["author"], post["permlink"])
        if len(reply):
            dump_recursive_comments(rpc, post["author"], post["permlink"], depth + 1)
Example #21
def dump_recursive_parents(rpc,
                           post_author,
                           post_permlink,
                           limit=1,
                           format="markdown"):
    global currentThreadDepth

    limit = int(limit)

    postWrapper = TextWrapper()
    postWrapper.width = 120
    postWrapper.initial_indent = "  " * (limit)
    postWrapper.subsequent_indent = "  " * (limit)

    if limit > currentThreadDepth:
        currentThreadDepth = limit + 1

    post = rpc.get_content(post_author, post_permlink)

    if limit and post["parent_author"]:
        parent = rpc.get_content_replies(post["parent_author"], post["parent_permlink"])
        if len(parent):
            dump_recursive_parents(rpc, post["parent_author"], post["parent_permlink"], limit - 1)

    meta = {}
    for key in ["author", "permlink"]:
        meta[key] = post[key]
    meta["reply"] = "@{author}/{permlink}".format(**post)
    if format == "markdown":
        body = markdownify(post["body"])
    else:
        body = post["body"]
    yaml = frontmatter.Post(body, **meta)
    print(frontmatter.dumps(yaml))
Example #22
def problem2md(problem, convert_desc=True):
    metadata = {'title': problem['title'], 'difficulty': problem['difficulty']}
    if problem['tags']:
        metadata['tags'] = problem['tags']

    description = problem['description']
    description_md = description
    if convert_desc:
        h = html2text.HTML2Text()
        description_md = h.handle(description)

    lines = []
    lines.append('# ' + problem['title'] + '\n')
    lines.append('## Problem\n')
    lines.append('### Metadata\n')
    if problem['tags']:
        lines.append('- tags: ' + ', '.join(problem['tags']))
    lines.append('- difficulty: ' + problem['difficulty'])
    urls = leet_lint_url(problem['url'])
    for k, v in urls.items():
        lines.append('- source({}): <{}>'.format(k, v))
    lines.append('\n### Description\n')
    lines.append(description_md)

    content = '\n'.join(lines)
    yaml_content = YamlContent(metadata, content)
    problem_md = frontmatter.dumps(yaml_content, allow_unicode=True)
    return problem_md
Example #23
    def move_files(self, files):
        if not os.path.exists(self.target_dir):
            os.makedirs(self.target_dir)

        if not os.path.exists(self.target_latest_dir):
            os.makedirs(self.target_latest_dir)

        for filename, src_filepath in files.items():
            if src_filepath is None:
                continue
            front_matter = self.front_matter_for(filename)
            # Add YAML front matter if required
            if front_matter is not None:
                content = frontmatter.dumps(
                    frontmatter.load(src_filepath, **front_matter))
                with open(self.target_filepath(filename), "w") as f:
                    f.write(content)
            else:
                shutil.copyfile(src_filepath, self.target_filepath(filename))

        # Always copy schema to latest directory since versions
        # are sorted
        for schema in self.schemas_metadata():
            shutil.copyfile(
                self.filepath(schema["original_path"]),
                self.target_latest_filepath(schema["path"]),
            )
Example #24
    def test_dumping_with_custom_delimiters(self):
        "dump with custom delimiters"
        post = frontmatter.load("tests/hello-world.markdown")
        dump = frontmatter.dumps(post,
                                 start_delimiter="+++",
                                 end_delimiter="+++")

        self.assertTrue("+++" in dump)
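For context, a minimal sketch of what the custom-delimiter call in this test produces (the metadata here is invented):

import frontmatter

post = frontmatter.Post("Hello, world!", title="Demo")
print(frontmatter.dumps(post, start_delimiter="+++", end_delimiter="+++"))
# Expected shape of the output:
# +++
# title: Demo
# +++
#
# Hello, world!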
Example #25
    def get_contents(self):
        """Get the contents of the note. Includes header (front matter) and
        body.

        :return: contents of Note

        """
        return dumps(self.contents)
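frontmatter.dumps returns the front matter header and the body as a single string, which is what get_contents hands back here. A tiny sketch with invented metadata:

import frontmatter

note = frontmatter.Post("Body of the note.", title="Groceries", tags=["todo"])
text = frontmatter.dumps(note)
# text now looks roughly like:
# ---
# tags:
# - todo
# title: Groceries
# ---
#
# Body of the note.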
Example #26
    def test_dumping_with_custom_delimiters(self):
        "dump with custom delimiters"
        post = frontmatter.load('tests/hello-world.markdown')
        dump = frontmatter.dumps(post,
                                 start_delimiter='+++',
                                 end_delimiter='+++')

        self.assertTrue('+++' in dump)
Example #27
    def test_dumping_with_custom_delimiters(self):
        "dump with custom delimiters"
        post = frontmatter.load('tests/hello-world.markdown')
        dump = frontmatter.dumps(post,
            start_delimiter='+++',
            end_delimiter='+++')

        self.assertTrue('+++' in dump)
Example #28
    def test_no_handler(self):
        "default to YAMLHandler when no handler is attached"
        post = frontmatter.load('tests/hello-world.markdown')
        del post.handler

        text = frontmatter.dumps(post)
        self.assertIsInstance(
            frontmatter.detect_format(text, frontmatter.handlers), YAMLHandler)
Example #29
def _fix_dates_for_article(repofile):
    abs_article_file = os.path.join(current_app.yawt_root_dir, repofile)
    post = frontmatter.load(abs_article_file)
    now = _now()
    if 'create_time' not in post.metadata:
        post['create_time'] = now
    post['modified_time'] = now
    save_file(abs_article_file, frontmatter.dumps(post, Dumper=ExplicitDumper))
Example #30
def main():
    parser = argparse.ArgumentParser(description='leetcode blog generator')
    parser.add_argument('question_id',
                        type=int,
                        metavar='question_id',
                        help='leetcode problem id')
    parser.add_argument('output_dir',
                        type=str,
                        metavar='output_dir',
                        help='output dir')
    args = parser.parse_args()
    title_slug, data = get_question(args.question_id)
    path = os.path.join(args.output_dir, title_slug + '.md')
    print(path)
    print(frontmatter.dumps(data))
    with open(path, 'w') as f:
        f.write(frontmatter.dumps(data))
Example #31
def process(options, dirparam):
    logodir = os.path.abspath(dirparam)
    logohandle = os.path.basename(logodir)

    #print("INFO: processing %s (%s)" % (logohandle, logodir))
    files = [
        f for f in os.listdir(logodir)
        if os.path.isfile(os.path.join(logodir, f))
    ]

    images = []
    skipped = 0

    for f in files:
        if not f.endswith(".svg") and not f.endswith(".png"):
            if f.endswith(".ai") or f.endswith(".pdf") or f.endswith(".eps"):
                print("INFO: skipping " + f)
                skipped = skipped + 1
            continue
        if f.endswith("_src.svg") or f.endswith(".png"):
            print("INFO: skipping " + f)
            skipped = skipped + 1
            continue
        if not f.startswith(logohandle + '-'):
            print("INFO: skipping " + f)
            skipped = skipped + 1
            continue

        images.append(f)

    if len(images) == 0:
        print("WARNING: no images for %s" % logohandle)
        return

    indexfn = os.path.join(logodir, "index.md")
    if not os.path.exists(indexfn):
        print("WARNING: no index.md for %s" % logohandle)
        return
        #indexmd = frontmatter.loads("---\n---\n")
    else:
        indexmd = frontmatter.load(indexfn)

    indexmd['images'] = images
    #indexmd['skipped'] = skipped

    if "logohandle" not in indexmd.keys():
        indexmd["logohandle"] = logohandle

    if "title" not in indexmd.keys():
        indexmd["title"] = logohandle.capitalize()

    if "sort" not in indexmd.keys():
        indexmd["sort"] = indexmd["title"].lower()

    f = open(indexfn, 'w')
    f.write(frontmatter.dumps(indexmd))
    f.write('\n')
    f.close()
Example #32
def yaml_parse_file(args, initial_content):
    message = None

    if args.file and args.file != "-":
        if not os.path.isfile(args.file):
            raise Exception("File %s does not exist!" % args.file)
        with open(args.file) as fp:
            message = fp.read()
    elif args.file == "-":
        message = sys.stdin.read()
    else:
        import tempfile
        from subprocess import Popen
        EDITOR = os.environ.get('EDITOR', 'vim')
        # prefix = ""
        # if "permlink" in initial_content.metadata:
        #   prefix = initial_content.metadata["permlink"]
        with tempfile.NamedTemporaryFile(
                suffix=b".md",
                prefix=b"steem-",
                delete=False
        ) as fp:
            # Write initial content
            fp.write(bytes(frontmatter.dumps(initial_content), 'utf-8'))
            fp.flush()
            # Define parameters for command
            args = [EDITOR]
            if re.match("gvim", EDITOR):
                args.append("-f")
            args.append(fp.name)
            # Execute command
            Popen(args).wait()
            # Read content of file
            fp.seek(0)
            message = fp.read().decode('utf-8')

    try:
        meta, body = frontmatter.parse(message)
    except:
        meta = initial_content.metadata
        body = message

    # make sure that at least the metadata keys of initial_content are
    # present!
    for key in initial_content.metadata:
        if key not in meta:
            meta[key] = initial_content.metadata[key]

    # Extract anything that is not steem-libs meta and return it separately
    # for json_meta field
    json_meta = {key: meta[key] for key in meta if key not in [
        "title",
        "category",
        "author"
    ]}

    return meta, json_meta, body
Example #33
def yaml_parse_file(args, initial_content):
    message = None

    if args.file and args.file != "-":
        if not os.path.isfile(args.file):
            raise Exception("File %s does not exist!" % args.file)
        with open(args.file) as fp:
            message = fp.read()
    elif args.file == "-":
        message = sys.stdin.read()
    else:
        import tempfile
        from subprocess import Popen
        EDITOR = os.environ.get('EDITOR', 'vim')
        # prefix = ""
        # if "permlink" in initial_content.metadata:
        #   prefix = initial_content.metadata["permlink"]
        with tempfile.NamedTemporaryFile(
            suffix=b".md",
            prefix=b"piston-",
            delete=False
        ) as fp:
            # Write initial content
            fp.write(bytes(frontmatter.dumps(initial_content), 'utf-8'))
            fp.flush()
            # Define parameters for command
            args = [EDITOR]
            if re.match("gvim", EDITOR):
                args.append("-f")
            args.append(fp.name)
            # Execute command
            Popen(args).wait()
            # Read content of file
            fp.seek(0)
            message = fp.read().decode('utf-8')

    try:
        meta, body = frontmatter.parse(message)
    except:
        meta = initial_content.metadata
        body = message

    # make sure that at least the metadata keys of initial_content are
    # present!
    for key in initial_content.metadata:
        if key not in meta:
            meta[key] = initial_content.metadata[key]

    # Extract anything that is not piston meta and return it separately
    # for json_meta field
    json_meta = {key: meta[key] for key in meta if key not in [
        "title",
        "category",
        "author"
    ]}

    return meta, json_meta, body
Example #34
def commit_updated_posts(fronted_posts_by_path, silos):
    """
    Returns the response of committing the (presumably changed) given posts to
    the remote GITHUB_REF of this repo by following the recipe outlined here:

        https://developer.github.com/v3/git/

    1. Get the current commit object
    2. Retrieve the tree it points to
    3. Retrieve the content of the blob object that tree has for that
       particular file path
    4. Change the content somehow and post a new blob object with that new
       content, getting a blob SHA back
    5. Post a new tree object with that file path pointer replaced with your
       new blob SHA getting a tree SHA back
    6. Create a new commit object with the current commit SHA as the parent
       and the new tree SHA, getting a commit SHA back
    7. Update the reference of your branch to point to the new commit SHA
    """
    if not fronted_posts_by_path:
        action_log("All good: already marked.")
        return None
    if not os.getenv("GITHUB_TOKEN"):
        raise ValueError("missing GITHUB_TOKEN")
    if not os.getenv("GITHUB_REPOSITORY"):
        raise ValueError("missing GITHUB_REPOSITORY")
    if not os.getenv("GITHUB_REF"):
        raise ValueError("missing GITHUB_REF")

    parent = parent_sha()
    # Create a new tree with our updated blobs.
    new_tree = repo().create_git_tree(
        [
            InputGitTreeElement(
                path,
                mode='100644',  # 'file', @see https://developer.github.com/v3/git/trees/#tree-object
                type='blob',
                content=frontmatter.dumps(fronted_post))
            for path, fronted_post in fronted_posts_by_path.items()
        ],
        base_tree=repo().get_git_tree(parent))

    # Commit the new tree.
    new_commit = repo().create_git_commit(
        f'(syndicate): adding IDs for {silos}', new_tree,
        [repo().get_git_commit(parent)])
    # Poosh it.
    ref_name = os.getenv('GITHUB_REF').lstrip('refs/')
    try:
        repo().get_git_ref(ref_name).edit(new_commit.sha)
    except github.GithubException as err:
        action_error(f"Failed to mark syndicated posts: {err}")
        return None
    ## NOTE Need to update the reference SHA for future workflow steps.
    action_setenv('SYNDICATE_SHA', new_commit.sha)
    action_log("Syndicate posts marked.")
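The helper functions repo() and parent_sha() used above are not shown. A minimal sketch of how steps 1-2 of the recipe might look with PyGithub (these reconstructions are assumptions, not the project's actual helpers):

import os
from github import Github

def repo():
    # Step 1 of the recipe: resolve the repository named by the workflow environment.
    gh = Github(os.environ["GITHUB_TOKEN"])
    return gh.get_repo(os.environ["GITHUB_REPOSITORY"])

def parent_sha():
    # Steps 1-2: the commit SHA that GITHUB_REF currently points at.
    ref_name = os.environ["GITHUB_REF"].replace("refs/", "", 1)
    return repo().get_git_ref(ref_name).object.sha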
Example #35
    def test_no_handler(self):
        "default to YAMLHandler when no handler is attached"
        post = frontmatter.load('tests/hello-world.markdown')
        del post.handler

        text = frontmatter.dumps(post)
        self.assertIsInstance(
            frontmatter.detect_format(text, frontmatter.handlers), 
            YAMLHandler)
Example #36
 def dump_posts(self):
     self.stdout.write('更新 post...')
     post_path = os.path.join(app_config.path, 'posts')
     for post in tqdm(self.blog_site.posts.values()):
         with open(os.path.join(post_path, post.filename), mode='wb') as f:
             content = frontmatter.dumps(post,
                                         handler=RuamelYamlHandler(),
                                         allow_unicode=True)
             f.write((content + '\n').encode('utf-8'))
Example #37
def _process_items(items, output):
    res = []
    for key, data in items:
        matter = _build_item(data)
        fp = os.path.join(output, '%s.md' % key.lower())
        with open(fp, 'w') as f:
            f.write(frontmatter.dumps(matter))
        res.append((key, fp))
    return res
Example #38
 def create_post(self, filename, data):
     with self.repository.open(filename, 'w+') as fp:
         post = frontmatter.load(fp)
         if 'metadata' in data:
             post.metadata = data['metadata']
         if 'content' in data:
             #TODO: parse from media
             post.content = data['content']
         fp.write(frontmatter.dumps(post))
         return filename
Example #39
    def sanity_check(self, filename, handler_type):
        "Ensure we can load -> dump -> load"
        post = frontmatter.load(filename)

        self.assertIsInstance(post.handler, handler_type)

        # dump and reload
        repost = frontmatter.loads(frontmatter.dumps(post))

        self.assertEqual(post.metadata, repost.metadata)
        self.assertEqual(post.content, repost.content)
        self.assertEqual(post.handler, repost.handler)
Example #40
def _add_tags_for_article(repofile, searcher):
    abs_article_file = os.path.join(current_app.yawt_root_dir, repofile)
    post = frontmatter.load(abs_article_file)
    if 'tags' not in post.metadata:
        keywords = [keyword for keyword, _
                    in searcher.key_terms_from_text("content", post.content,
                                                    numterms=3)]
        keyword_str = ",".join(keywords)
        usertags = input('Enter tags (default '+keyword_str+'): ')
        tags = usertags or keyword_str
        post['tags'] = tags
        save_file(abs_article_file, frontmatter.dumps(post))
Example #41
 def edit_post(self, filename, data):
     # Replace post's data in file
     with self.repository.open(filename, 'r+') as fp:
         post = frontmatter.load(fp)
         if 'metadata' in data:
             post.metadata = data['metadata']
         if 'content' in data:
             post.content = data['content']
         fp.seek(0)
         fp.truncate()
         fp.write(frontmatter.dumps(post))
         return filename
Example #42
    def test_unicode_post(self):
        "Ensure unicode is parsed correctly"
        chinese = frontmatter.load('tests/chinese.txt', 'utf-8')
        output = frontmatter.dumps(chinese)
        zh = "中文"

        self.assertTrue(isinstance(chinese.content, six.text_type))

        # check that we're dumping out unicode metadata, too
        self.assertTrue(zh in output)

        # this shouldn't work as ascii, because it's Hanzi
        self.assertRaises(UnicodeEncodeError, chinese.content.encode, 'ascii')
Example #43
    def test_dump_to_file(self):
        "dump post to filename"
        post = frontmatter.load('tests/hello-world.markdown')

        tempdir = tempfile.mkdtemp()
        filename = os.path.join(tempdir, 'hello.md')
        frontmatter.dump(post, filename)

        with open(filename) as f:
            self.assertEqual(f.read(), frontmatter.dumps(post))

        # cleanup
        shutil.rmtree(tempdir)
Example #44
    def test_pretty_dumping(self):
        "Use pyaml to dump nicer"
        # pyaml only runs on 2.7 and above
        if sys.version_info > (2, 6) and pyaml is not None:

            with codecs.open('tests/unpretty.md', 'r', 'utf-8') as f:
                data = f.read()

            post = frontmatter.load('tests/unpretty.md')
            yaml = pyaml.dump(post.metadata)

            # the unsafe dumper gives you nicer output, for times you want that
            dump = frontmatter.dumps(post, Dumper=pyaml.UnsafePrettyYAMLDumper)

            self.assertEqual(dump, data)
            self.assertTrue(yaml in dump)
Example #45
def create_block(message, block_path):
    block = frontmatter.loads('')
    last_block_hash = get_last_block_hash(block_path)
    if last_block_hash:
        block['prev'] = last_block_hash
    block['created_at'] = datetime.datetime.now().isoformat()
    block['nonce'] = 0
    block.content = message
    while True:
        block['nonce'] += 1
        block_hash = hashlib.sha256(
            frontmatter.dumps(block).encode()
        ).hexdigest()
        if block_hash[:2] == '00':
            break
    return block, block_hash
Example #46
def _add_tags_for_indexed_article(indexed_file, edit):
    root_dir = current_app.yawt_root_dir
    if not indexed_file.startswith(content_folder()):
        print("file must be in content folder")
        return

    searcher = _whoosh().searcher
    docnums = searcher.document_numbers(fullname=fullname(indexed_file))
    keywords = [keyword for keyword, _
                in searcher.key_terms(docnums, "content", numterms=3)]
    keyword_str = ",".join(keywords)
    print("Tags: "+keyword_str)
    if edit:
        abs_article_file = os.path.join(root_dir, indexed_file)
        post = frontmatter.load(abs_article_file)
        post['tags'] = keyword_str
        save_file(abs_article_file, frontmatter.dumps(post))
Example #47
def main():
    feed_url = 'http://blog.marksteve.com/feed'
    page = 1
    while True:
        d = feedparser.parse('{}?page={}'.format(feed_url, page))
        if not d.entries:
            break
        for entry in d.entries:
            id = entry.link.rsplit('/', 1)[1]
            body = html2text(entry.content[0]['value'])
            title = entry.title
            y, m, d = entry.published_parsed[:3]
            publish_date = '{}-{:02d}-{:02d}'.format(y, m, d)
            post = frontmatter.Post(body, title=title,
                                    publish_date=publish_date)
            with open('posts/{}.md'.format(id), 'wb') as f:
                f.write(frontmatter.dumps(post).encode('utf8'))
        page += 1
Example #48
    def test_external(self):
        filename = self.data['filename']
        content = self.data['content']
        metadata = self.data['metadata']
        content_stripped = content.strip()

        post = frontmatter.load(filename)

        self.assertEqual(post.content, content_stripped)
        for k, v in metadata.items():
            self.assertEqual(post[k], v)

        # dumps and then loads to ensure round trip conversions.
        posttext = frontmatter.dumps(post, handler=self.handler)
        post_2 = frontmatter.loads(posttext)

        for k in post.metadata:
            self.assertEqual(post.metadata[k], post_2.metadata[k])

        self.assertEqual(post.content, post_2.content)
Example #49
def yaml_parse_file(args, initial_content):
    message = None

    if args.file and args.file != "-":
        if not os.path.isfile(args.file):
            raise Exception("File %s does not exist!" % args.file)
        with open(args.file) as fp:
            message = fp.read()
    elif args.file == "-":
        message = sys.stdin.read()
    else:
        import tempfile
        from subprocess import call
        EDITOR = os.environ.get('EDITOR', 'vim')
        prefix = ""
        if "permlink" in initial_content.metadata:
            prefix = initial_content.metadata["permlink"]
        with tempfile.NamedTemporaryFile(
            suffix=b".md",
            prefix=bytes("piston-" + prefix, 'ascii'),
            delete=False
        ) as fp:
            fp.write(bytes(frontmatter.dumps(initial_content), 'utf-8'))
            fp.flush()
            call([EDITOR, fp.name])
            fp.seek(0)
            message = fp.read().decode('utf-8')

    try:
        meta, body = frontmatter.parse(message)
    except:
        meta = initial_content.metadata
        body = message

    # make sure that at least the metadata keys of initial_content are
    # present!
    for key in initial_content.metadata:
        if key not in meta:
            meta[key] = initial_content.metadata[key]

    return meta, body
Example #50
        created = curr_time()
        pardir = created[:7]  # use the year-month, e.g. 2018-02, as the parent folder
        metadata = {'created': created, 'title': title}
        if args.setup_comment:
            label_ = COMMENT_PREFIX + '_' + created
            gh_client = GithubClient(REPO)
            gh_client.create_label(label_)
            source_md = '[{title}](../blob/{branch}/posts/{pardir}/{created}.md)'.format(
                title=title, branch=BRANCH, pardir=pardir, created=created
            )
            body = source_md + ' ' + COMMENT_BODY
            labels = [COMMENT_PREFIX, label_]
            issue_title = '[{}] {}'.format(COMMENT_PREFIX, title)
            issue = gh_client.create_issue(issue_title, body, labels)
            print('created issue with title: {}, id: {}'.format(issue.title, issue.number))
            metadata = {'created': created, 'title': title, 'issue_id': issue.number}
        content = '# ' + title
        yaml_content = YamlContent(metadata, content)
        post_dir = os.path.join(POSTSDIR, pardir)
        post_fn = os.path.join(post_dir, created + '.md')
        mkdir_p(post_dir)
        post_md = frontmatter.dumps(yaml_content, allow_unicode=True)
        with open(post_fn, 'w', encoding='utf-8') as f:
            print('create post file {}...'.format(post_fn))
            f.write(post_md)
        # update summary
        update_summary(ROOTDIR)

    if args.fix_summary:
        update_summary(ROOTDIR)
Example #51
 def add_guesses_to_page(self, team, data, guesses, doit):
 
     fm, team_path = self.get_team_data(team)
     
     fm['team']['links']['GitHub'] = data['html_url']
     if 'Github' in fm['team']['links']:
         del fm['team']['links']['Github']
     if 'github' in fm['team']['links']:
         del fm['team']['links']['github']
     
     if data['blog']:
         fm['team']['links']['Website'] = data['blog']
     
     if guesses:
         robot_code = fm.metadata.setdefault('robot_code', collections.OrderedDict())
         need_sort = False
         changed = False
         
         for year in sorted(guesses.keys()):
             types = guesses[year]
             iyear = int(year)
             
             need_sort = need_sort or iyear not in robot_code
             
             existing = None
                             
             for ctype in sorted(types.keys()):
                 # should only be one of each type
                 repos = types[ctype]
                 if not repos:
                     continue
                 
                 # Don't create until we have to
                 if existing is None:
                     existing = robot_code.setdefault(iyear, [collections.OrderedDict()])
                     existing_types = existing[0]
             
                 
                 repo = repos[0]
                 
                 # if the type already exists, and URL is different, output a warning, but do not change
                 existing_type = existing_types.get(ctype)
                 if existing_type:
                     if existing_type[0] != repo['html_url']:
                         pass
                 
                     # if the URL already exists, then don't change the language
                 else:
                     # if it doesn't exist, set it
                     existing_types[ctype] = [repo['html_url'], self._convert_language(repo['language'])]
                     changed = True
     
         if need_sort:
             fm.metadata['robot_code'] = collections.OrderedDict(sorted((k, v) for k, v in robot_code.items()))
             
         if changed:
             # write it out to file
             fcontents = frontmatter.dumps(fm)
             
             if not doit:
                 with open(team_path) as fp:
                     old_contents = fp.read()
                 
                 print("Would write to file:")
                 print(fcontents)
                 
                 print("Diff")
                 for line in difflib.unified_diff(old_contents.splitlines(), fcontents.splitlines(),
                                                  fromfile='old_contents', tofile='new_contents'):
                     print(line)
                 
                 doit = yesnoedit("Write it (or write and edit)?")
                 
             if doit:
                 print("Writing to file")
                 
                 with open(team_path, 'w') as fp:
                     fp.write(fcontents)
                     
                 if doit == 'edit':
                     os.system('"%s" "%s"' % (os.environ.get("EDITOR", "vi"), team_path))
         else:
             print("No changes detected")
Example #52
 def add_guesses_to_page(self, team, data, guesses, doit):
 
     p = 'frc%04d' % (int(int(team)/1000)*1000)
     sp = '%03d' % (int(int(team)/100)*100)
     
     team_path = abspath(join(dirname(__file__), '..', p, '_frc', sp, '%s.md' % team))
     print("Path:", team_path)
 
     fm = frontmatter.load(team_path)
     
     fm['team']['links']['Github'] = data['html_url']
     
     if data['blog']:
         fm['team']['links']['Website'] = data['blog']
     
     if guesses:
         robot_code = fm.metadata.setdefault('robot_code', collections.OrderedDict())
         need_sort = False
         changed = False
         
         for year in sorted(guesses.keys()):
             types = guesses[year]
             iyear = int(year)
             
             need_sort = need_sort or iyear not in robot_code
             
             existing = None
                             
             for ctype in sorted(types.keys()):
                 # should only be one of each type
                 repos = types[ctype]
                 if not repos:
                     continue
                 
                 # Don't create until we have to
                 if existing is None:
                     existing = robot_code.setdefault(iyear, [collections.OrderedDict()])
                     existing_types = existing[0]
             
                 
                 repo = repos[0]
                 
                 # if the type already exists, and URL is different, output a warning, but do not change
                 existing_type = existing_types.get(ctype)
                 if existing_type:
                     if existing_type[0] != repo['html_url']:
                         pass
                 
                     # if the URL already exists, then don't change the language
                 else:
                     # if it doesn't exist, set it
                     existing_types[ctype] = [repo['html_url'], repo['language']]
                     changed = True
     
         if need_sort:
             fm.metadata['robot_code'] = collections.OrderedDict(sorted((k, v) for k, v in robot_code.items()))
             
         if changed:
             # write it out to file
             fcontents = frontmatter.dumps(fm)
             
             if not doit:
                 print("Would write to file:")
                 print(fcontents)
                 
                 doit = yesno("Write it?")
                 
             if doit:
                 print("Writing to file")
                 
                 with open(team_path, 'w') as fp:
                     fp.write(fcontents)
         else:
             print("No changes detected")
Example #53
def main() :
    global args

    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="Command line tool to interact with the Steem network"
    )

    """
        Default settings for all tools
    """
    parser.add_argument(
        '--node',
        type=str,
        default=config["node"],
        help='Websocket URL for public Steem API (default: "wss://this.piston.rocks/")'
    )
    parser.add_argument(
        '--rpcuser',
        type=str,
        default=config["rpcuser"],
        help='Websocket user if authentication is required'
    )
    parser.add_argument(
        '--rpcpassword',
        type=str,
        default=config["rpcpassword"],
        help='Websocket password if authentication is required'
    )
    parser.add_argument(
        '--nobroadcast',
        action='store_true',
        help='Do not broadcast anything'
    )
    parser.add_argument(
        '--verbose', '-v',
        type=int,
        default=3,
        help='Verbosity'
    )
    subparsers = parser.add_subparsers(help='sub-command help')

    """
        Command "set"
    """
    setconfig = subparsers.add_parser('set', help='Set configuration')
    setconfig.add_argument(
        'key',
        type=str,
        choices=availableConfigurationKeys,
        help='Configuration key'
    )
    setconfig.add_argument(
        'value',
        type=str,
        help='Configuration value'
    )
    setconfig.set_defaults(command="set")

    """
        Command "config"
    """
    configconfig = subparsers.add_parser('config', help='show local configuration')
    configconfig.set_defaults(command="config")

    """
        Command "changewalletpassphrase"
    """
    changepasswordconfig = subparsers.add_parser('changewalletpassphrase', help='Change wallet password')
    changepasswordconfig.set_defaults(command="changewalletpassphrase")

    """
        Command "addkey"
    """
    addkey = subparsers.add_parser('addkey', help='Add a new key to the wallet')
    addkey.add_argument(
        'wifkeys',
        nargs='*',
        type=str,
        help='the private key in wallet import format (wif)'
    )
    addkey.set_defaults(command="addkey")

    """
        Command "delkey"
    """
    delkey = subparsers.add_parser('delkey', help='Delete keys from the wallet')
    delkey.add_argument(
        'pub',
        nargs='*',
        type=str,
        help='the public key to delete from the wallet'
    )
    delkey.set_defaults(command="delkey")

    """
        Command "getkey"
    """
    getkey = subparsers.add_parser('getkey', help='Dump the privatekey of a pubkey from the wallet')
    getkey.add_argument(
        'pub',
        type=str,
        help='the public key for which to show the private key'
    )
    getkey.set_defaults(command="getkey")

    """
        Command "listkeys"
    """
    listkeys = subparsers.add_parser('listkeys', help='List available keys in your wallet')
    listkeys.set_defaults(command="listkeys")

    """
        Command "listaccounts"
    """
    listaccounts = subparsers.add_parser('listaccounts', help='List available accounts in your wallet')
    listaccounts.set_defaults(command="listaccounts")

    """
        Command "list"
    """
    parser_list = subparsers.add_parser('list', help='List posts on Steem')
    parser_list.set_defaults(command="list")
    parser_list.add_argument(
        '--start',
        type=str,
        help='Start list from this identifier (pagination)'
    )
    parser_list.add_argument(
        '--category',
        type=str,
        help='Only posts with in this category'
    )
    parser_list.add_argument(
        '--sort',
        type=str,
        default=config["list_sorting"],
        choices=["trending", "created", "active", "cashout", "payout", "votes", "children", "hot"],
        help='Sort posts'
    )
    parser_list.add_argument(
        '--limit',
        type=int,
        default=config["limit"],
        help='Limit posts by number'
    )

    """
        Command "categories"
    """
    parser_categories = subparsers.add_parser('categories', help='Show categories')
    parser_categories.set_defaults(command="categories")
    parser_categories.add_argument(
        '--sort',
        type=str,
        default=config["categories_sorting"],
        choices=["trending", "best", "active", "recent"],
        help='Sort categories'
    )
    parser_categories.add_argument(
        'category',
        nargs="?",
        type=str,
        help='Only categories used by this author'
    )
    parser_categories.add_argument(
        '--limit',
        type=int,
        default=config["limit"],
        help='Limit categories by number'
    )

    """
        Command "read"
    """
    parser_read = subparsers.add_parser('read', help='Read a post on Steem')
    parser_read.set_defaults(command="read")
    parser_read.add_argument(
        'post',
        type=str,
        help='@author/permlink-identifier of the post to read (e.g. @xeroc/python-steem-0-1)'
    )
    parser_read.add_argument(
        '--full',
        action='store_true',
        help='Show full header information (YAML formatted)'
    )
    parser_read.add_argument(
        '--comments',
        action='store_true',
        help='Also show all comments'
    )
    parser_read.add_argument(
        '--parents',
        type=int,
        default=0,
        help='Show x parents for the reply'
    )
    parser_read.add_argument(
        '--format',
        type=str,
        default=config["format"],
        help='Format post',
        choices=["markdown", "raw"],
    )

    """
        Command "post"
    """
    parser_post = subparsers.add_parser('post', help='Post something new')
    parser_post.set_defaults(command="post")
    parser_post.add_argument(
        '--author',
        type=str,
        required=False,
        default=config["default_author"],
        help='Publish post as this user (requires the key to be installed in the wallet)'
    )
    parser_post.add_argument(
        '--permlink',
        type=str,
        required=False,
        help='The permlink (together with the author identifies the post uniquely)'
    )
    parser_post.add_argument(
        '--category',
        default=config["post_category"],
        type=str,
        help='Specify category'
    )
    parser_post.add_argument(
        '--title',
        type=str,
        required=False,
        help='Title of the post'
    )
    parser_post.add_argument(
        '--file',
        type=str,
        default=None,
        help='Filename to open. If not present, or "-", stdin will be used'
    )

    """
        Command "reply"
    """
    reply = subparsers.add_parser('reply', help='Reply to an existing post')
    reply.set_defaults(command="reply")
    reply.add_argument(
        'replyto',
        type=str,
        help='@author/permlink-identifier of the post to reply to (e.g. @xeroc/python-steem-0-1)'
    )
    reply.add_argument(
        '--author',
        type=str,
        required=False,
        default=config["default_author"],
        help='Publish post as this user (requires the key to be installed in the wallet)'
    )
    reply.add_argument(
        '--permlink',
        type=str,
        required=False,
        help='The permlink (together with the author identifies the post uniquely)'
    )
    reply.add_argument(
        '--title',
        type=str,
        required=False,
        help='Title of the post'
    )
    reply.add_argument(
        '--file',
        type=str,
        required=False,
        help='Send file as response. If "-", read from stdin'
    )

    """
        Command "edit"
    """
    parser_edit = subparsers.add_parser('edit', help='Edit an existing post')
    parser_edit.set_defaults(command="edit")
    parser_edit.add_argument(
        'post',
        type=str,
        help='@author/permlink-identifier of the post to edit (e.g. @xeroc/python-steem-0-1)'
    )
    parser_edit.add_argument(
        '--author',
        type=str,
        required=False,
        default=config["default_author"],
        help='Post an edit as another author'
    )
    parser_edit.add_argument(
        '--file',
        type=str,
        required=False,
        help='Patch with content of this file'
    )
    parser_edit.add_argument(
        '--replace',
        action='store_true',
        help="Don't patch but replace original post (will make you lose votes)"
    )

    """
        Command "upvote"
    """
    parser_upvote = subparsers.add_parser('upvote', help='Upvote a post')
    parser_upvote.set_defaults(command="upvote")
    parser_upvote.add_argument(
        'post',
        type=str,
        help='@author/permlink-identifier of the post to upvote (e.g. @xeroc/python-steem-0-1)'
    )
    parser_upvote.add_argument(
        '--voter',
        type=str,
        required=False,
        default=config["default_voter"],
        help='The voter account name'
    )
    parser_upvote.add_argument(
        '--weight',
        type=float,
        default=config["default_vote_weight"],
        required=False,
        help='Actual weight (from 0.1 to 100.0)'
    )

    """
        Command "downvote"
    """
    parser_downvote = subparsers.add_parser('downvote', help='Downvote a post')
    parser_downvote.set_defaults(command="downvote")
    parser_downvote.add_argument(
        '--voter',
        type=str,
        default=config["default_voter"],
        help='The voter account name'
    )
    parser_downvote.add_argument(
        'post',
        type=str,
        help='@author/permlink-identifier of the post to downvote (e.g. @xeroc/python-steem-0-1)'
    )
    parser_downvote.add_argument(
        '--weight',
        type=float,
        default=config["default_vote_weight"],
        required=False,
        help='Actual weight (from 0.1 to 100.0)'
    )

    """
        Command "replies"
    """
    replies = subparsers.add_parser('replies', help='Show recent replies to your posts')
    replies.set_defaults(command="replies")
    replies.add_argument(
        '--author',
        type=str,
        required=False,
        default=config["default_author"],
        help='Show replies to this author'
    )
    replies.add_argument(
        '--limit',
        type=int,
        default=config["limit"],
        help='Limit posts by number'
    )

    """
        Command "transfer"
    """
    parser_transfer = subparsers.add_parser('transfer', help='Transfer STEEM')
    parser_transfer.set_defaults(command="transfer")
    parser_transfer.add_argument(
        'to',
        type=str,
        help='Recipient'
    )
    parser_transfer.add_argument(
        'amount',
        type=float,
        help='Amount to transfer'
    )
    parser_transfer.add_argument(
        'asset',
        type=str,
        choices=["STEEM", "SBD"],
        help='Asset to transfer (i.e. STEEM or SBD)'
    )
    parser_transfer.add_argument(
        'memo',
        type=str,
        nargs="?",
        default="",
        help='Optional memo'
    )
    parser_transfer.add_argument(
        '--account',
        type=str,
        required=False,
        default=config["default_author"],
        help='Transfer from this account'
    )

    """
        Command "powerup"
    """
    parser_powerup = subparsers.add_parser('powerup', help='Power up (vest STEEM as STEEM POWER)')
    parser_powerup.set_defaults(command="powerup")
    parser_powerup.add_argument(
        'amount',
        type=str,
        help='Amount to powerup including asset (e.g.: 100.000 STEEM)'
    )
    parser_powerup.add_argument(
        '--account',
        type=str,
        required=False,
        default=config["default_author"],
        help='Powerup from this account'
    )
    parser_powerup.add_argument(
        '--to',
        type=str,
        required=False,
        default=config["default_author"],
        help='Powerup this account'
    )

    """
        Command "powerdown"
    """
    parser_powerdown = subparsers.add_parser('powerdown', help='Power down (start withdrawing STEEM from STEEM POWER)')
    parser_powerdown.set_defaults(command="powerdown")
    parser_powerdown.add_argument(
        'amount',
        type=str,
        help='Amount to powerdown including asset (e.g.: 100.000 VESTS)'
    )
    parser_powerdown.add_argument(
        '--account',
        type=str,
        required=False,
        default=config["default_author"],
        help='powerdown from this account'
    )

    """
        Command "powerdownroute"
    """
    parser_powerdownroute = subparsers.add_parser('powerdownroute', help='Setup a powerdown route')
    parser_powerdownroute.set_defaults(command="powerdownroute")
    parser_powerdownroute.add_argument(
        'to',
        type=str,
        default=config["default_author"],
        help='The account receiving either VESTS/SteemPower or STEEM.'
    )
    parser_powerdownroute.add_argument(
        '--percentage',
        type=float,
        default=100,
        help='The percentage of the withdrawal that goes to the "to" account'
    )
    parser_powerdownroute.add_argument(
        '--account',
        type=str,
        default=config["default_author"],
        help='The account which is powering down'
    )
    parser_powerdownroute.add_argument(
        '--auto_vest',
        action='store_true',
        help=('Set to true if the "from" account should receive the VESTS as '
              'VESTS, or false if it should receive them as STEEM.')
    )

    """
        Command "balance"
    """
    parser_balance = subparsers.add_parser('balance', help='Show the balance of one or more accounts')
    parser_balance.set_defaults(command="balance")
    parser_balance.add_argument(
        'account',
        type=str,
        nargs="*",
        default=config["default_author"],
        help='balance of these accounts (multiple accounts allowed)'
    )

    """
        Command "history"
    """
    parser_history = subparsers.add_parser('history', help='Show the history of an account')
    parser_history.set_defaults(command="history")
    parser_history.add_argument(
        'account',
        type=str,
        nargs="?",
        default=config["default_author"],
        help='History of this account'
    )
    parser_history.add_argument(
        '--limit',
        type=int,
        default=config["limit"],
        help='Limit number of entries'
    )
    parser_history.add_argument(
        '--end',
        type=int,
        default=99999999999999,
        help='Transaction number (#) of the last transaction to show.'
    )
    parser_history.add_argument(
        '--types',
        type=str,
        nargs="*",
        default=[],
        help='Show only these operation types'
    )

    """
        Command "interest"
    """
    interest = subparsers.add_parser('interest', help='Get information about interest payment')
    interest.set_defaults(command="interest")
    interest.add_argument(
        'account',
        type=str,
        nargs="*",
        default=config["default_author"],
        help='Inspect these accounts'
    )

    """
        Command "web"
    """
    webconfig = subparsers.add_parser('web', help='Launch web version of piston')
    webconfig.set_defaults(command="web")
    webconfig.add_argument(
        '--port',
        type=int,
        default=config["web:port"],
        help='Port to open for internal web requests'
    )
    webconfig.add_argument(
        '--host',
        type=str,
        default=config["web:host"],
        help='Host address to listen to'
    )

    """
        Parse Arguments
    """
    args = parser.parse_args()

    # Logging
    log = logging.getLogger("piston")
    verbosity = ["critical",
                 "error",
                 "warn",
                 "info",
                 "debug"][int(min(args.verbose, 4))]
    log.setLevel(getattr(logging, verbosity.upper()))
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch = logging.StreamHandler()
    ch.setLevel(getattr(logging, verbosity.upper()))
    ch.setFormatter(formatter)
    log.addHandler(ch)

    # GrapheneAPI logging
    if args.verbose > 4:
        verbosity = ["critical",
                     "error",
                     "warn",
                     "info",
                     "debug"][int(min((args.verbose - 4), 4))]
        gphlog = logging.getLogger("graphenebase")
        gphlog.setLevel(getattr(logging, verbosity.upper()))
        gphlog.addHandler(ch)
    if args.verbose > 8:
        verbosity = ["critical",
                     "error",
                     "warn",
                     "info",
                     "debug"][int(min((args.verbose - 8), 4))]
        gphlog = logging.getLogger("grapheneapi")
        gphlog.setLevel(getattr(logging, verbosity.upper()))
        gphlog.addHandler(ch)

    if not hasattr(args, "command"):
        parser.print_help()
        sys.exit(2)

    # We don't require RPC for these commands
    rpc_not_required = [
        "set",
        "config",
        "web"
        ""]
    if args.command not in rpc_not_required and args.command:
        steem = Steem(
            node=args.node,
            rpcuser=args.rpcuser,
            rpcpassword=args.rpcpassword,
            nobroadcast=args.nobroadcast,
        )

    if args.command == "set":
        config[args.key] = args.value

    elif args.command == "config":
        t = PrettyTable(["Key", "Value"])
        t.align = "l"
        for key in config:
            if key in availableConfigurationKeys:  # hide internal config data
                t.add_row([key, config[key]])
        print(t)

    elif args.command == "changewalletpassphrase":
        steem.wallet.changePassphrase()

    elif args.command == "addkey":
        pub = None
        if len(args.wifkeys):
            for wifkey in args.wifkeys:
                pub = (steem.wallet.addPrivateKey(wifkey))
                if pub:
                    print(pub)
        else:
            import getpass
            wifkey = ""
            while True:
                wifkey = getpass.getpass('Private Key (wif) [Enter to quit]:')
                if not wifkey:
                    break
                pub = (steem.wallet.addPrivateKey(wifkey))
                if pub:
                    print(pub)

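        # The account of the last imported key becomes the new default author/voter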
        if pub:
            name = steem.wallet.getAccountFromPublicKey(pub)
            print("Setting new default user: %s" % name)
            print("You can change these settings with:")
            print("    piston set default_author x")
            print("    piston set default_voter x")
            config["default_author"] = name
            config["default_voter"] = name

    elif args.command == "delkey":
        for pub in args.pub:
            steem.wallet.removePrivateKeyFromPublicKey(pub)

    elif args.command == "getkey":
        print(steem.wallet.getPrivateKeyForPublicKey(args.pub))

    elif args.command == "listkeys":
        t = PrettyTable(["Available Key"])
        t.align = "l"
        for key in steem.wallet.getPublicKeys():
            t.add_row([key])
        print(t)

    elif args.command == "listaccounts":
        t = PrettyTable(["Name", "Type", "Available Key"])
        t.align = "l"
        for account in steem.wallet.getAccounts():
            t.add_row([
                account["name"] or "n/a",
                account["type"] or "n/a",
                account["pubkey"]
            ])
        print(t)

    elif args.command == "reply":
        from textwrap import indent
        parent = steem.get_content(args.replyto)
        if parent["id"] == "0.0.0":
            print("Can't find post %s" % args.replyto)
            return

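        # Quote the parent post ("> " prefix on every line) as the initial reply body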
        reply_message = indent(parent["body"], "> ")

        post = frontmatter.Post(reply_message, **{
            "title": args.title if args.title else "Re: " + parent["title"],
            "author": args.author if args.author else "required",
            "replyto": args.replyto,
        })

        meta, json_meta, message = yaml_parse_file(args, initial_content=post)

        for required in ["author", "title"]:
            if (required not in meta or
                    not meta[required] or
                    meta[required] == "required"):
                print("'%s' required!" % required)
                # TODO, instead of terminating here, send the user back
                # to the EDITOR
                return

        pprint(steem.reply(
            meta["replyto"],
            message,
            title=meta["title"],
            author=meta["author"],
            meta=json_meta,
        ))

    elif args.command == "post" or args.command == "yaml":
        post = frontmatter.Post("", **{
            "title": args.title if args.title else "required",
            "author": args.author if args.author else "required",
            "category": args.category if args.category else "required",
        })

        meta, json_meta, body = yaml_parse_file(args, initial_content=post)

        if not body:
            print("Empty body! Not posting!")
            return

        for required in ["author", "title", "category"]:
            if (required not in meta or
                    not meta[required] or
                    meta[required] == "required"):
                print("'%s' required!" % required)
                # TODO, instead of terminating here, send the user back
                # to the EDITOR
                return

        pprint(steem.post(
            meta["title"],
            body,
            author=meta["author"],
            category=meta["category"],
            meta=json_meta,
        ))

    elif args.command == "edit":
        original_post = steem.get_content(args.post)

        edited_message = None
        if original_post["id"] == "0.0.0":
            print("Can't find post %s" % args.post)
            return

        post = frontmatter.Post(original_post["body"], **{
            "title": original_post["title"] + " (immutable)",
            "author": original_post["author"] + " (immutable)"
        })

        meta, json_meta, edited_message = yaml_parse_file(args, initial_content=post)
        pprint(steem.edit(
            args.post,
            edited_message,
            replace=args.replace,
            meta=json_meta,
        ))

    elif args.command == "upvote" or args.command == "downvote":
        if args.command == "downvote":
            weight = -float(args.weight)
        else:
            weight = +float(args.weight)
        if not args.voter:
            print("Not voter provided!")
            return
        pprint(steem.vote(
            args.post,
            weight,
            voter=args.voter
        ))

    elif args.command == "read":
        post_author, post_permlink = resolveIdentifier(args.post)

        if args.parents:
            # FIXME inconsistency, use @author/permlink instead!
            dump_recursive_parents(
                steem.rpc,
                post_author,
                post_permlink,
                args.parents,
                format=args.format
            )

        if not args.comments and not args.parents:
            post = steem.get_content(args.post)

            if post["id"] == "0.0.0":
                print("Can't find post %s" % args.post)
                return
            if args.format == "markdown":
                body = markdownify(post["body"])
            else:
                body = post["body"]

            if args.full:
                meta = {}
                for key in post:
                    if key in ["steem", "body"]:
                        continue
                    meta[key] = post[key]
                yaml = frontmatter.Post(body, **meta)
                print(frontmatter.dumps(yaml))
            else:
                print(body)

        if args.comments:
            dump_recursive_comments(
                steem.rpc,
                post_author,
                post_permlink,
                format=args.format
            )

    elif args.command == "categories":
        categories = steem.get_categories(
            sort=args.sort,
            begin=args.category,
            limit=args.limit
        )
        t = PrettyTable(["name", "discussions", "payouts"])
        t.align = "l"
        for category in categories:
            t.add_row([
                category["name"],
                category["discussions"],
                category["total_payouts"],
            ])
        print(t)

    elif args.command == "list":
        list_posts(
            steem.get_posts(
                limit=args.limit,
                sort=args.sort,
                category=args.category,
                start=args.start
            )
        )

    elif args.command == "replies":
        if not args.author:
            print("Please specify an author via --author\n "
                  "or define your default author with:\n"
                  "   piston set default_author x")
        else:
            discussions = steem.get_replies(args.author)
            list_posts(discussions[0:args.limit])

    elif args.command == "transfer":
        pprint(steem.transfer(
            args.to,
            args.amount,
            args.asset,
            memo=args.memo,
            account=args.account
        ))

    elif args.command == "powerup":
        pprint(steem.transfer_to_vesting(
            args.amount,
            account=args.account,
            to=args.to
        ))

    elif args.command == "powerdown":
        pprint(steem.withdraw_vesting(
            args.amount,
            account=args.account,
        ))

    elif args.command == "powerdownroute":
        pprint(steem.set_withdraw_vesting_route(
            args.to,
            percentage=args.percentage,
            account=args.account,
            auto_vest=args.auto_vest
        ))

    elif args.command == "balance":
        t = PrettyTable(["Account", "STEEM", "SBD", "VESTS", "VESTS (in STEEM)"])
        t.align = "r"
        if isinstance(args.account, str):
            args.account = [args.account]
        for a in args.account:
            b = steem.get_balances(a)
            t.add_row([
                a,
                b["balance"],
                b["sbd_balance"],
                b["vesting_shares"],
                b["vesting_shares_steem"]
            ])
        print(t)

    elif args.command == "history":
        import json
        t = PrettyTable(["#", "time/block", "Operation", "Details"])
        t.align = "r"
        if isinstance(args.account, str):
            args.account = [args.account]
        if isinstance(args.types, str):
            args.types = [args.types]

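        # loop_account_history yields (index, entry) pairs, where each entry carries
        # the timestamp, block and operation; --types is forwarded as only_ops to
        # filter by operation type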
        for a in args.account:
            for b in steem.loop_account_history(
                a,
                args.end,
                limit=args.limit,
                only_ops=args.types
            ):
                t.add_row([
                    b[0],
                    "%s (%s)" % (b[1]["timestamp"], b[1]["block"]),
                    b[1]["op"][0],
                    format_operation_details(b[1]["op"]),
                ])
        print(t)

    elif args.command == "interest":
        t = PrettyTable(["Account",
                         "Last Interest Payment",
                         "Next Payment",
                         "Interest rate",
                         "Interest"])
        t.align = "r"
        if isinstance(args.account, str):
            args.account = [args.account]
        for a in args.account:
            i = steem.interest(a)

            t.add_row([
                a,
                i["last_payment"],
                i["next_payment"],
                "%.1f%%" % i["interest_rate"],
                "%.3f SBD" % i["interest"],
            ])
        print(t)

    elif args.command == "web":
        from .web_steem import WebSteem
        # WebSteem is a static class that ensures that
        # the steem connection is a singleton
        WebSteem(args.node,
                 args.rpcuser,
                 args.rpcpassword,
                 args.nobroadcast)
        from . import web
        web.run(port=args.port, host=args.host)

    else:
        print("No valid command given")
Example #54
0
    def fcontent(self):
        return frontmatter.dumps(self.frontmatter)
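A minimal usage sketch of what such a property yields (assuming the standard python-frontmatter API and that self.frontmatter holds a frontmatter.Post): dumps() serializes the metadata as a YAML header followed by the body.

import frontmatter

# Build a Post with some metadata and a body, then serialize it.
post = frontmatter.Post("Body text goes here", title="Demo", tags=["example"])
print(frontmatter.dumps(post))
# Prints something like:
# ---
# tags:
# - example
# title: Demo
# ---
#
# Body text goes here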
Example #55
0
def main():

    # input is teams csv datafile from TBA
    # -> https://github.com/the-blue-alliance/the-blue-alliance-data
    csv_fname = abspath(sys.argv[1])
    max_team = int(sys.argv[2])
    mode = sys.argv[3]
    
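    # 'new' only creates team pages that don't exist yet; 'update' rewrites
    # existing pages in place (and skips files whose content would not change)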
    if mode not in ['new', 'update']:
        print("Error: invalid mode")
        return

    os.chdir(abspath(join(dirname(__file__), '..')))
    cwd = os.getcwd()
    
    for row in read_team_csv(csv_fname):
        # this changes on occasion...
        number, name, sponsors, l1, l2, l3, website, rookie_year, \
            facebook, twitter, youtube, github, instagram, periscope = row
        
        if rookie_year:
            rookie_year = int(rookie_year)
        
        number = number[3:]
        if int(number) > max_team:
            continue
        
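        # Team pages are bucketed by thousands and hundreds,
        # e.g. team 254 -> frc0000/_frc/200/254.md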
        d1 = '%04d' % (int(int(number)/1000)*1000,)
        d2 = '%03d' % (int(int(number)/100)*100,)
        
        f = join(cwd, 'frc%s' % d1, '_frc', d2, '%s.md' % number)
        
        if mode == 'new' and exists(f):
            continue
        
        if 'firstinspires' in website:
            website = ''
        
        if l3:
            location = '%s, %s, %s' % (l1, l2, l3)
        elif l2:
            location = '%s, %s' % (l1, l2)
        else:
            location = l1
            
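        # Sponsors are '/'-separated; the last entry may itself be several
        # names joined by '&'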
        sponsors = [s.strip() for s in sponsors.split('/')]
        if sponsors == ['']:
            sponsors = None
        else:
            if '&' in sponsors[-1]:
                sN = sponsors[-1].split('&')
                del sponsors[-1]
                sponsors += [s.strip() for s in sN]
        
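        # Update mode loads the existing front matter and only rewrites the file
        # when the regenerated page actually differs; new mode builds the
        # metadata from scratch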
        if mode == 'update':
            try:
                fm = frontmatter.load(f)
            except:
                print("Error at %s" % f)
                raise
                
            reformatted = str(frontmatter.dumps(fm))
            
            if 'team' not in fm.metadata:
                raise Exception("Error in %s" % f)
                
            team = fm.metadata['team']
            if 'links' not in fm.metadata['team']:
                links = OrderedDict()
            else:
                links = fm.metadata['team']['links']
        else:
            data = OrderedDict()
            team = OrderedDict()
            links = OrderedDict()
            
            data['title'] = 'FRC Team %s' % number
            data['team'] = team
            
            team['type'] = 'FRC'
            team['number'] = int(number)
        
        add_maybe(team, 'name', name)
        add_maybe(team, 'rookie_year', rookie_year)
        add_maybe(team, 'location', location)
        
        if sponsors and mode != 'update':
            team['sponsors'] = sponsors
        
        if 'Github' in links:
            links['GitHub'] = links['Github']
            del links['Github']
        
        add_maybe_web(links, 'Website', website)
        add_maybe_web(links, 'Facebook', facebook)
        add_maybe_web(links, 'Twitter', twitter)
        add_maybe_web(links, 'YouTube', youtube)
        add_maybe_web(links, 'GitHub', github)
        add_maybe_web(links, 'Instagram', instagram)
        add_maybe_web(links, 'Periscope', periscope)
        
        if mode == 'update':
            
            if links:
                fm.metadata['team']['links'] = links
            
            if fm.content.strip() == 'No content has been added for this team':
                fm.content = '{% include remove_this_line_and_add_a_paragraph %}'
            
            page = str(frontmatter.dumps(fm))
            if reformatted == page:
                # don't make gratuitous changes
                continue
        
        elif mode == 'new':
            
            if links:
                team['links'] = links
        
            page = '---\n%s\n---\n\n{%% include remove_this_line_and_add_a_paragraph %%}\n' % (
                yaml.safe_dump(data)
            )
            
            # roundtrip through frontmatter to get the formatting consistent
            page = frontmatter.dumps(frontmatter.loads(page))
            
        if not exists(dirname(f)):
            os.makedirs(dirname(f))
            
        with open(f, 'w') as fp:
            fp.write(page)
Example #56
0
def main():
    global args
    config = Configuration()

    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="Command line tool to interact with the Steem network"
    )

    """
        Default settings for all tools
    """
    parser.add_argument(
        '--node',
        type=str,
        default=config["node"],
        help='Websocket URL for public Steem API (default: "wss://this.piston.rocks/")'
    )
    parser.add_argument(
        '--rpcuser',
        type=str,
        default=config["rpcuser"],
        help='Websocket user if authentication is required'
    )
    parser.add_argument(
        '--rpcpassword',
        type=str,
        default=config["rpcpassword"],
        help='Websocket password if authentication is required'
    )
    parser.add_argument(
        '--nobroadcast',
        action='store_true',
        help='Do not broadcast anything'
    )
    parser.add_argument(
        '--verbose', '-v',
        type=int,
        default=3,
        help='Verbosity'
    )
    subparsers = parser.add_subparsers(help='sub-command help')

    """
        Command "set"
    """
    setconfig = subparsers.add_parser('set', help='Set configuration')
    setconfig.add_argument(
        'key',
        type=str,
        choices=["default_author",
                 "default_voter",
                 "node",
                 "rpcuser",
                 "rpcpassword",
                 "default_vote_weight",
                 "list_sorting",
                 "categories_sorting",
                 "limit",
                 "post_category"],
        help='Configuration key'
    )
    setconfig.add_argument(
        'value',
        type=str,
        help='Configuration value'
    )
    setconfig.set_defaults(command="set")

    """
        Command "config"
    """
    configconfig = subparsers.add_parser('config', help='show local configuration')
    configconfig.set_defaults(command="config")

    """
        Command "addkey"
    """
    addkey = subparsers.add_parser('addkey', help='Add a new key to the wallet')
    addkey.add_argument(
        'wifkeys',
        nargs='*',
        type=str,
        help='the private key in wallet import format (wif)'
    )
    addkey.set_defaults(command="addkey")

    """
        Command "listkeys"
    """
    listkeys = subparsers.add_parser('listkeys', help='List available keys in your wallet')
    listkeys.set_defaults(command="listkeys")

    """
        Command "listaccounts"
    """
    listaccounts = subparsers.add_parser('listaccounts', help='List available accounts in your wallet')
    listaccounts.set_defaults(command="listaccounts")

    """
        Command "list"
    """
    parser_list = subparsers.add_parser('list', help='List posts on Steem')
    parser_list.set_defaults(command="list")
    parser_list.add_argument(
        '--start',
        type=str,
        help='Start list from this identifier (pagination)'
    )
    parser_list.add_argument(
        '--category',
        type=str,
        help='Only posts within this category'
    )
    parser_list.add_argument(
        '--sort',
        type=str,
        default=config["list_sorting"],
        choices=["trending", "created", "active", "cashout", "payout", "votes", "children", "hot"],
        help='Sort posts'
    )
    parser_list.add_argument(
        '--limit',
        type=int,
        default=config["limit"],
        help='Limit posts by number'
    )

    """
        Command "categories"
    """
    parser_categories = subparsers.add_parser('categories', help='Show categories')
    parser_categories.set_defaults(command="categories")
    parser_categories.add_argument(
        '--sort',
        type=str,
        default=config["categories_sorting"],
        choices=["trending", "best", "active", "recent"],
        help='Sort categories'
    )
    parser_categories.add_argument(
        'category',
        nargs="?",
        type=str,
        help='Only categories used by this author'
    )
    parser_categories.add_argument(
        '--limit',
        type=int,
        default=config["limit"],
        help='Limit categories by number'
    )

    """
        Command "read"
    """
    parser_read = subparsers.add_parser('read', help='Read a post on Steem')
    parser_read.set_defaults(command="read")
    parser_read.add_argument(
        'post',
        type=str,
        help='@author/permlink-identifier of the post to read (e.g. @xeroc/python-steem-0-1)'
    )
    parser_read.add_argument(
        '--full',
        action='store_true',
        help='Show full header information (YAML formatted)'
    )
    parser_read.add_argument(
        '--comments',
        action='store_true',
        help='Also show all comments'
    )
    parser_read.add_argument(
        '--parents',
        type=int,
        default=0,
        help='Show x parents for the reply'
    )
    parser_read.add_argument(
        '--format',
        type=str,
        default=config["format"],
        help='Format post',
        choices=["markdown", "raw"],
    )

    """
        Command "post"
    """
    parser_post = subparsers.add_parser('post', help='Post something new')
    parser_post.set_defaults(command="post")
    parser_post.add_argument(
        '--author',
        type=str,
        required=False,
        default=config["default_author"],
        help='Publish post as this user (requires the key to be installed in the wallet)'
    )
    parser_post.add_argument(
        '--permlink',
        type=str,
        required=False,
        help='The permlink (together with the author identifies the post uniquely)'
    )
    parser_post.add_argument(
        '--category',
        default=config["post_category"],
        type=str,
        help='Specify category'
    )
    parser_post.add_argument(
        '--title',
        type=str,
        required=False,
        help='Title of the post'
    )
    parser_post.add_argument(
        '--file',
        type=str,
        default=None,
        help='Filename to open. If not present, or "-", stdin will be used'
    )

    """
        Command "reply"
    """
    reply = subparsers.add_parser('reply', help='Reply to an existing post')
    reply.set_defaults(command="reply")
    reply.add_argument(
        'replyto',
        type=str,
        help='@author/permlink-identifier of the post to reply to (e.g. @xeroc/python-steem-0-1)'
    )
    reply.add_argument(
        '--author',
        type=str,
        required=False,
        default=config["default_author"],
        help='Publish post as this user (requires the key to be installed in the wallet)'
    )
    reply.add_argument(
        '--permlink',
        type=str,
        required=False,
        help='The permlink (together with the author identifies the post uniquely)'
    )
    reply.add_argument(
        '--title',
        type=str,
        required=False,
        help='Title of the post'
    )
    reply.add_argument(
        '--file',
        type=str,
        required=False,
        help='Send file as response. If "-", read from stdin'
    )

    """
        Command "edit"
    """
    parser_edit = subparsers.add_parser('edit', help='Edit an existing post')
    parser_edit.set_defaults(command="edit")
    parser_edit.add_argument(
        'post',
        type=str,
        help='@author/permlink-identifier of the post to edit (e.g. @xeroc/python-steem-0-1)'
    )
    parser_edit.add_argument(
        '--author',
        type=str,
        required=False,
        default=config["default_author"],
        help='Post an edit as another author'
    )
    parser_edit.add_argument(
        '--file',
        type=str,
        required=False,
        help='Patch with content of this file'
    )
    parser_edit.add_argument(
        '--replace',
        action='store_true',
        help="Don't patch but replace original post (will make you lose votes)"
    )

    """
        Command "upvote"
    """
    parser_upvote = subparsers.add_parser('upvote', help='Upvote a post')
    parser_upvote.set_defaults(command="upvote")
    parser_upvote.add_argument(
        'post',
        type=str,
        help='@author/permlink-identifier of the post to upvote (e.g. @xeroc/python-steem-0-1)'
    )
    parser_upvote.add_argument(
        '--voter',
        type=str,
        required=False,
        default=config["default_voter"],
        help='The voter account name'
    )
    parser_upvote.add_argument(
        '--weight',
        type=float,
        default=config["default_vote_weight"],
        required=False,
        help='Actual weight (from 0.1 to 100.0)'
    )

    """
        Command "downvote"
    """
    parser_downvote = subparsers.add_parser('downvote', help='Downvote a post')
    parser_downvote.set_defaults(command="downvote")
    parser_downvote.add_argument(
        '--voter',
        type=str,
        required=False,
        help='The voter account name'
    )
    parser_downvote.add_argument(
        'post',
        type=str,
        help='@author/permlink-identifier of the post to downvote (e.g. @xeroc/python-steem-0-1)'
    )
    parser_downvote.add_argument(
        '--weight',
        type=float,
        default=config["default_vote_weight"],
        required=False,
        help='Actual weight (from 0.1 to 100.0)'
    )

    """
        Command "replies"
    """
    replies = subparsers.add_parser('replies', help='Show recent replies to your posts')
    replies.set_defaults(command="replies")
    replies.add_argument(
        '--author',
        type=str,
        required=False,
        default=config["default_author"],
        help='Show replies to this author'
    )
    replies.add_argument(
        '--limit',
        type=int,
        default=config["limit"],
        help='Limit posts by number'
    )

    """
        Command "transfer"
    """
    parser_transfer = subparsers.add_parser('transfer', help='Transfer STEEM')
    parser_transfer.set_defaults(command="transfer")
    parser_transfer.add_argument(
        'to',
        type=str,
        help='Recipient'
    )
    parser_transfer.add_argument(
        'amount',
        type=str,
        help='Amount to transfer including asset (e.g.: 100.000 STEEM)'
    )
    parser_transfer.add_argument(
        'memo',
        type=str,
        nargs="?",
        default="",
        help='Optional memo'
    )
    parser_transfer.add_argument(
        '--account',
        type=str,
        required=False,
        default=config["default_author"],
        help='Transfer from this account'
    )

    """
        Command "powerup"
    """
    parser_powerup = subparsers.add_parser('powerup', help='Power up (vest STEEM as STEEM POWER)')
    parser_powerup.set_defaults(command="powerup")
    parser_powerup.add_argument(
        'amount',
        type=str,
        help='Amount to powerup including asset (e.g.: 100.000 STEEM)'
    )
    parser_powerup.add_argument(
        '--account',
        type=str,
        required=False,
        default=config["default_author"],
        help='Powerup from this account'
    )
    parser_powerup.add_argument(
        '--to',
        type=str,
        required=False,
        default=config["default_author"],
        help='Powerup this account'
    )

    """
        Command "powerdown"
    """
    parser_powerdown = subparsers.add_parser('powerdown', help='Power down (start withdrawing STEEM from STEEM POWER)')
    parser_powerdown.set_defaults(command="powerdown")
    parser_powerdown.add_argument(
        'amount',
        type=str,
        help='Amount to powerdown including asset (e.g.: 100.000 VESTS)'
    )
    parser_powerdown.add_argument(
        '--account',
        type=str,
        required=False,
        default=config["default_author"],
        help='powerdown from this account'
    )

    """
        Command "balance"
    """
    parser_balance = subparsers.add_parser('balance', help='Show the balance of one or more accounts')
    parser_balance.set_defaults(command="balance")
    parser_balance.add_argument(
        'account',
        type=str,
        nargs="*",
        default=config["default_author"],
        help='balance of these accounts (multiple accounts allowed)'
    )

    """
        Parse Arguments
    """
    args = parser.parse_args()

    # Logging
    log = logging.getLogger("piston")
    verbosity = ["critical",
                 "error",
                 "warn",
                 "info",
                 "debug"][int(min(args.verbose, 4))]
    log.setLevel(getattr(logging, verbosity.upper()))
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch = logging.StreamHandler()
    ch.setLevel(getattr(logging, verbosity.upper()))
    ch.setFormatter(formatter)
    log.addHandler(ch)

    # GrapheneAPI logging
    if args.verbose > 4:
        verbosity = ["critical",
                     "error",
                     "warn",
                     "info",
                     "debug"][int(min((args.verbose - 4), 4))]
        gphlog = logging.getLogger("graphenebase")
        gphlog.setLevel(getattr(logging, verbosity.upper()))
        gphlog.addHandler(ch)
    if args.verbose > 8:
        verbosity = ["critical",
                     "error",
                     "warn",
                     "info",
                     "debug"][int(min((args.verbose - 8), 4))]
        gphlog = logging.getLogger("grapheneapi")
        gphlog.setLevel(getattr(logging, verbosity.upper()))
        gphlog.addHandler(ch)

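    # "set" and "config" work purely on the local configuration and need no RPC connection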
    rpc_not_required = ["set", "config", ""]
    if args.command not in rpc_not_required and args.command:
        steem = Steem(
            args.node,
            args.rpcuser,
            args.rpcpassword,
            nobroadcast=args.nobroadcast
        )

    if args.command == "set":
        config[args.key] = args.value

    if args.command == "config":
        t = PrettyTable(["Key", "Value"])
        t.align = "l"
        for key in config.store:
            t.add_row([key, config[key]])
        print(t)

    elif args.command == "addkey":
        wallet = Wallet(steem.rpc)
        if len(args.wifkeys):
            for wifkey in args.wifkeys:
                pub = (wallet.addPrivateKey(wifkey))
                if pub:
                    print(pub)
        else:
            import getpass
            wifkey = ""
            while True:
                wifkey = getpass.getpass('Private Key (wif) [Enter to quit]:')
                if not wifkey:
                    break
                pub = (wallet.addPrivateKey(wifkey))
                if pub:
                    print(pub)

        if pub:
            name = wallet.getAccountFromPublicKey(pub)
            print("Setting new default user: %s" % name)
            print("You can change these settings with:")
            print("    piston set default_author x")
            print("    piston set default_voter x")
            config["default_author"] = name
            config["default_voter"] = name

    elif args.command == "listkeys":
        t = PrettyTable(["Available Key"])
        t.align = "l"
        for key in Wallet(steem.rpc).getPublicKeys():
            t.add_row([key])
        print(t)

    elif args.command == "listaccounts":
        t = PrettyTable(["Name", "Available Key"])
        t.align = "l"
        for account in Wallet(steem.rpc).getAccounts():
            t.add_row(account)
        print(t)

    elif args.command == "reply":
        from textwrap import indent
        parent = steem.get_content(args.replyto)
        if parent["id"] == "0.0.0":
            print("Can't find post %s" % args.replyto)
            return

        reply_message = indent(parent["body"], "> ")

        post = frontmatter.Post(reply_message, **{
            "title": args.title if args.title else "Re: " + parent["title"],
            "author": args.author if args.author else "required",
            "replyto": args.replyto,
        })

        meta, message = yaml_parse_file(args, initial_content=post)

        for required in ["author", "title"]:
            if (required not in meta or
                    not meta[required] or
                    meta[required] == "required"):
                print("'%s' required!" % required)
                # TODO, instead of terminating here, send the user back
                # to the EDITOR
                return

        pprint(steem.reply(
            meta["replyto"],
            message,
            title=meta["title"],
            author=args.author
        ))

    elif args.command == "post" or args.command == "yaml":
        post = frontmatter.Post("", **{
            "title": args.title if args.title else "required",
            "author": args.author if args.author else "required",
            "category": args.category if args.category else "required",
        })

        meta, body = yaml_parse_file(args, initial_content=post)

        if not body:
            print("Empty body! Not posting!")
            return

        for required in ["author", "title", "category"]:
            if (required not in meta or
                    not meta[required] or
                    meta[required] == "required"):
                print("'%s' required!" % required)
                # TODO, instead of terminating here, send the user back
                # to the EDITOR
                return

        pprint(steem.post(
            meta["title"],
            body,
            author=meta["author"],
            category=meta["category"]
        ))

    elif args.command == "edit":
        original_post = steem.get_content(args.post)

        edited_message = None
        if original_post["id"] == "0.0.0":
            print("Can't find post %s" % args.post)
            return

        post = frontmatter.Post(original_post["body"], **{
            "title": original_post["title"] + " (immutable)",
            "author": original_post["author"] + " (immutable)"
        })

        meta, edited_message = yaml_parse_file(args, initial_content=post)
        pprint(steem.edit(
            args.post,
            edited_message,
            replace=args.replace
        ))

    elif args.command == "upvote" or args.command == "downvote":
        if args.command == "downvote":
            weight = -float(args.weight)
        else:
            weight = +float(args.weight)
        if not args.voter:
            print("Not voter provided!")
            return
        pprint(steem.vote(
            args.post,
            weight,
            voter=args.voter
        ))

    elif args.command == "read":
        post_author, post_permlink = resolveIdentifier(args.post)

        if args.parents:
            # FIXME inconsistency, use @author/permlink instead!
            dump_recursive_parents(
                steem.rpc,
                post_author,
                post_permlink,
                args.parents,
                format=args.format
            )

        if not args.comments and not args.parents:
            post = steem.get_content(args.post)

            if post["id"] == "0.0.0":
                print("Can't find post %s" % args.post)
                return
            if args.format == "markdown":
                body = markdownify(post["body"])
            else:
                body = post["body"]

            if args.full:
                meta = post.copy()
                meta.pop("body", None)  # remove body from meta
                yaml = frontmatter.Post(body, **meta)
                print(frontmatter.dumps(yaml))
            else:
                print(body)

        if args.comments:
            dump_recursive_comments(
                steem.rpc,
                post_author,
                post_permlink,
                format=args.format
            )

    elif args.command == "categories":
        categories = steem.get_categories(
            args.sort,
            begin=args.category,
            limit=args.limit
        )
        t = PrettyTable(["name", "discussions", "payouts"])
        t.align = "l"
        for category in categories:
            t.add_row([
                category["name"],
                category["discussions"],
                category["total_payouts"],
            ])
        print(t)

    elif args.command == "list":
        list_posts(
            steem.get_posts(
                limit=args.limit,
                sort=args.sort,
                category=args.category,
                start=args.start
            )
        )

    elif args.command == "replies":
        discussions = steem.get_replies(args.author)
        list_posts(discussions[0:args.limit])

    elif args.command == "transfer":
        pprint(steem.transfer(
            args.to,
            args.amount,
            memo=args.memo,
            account=args.account
        ))

    elif args.command == "powerup":
        pprint(steem.transfer_to_vesting(
            args.amount,
            account=args.account,
            to=args.to
        ))

    elif args.command == "powerdown":
        pprint(steem.withdraw_vesting(
            args.amount,
            account=args.account,
        ))

    elif args.command == "balance":
        t = PrettyTable(["Account", "STEEM", "SBD", "VESTS"])
        t.align = "r"
        if isinstance(args.account, str):
            args.account = [args.account]
        for a in args.account:
            b = steem.get_balances(a)
            t.add_row([
                a,
                b["balance"],
                b["sbd_balance"],
                b["vesting_shares"],
            ])
        print(t)

    else:
        print("No valid command given")