def construct_gist_from_bb_issue_attachments(
    bb_issue: Dict[str, Any], bb_export: BitbucketExport
) -> Optional[Dict[str, Union[str, Dict[str, InputFileContent]]]]:
    issue_id = bb_issue["id"]
    bb_attachments = bb_export.get_issue_attachments(issue_id)

    if not bb_attachments:
        return None

    gist_description = f"Attachments from Bitbucket issue {bb_issue['id']}"
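    # The leading "#" in the file name is presumably there so this README entry
    # sorts ahead of the attachment files in the gist listing.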
    gist_files = {"# README.md": InputFileContent(gist_description)}

    for name in bb_attachments.keys():
        content = bb_export.get_issue_attachment_content(issue_id, name)
        if len(content) == 0:
            print(
                f"Warning: file '{name}' of bitbucket issue {bb_export.get_repo_full_name()}/#{issue_id} is empty."
            )
            content = "(empty)"
        elif len(content) > 500 * 1000:
            print(
                f"Error: file '{name}' of bitbucket issue {bb_export.get_repo_full_name()}/#{issue_id} is too big and "
                "cannot be uploaded as a gist file. This has to be done manually."
            )
            content = "(too big)"
        gist_files[name] = InputFileContent(content)

    return {"description": gist_description, "files": gist_files}
Example #2
    def actionCreate(self, args):
        '''
        Creates the gist.

        By default the content is taken from stdin, but you can also provide a file path.
        '''

        # Note: it looks like the dict keys do not matter.
        contentDict = {}
        isPublic = args.public == True

        if args.stdin == True:
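            # The first stdin line is used as the file name; the rest becomes the content.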
            stdInput = sys.stdin.readlines()
            contentString = ''.join(stdInput[1:])
            fileName = stdInput[0].strip()

            contentDict['0'] = InputFileContent(contentString, fileName)
        elif args.files:
            for offset, file in enumerate(args.files):
                with open(file, 'r') as hFile:
                    contentString = ''.join(hFile.readlines())

                fileName = os.path.split(file)[-1]
                contentDict[str(offset)] = InputFileContent(
                    contentString, fileName)
        else:
            fileName = self.readline('File name:')
            contentString = self.readline('Content:')

            contentDict['0'] = InputFileContent(contentString, fileName)

        gist = self._getGithubUser().create_gist(isPublic, contentDict)
Example #3
def main(wf):
    log = wf.logger

    # Process command-line arguments.
    public = int(os.environ['GIST_PUBLIC']) == 1
    files = wf.args[0].split(u"\t") if wf.args[0] else None

    # Fetch user token.
    gh = Github(login_or_token=get_github_token(wf))
    gh_user = gh.get_user()

    # Create new gist for user.
    log.info("Creating new gist...")
    if files:
        log.info("Creating gist from files: %s." % ", ".join("`%s`" % f
                                                             for f in files))
        files = {
            os.path.basename(f): InputFileContent(read_file(f))
            for f in files
        }
    else:
        log.info("Creating gist from clipboard.")
        files = {"paste": InputFileContent(pyperclip.paste())}
    gist = gh_user.create_gist(public, files, GithubObject.NotSet)
    log.info(gist)
    print(gist.html_url)

    # Update cache of gists.
    gist_set = wf.stored_data('gists')
    n_starred = wf.stored_data('n_starred')
    n_forked = wf.stored_data('n_forked')
    n_public = wf.stored_data('n_public')
    n_private = wf.stored_data('n_private')
    tag_counts = wf.stored_data('tag_counts')
    language_counts = wf.stored_data('language_counts')

    gist_item = create_gist_item(gist)
    gist_set.append(gist_item)
    n_starred += 1 if gist_item['starred'] else 0
    n_forked += 1 if gist_item['forked'] else 0
    n_public += 1 if gist_item['public'] else 0
    n_private += 1 if not gist_item['public'] else 0
    for tag in gist_item['tags']:
        tag_counts[tag] += 1
    if gist_item['language']:
        language_counts[gist_item['language']] += 1

    wf.store_data('gists', gist_set)
    wf.store_data('n_starred', n_starred)
    wf.store_data('n_forked', n_forked)
    wf.store_data('n_public', n_public)
    wf.store_data('n_private', n_private)
    wf.store_data('tag_counts', tag_counts)
    wf.store_data('language_counts', language_counts)
Example #4
    def publish_changelog_gist(self):
        """Publish the changelog as a github gist."""
        description = 'Changelog for Spinnaker {0}'.format(
            self.__release_version)
        with open(self.__changelog_file, 'r') as clog:
            raw_content_lines = clog.readlines()
            spinnaker_version = '# Spinnaker {0}\n'.format(
                self.__release_version)
            # Re-write the correct Spinnaker version at the top of the changelog.
            # Also add some identifying information.
            raw_content_lines[0] = spinnaker_version
            timestamp = '{:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now())
            signature = '\n\nGenerated by {0} at {1}'.format(
                self.__github_publisher, timestamp)
            raw_content_lines.append(signature)
            content = InputFileContent(''.join(raw_content_lines))
            filename = os.path.basename(self.__changelog_file)
            gist = self.__github.get_user().create_gist(
                True, {filename: content}, description=description)
            self.__gist_uri = 'https://gist.github.com/{user}/{id}'.format(
                user=self.__gist_user, id=gist.id)
            print('Wrote changelog to Gist at {0}.'.format(self.__gist_uri))
            # Export the changelog gist URI to include in an email notification.
            os.environ['GIST_URI'] = self.__gist_uri
            return self.__gist_uri
Example #5
def gistify(title, in_file, test=False):

    # load the markdown from the file
    with open(in_file, 'r') as fp:
        body = fp.read()

    # extract code snippets from the text body
    # captured = re.findall(r'```bash\n[\s\S]*?\n```', body)
    captured = re.findall(r'```[a-zA-Z]*\n[\s\S]*?\n```', body)

    # make gists for each snippet
    g = Github(os.environ['GITHUB_GIST_TOKEN'])
    user = g.get_user()
    gist_urls = list()
    for i, snippet in enumerate(captured):
        print('{}/{}'.format(i + 1, len(captured)))
        if not test:
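            # The files dict key becomes the gist file name; the integer 0 used
            # below is serialized to the string "0" in the request.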
            gist_urls.append(
                user.create_gist(public=True,
                                 description='{}_{}'.format(title, i + 1),
                                 files={
                                     0: InputFileContent(snippet)
                                 }).html_url)
        else:
            sep = '=' * 30
            print(sep)
            print(snippet)
            print(sep)
    return gist_urls
Example #6
    def handle(self, *args, **options):
        self.gh = Github(os.getenv('GITHUB_TOKEN'))
        self.org = self.gh.get_organization("california-civic-data-coalition")
        self.repo = self.org.get_repo("django-calaccess-raw-data")
        self.gist = self.gh.get_gist('66bed097ddca855c36506da4b7c0d349')

        sample_data_dir = self.repo.get_dir_contents('/example/test-data/tsv/')

        files = {}

        for file in sample_data_dir:
            # decoded_content is bytes; decode it so the string checks below work.
            lines = file.decoded_content.decode('utf-8').splitlines()

            # can't add empty files to gist, so skip
            if len(lines) > 0:
                # we want the header + the first five lines without illegal chars
                top_lines = []

                for line in lines:
                    if '"' not in line:
                        top_lines.append(line)
                    if len(top_lines) == 6:
                        break

                # recombine the split lines into a single string
                joined_lines = '\r\n'.join(top_lines)

                files[file.name] = InputFileContent(content=joined_lines)

        # now save
        self.gist.edit(
            description='Updating sample files',
            files=files,
        )
Example #7
def update_gist(title: str, content: str) -> None:
    access_token = os.environ[ENV_VAR_GITHUB_TOKEN]
    gist_id = os.environ[ENV_VAR_GIST_ID]
    gist = Github(access_token).get_gist(gist_id)
    # Not guaranteed to work; kept for the case of a single-file gist that was created in a hurry just to get a gist id.
    old_title = list(gist.files.keys())[0]
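    # Passing `title` as InputFileContent's second argument (new_name) renames
    # the existing file while replacing its content.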
    gist.edit(title, {old_title: InputFileContent(content, title)})
    print(f"{title}\n{content}")
Example #8
def create_gist(description, content):
    public = False
    authenticated_user = gh_user()
    content = InputFileContent(content)
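    # The dict key 'content' becomes the file name shown in the created gist.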
    gist = authenticated_user.create_gist(public, {'content': content},
                                          description)
    return {
        'gist-html-url': gist.html_url,
        'gist-id': gist.id,
        'gist-created-at': gist.created_at
    }
Example #9
    def save(self, public: bool=False) -> None:
        """
        Saves the gist's content. If the gist is not present on GitHub's
        servers, then creates one.
        """
        file_dict = {'gist': InputFileContent(self.content, self.title)}
        user = self.github.get_user()

        if not self.github_gist:
            self.github_gist = user.create_gist(public, files=file_dict)
        else:
            self.github_gist.edit(files=file_dict)
Example #10
def update_gist(title: str, content: str) -> None:
    """Update gist with provided title and content.

    Use gist id and github token present in environment variables.
    Replace first file in the gist.
    """
    access_token = os.environ[ENV_VAR_GITHUB_TOKEN]
    gist_id = os.environ[ENV_VAR_GIST_ID]
    gist = Github(access_token).get_gist(gist_id)
    # Works only for a single-file gist. Should we clear all files and create a new one?
    old_title = list(gist.files.keys())[0]
    gist.edit(title, {old_title: InputFileContent(content, title)})
    print(f"{title}\n{content}")
Example #11
# -*- coding: utf-8 -*-
import subprocess

from github import Github
from github.InputFileContent import InputFileContent

from credentials import CREDS
from driver import main
from driver import getlongterm

subprocess.call(["python", "driver.py"])

g = Github(CREDS['TOKEN'])
spotify_gist = g.get_gist(CREDS['GIST_ID'])
#spotify_gist_long_term = g.get_gist('20d9ea0342b543a1460fd13be64a7c60')
f = InputFileContent(main())
eggs = InputFileContent(getlongterm())
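# gist.edit only adds or updates the files named in the dict; other files
# already in the gist are left untouched.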
spotify_gist.edit('🎧 My music activity',
                  {'🎧 My music activity over the last 4 weeks': f})
#spotify_gist_long_term.edit('🎧 My music activity over the last 6 months',
#                  {'🎧 My music activity over the last 6 months': eggs})
spotify_gist.edit('🎧 My music activity',
                  {'🎧 My music activity over the last 6 months': eggs})
Example #12
def main():
    g = Github(GITHUB_TOKEN)

    output = '_Last updated on {}_\n\n'.format(datetime.now())

    # Go through the given repositories that use milestones
    for repo_name in REPOS_M:
        repo = g.get_repo(repo_name)

        # Print repo header
        output += '# [{}]({})\n\n'.format(repo.name, repo.html_url)

        # Only check open milestones
        open_milestones = repo.get_milestones(state='open')

        found_milestone_without_due_date = False

        # Go through all open milestones
        for milestone in open_milestones:
            # Skip milestones without issues
            if (milestone.open_issues + milestone.closed_issues) == 0:
                continue

            # Milestones don't have .html_url, so creating it manually:
            milestone_html_url = '{}/milestone/{}'.format(
                repo.html_url, milestone.number)

            # skip milestones without due date
            if milestone.due_on is None:
                print(
                    'Skipped {} (No due date set)'.format(milestone_html_url))
                found_milestone_without_due_date = True
                continue

            # Print milestone header including progress and due date.
            output += '### [{}]({}) {}/{} issues ({:.0f}%) - Due on {}\n\n'.format(
                milestone.title, milestone_html_url, milestone.closed_issues,
                milestone.open_issues + milestone.closed_issues,
                100 * milestone.closed_issues /
                (milestone.open_issues + milestone.closed_issues),
                milestone.due_on.date() if milestone.due_on else '???')

            # Go through all issues. First the closed ones, then the open ones
            output += process_issues(repo, 'closed', milestone=milestone)
            output += process_issues(repo, 'open', milestone=milestone)

        if open_milestones.totalCount == 0 or found_milestone_without_due_date:
            output += 'No milestones open or no milestone with due date set.\n\n'

    # Go through the given repositories that don't use milestones.
    for repo_name, name, label_name in REPOS_P:
        repo = g.get_repo(repo_name)

        # Print repo header
        output_name = repo.name if name is None else name
        output += '# [{}]({})\n\n'.format(output_name, repo.html_url)

        # Get closed issues that were closed within the last week.
        last_week = datetime.today() - timedelta(days=7)
        output_closed_issues = process_issues(repo,
                                              'closed',
                                              since=last_week,
                                              label_names=[label_name])

        # Now get all open issues.
        output_open_issues = process_issues(repo,
                                            'open',
                                            label_names=[label_name])

        # Check if there was actually something to be printed.
        if (len(output_closed_issues) + len(output_open_issues)) > 0:
            # Print info
            output += '_Not based on milestones._\n_✅ -> Closed within the last 7 days._\n\n\n'
            output += output_closed_issues
            output += output_open_issues
        else:
            output += 'No issues to show.\n\n'

    # Write gist.
    gist = g.get_gist(GIST_ID)

    gist.edit(
        description=GIST_DESCRIPTION,
        files={GIST_FILENAME: InputFileContent(content=output)},
    )

    # Print success and gist url for easy access.
    print('View output at {}'.format(gist.html_url))
Example #13
if args.list:
    for gist in gists:
        for file_name in gist.files:
            if gist.files[file_name].language is not None:
                print(file_name + " -> " + gist.description + " [" +
                      gist.files[file_name].language + "]")
            else:
                print(file_name + " -> " + gist.description)

    quit()

if args.create_gist:
    data = sys.stdin.read()
    # create_gist(public, {filename: InputFileContent}, description)
    github.get_user().create_gist(True,
                                  {args.create_gist: InputFileContent(data)},
                                  "Created from terminal")
    quit()

if args.search_filename is not None and len(args.search_filename) > 0:
    for gist in gists:
        for file_name in gist.files:
            if fnmatch.fnmatch(file_name, args.search_filename):
                print(gist.description + " -> " + file_name + " [" +
                      gist.files[file_name].language + "]")
    quit()

if args.search_description is not None and len(args.search_description) > 0:
    for gist in gists:
        for file_name in gist.files:
            if args.search_description in gist.description:
                print(gist.description + " -> " + file_name)
    quit()
Example #14
import subprocess

from github import Github
from github.InputFileContent import InputFileContent

from credentials import CREDS
from driver import main

subprocess.call(["python", "driver.py"])

g = Github(CREDS['TOKEN'])
spotify_gist = g.get_gist(CREDS['GIST_ID'])
f = InputFileContent(main())
spotify_gist.edit('🎧 My music over 4 weeks',
                  {'🎧 My music over 4 weeks': f})