Example #1
import codecs
import json
import os
from collections import OrderedDict


# list_filter, list_map, is_app_rules and data_converter are helpers
# defined elsewhere in this project.
def convert(input):
    rules_path = input
    rules = list_filter(
        is_app_rules,
        list_map(lambda item: rules_path + os.sep + item,
                 os.listdir(rules_path)))
    print('Found rules count: %d' % len(rules))

    # Make output path
    output_path = input + os.sep + 'output'
    if os.path.isfile(output_path):
        os.remove(output_path)
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    print('Output to ' + output_path)

    # Convert and write out results
    for rule in rules:
        with codecs.open(rule, mode='r', encoding='utf-8') as f:
            model = json.loads(f.read(), object_pairs_hook=OrderedDict)
            data_converter.convert_old_data(model)

            with codecs.open(output_path + os.sep + model['package'] + '.json',
                             mode='w',
                             encoding='utf-8') as out:
                out.write(json.dumps(model, indent=2, ensure_ascii=False))
    print('Finished converting.')
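
Example #1 depends on the helpers list_filter, list_map and is_app_rules, which are not shown above. A minimal sketch of how they might be defined, assuming is_app_rules simply treats every .json file in the rules directory as an app-rules file (a hypothetical implementation, not the project's actual one):

import os


def list_map(func, iterable):
    # list(map(...)) wrapper so the result supports len() and indexing
    return list(map(func, iterable))


def list_filter(func, iterable):
    # list(filter(...)) wrapper, matching list_map above
    return list(filter(func, iterable))


def is_app_rules(path):
    # Hypothetical check: any regular .json file counts as an app-rules file
    return os.path.isfile(path) and path.endswith('.json')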
Example #2
import codecs
import json
import os
from collections import OrderedDict


# ISSUE_REPO, data_converter and is_issue_need_discussion are defined
# elsewhere in this project; github is an authenticated PyGithub client.
def download_issues(github):
    repo = github.get_repo(ISSUE_REPO)

    # Get issues only created by the auto wizard
    issues_list = repo.get_issues(state='open')
    issues = []
    # PyGithub returns 30 issues per page by default; round up so the last
    # partial page is not skipped.
    for i in range((issues_list.totalCount + 29) // 30):
        for issue in issues_list.get_page(i):
            if issue.title.startswith(
                    '[New rules request][AUTO]'
            ) and not is_issue_need_discussion(issue):
                issues.append(issue)

    # Make output path
    output_path = os.getcwd() + os.sep + 'output'
    if os.path.isfile(output_path):
        os.remove(output_path)
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    print('Output to ' + output_path)

    total = len(issues)
    current = 0

    def print_progress(done, total_count):
        # Single-line progress indicator (no trailing newline)
        print('Start downloading rules... %d/%d' % (done, total_count), end='')

    CODE_BLOCK = '```'

    print_progress(current, total)

    # Download json output from issues
    for issue in issues:
        current += 1

        # Get information from issue
        package_name = issue.title[issue.title.rindex(' ') + 1:]
        print(' Package: ' + package_name, end='\r')

        if not os.path.isfile(output_path + os.sep + package_name + '.json'):
            body = repo.get_issue(issue.number).body
            content = body[body.index(CODE_BLOCK) +
                           len(CODE_BLOCK):body.rindex(CODE_BLOCK)]

            # Try to convert old data
            try:
                content = json.dumps(data_converter.convert_old_data(
                    json.loads(content, object_pairs_hook=OrderedDict)),
                                     indent=2,
                                     ensure_ascii=False)
            except Exception:
                # Keep the original content if the old-data conversion fails
                pass

            # Add to cache
            with codecs.open(output_path + os.sep + package_name + '.json',
                             mode='w',
                             encoding='utf-8') as f:
                f.write(content)

        print_progress(current, total)

    # Done downloading
    print('\nDownloaded %d rules' % (current))

    print('\nFinished downloading issues. Remember to check if rules are '
          'valid.')
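
download_issues expects an authenticated PyGithub client; ISSUE_REPO, data_converter and is_issue_need_discussion come from the surrounding project. A minimal way to invoke it might look like the following sketch, where reading a token from the GITHUB_TOKEN environment variable is purely an assumption for illustration:

import os

from github import Github

if __name__ == '__main__':
    # A personal access token read from the environment is an assumption
    # made for this sketch; any PyGithub authentication method works.
    client = Github(os.environ['GITHUB_TOKEN'])
    download_issues(client)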