Example #1
def game_index(entry):
    """
    Builds the index dictionary (url, anchor id and optional state tags) for a single game entry.
    """
    e = {
        'url': make_url(entry['href'], entry['Title']),
        'anchor-id': entry['anchor-id']
    }
    tags = []
    if 'beta' in entry['State']:
        tags.append('beta')
    if osg.is_inactive(entry):
        tags.append('inactive since {}'.format(osg.extract_inactive_year(entry)))
    if tags:
        e['tags'] = make_text('({})'.format(', '.join(tags)), 'is-light is-size-7')
    return e
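The function above relies on helpers from the project (make_url, make_text and the osg module). Below is a minimal usage sketch with stand-in stubs for those helpers; the stubs and the sample entry are assumptions for illustration only, not the project's real implementations.

def make_url(href, text):
    # stand-in: the real project helper builds a link element for the HTML generator
    return {'href': href, 'text': text}

def make_text(text, css_class):
    # stand-in: the real project helper builds a styled text element
    return {'text': text, 'class': css_class}

class osg:
    # stand-in for the project's osg module
    @staticmethod
    def is_inactive(entry):
        return 'inactive-year' in entry

    @staticmethod
    def extract_inactive_year(entry):
        return entry['inactive-year']

entry = {
    'href': 'games/example.html',
    'Title': 'Example Game',
    'anchor-id': 'example-game',
    'State': ['beta'],
    'inactive-year': 2019,
}
print(game_index(entry))
# {'url': ..., 'anchor-id': 'example-game', 'tags': {'text': '(beta, inactive since 2019)', ...}}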
Example #2
    def update_html(self):
        """
        Parses all entries, collects interesting info and stores it in a json file suitable for displaying
        with a dynamic table in a browser.
        """
        if not self.entries:
            print('entries not yet loaded')
            return

        # make database out of it
        db = {
            'headings':
            ['Game', 'Description', 'Download', 'State', 'Keyword', 'Source']
        }

        entries = []
        for info in self.entries:

            # game & description
            entry = [
                '{} (<a href="{}">home</a>, <a href="{}">entry</a>)'.format(
                    info['Title'], info['Home'][0],
                    r'https://github.com/Trilarion/opensourcegames/blob/master/entries/'
                    + info['File']),
                textwrap.shorten(info.get('Note', ''),
                                 width=60,
                                 placeholder='..')
            ]

            # download
            field = 'Download'
            if field in info and info[field]:
                entry.append('<a href="{}">Link</a>'.format(info[field][0]))
            else:
                entry.append('')

            # state (field state is essential)
            entry.append('{} / {}'.format(
                info['State'][0],
                'inactive since {}'.format(osg.extract_inactive_year(info))
                if osg.is_inactive(info) else 'active'))

            # keywords
            keywords = info['Keyword']
            keywords = [x.value for x in keywords]
            entry.append(', '.join(keywords))

            # source
            text = []
            field = 'Code repository'
            if field in info and info[field]:
                text.append('<a href="{}">Source</a>'.format(
                    info[field][0].value))
            languages = info['Code language']
            languages = [x.value for x in languages]
            text.append(', '.join(languages))
            licenses = info['Code license']
            licenses = [x.value for x in licenses]
            text.append(', '.join(licenses))
            entry.append(' - '.join(text))

            # append to entries
            entries.append(entry)

        # sort entries by game name
        entries.sort(key=lambda x: str.casefold(x[0]))

        db['data'] = entries

        # output
        text = json.dumps(db, indent=1)
        utils.write_text(c.json_db_file, text)

        print('HTML updated')
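The JSON database written above contains a 'headings' list and a 'data' list with one row of six HTML/text cells per game. A minimal sketch of reading it back, assuming a stand-in file name in place of the real c.json_db_file:

import json

with open('entries.json', encoding='utf-8') as f:  # stand-in path, not the real c.json_db_file
    db = json.load(f)

print(db['headings'])   # ['Game', 'Description', 'Download', 'State', 'Keyword', 'Source']
for row in db['data'][:5]:
    print(row[0])       # game name with home/entry links, as built above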
Example #3
    def update_statistics(self):
        """
        Generates the statistics page.

        Should be done every time the entries change.
        """
        if not self.entries:
            print('entries not yet loaded')
            return

        # start the page
        statistics = '[comment]: # (autogenerated content, do not edit)\n# Statistics\n\n'

        # total number
        number_entries = len(self.entries)
        rel = lambda x: x / number_entries * 100  # conversion to percent

        statistics += 'analyzed {} entries on {}\n\n'.format(
            number_entries,
            datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))

        # State (beta, mature, inactive)
        statistics += '## State\n\n'

        number_state_beta = sum(1 for x in self.entries
                                if 'beta' in x['State'])
        number_state_mature = sum(1 for x in self.entries
                                  if 'mature' in x['State'])
        number_inactive = sum(1 for x in self.entries if osg.is_inactive(x))
        statistics += '- mature: {} ({:.1f}%)\n- beta: {} ({:.1f}%)\n- inactive: {} ({:.1f}%)\n\n'.format(
            number_state_mature, rel(number_state_mature), number_state_beta,
            rel(number_state_beta), number_inactive, rel(number_inactive))

        if number_inactive > 0:
            entries_inactive = [(x['Title'], osg.extract_inactive_year(x))
                                for x in self.entries if osg.is_inactive(x)]
            entries_inactive.sort(
                key=lambda x: str.casefold(x[0]))  # first sort by name
            entries_inactive.sort(
                key=lambda x: x[1], reverse=True
            )  # then sort by inactive year (more recently first)
            entries_inactive = ['{} ({})'.format(*x) for x in entries_inactive]
            statistics += '##### Inactive State\n\n' + ', '.join(
                entries_inactive) + '\n\n'

        # Language
        statistics += '## Code Languages\n\n'
        field = 'Code language'

        # get all languages together
        languages = []
        for entry in self.entries:
            languages.extend(entry[field])
        languages = [x.value for x in languages]

        unique_languages = set(languages)
        unique_languages = [(l, languages.count(l) / len(languages))
                            for l in unique_languages]
        unique_languages.sort(
            key=lambda x: str.casefold(x[0]))  # first sort by name

        # print languages to console
        print('\nLanguages\n')
        print('\n'.join('{} ({:.1f}%)'.format(x[0], x[1] * 100)
                        for x in unique_languages))

        unique_languages.sort(
            key=lambda x: x[1],
            reverse=True)  # then sort by occurrence (highest occurrence first)
        unique_languages = [
            '- {} ({:.1f}%)\n'.format(x[0], x[1] * 100)
            for x in unique_languages
        ]
        statistics += '##### Language frequency\n\n' + ''.join(
            unique_languages) + '\n'

        # Licenses
        statistics += '## Code licenses\n\n'
        field = 'Code license'

        # get all licenses together
        licenses = []
        for entry in self.entries:
            licenses.extend(entry[field])
        licenses = [x.value for x in licenses]

        unique_licenses = set(licenses)
        unique_licenses = [(l, licenses.count(l) / len(licenses))
                           for l in unique_licenses]
        unique_licenses.sort(
            key=lambda x: str.casefold(x[0]))  # first sort by name

        # print licenses to console
        print('\nLicenses\n')
        print('\n'.join('{} ({:.1f}%)'.format(x[0], x[1] * 100)
                        for x in unique_licenses))

        unique_licenses.sort(
            key=lambda x: x[1],
            reverse=True)  # then sort by occurrence (highest occurrence first)
        unique_licenses = [
            '- {} ({:.1f}%)\n'.format(x[0], x[1] * 100)
            for x in unique_licenses
        ]
        statistics += '##### Licenses frequency\n\n' + ''.join(
            unique_licenses) + '\n'

        # Keywords
        statistics += '## Keywords\n\n'
        field = 'Keyword'

        # get all keywords together
        keywords = []
        for entry in self.entries:
            keywords.extend(entry[field])
        keywords = [x.value for x in keywords]

        # reduce those starting with "multiplayer"
        keywords = [
            x if not x.startswith('multiplayer') else 'multiplayer'
            for x in keywords
        ]

        unique_keywords = set(keywords)
        unique_keywords = [(l, keywords.count(l) / len(keywords))
                           for l in unique_keywords]
        unique_keywords.sort(
            key=lambda x: str.casefold(x[0]))  # first sort by name

        # print keywords to console
        print('\nKeywords\n')
        print('\n'.join('{} ({:.1f}%)'.format(x[0], x[1] * 100)
                        for x in unique_keywords))

        unique_keywords.sort(
            key=lambda x: x[1],
            reverse=True)  # then sort by occurrence (highest occurrence first)
        unique_keywords = [
            '- {} ({:.1f}%)'.format(x[0], x[1] * 100) for x in unique_keywords
        ]
        statistics += '##### Keywords frequency\n\n' + '\n'.join(
            unique_keywords) + '\n\n'

        # no download or play field
        statistics += '## Entries without download or play fields\n\n'

        entries = []
        for entry in self.entries:
            if 'Download' not in entry and 'Play' not in entry:
                entries.append(entry['Title'])
        entries.sort(key=str.casefold)
        statistics += '{}: '.format(len(entries)) + ', '.join(entries) + '\n\n'

        # code hosted not on github, gitlab, bitbucket, launchpad, sourceforge
        popular_code_repositories = ('github.com', 'gitlab.com',
                                     'bitbucket.org', 'code.sf.net',
                                     'code.launchpad.net')
        statistics += '## Entries with a code repository not on a popular site\n\n'

        entries = []
        field = 'Code repository'
        for entry in self.entries:
            popular = False
            for repo in entry[field]:
                for popular_repo in popular_code_repositories:
                    if popular_repo in repo.value:
                        popular = True
                        break
            # if there were repositories, but none popular, add them to the list
            if not popular:
                entries.append(entry['Title'])
                # print(info[field])
        entries.sort(key=str.casefold)
        statistics += '{}: '.format(len(entries)) + ', '.join(entries) + '\n\n'

        # Code dependencies
        statistics += '## Code dependencies\n\n'
        field = 'Code dependency'

        # get all code dependencies together
        code_dependencies = []
        entries_with_code_dependency = 0
        for entry in self.entries:
            if field in entry:
                code_dependencies.extend(entry[field])
                entries_with_code_dependency += 1
        code_dependencies = [x.value for x in code_dependencies]
        statistics += 'With code dependency field {} ({:.1f}%)\n\n'.format(
            entries_with_code_dependency, rel(entries_with_code_dependency))

        unique_code_dependencies = set(code_dependencies)
        unique_code_dependencies = [
            (l, code_dependencies.count(l) / len(code_dependencies))
            for l in unique_code_dependencies
        ]
        unique_code_dependencies.sort(
            key=lambda x: str.casefold(x[0]))  # first sort by name

        # print code dependencies to console
        print('\nCode dependencies\n')
        print('\n'.join('{} ({:.1f}%)'.format(x[0], x[1] * 100)
                        for x in unique_code_dependencies))

        unique_code_dependencies.sort(
            key=lambda x: x[1],
            reverse=True)  # then sort by occurrence (highest occurrence first)
        unique_code_dependencies = [
            '- {} ({:.1f}%)'.format(x[0], x[1] * 100)
            for x in unique_code_dependencies
        ]
        statistics += '##### Code dependencies frequency\n\n' + '\n'.join(
            unique_code_dependencies) + '\n\n'

        # Build systems:
        statistics += '## Build systems\n\n'
        field = 'Build system'

        # get all build systems together
        build_systems = []
        for entry in self.entries:
            if field in entry['Building']:
                build_systems.extend(entry['Building'][field])
        build_systems = [x.value for x in build_systems]

        statistics += 'Build systems information available for {:.1f}% of all projects.\n\n'.format(
            rel(len(build_systems)))

        unique_build_systems = set(build_systems)
        unique_build_systems = [(l,
                                 build_systems.count(l) / len(build_systems))
                                for l in unique_build_systems]
        unique_build_systems.sort(
            key=lambda x: str.casefold(x[0]))  # first sort by name

        # print build systems to console
        print('\nBuild systems\n')
        print('\n'.join('{} ({:.1f}%)'.format(x[0], x[1] * 100)
                        for x in unique_build_systems))

        unique_build_systems.sort(
            key=lambda x: x[1],
            reverse=True)  # then sort by occurrence (highest occurrence first)
        unique_build_systems = [
            '- {} ({:.1f}%)'.format(x[0], x[1] * 100)
            for x in unique_build_systems
        ]
        statistics += '##### Build systems frequency ({})\n\n'.format(
            len(build_systems)) + '\n'.join(unique_build_systems) + '\n\n'

        # C, C++ projects without build system information
        c_cpp_project_without_build_system = []
        for entry in self.entries:
            if field not in entry['Building'] and (
                    'C' in entry['Code language']
                    or 'C++' in entry['Code language']):
                c_cpp_project_without_build_system.append(entry['Title'])
        c_cpp_project_without_build_system.sort(key=str.casefold)
        statistics += '##### C and C++ projects without build system information ({})\n\n'.format(
            len(c_cpp_project_without_build_system)) + ', '.join(
                c_cpp_project_without_build_system) + '\n\n'

        # C, C++ projects with build system information but without CMake as build system
        c_cpp_project_not_cmake = []
        for entry in self.entries:
            if field in entry['Building'] and 'CMake' not in entry['Building'][field] and (
                    'C' in entry['Code language']
                    or 'C++' in entry['Code language']):
                c_cpp_project_not_cmake.append(entry['Title'])
        c_cpp_project_not_cmake.sort(key=str.casefold)
        statistics += '##### C and C++ projects with a build system different from CMake ({})\n\n'.format(
            len(c_cpp_project_not_cmake)) + ', '.join(
                c_cpp_project_not_cmake) + '\n\n'

        # Platform
        statistics += '## Platform\n\n'
        field = 'Platform'

        # get all platforms together
        platforms = []
        for entry in self.entries:
            if field in entry:
                platforms.extend(entry[field])
        platforms = [x.value for x in platforms]

        statistics += 'Platform information available for {:.1f}% of all projects.\n\n'.format(
            rel(len(platforms)))

        unique_platforms = set(platforms)
        unique_platforms = [(l, platforms.count(l) / len(platforms))
                            for l in unique_platforms]
        unique_platforms.sort(
            key=lambda x: str.casefold(x[0]))  # first sort by name
        unique_platforms.sort(
            key=lambda x: x[1],
            reverse=True)  # then sort by occurrence (highest occurrence first)
        unique_platforms = [
            '- {} ({:.1f}%)'.format(x[0], x[1] * 100) for x in unique_platforms
        ]
        statistics += '##### Platforms frequency\n\n' + '\n'.join(
            unique_platforms) + '\n\n'

        # write to statistics file
        utils.write_text(c.statistics_file, statistics)

        print('statistics updated')
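Example #3 repeats the same frequency pattern for languages, licenses, keywords, code dependencies, build systems and platforms: count occurrences, sort alphabetically for the console output, then sort by share for the Markdown list. A sketch of that pattern factored into a single helper (the helper name and signature are assumptions, not part of the original code):

def frequency_lines(values):
    # values: a flat list of strings, e.g. all code languages across all entries
    total = len(values)
    unique = [(v, values.count(v) / total) for v in set(values)]
    unique.sort(key=lambda x: str.casefold(x[0]))  # first sort by name
    unique.sort(key=lambda x: x[1], reverse=True)  # then by share, highest first
    return ['- {} ({:.1f}%)'.format(name, share * 100) for name, share in unique]

# usage sketch, e.g. for code languages:
# languages = [x.value for entry in self.entries for x in entry['Code language']]
# statistics += '##### Language frequency\n\n' + '\n'.join(frequency_lines(languages)) + '\n\n'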