def delete_merged_output_files():
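    """Delete the raw report data file for each entry's merged output file, if one is configured."""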
    for entry in common.get_entries_to_process():
        file_for_deletion = entry.get('merged_output_file', None)
        if file_for_deletion is not None:
            filepath = common.output_filepath_for_raw_report_data_from_file_name(
                file_for_deletion)
            delete_filepath(filepath)
def write_index():
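    """Build index.html linking to all generated reports and move it into the report directory."""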
    website_html = ""
    website_html += generate_webpage_with_error_output.html_file_header() + "\n"
    website_html += '<p>This page lists OpenStreetMap objects that have <a href="https://wiki.openstreetmap.org/wiki/Key:wikipedia">wikipedia</a> / <a href="https://wiki.openstreetmap.org/wiki/Key:wikidata">wikidata</a> tags with some problems.</p>\n'
    website_html += '<p>For example, it allows detecting cases where an <a href="https://www.openstreetmap.org/way/693854629/history">incorrect object was linked</a>, where a link leads to a deleted page, or where there is some other issue.</p>\n'
    website_html += '<p>This tool is an <a href="https://github.com/matkoniecz/OSM-wikipedia-tag-validator#story-behing-this-tool">unexpected result</a> of creating a detector of interesting places based on OSM data and Wikipedia. It turned out to require a filter to avoid invalid links. As the detected links can often be fixed, and it is better to remove invalid links than to keep them, I am sharing this tool.</p>\n'
    website_html += "</hr>\n"
    website_html += "</br>\n"
    website_html += generate_webpage_with_error_output.feedback_request() + "\n"
    website_html += "</br>\n"
    website_html += "</hr>\n"
    """
    website_html += '<p></p>\n'
    """
    website_html += "</br>\n"

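    # links for regions with no remaining problems are collected here and appended at the end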
    completed = ""

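    # links to merged (multi-region) report files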
    for filename in sorted(common.merged_outputs_filenames_list()):
        name = filename.replace('.yaml', '')
        reports_filepath = common.found_errors_storage_location() + "/" + filename
        website_html += '<a href="' + common.htmlify(name) + '.html">' + common.htmlify(name) + '</a> ' + problem_count_string(reports_filepath) + '\n'
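    # links for individual regions listed in the config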
    for entry in common.get_entries_to_process():
        if "hidden" in entry:
            if entry["hidden"] == True:
                continue
        website_main_title_part = entry['website_main_title_part']

        reports_filepath = common.output_filepath_for_raw_report_data_from_region_name(
            entry['region_name'])
        report_count = main_report_count_in_report_file(reports_filepath)
        report_count_string = problem_count_string(reports_filepath)

        filename = website_main_title_part + '.html'
        potential_filepath = get_report_directory() + '/' + filename
        if os.path.isfile(potential_filepath):
            line = '<a href="' + common.htmlify(filename) + '">' + common.htmlify(website_main_title_part) + '</a> ' + report_count_string + '\n'
            if report_count != 0:
                website_html += line
            else:
                completed += line
        else:
            print(potential_filepath + ' is not present during write_index')
    website_html += "<br>\n"
    website_html += "<h1>Finished, congratulations :)</h1>\n"
    if completed == "":
        completed = "<p>nothing for now :(<p>\n"
    website_html += completed
    website_html += generate_webpage_with_error_output.html_file_suffix()
    with open('index.html', 'w') as index:
        index.write(website_html)
    move_file('index.html', get_report_directory() + '/' + 'index.html')
def detect_missing_data_in_used_library():
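    """Exercise wikimedia_link_issue_reporter early to surface missing data before the long processing run."""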
    # trigger what is likely to crash as early as possible,
    # rather than failing after an expensive and long setup
    helper_object = wikimedia_link_issue_reporter.WikimediaLinkIssueDetector(
        False, None, None, False, False, False)
    for entry in common.get_entries_to_process():
        if 'language_code' in entry:
            helper_object.wikidata_ids_of_countries_with_language(
                entry['language_code'])
def pipeline_entries_from_config_file(script_run_start_time):
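    """Run the full processing pipeline for every entry listed in the config file."""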
    for entry in common.get_entries_to_process():
        pipeline(
            region_name=entry['region_name'],
            website_main_title_part=entry['website_main_title_part'],
            merged_output_file=entry.get('merged_output_file', None),
            language_code=entry.get('language_code', None),
            identifier_of_region=entry['identifier'],
            script_run_start_time=script_run_start_time,
        )
def get_entry_contributing_to_merged_file(name_of_merged):
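    """Return the first config entry whose merged_output_file matches name_of_merged."""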
    for entry in common.get_entries_to_process():
        if entry.get('merged_output_file', None) == name_of_merged:
            return entry
    raise ProcessingException("unexpected")