Example #1
def make_header():
    header = collections.OrderedDict()
    dictionaries.append(header)

    header["timestamp"] = u32(get_timestamp(0, None, None))
    header["country_code"] = u8(country_code)
    header["publicity_flag"] = u8(0)
    header["question_version"] = u8(0 if file_type == "r" else 1)
    header["result_version"] = u8(1 if file_type == "r" else 0)
    header["national_question_number"] = u8(national)
    header["national_question_offset"] = u32(0)
    header["worldwide_question_number"] = u8(worldwide)
    header["worldwide_question_offset"] = u32(0)
    header["question_number"] = u8(questions * len(country_language[country_code]))
    header["question_offset"] = u32(0)
    header["national_result_entry"] = u8(national_results)
    header["national_result_offset"] = u32(0)
    header["national_result_detailed_number"] = u16(national_results * region_number[country_code])
    header["national_result_detailed_offset"] = u32(0)
    header["position_number"] = u16(0 if file_type == "q" or national_results == 0 else 22 if country_code == 77 else len(position_table[country_code]) if country_code in position_table.keys() else 0)
    header["position_offset"] = u32(0)
    header["worldwide_result_number"] = u8(worldwide_results)
    header["worldwide_result_offset"] = u32(0)
    header["worldwide_result_detailed_number"] = u16(0)
    header["worldwide_result_detailed_offset"] = u32(0)
    header["country_name_number"] = u16(len(countries) * 7 if file_type == "r" and nw == "w" else 0 if file_type == "q" or file_type == "r" else len(countries) * 7)
    header["country_name_offset"] = u32(0)

    return header
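These builders depend on u8/u16/u32 helpers defined elsewhere in the project. A minimal sketch of what they presumably do, assuming they pack integers big-endian (the Wii's native byte order) with Python's struct module:

import struct

def u8(value):
    return struct.pack(">B", value)  # unsigned 8-bit

def u16(value):
    return struct.pack(">H", value)  # unsigned 16-bit, big-endian

def u32(value):
    return struct.pack(">I", value)  # unsigned 32-bit, big-endian

def u32_littleendian(value):
    return struct.pack("<I", value)  # unsigned 32-bit, little-endian

def s16(value):
    return struct.pack(">h", value)  # signed 16-bit, big-endian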
Example #2
def make_worldwide_result_detailed_table(header):
    table = collections.OrderedDict()
    dictionaries.append(table)

    country_table_count = 0
    header["worldwide_result_detailed_offset"] = offset_count()

    worldwide_region_number = 0

    for i in results:
        if results[i][8] == "w":
            for j in range(len(countries)):  # len(countries) is 33
                total = 0
                for voters in range(0, 4):
                    total += results[i][voters][j]
                if total > 0:
                    table["unknown_%s" % num()] = u32(0)
                    table["male_voters_response_1_num_%s" % num()] = u32(
                        results[i][0][j])
                    table["male_voters_response_2_num_%s" % num()] = u32(
                        results[i][2][j])
                    table["female_voters_response_1_num_%s" % num()] = u32(
                        results[i][1][j])
                    table["female_voters_response_2_num_%s" % num()] = u32(
                        results[i][3][j])
                    table["country_table_count_%s" % num()] = u16(7)
                    table["starting_country_table_number_%s" %
                          num()] = u32(country_table_count)
                    worldwide_region_number += 1
                country_table_count += 7

    header["worldwide_result_detailed_number"] = u16(worldwide_region_number)

    return table
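offset_count() and num() are likewise assumed helpers. Judging from how the headers are back-patched after each table is built, offset_count() returns the running byte offset, i.e. the total length of everything appended to the global dictionaries list so far, packed as a u32; num() is just a unique-key counter. A sketch under that assumption:

dictionaries = []  # every table built in these examples is appended here
counter = 0

def offset_count():
    # Sum the lengths of all packed values written so far and return the
    # total as a u32, ready to drop into a header offset field.
    return u32(sum(len(value) for table in dictionaries
                   for value in table.values() if value is not None))

def num():
    # Unique suffix so repeated keys like "unknown_%s" never collide.
    global counter
    counter += 1
    return counter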
Example #3
def make_source_name_copyright(source_table, source, data):
    source_name_copyright = collections.OrderedDict()
    dictionaries.append(source_name_copyright)

    sources = []

    source_names = {}

    for article in list(data.values()):
        if article[8] not in sources:
            if article[8] in source_names:
                source_name = source_names[article[8]]

                source_table["name_size_%s" % article[8]] = u32(len(source_name))  # Size of the source name.

                source_table["name_offset_%s" % article[8]] = offset_count()  # Offset for the source name.

                source_name_copyright["source_name_read_%s" % article[8]] = source_name  # Read the source name.
                source_name_copyright["padding_source_name_%s" % article[8]] = u16(0)  # Padding for the source name.

            copyright = newsdownload.enc(source["copyright"].format(date.today().year))
            
            source_table["copyright_size_%s" % article[8]] = u32(len(copyright))  # Size of the copyright.

            source_table["copyright_offset_%s" % article[8]] = offset_count()  # Offset for the copyright.

            source_name_copyright["copyright_read_%s" % article[8]] = copyright  # Read the copyright.
            source_name_copyright["padding_copyright_%s" % article[8]] = u16(0)  # Padding for the copyright.

            sources.append(article[8])
Example #4
def make_articles(articles_table, pictures_table, data):
    articles = collections.OrderedDict()
    dictionaries.append(articles)

    numbers = 0

    for article in list(data.values()):
        numbers += 1
        articles_table["headline_%s_offset" % numbers] = offset_count()  # Offset for the headline.
        articles["headline_%s_read" % numbers] = article[3].replace(b'\n', b'')  # Read the headline.
        articles["padding_%s_headline" % numbers] = u16(0)  # Padding for the headline.
        articles_table["article_%s_offset" % numbers] = offset_count()  # Offset for the article.
        articles["article_%s_read" % numbers] = article[2]  # Read the article.
        articles["padding_%s_article" % numbers] = u16(0)  # Padding for the article.

        if article[4] is not None:
            if article[6] is not None:
                pictures_table["captions_%s_offset" % numbers] = offset_count()  # Offset for the caption.
                articles["captions_%s_read" % numbers] = article[6]  # Read the caption.
                articles["padding_%s_captions" % numbers] = u16(0)  # Padding for the caption.
            if article[5] is not None:
                pictures_table["credits_%s_offset" % numbers] = offset_count()  # Offset for the credits.
                articles["credits_%s_read" % numbers] = article[5]  # Read the credits.
                articles["padding_%s_credits" % numbers] = u16(0)  # Padding for the credits.

    return articles
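The data entries are indexed positionally throughout these examples. The layout is never shown in this listing, but from the way the slots are used it appears to be as follows (inferred, not authoritative):

# Inferred layout of one `data` value (an article), based on usage above:
# article[2] -> article body (UTF-16BE bytes)
# article[3] -> headline (UTF-16BE bytes, may contain b'\n')
# article[4] -> picture data, or None if the article has no picture
# article[5] -> picture credits, or None
# article[6] -> picture caption, or None
# article[7] -> location string (consumed by locations_download)
# article[8] -> source identifier, e.g. "AP" or "Reuters"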
Example #5
def make_header(data):
    header = collections.OrderedDict()
    dictionaries.append(header)

    header["updated_timestamp_1"] = get_timestamp(1)  # Updated time.
    header["term_timestamp"] = get_timestamp(2)  # Timestamp for the term.
    header["country_code"] = u32_littleendian(country_code)  # Wii Country Code.
    header["updated_timestamp_2"] = get_timestamp(1)  # 3rd timestamp.

    # List of languages that appear on the language select screen 

    numbers = 0

    for language in languages:
        numbers += 1

        header["language_select_%s" % numbers] = u8(language)

    # Fill the remaining language slots with null (255)

    while numbers < 16:
        numbers += 1

        header["language_select_%s" % numbers] = u8(255)

    header["language_code"] = u8(language_code)  # Wii language code.
    header["goo_flag"] = u8(0)  # Flag to make the Globe display "Powered by Goo".
    header["language_select_screen_flag"] = u8(0)  # Flag to bring up the language select screen.
    header["download_interval"] = u8(30)  # Interval in minutes to check for new articles to display on the Wii Menu.
    header["message_offset"] = u32(0)  # Offset for a message.
    header["topics_number"] = u32(0)  # Number of entries for the topics table.
    header["topics_offset"] = u32(0)  # Offset for the topics table.
    header["articles_number"] = u32(0)  # Number of entries for the articles table.
    header["articles_offset"] = u32(0)  # Offset for the articles table.
    header["source_number"] = u32(0)  # Number of entries for the source table.
    header["source_offset"] = u32(0)  # Offset for the source table.
    header["locations_number"] = u32(0)  # Number of entries for the locations.
    header["locations_offset"] = u32(0)  # Offset for the locations table.
    header["pictures_number"] = u32(0)  # Number of entries for the pictures table.
    header["pictures_offset"] = u32(0)  # Offset for the pictures table.
    header["count"] = u16(480)  # Count value.
    header["unknown"] = u16(0)  # Unknown.
    header["wiimenu_articles_number"] = u32(0)  # Number of Wii Menu article entries.
    header["wiimenu_articles_offset"] = u32(0)  # Offset for the Wii Menu article table.
    header["wiimenu_articles_offset"] = offset_count()  # Offset for the Wii Menu article table.

    numbers = 0

    headlines = []

    for article in list(data.values()):
        if numbers < 11:
            headline = article[3].replace(b'\n', b'')
            if headline not in headlines:
                numbers += 1
                headlines.append(headline)  # Track the stripped headline so duplicates are skipped.
                header["headline_%s_size" % numbers] = u32(0)  # Size of the headline.
                header["headline_%s_offset" % numbers] = u32(0)  # Offset for the headline.

    return header
Example #6
def make_wiimenu_articles(header, data):
    wiimenu_articles = {}
    dictionaries.append(wiimenu_articles)

    numbers = 0

    headlines = []

    for article in list(data.values()):
        if numbers < 11:
            if article[3] not in headlines:
                numbers += 1
                headlines.append(article[3])
                header["headline_%s_size" % numbers] = u32(
                    len(article[3].replace(b"\n", b""))
                )  # Size of the headline.
                header[
                    "headline_%s_offset" % numbers
                ] = offset_count()  # Offset for the headline.
                wiimenu_articles["headline_%s" % numbers] = article[3].replace(
                    b"\n", b""
                )  # Headline.

                # For some reason, the News Channel uses this padding to separate news articles.

                if (int(binascii.hexlify(offset_count()), 16) + 2) % 4 == 0:
                    wiimenu_articles["padding_%s" % numbers] = u16(0)  # Padding.
                elif (int(binascii.hexlify(offset_count()), 16) + 4) % 4 == 0:
                    wiimenu_articles["padding_%s" % numbers] = u32(0)  # Padding.

    header["wiimenu_articles_number"] = u32(
        numbers
    )  # Number of Wii Menu article entries.

    return wiimenu_articles
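The padding branch above keeps each headline aligned to a 4-byte boundary: an offset ending in 2 (mod 4) gets 2 bytes of padding, and an offset already on a boundary gets a full 4-byte word. A self-contained rework of that arithmetic, assuming offset_count() returns the current offset packed as a big-endian u32:

import binascii
import struct

def headline_padding(offset_bytes):
    offset = int(binascii.hexlify(offset_bytes), 16)  # unpack the u32 back to an int
    if (offset + 2) % 4 == 0:
        return struct.pack(">H", 0)  # 2 bytes reach the next 4-byte boundary
    elif (offset + 4) % 4 == 0:
        return struct.pack(">I", 0)  # already aligned: pad a full word
    return b""  # odd offsets do not occur with UTF-16BE text

print(len(headline_padding(struct.pack(">I", 6))))  # 2
print(len(headline_padding(struct.pack(">I", 8))))  # 4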
Example #7
def make_source_table(header, articles_table, source, data):
    source_table = {}
    dictionaries.append(source_table)

    header["source_offset"] = offset_count()  # Offset for the source table.

    source_articles = []

    numbers = 0

    numbers_article = 0

    for article in list(data.values()):
        if article[8] not in source_articles:
            source_articles.append(article[8])

            source_table["source_picture_%s" % article[8]] = u8(
                source["picture"]
            )  # Picture for the source.
            source_table["source_position_%s" % article[8]] = u8(
                source["position"]
            )  # Position for the source.
            source_table["padding_%s" % article[8]] = u16(0)  # Padding.

            source_table["pictures_size_%s" % article[8]] = u32(
                0
            )  # Size of the source picture.
            source_table["pictures_offset_%s" % article[8]] = u32(
                0
            )  # Offset for the source picture.

            source_table["name_size_%s" % article[8]] = u32(
                0
            )  # Size of the source name.
            source_table["name_offset_%s" % article[8]] = u32(
                0
            )  # Offset for the source name.

            source_table["copyright_size_%s" % article[8]] = u32(
                0
            )  # Size of the copyright.
            source_table["copyright_offset_%s" % article[8]] = u32(
                0
            )  # Offset for the copyright.

            numbers += 1

    for article in list(data.values()):
        numbers_article += 1

        articles_table["source_%s_number" % numbers_article] = u32(
            source_articles.index(article[8])
        )  # Number for the source.

    header["source_number"] = u32(numbers)  # Number of entries for the source table.

    return source_table
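Note that the name and copyright size/offset fields are written here as u32(0) placeholders; they are back-patched with real lengths and offset_count() values by make_source_name_copyright (Examples #3 and #15) once the actual strings are appended.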
Example #8
def make_topics(topics_table, topics_news):
    topics = collections.OrderedDict()
    dictionaries.append(topics)

    numbers = 0

    for keys in list(topics_news.keys()):
        numbers += 1
        topics_table["topics_%s_offset" % str(numbers)] = offset_count()  # Offset for the topics.
        topics["topics_%s_read" % numbers] = newsdownload.enc(keys)  # Read the topics.
        topics["padding_%s_topics" % numbers] = u16(0)  # Padding for the topics.

    return topics
Example #9
def make_locations(locations_data, locations_table):
    locations = collections.OrderedDict()
    dictionaries.append(locations)

    numbers = 0

    for loc_text in list(locations_data.values()):
        locations_table["location_%s_offset" % numbers] = offset_count()  # Offset for the locations.

        locations["location_%s_read" % numbers] = loc_text[1]  # Read the locations.
        locations["nullbyte_%s_locations" % numbers] = u16(0)  # Null byte for the locations.

        numbers += 1

    return locations
Example #10
def make_locations(locations_data, locations_table):
    locations = collections.OrderedDict()
    dictionaries.append(locations)

    for numbers, locations_strings in enumerate(locations_data):
        locations_table["location_%s_offset" %
                        numbers] = offset_count()  # Offset for the locations.

        locations["location_%s_read" %
                  numbers] = locations_strings  # Read the locations.
        locations["nullbyte_%s_locations" % numbers] = u16(
            0)  # Null byte for the locations.

    return locations
Example #11
def make_topics(topics_table, topics_news):
    topics = collections.OrderedDict()
    dictionaries.append(topics)

    numbers = 0

    for keys in topics_news.keys():
        numbers += 1
        topics_table["topics_%s_offset" %
                     str(numbers)] = offset_count()  # Offset for the topics.
        topics["topics_%s_read" % numbers] = keys.decode("utf-8").encode(
            "utf-16be")  # Read the topics.
        topics["padding_%s_topics" % numbers] = u16(
            0)  # Padding for the topics.

    return topics
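The decode("utf-8").encode("utf-16be") dance converts the UTF-8 byte strings coming out of the downloader into the UTF-16BE the channel's file format expects, two big-endian bytes per character:

import binascii

topic = b"News"  # UTF-8 bytes from the downloader
packed = topic.decode("utf-8").encode("utf-16be")
print(binascii.hexlify(packed))  # b'004e006500770073'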
Example #12
def locations_download(language_code, data):
    locations = collections.OrderedDict()
    gmaps = googlemaps.Client(key=config["google_maps_api_key"])
    """This dictionary is used to determine languages."""

    languages = {
        0: "ja",
        1: "en",
        2: "de",
        3: "fr",
        4: "es",
        5: "it",
        6: "nl",
    }

    for keys, values in list(data.items()):
        location = values[7]

        if location is not None:
            if location not in locations:
                locations[location] = [None, None, []]

            locations[location][2].append(keys)

    for name in list(locations.keys()):
        if name == "":
            continue

        uni_name = name if languages[language_code] == "ja" else unidecode(
            name
        )  # If using unidecode with Japanese, it'll translate all the characters to English

        print(uni_name)

        coordinates = None
        loc_name = None

        if name not in cities:
            try:
                read = gmaps.geocode(uni_name,
                                     language=languages[language_code])
                loc_name = read[0]["address_components"][0]["long_name"]

                if languages[language_code] == "ja":
                    loc_name = enc(loc_name)
                else:
                    loc_name = enc(unidecode(loc_name))
                """Not doing anything with these."""

                country = u8(0)
                region = u8(0)
                location = u16(0)
                zoom_factor = u32_littleendian(6)

                coordinates = s16(int(read[0]["geometry"]["location"]["lat"] / (360 / 65536))) + \
                                s16(int(read[0]["geometry"]["location"]["lng"] / (360 / 65536))) + \
                                country + region + location + zoom_factor
            except Exception:
                log("There was an error downloading the location data.", "INFO")

        else:
            coordinates = binascii.unhexlify(cities[name][0] +
                                             "0000000006000000")
            loc_name = enc(cities[name][1])

        if locations[name][0] is None and coordinates is not None:
            locations[name][0] = coordinates

        if locations[name][1] is None and loc_name is not None:
            locations[name][1] = loc_name

    return locations
Example #13
def locations_download(
    language_code, data
):  # using Google Maps API is so much better than the crap Nintendo used for say, AP news.
    locations = {}
    gmaps = googlemaps.Client(key=config["google_maps_api_key"])

    languages = {  # corresponds to the Wii's language codes
        0: "ja",
        1: "en",
        2: "de",
        3: "fr",
        4: "es",
        5: "it",
        6: "nl",
    }

    for keys, values in list(data.items()):
        location = values[7]

        if location and location != "":
            if location not in locations:
                locations[location] = [None, None, []]

            locations[location][2].append(keys)

    for name in list(locations.keys()):
        if name == "":
            continue

        uni_name = (
            name if languages[language_code] == "ja" else unidecode(name)
        )  # if using unidecode with Japanese, it'll translate all the characters to English

        print(uni_name)

        coordinates = None

        if name not in cities:
            try:
                read = gmaps.geocode(uni_name,
                                     language=languages[language_code])
                loc_name = read[0]["address_components"][0]["long_name"]

                if languages[language_code] == "ja":
                    loc_name = enc(loc_name)
                else:
                    loc_name = enc(unidecode(loc_name))
                """Not doing anything with these."""

                country = u8(0)
                region = u8(0)
                location = u16(0)
                zoom_factor = u32_littleendian(
                    6
                )  # Nintendo used the value of 3 for states and countries but we probably don't have any articles that are just states or countries

                coordinates = (
                    s16(
                        int(read[0]["geometry"]["location"]["lat"] /
                            (360 / 65536))) + s16(
                                int(read[0]["geometry"]["location"]["lng"] /
                                    (360 / 65536))) + country + region +
                    location + zoom_factor
                )  # latitude and longitude is divided by the value of 360 (degrees of a full circle) divided by the max int for a 16-bit int
            except Exception as e:
                ex = "There was a error downloading the location data - line {}: {}".format(
                    sys.exc_info()[-1].tb_lineno, str(e))
                print(ex)
                log(ex, "INFO")

        else:
            coordinates = binascii.unhexlify(cities[name][0] +
                                             "0000000006000000")
            loc_name = enc(cities[name][1])

        if locations[name][0] is None and coordinates is not None:
            locations[name][0] = coordinates
        else:
            del locations[name]
            continue

        if locations[name][1] is None:
            locations[name][1] = loc_name

    return locations
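Both variants quantize latitude and longitude by dividing by 360 / 65536 (≈ 0.00549 degrees per unit), mapping the full 360° circle onto the range of a 16-bit integer. A worked example for Tokyo, assuming the s16 helper packs a big-endian signed short:

import binascii
import struct

def s16(value):
    return struct.pack(">h", value)  # assumed helper

step = 360 / 65536  # 0.0054931640625 degrees per unit
lat, lng = 35.6895, 139.6917  # Tokyo

print(int(lat / step), int(lng / step))  # 6497 25430
print(binascii.hexlify(s16(int(lat / step)) + s16(int(lng / step))))  # b'19616356'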
Example #14
def locations_download(language_code, data):
    locations = collections.OrderedDict()
    locations_return = collections.OrderedDict()
    gmaps = googlemaps.Client(key=config["google_maps_api_key"])

    """This dictionary is used to determine languages."""

    languages = {
        0: "ja",
        1: "en",
        2: "de",
        3: "fr",
        4: "es",
        5: "it",
        6: "nl",
    }

    for keys, values in data.items():
        location = values[7]

        if location is not None:
            if location not in locations:
                locations[location] = []

            locations[location].append(keys)

    for name in locations.keys():
        read = None

        if name == "":
            continue

        uni_name = name if languages[language_code] == "ja" else unidecode(name)

        print(uni_name)

        if name not in cities:
            try:
                read = gmaps.geocode(uni_name, language=languages[language_code])
            except Exception:
                log("There was an error downloading the location data.", "INFO")

        if read is None and name in cities:
            coordinates = binascii.unhexlify(cities[name][0] + "0000000006000000")
            new_name = enc(cities[name][1])

            for filenames in locations[name]:
                if new_name not in locations_return:
                    locations_return[new_name] = [coordinates, []]

                locations_return[new_name][1].append(filenames)

        elif read is not None:
            try:
                new_name = read[0]["address_components"][0]["long_name"].encode("utf-16be")

                """Not doing anything with these at this time."""

                country = u8(0)
                region = u8(0)
                location = u16(0)
                zoom_factor = u32_littleendian(6)

                coordinates = u16(int(read[0]["geometry"]["location"]["lat"] / 0.0054931640625) & 0xFFFF) + \
                    u16(int(read[0]["geometry"]["location"]["lng"] / 0.0054931640625) & 0xFFFF) + \
                    country + region + location + zoom_factor  # 0.0054931640625 = 360 / 65536

                for filenames in locations[name]:
                    if new_name not in locations_return:
                        locations_return[new_name] = [coordinates, []]

                    locations_return[new_name][1].append(filenames)
            except Exception:
                log("There was an error downloading the location data.", "INFO")

    return locations_return
Example #15
def make_source_name_copyright(source_table, data):
    source_name_copyright = collections.OrderedDict()
    dictionaries.append(source_name_copyright)

    sources = []

    source_names = {}
    """Text for the copyright. Some of these I had to make up, because if you don't specify a copyright there will be a line that will be in the way in the news article."""

    copyrights = {
        "AP":
        ("Copyright %s The Associated Press. All rights reserved. This material may not be published, broadcast, rewritten or redistributed."
         % date.today().year).decode("utf-8").encode("utf-16be"),
        "Reuters":
        ("© %s Thomson Reuters. All rights reserved. Republication or redistribution of Thomson Reuters content, including by framing or similar means, is prohibited without the prior written consent of Thomson Reuters. Thomson Reuters and the Kinesis logo are trademarks of Thomson Reuters and its affiliated companies."
         % date.today().year).decode("utf-8").encode("utf-16be"),
        "AFP":
        ("All reproduction and representation rights reserved. © %s Agence France-Presse"
         % date.today().year).decode("utf-8").encode("utf-16be"),
        "AFP_French":
        ("Tous droits de reproduction et de diffusion réservés. © %s Agence France-Presse"
         % date.today().year).decode("utf-8").encode("utf-16be"),
        "ANP":
        ("All reproduction and representation rights reserved. © %s B.V. Algemeen Nederlands Persbureau ANP"
         % date.today().year).decode("utf-8").encode("utf-16be"),
        "ANSA":
        ("© %s ANSA, Tutti i diritti riservati. Testi, foto, grafica non potranno essere pubblicali, riscritti, commercializzati, distribuiti, videotrasmessi, da parte dagli tanti e del terzi in genere, in alcun modo e sotto qualsiasi forma."
         % date.today().year).decode("utf-8").encode("utf-16be"),
        "SID":
        ("Alle Rechte für die Wiedergabe, Verwertung und Darstellung reserviert. © %s SID"
         % date.today().year).decode("utf-8").encode("utf-16be"),
        "dpa":
        ("Alle Rechte für die Wiedergabe, Verwertung und Darstellung reserviert. © %s dpa"
         % date.today().year).decode("utf-8").encode("utf-16be"),
        "NU.nl":
        ("© %s Sanoma Digital The Netherlands B.V. NU - onderdeel van Sanoma Media Netherlands Group"
         % date.today().year).decode("utf-8").encode("utf-16be"),
        "Reuters_Japanese":
        ("© Copyright Reuters %s. All rights reserved. ユーザーは、自己の個人的使用及び非商用目的に限り、このサイトにおけるコンテンツの抜粋をダウンロードまたは印刷することができます。ロイターが事前に書面により承認した場合を除き、ロイター・コンテンツを再発行や再配布すること(フレーミングまたは類似の方法による場合を含む)は、明示的に禁止されています。Reutersおよび地球をデザインしたマークは、登録商標であり、全世界のロイター・グループの商標となっています。 "
         % date.today().year).decode("utf-8").encode("utf-16be"),
    }

    for article in data.values():
        if article[8] not in sources:
            if article[8] in source_names:
                source_name = source_names[article[8]]

                source_table["name_size_%s" % article[8]] = u32(
                    len(source_name))  # Size of the source name.

                source_table[
                    "name_offset_%s" %
                    article[8]] = offset_count()  # Offset for the source name.

                source_name_copyright[
                    "source_name_read_%s" %
                    article[8]] = source_name  # Read the source name.
                source_name_copyright["padding_source_name_%s" %
                                      article[8]] = u16(
                                          0)  # Padding for the source name.

            copyright = copyrights[article[8]]

            source_table["copyright_size_%s" % article[8]] = u32(
                len(copyright))  # Size of the copyright.

            source_table[
                "copyright_offset_%s" %
                article[8]] = offset_count()  # Offset for the copyright.

            source_name_copyright[
                "copyright_read_%s" %
                article[8]] = copyright  # Read the copyright.
            source_name_copyright["padding_copyright_%s" % article[8]] = u16(
                0)  # Padding for the copyright.

            sources.append(article[8])
Example #16
def make_source_table(header, articles_table, data):
    source_table = collections.OrderedDict()
    dictionaries.append(source_table)

    header["source_offset"] = offset_count()  # Offset for the source table.

    source_articles = []
    """These are the picture and position values."""

    source_nums = {
        "AP": [0, 1],
        "Reuters": [0, 4],
        "AFP": [4, 4],
        "AFP_French": [4, 4],
        "ANP": [0, 5],
        "ANSA": [6, 6],
        "dpa": [0, 4],
        "SID": [0, 4],
        "NU.nl": [0, 5],
        "Reuters_Japanese": [0, 4],
    }

    numbers = 0

    numbers_article = 0

    for article in data.values():
        if article[8] not in source_articles:
            source_articles.append(article[8])

            source = source_nums[article[8]]

            source_table["source_picture_%s" % article[8]] = u8(
                source[0])  # Picture for the source.
            source_table["source_position_%s" % article[8]] = u8(
                source[1])  # Position for the source.
            source_table["padding_%s" % article[8]] = u16(0)  # Padding.

            source_table["pictures_size_%s" % article[8]] = u32(
                0)  # Size of the source picture.
            source_table["pictures_offset_%s" % article[8]] = u32(
                0)  # Offset for the source picture.

            source_table["name_size_%s" % article[8]] = u32(
                0)  # Size of the source name.
            source_table["name_offset_%s" % article[8]] = u32(
                0)  # Offset for the source name.

            source_table["copyright_size_%s" % article[8]] = u32(
                0)  # Size of the copyright.
            source_table["copyright_offset_%s" % article[8]] = u32(
                0)  # Offset for the copyright.

            numbers += 1

    for article in data.values():
        numbers_article += 1

        articles_table["source_%s_number" % numbers_article] = u32(
            source_articles.index(article[8]))  # Number for the source.

    header["source_number"] = u32(
        numbers)  # Number of entries for the source table.

    return source_table
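Every builder appends its table to the shared dictionaries list, so presumably the final file is produced by concatenating every packed value in insertion order. A minimal sketch of that last step, under that assumption:

def write_file(path):
    # Concatenate every packed value from every table, in the order the
    # tables were built, and write the result as one binary blob.
    with open(path, "wb") as f:
        for table in dictionaries:
            for value in table.values():
                f.write(value)  # each value is already packed bytes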