Example #1
def main():
  """Processes input files and outputs results in specified format.
  """
  # Argument parsing.
  parser = argparse.ArgumentParser(
      description='Display translation status by app and language.')
  parser.add_argument('--key_file', default='json' + os.path.sep + 'keys.json',
                      help='file with complete list of keys.')
  parser.add_argument('--output', default='text', choices=['text', 'html'],
                      help='output format')
  parser.add_argument('--verbose', action='store_true', default=False,
                      help='whether to indicate which messages were translated')
  parser.add_argument('--app', default=None, choices=APPS,
                      help='if set, only consider the specified app (prefix).')
  parser.add_argument('lang_files', nargs='+',
                      help='names of JSON files to examine')
  args = parser.parse_args()

  # Read in JSON files.
  messages = {}  # A dictionary of dictionaries.
  messages[TOTAL] = read_json_file(args.key_file)
  for lang_file in args.lang_files:
    prefix = get_prefix(os.path.split(lang_file)[1])
    # Skip non-language files.
    if prefix not in ['qqq', 'keys']:
      messages[prefix] = read_json_file(lang_file)

  # Output results.
  if args.output == 'text':
    output_as_text(messages, args.verbose)
  elif args.output == 'html':
    output_as_html(messages, args.verbose)
  else:
    print('No output?!')
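Every example in this collection calls a read_json_file helper defined elsewhere in its project (the plotting examples pass extra filter arguments to their own variant). A minimal sketch of the common single-argument form, assuming UTF-8 JSON input; treat it as an illustration, not any project's original:

import codecs
import json

def read_json_file(filename):
  """Reads a UTF-8 (optionally BOM-prefixed) JSON file into a dict."""
  # 'utf-8-sig' transparently skips a leading byte-order mark if present.
  with codecs.open(filename, 'r', 'utf-8-sig') as infile:
    return json.load(infile)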
Example #2
def _process_file(path_to_json, target_lang, key_dict):
    """Creates an .xlf file corresponding to the specified .json input file.

    The name of the input file must be target_lang followed by '.json'.
    The name of the output file will be target_lang followed by '.xlf'.

    Args:
        path_to_json: Path to the directory of xx.json files.
        target_lang: An IETF language code (RFC 4646), such as 'es' or 'pt-br'.
        key_dict: Dictionary mapping Blockly keys (e.g., Maze.turnLeft) to
            Closure keys (hash numbers).

    Raises:
        IOError: An I/O error occurred with an input or output file.
        InputError: Input JSON could not be parsed.
        KeyError: Key found in input file but not in key file.
    """
    keyfile = os.path.join(path_to_json, target_lang + '.json')
    j = read_json_file(keyfile)
    out_file = _create_xlf(target_lang)
    for key in j:
        if key != '@metadata':
            try:
                identifier = key_dict[key]
            except KeyError as e:
                # Note: `args` here is the module-level namespace set in main().
                print('Key "%s" is in %s but not in %s' %
                      (key, keyfile, args.key_file))
                raise e
            target = j.get(key)
            out_file.write(u"""
      <trans-unit id="{0}" datatype="html">
        <target>{1}</target>
      </trans-unit>""".format(identifier, target))
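The _create_xlf helper is not shown above. A hypothetical sketch consistent with the call site, assuming it opens <target_lang>.xlf and writes an XLIFF 1.2 header; the exact header, path, and attributes are assumptions:

import codecs

def _create_xlf(target_lang):
    """Hypothetical sketch: opens <target_lang>.xlf, writes the opening
    XLIFF boilerplate, and returns the still-open file object."""
    out_file = codecs.open(target_lang + '.xlf', 'w', 'utf-8')
    out_file.write(u"""<?xml version="1.0" encoding="UTF-8"?>
<xliff version="1.2" xmlns="urn:oasis:names:tc:xliff:document:1.2">
  <file target-language="{0}" datatype="plaintext" original="messages">
    <body>""".format(target_lang))
    return out_file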
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("json_file", help=".json file containing the lab data")
    parser.add_argument("--outfolder",
                        "-o",
                        help="output folder",
                        default="output")
    parser.add_argument("--attribute-x",
                        "-ax",
                        help="attribute to use in x axis",
                        default="max_abstraction_states")
    parser.add_argument("--attribute-y",
                        "-ay",
                        help="attribute to use in y axis",
                        default="computation_time")
    parser.add_argument("--unsolvable-only",
                        "-u",
                        help="only count the unsolvable instances",
                        dest='unsolvable_only',
                        action='store_true')
    parser.add_argument("--log-x",
                        "-lx",
                        help='use log scaling on x axis',
                        dest='log_x',
                        action='store_true')
    parser.add_argument("--log-y",
                        "-ly",
                        help='use log scaling on y axis',
                        dest='log_y',
                        action='store_true')
    parser.add_argument(
        "--filter",
        "-f",
        help="filter the intersection domains and problems only",
        dest='filter',
        action='store_true')
    # parser.add_argument("--latex", "-ltx", help="print the data in latex", dest='latex',
    # action='store_true')
    # parser.add_argument('--order', '-ord',
    # help='space separated string, the order of the algorithm column shown in the table',
    # type=str)
    # parser.set_defaults(log=False, unsolvable_only=False, latex=False)
    parser.set_defaults(log_x=False, log_y=False, unsolvable_only=False)
    args = parser.parse_args()
    # if args.order is not None:
    #   args.order = args.order.split(' ')

    data, problems = read_json_file(args.json_file, args.filter,
                                    args.unsolvable_only)
    if not check_attribute_exists(data, args.attribute_x):
        print('Attribute', args.attribute_x, 'does not exist')
        return
    if not check_attribute_exists(data, args.attribute_y):
        print('Attribute', args.attribute_y, 'does not exist')
        return
    create_dirs_if_necessary(data, args.outfolder)
    data = get_graph_data_per_problem(data, args.attribute_x, args.attribute_y)
    create_graph_from_plot_data(data, args.outfolder, args.attribute_x,
                                args.attribute_y, args.log_x, args.log_y)
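check_attribute_exists is defined elsewhere; a hypothetical sketch, assuming data maps each algorithm name to a list of per-run dictionaries (the lab-data layout is an assumption):

def check_attribute_exists(data, attribute):
    """Hypothetical sketch: True if at least one run records the attribute."""
    return any(attribute in run
               for runs in data.values()
               for run in runs)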
Example #4
def save_json_wikidata_author_data_dir(inputdir):
    """Stores each JSON file under inputdir as Wikidata author data in Neo4j."""
    neo_db = Neo4jManager()
    json_wikidata_author_data_label = neo_db.create_label(JSON_WIKIDATA_AUTHOR_DATA_LABEL)
    for (dirpath, dirnames, filenames) in walk(inputdir):
        for filename in filenames:
            data = common.read_json_file(dirpath + common.SLASH + filename)
            neo_db.save_json_wikidata_author_data(json_wikidata_author_data_label, data, filename.replace(common.JSON_EXT,''))
Example #5
def load_constants(filename):
  """Read in constants file, which must be output in every language."""
  constant_defs = read_json_file(filename)
  constants_text = '\n'
  for key in constant_defs:
    value = constant_defs[key]
    value = value.replace('"', '\\"')
    constants_text += '\nBlockly.Msg.{0} = "{1}";'.format(key, value)
  return constants_text
Example #6
def load_constants(filename):
    """Read in constants file, which must be output in every language."""
    constant_defs = read_json_file(filename)
    constants_text = '\n'
    for key, val in constant_defs.items():
        value = val.replace('"', '\\"')
        constants_text += u'\n/** @export */ Blockly.Msg.{0} = "{1}";'.format(
            key, value)
    return constants_text
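A usage sketch for either version of load_constants; the file name, key, and value below are made up:

# If json/constants.json contained {"LOGIC_HUE": "210"}, then
#   load_constants('json/constants.json')
# would return a string whose last line is (Example #6 variant):
#   /** @export */ Blockly.Msg.LOGIC_HUE = "210";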
Example #7
def save_json_wikidata_author_data_dir(inputdir):
    """Stores each JSON file under inputdir as Wikidata author data in Neo4j."""
    neo_db = Neo4jManager()
    json_wikidata_author_data_label = neo_db.create_label(JSON_WIKIDATA_AUTHOR_DATA_LABEL)
    for (dirpath, dirnames, filenames) in walk(inputdir):
        for filename in filenames:
            data = common.read_json_file(dirpath + common.SLASH + filename)
            neo_db.save_json_wikidata_author_data(
                json_wikidata_author_data_label, data, filename.replace(common.JSON_EXT, "")
            )
Example #8
def summarize_compositions():
    """Writes a CSV summarizing composition counts per Freebase input file."""
    with codecs.open(SUMMARY_COMPOSITIONS_FILE, 'w') as csvfile:
        writer = csv.DictWriter(csvfile, delimiter=';', fieldnames=composition_fieldnames, lineterminator='\n')
        writer.writeheader()

        for inputfile in glob.glob(FREEBASE_COMPOSITIONS_DIR + common.SLASH + '*'):
            print(inputfile)
            compositions_content_json = common.read_json_file(inputfile)
            #compositions_content_json = json.loads(compositions_content)
            composition_list = compositions_content_json['result'][0]['compositions']
            entry = build_composition_entry(inputfile.split('\\')[-1], len(composition_list))
            writer.writerow(entry)
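build_composition_entry and composition_fieldnames are defined elsewhere; a hypothetical sketch consistent with the call site (the field names are assumptions):

composition_fieldnames = ['author_file', 'compositions_count']  # assumed names

def build_composition_entry(author_file, composition_count):
    """Hypothetical sketch: builds one CSV row for the summary writer."""
    return {'author_file': author_file,
            'compositions_count': composition_count}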
Example #9
def main():
    """Processes input files and outputs results in specified format.
  """
    # Argument parsing.
    parser = argparse.ArgumentParser(description="Display translation status by app and language.")
    parser.add_argument(
        "--key_file", default="json" + os.path.sep + "keys.json", help="file with complete list of keys."
    )
    parser.add_argument("--output", default="text", choices=["text", "html", "csv"], help="output format")
    parser.add_argument(
        "--verbose",
        action="store_true",
        default=False,
        help="whether to indicate which messages were translated " "(only used in text and html output modes)",
    )
    parser.add_argument("--app", default=None, choices=APPS, help="if set, only consider the specified app (prefix).")
    parser.add_argument("lang_files", nargs="+", help="names of JSON files to examine")
    args = parser.parse_args()
    apps = [args.app] if args.app else APPS

    # Read in JSON files.
    messages = {}  # A dictionary of dictionaries.
    messages[TOTAL] = read_json_file(args.key_file)
    for lang_file in args.lang_files:
        prefix = get_prefix(os.path.split(lang_file)[1])
        # Skip non-language files.
        if prefix not in ["qqq", "keys"]:
            messages[prefix] = read_json_file(lang_file)

    # Output results.
    if args.output == "text":
        output_as_text(messages, apps, args.verbose)
    elif args.output == "html":
        output_as_html(messages, apps, args.verbose)
    elif args.output == "csv":
        output_as_csv(messages, apps)
    else:
        print ("No output?!")
Example #10
def analyze_compositions():
    """Writes a CSV of each author's compositions with inferred parent works."""
    with codecs.open(AUTHOR_COMPOSITIONS_FILE, 'w') as csvfile:
        writer = csv.DictWriter(csvfile,
                                delimiter=';',
                                fieldnames=author_composition_fieldnames,
                                lineterminator='\n')
        writer.writeheader()

        for inputfile in glob.glob(FREEBASE_COMPOSITIONS_DIR + common.SLASH +
                                   '*'):
            print(inputfile)
            compositions_content_json = common.read_json_file(inputfile)
            name = compositions_content_json['result'][0]['name']
            composition_json_list = compositions_content_json['result'][0][
                'compositions']
            composition_list = get_composition_string_list_from_json_list(
                composition_json_list)

            if len(composition_list) > 0:
                #                parent = assign_parent(composition_list[0])

                parent = ''
                for index, composition in enumerate(composition_list):
                    main = composition
                    if index == 0:
                        parent = assign_parent(composition_list[0])
                    else:
                        #if parent not in composition:
                        if not composition.startswith(parent):
                            parent = assign_parent(composition)
                        else:
                            parent_new = common.find_common_substring(
                                parent, composition_list[index - 1])
                            #parent_new = common.find_common_parent(parent,composition_list[index-1])
                            # parent ending must be either ' ' or ','
                            if parent_new != '':
                                print('parent:', parent, 'parent_new:', parent_new, 'composition:', composition)
                                if (len(parent_new) <= len(composition)
                                    and composition[len(parent_new)-1] != ' '
                                    and composition[len(parent_new)-1] != ','):
                                    parent_new = composition
                                parent = parent_new
                    entry = build_author_composition_entry(
                        common.toByteStr(name), composition, parent, main)
                    writer.writerow(entry)
Example #11
def summarize_compositions():
    """Writes a CSV summarizing composition counts per Freebase input file."""
    with codecs.open(SUMMARY_COMPOSITIONS_FILE, 'w') as csvfile:
        writer = csv.DictWriter(csvfile,
                                delimiter=';',
                                fieldnames=composition_fieldnames,
                                lineterminator='\n')
        writer.writeheader()

        for inputfile in glob.glob(FREEBASE_COMPOSITIONS_DIR + common.SLASH +
                                   '*'):
            print(inputfile)
            compositions_content_json = common.read_json_file(inputfile)
            #compositions_content_json = json.loads(compositions_content)
            composition_list = compositions_content_json['result'][0][
                'compositions']
            entry = build_composition_entry(
                inputfile.split('\\')[-1], len(composition_list))
            writer.writerow(entry)
Example #12
def analyze_compositions():
    """Writes a CSV of each author's compositions with inferred parent works."""
    with codecs.open(AUTHOR_COMPOSITIONS_FILE, 'w') as csvfile:
        writer = csv.DictWriter(csvfile, delimiter=';', fieldnames=author_composition_fieldnames, lineterminator='\n')
        writer.writeheader()

        for inputfile in glob.glob(FREEBASE_COMPOSITIONS_DIR + common.SLASH + '*'):
            print(inputfile)
            compositions_content_json = common.read_json_file(inputfile)
            name = compositions_content_json['result'][0]['name']
            composition_json_list = compositions_content_json['result'][0]['compositions']
            composition_list = get_composition_string_list_from_json_list(composition_json_list)

            if len(composition_list) > 0:
#                parent = assign_parent(composition_list[0])

                parent = ''
                for index, composition in enumerate(composition_list):
                    main = composition
                    if index == 0:
                        parent = assign_parent(composition_list[0])
                    else:
                        #if parent not in composition:
                        if not composition.startswith(parent):
                            parent = assign_parent(composition)
                        else:
                            parent_new = common.find_common_substring(parent,composition_list[index-1])
                            #parent_new = common.find_common_parent(parent,composition_list[index-1])
                            # parent ending must be either ' ' or ','
                            if parent_new != '':
                                print('parent:', parent, 'parent_new:', parent_new, 'composition:', composition)
                                if (len(parent_new) <= len(composition)
                                    and composition[len(parent_new)-1] != ' '
                                    and composition[len(parent_new)-1] != ','):
                                    parent_new = composition
                                parent = parent_new
                    entry = build_author_composition_entry(common.toByteStr(name), composition, parent, main)
                    writer.writerow(entry)
Example #13
def aggregate_compositions_data():
    """Writes a CSV of (composition_id, mid, name) for each composition found."""
    with codecs.open(COMPOSITIONS_DATA_FILE, 'w') as csvfile:
        writer = csv.DictWriter(csvfile, delimiter=';', fieldnames=composition_data_fieldnames, lineterminator='\n')
        writer.writeheader()

        for inputfile in glob.glob(FREEBASE_COMPOSITIONS_DIR + common.SLASH + '*'):
            print(inputfile)
            compositions_content_json = common.read_json_file(inputfile)
            composition_json_list = compositions_content_json['result'][0]['compositions']
            composition_list = get_composition_id_list_from_json_list(composition_json_list)

            if len(composition_list) > 0:
                for index, composition_id in enumerate(composition_list):
                    composition_data = retrieve_compositions_data(composition_id)
                    if composition_data:
                        try:
                            mid = composition_data['result'][0]['mid']
                            name = composition_data['result'][0]['name']
                            entry = build_composition_data_entry(composition_id, mid, common.toByteStr(name))
                            writer.writerow(entry)
                        except (KeyError, IndexError):
                            print('Composition values mid and/or name is empty.')
Example #14
def main():
    """Generate .js files defining Blockly core and language messages."""

    # Process command-line arguments.
    parser = argparse.ArgumentParser(description="Convert JSON files to JS.")
    parser.add_argument("--source_lang", default="en", help="ISO 639-1 source language code")
    parser.add_argument(
        "--source_lang_file", default=os.path.join("json", "en.json"), help="Path to .json file for source language"
    )
    parser.add_argument(
        "--source_synonym_file",
        default=os.path.join("json", "synonyms.json"),
        help="Path to .json file with synonym definitions",
    )
    parser.add_argument("--output_dir", default="js/", help="relative directory for output files")
    parser.add_argument("--key_file", default="keys.json", help="relative path to input keys file")
    parser.add_argument("--quiet", action="store_true", default=False, help="do not write anything to standard output")
    parser.add_argument("files", nargs="+", help="input files")
    args = parser.parse_args()
    if not args.output_dir.endswith(os.path.sep):
        args.output_dir += os.path.sep

    # Read in source language .json file, which provides any values missing
    # in target languages' .json files.
    source_defs = read_json_file(os.path.join(os.curdir, args.source_lang_file))
    # Make sure the source file doesn't contain a newline or carriage return.
    for key, value in source_defs.items():
        if _NEWLINE_PATTERN.search(value):
            print("ERROR: definition of {0} in {1} contained a newline character.".format(key, args.source_lang_file))
            sys.exit(1)
    sorted_keys = sorted(source_defs.keys())

    # Read in synonyms file, which must be output in every language.
    synonym_defs = read_json_file(os.path.join(os.curdir, args.source_synonym_file))
    synonym_text = "\n".join(
        ["Blockscad.Msg.{0} = Blockscad.Msg.{1};".format(key, synonym_defs[key]) for key in synonym_defs]
    )

    # Create each output file.
    for arg_file in args.files:
        (_, filename) = os.path.split(arg_file)
        target_lang = filename[: filename.index(".")]
        if target_lang not in ("qqq", "keys", "synonyms"):
            target_defs = read_json_file(os.path.join(os.curdir, arg_file))

            # Verify that keys are 'ascii'
            bad_keys = [key for key in target_defs if not string_is_ascii(key)]
            if bad_keys:
                print(u"These keys in {0} contain non ascii characters: {1}".format(filename, ", ".join(bad_keys)))

            # If there's a '\n' or '\r', remove it and print a warning.
            for key, value in target_defs.items():
                if _NEWLINE_PATTERN.search(value):
                    print(u"WARNING: definition of {0} in {1} contained " "a newline character.".format(key, arg_file))
                    target_defs[key] = _NEWLINE_PATTERN.sub(" ", value)

            # Output file.
            outname = os.path.join(os.curdir, args.output_dir, target_lang + ".js")
            with codecs.open(outname, "w", "utf-8") as outfile:
                outfile.write(
                    """// This file was automatically generated.  Do not modify.

'use strict';

goog.provide('Blockscad.Msg.{0}');


""".format(
                        target_lang.replace("-", ".")
                    )
                )
                # For each key in the source language file, output the target value
                # if present; otherwise, output the source language value with a
                # warning comment.
                for key in sorted_keys:
                    if key in target_defs:
                        value = target_defs[key]
                        comment = ""
                        del target_defs[key]
                    else:
                        value = source_defs[key]
                        comment = "  // untranslated"
                    value = value.replace('"', '\\"')
                    outfile.write(u'Blockscad.Msg.{0} = "{1}";{2}\n'.format(key, value, comment))

                # Announce any keys defined only for target language.
                if target_defs:
                    extra_keys = [key for key in target_defs if key not in synonym_defs]
                    synonym_keys = [key for key in target_defs if key in synonym_defs]
                    if not args.quiet:
                        if extra_keys:
                            print(u"These extra keys appeared in {0}: {1}".format(filename, ", ".join(extra_keys)))
                        if synonym_keys:
                            print(u"These synonym keys appeared in {0}: {1}".format(filename, ", ".join(synonym_keys)))

                outfile.write(synonym_text)

            if not args.quiet:
                print("Created {0}.".format(outname))
Example #15
def update_i18n_messages(blockly_ios_root, blockly_web_root):
    """Updates JSON message files for all supported languages in Blockly iOS,
    using a local Blockly web repo as the source.

    Args:
      blockly_ios_root: Root directory of the Blockly iOS repo.
      blockly_web_root: Root directory of the Blockly web repo.
    """

    ios_messages_dir = path.realpath(
        path.join(blockly_ios_root, "Resources/Localized/Messages"))
    web_json_dir = path.realpath(path.join(blockly_web_root, "msg/json"))
    ios_constants_file_name = "bky_constants.json"
    ios_messages_file_name = "bky_messages.json"
    ios_synonyms_file_name = "bky_synonyms.json"

    # Copy constants and synonyms directly from web repo to iOS dir
    shutil.copyfile(path.join(web_json_dir, "constants.json"),
                    path.join(ios_messages_dir, ios_constants_file_name))
    shutil.copyfile(path.join(web_json_dir, "synonyms.json"),
                    path.join(ios_messages_dir, ios_synonyms_file_name))

    # The English JSON file contains all possible messages in Blockly.
    # Use it as a basis for re-building all other localized JSON files
    # (whose original files may not contain translations for all keys).
    all_messages = read_json_file(path.join(web_json_dir, "en.json"))

    # Get list of JSON files from web repo
    json_files = []
    for (dirpath, dirnames, filenames) in walk(web_json_dir):
        json_files.extend(filenames)
        break

    # Create corresponding JSON files in iOS for those web files
    for json_file in json_files:
        if (not json_file.endswith(".json")
                or json_file in ["qqq.json", "constants.json", "synonyms.json"]):
            # Ignore these files.
            continue

        # Check if localization has been set up for this language in Blockly iOS
        language_code = mapped_language_code(json_file.replace(".json", ""))
        ios_language_dir = path.join(ios_messages_dir,
                                     language_code + ".lproj")

        if language_code in [
                "ab", "ba", "bcc", "diq", "hrx", "ia", "lki", "oc", "pms",
                "sc", "sd", "shn", "tcy", "tl", "tlh"
        ]:
            print """Skipping "{0}", which is an unsupported language code \
in iOS.""".format(language_code)
            continue
        elif not path.exists(ios_language_dir):
            # Skip this language since it needs to be setup in the iOS library first.
            print """[WARNING] Skipping "{0}" since its localization hasn't been set \
up in Blockly iOS.
To fix: In Xcode, go to Project Settings > Info > Localizations, and add \
localization for "{1}".""".format(json_file, language_code)
            continue

        # Create a JSON dictionary that starts with all messages as a base.
        all_localized_messages = all_messages.copy()

        # Now overwrite those messages with any found for this language.
        web_json_path = path.join(web_json_dir, json_file)
        all_localized_messages.update(read_json_file(web_json_path))

        # Output JSON dictionary to iOS file
        ios_json_path = path.join(ios_language_dir, ios_messages_file_name)
        with open(ios_json_path, "w") as outfile:
            json.dump(all_localized_messages,
                      outfile,
                      sort_keys=True,
                      indent=2,
                      separators=(',', ': '))

    print "Finished updating localization files."
Example #16
File: main.py Project: hnmx4/PeNRec
# -*- coding:utf-8 -*-

import modeling
import numpy as np
import matplotlib.pyplot as plot
import pprint

from os.path import join, abspath, dirname
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from common import read_json_file

model = modeling.create_word2vec_model()

nouns = read_json_file('nouns')

features = np.empty([0, 200], float)
for noun in nouns:
    features = np.append(features,
                         np.array(model[noun])[np.newaxis, :],
                         axis=0)

# PCA
pca = PCA(n_components=2)
pc_features = pca.fit_transform(features)

# K-means
kmeans = KMeans(n_clusters=6, random_state=10).fit(pc_features)

labels = kmeans.labels_
cluster = {noun: label for label, noun in zip(labels, nouns)}
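matplotlib is imported as plot but never used above; a sketch of how the clustered PCA features could be visualized (this step and the output file name are additions, not part of the original script):

# Scatter the PCA-projected nouns, colored by k-means cluster label.
plot.scatter(pc_features[:, 0], pc_features[:, 1], c=labels)
for noun, (x, y) in zip(nouns, pc_features):
    plot.annotate(noun, (x, y), fontsize=8)
plot.savefig(join(dirname(abspath(__file__)), 'clusters.png'))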
Example #17
def main():
    """Generate .js files defining Blockly core and language messages."""

    # Process command-line arguments.
    parser = argparse.ArgumentParser(description='Convert JSON files to JS.')
    parser.add_argument('--source_lang',
                        default='en',
                        help='ISO 639-1 source language code')
    parser.add_argument('--source_lang_file',
                        default=os.path.join('json', 'en.json'),
                        help='Path to .json file for source language')
    parser.add_argument('--source_synonym_file',
                        default=os.path.join('json', 'synonyms.json'),
                        help='Path to .json file with synonym definitions')
    parser.add_argument('--output_dir',
                        default='js/',
                        help='relative directory for output files')
    parser.add_argument('--key_file',
                        default='keys.json',
                        help='relative path to input keys file')
    parser.add_argument('--min_length',
                        default=30,
                        help='minimum line length (not counting last line)')
    parser.add_argument('--max_length',
                        default=50,
                        help='maximum line length (not guaranteed)')
    parser.add_argument('files', nargs='+', help='input files')
    args = parser.parse_args()
    if not args.output_dir.endswith(os.path.sep):
        args.output_dir += os.path.sep

    # Read in source language .json file, which provides any values missing
    # in target languages' .json files.
    source_defs = read_json_file(os.path.join(os.curdir,
                                              args.source_lang_file))
    sorted_keys = sorted(source_defs.keys())

    # Read in synonyms file, which must be output in every language.
    synonym_defs = read_json_file(
        os.path.join(os.curdir, args.source_synonym_file))
    synonym_text = '\n'.join([
        'Blockly.Msg.{0} = Blockly.Msg.{1};'.format(key, synonym_defs[key])
        for key in synonym_defs
    ])

    # Create each output file.
    for arg_file in args.files:
        (_, filename) = os.path.split(arg_file)
        target_lang = filename[:filename.index('.')]
        if target_lang not in ('qqq', 'keys', 'synonyms'):
            target_defs = read_json_file(os.path.join(os.curdir, arg_file))

            # Output file.
            outname = os.path.join(os.curdir, args.output_dir,
                                   target_lang + '.js')
            with codecs.open(outname, 'w', 'utf-8') as outfile:
                outfile.write(
                    """// This file was automatically generated.  Do not modify.

'use strict';

goog.provide('Blockly.Msg.{0}');

goog.require('Blockly.Msg');

""".format(target_lang))
                # For each key in the source language file, output the target value
                # if present; otherwise, output the source language value with a
                # warning comment.
                for key in sorted_keys:
                    if key in target_defs:
                        value = target_defs[key]
                        comment = ''
                        del target_defs[key]
                    else:
                        value = source_defs[key]
                        comment = '  // untranslated'
                    value = value.replace('"', '\\"')
                    outfile.write(u'Blockly.Msg.{0} = "{1}";{2}\n'.format(
                        key, value, comment))

                # Announce any keys defined only for target language.
                if target_defs:
                    extra_keys = [
                        key for key in target_defs if key not in synonym_defs
                    ]
                    synonym_keys = [
                        key for key in target_defs if key in synonym_defs
                    ]
                    if extra_keys:
                        print('These extra keys appeared in {0}: {1}'.format(
                            filename, ', '.join(extra_keys)))
                    if synonym_keys:
                        print('These synonym keys appeared in {0}: {1}'.format(
                            filename, ', '.join(synonym_keys)))

                outfile.write(synonym_text)

            print('Created {0}.'.format(outname))
Example #18
def main():
    """Generate .js files defining Blockly core and language messages."""

    # Process command-line arguments.
    parser = argparse.ArgumentParser(description="Convert JSON files to JS.")
    parser.add_argument("--source_lang", default="en", help="ISO 639-1 source language code")
    parser.add_argument(
        "--source_lang_file", default=os.path.join("json", "en.json"), help="Path to .json file for source language"
    )
    parser.add_argument(
        "--source_synonym_file",
        default=os.path.join("json", "synonyms.json"),
        help="Path to .json file with synonym definitions",
    )
    parser.add_argument("--output_dir", default="js/", help="relative directory for output files")
    parser.add_argument("--key_file", default="keys.json", help="relative path to input keys file")
    parser.add_argument("--min_length", default=30, help="minimum line length (not counting last line)")
    parser.add_argument("--max_length", default=50, help="maximum line length (not guaranteed)")
    parser.add_argument("files", nargs="+", help="input files")
    args = parser.parse_args()
    if not args.output_dir.endswith(os.path.sep):
        args.output_dir += os.path.sep

    # Read in source language .json file, which provides any values missing
    # in target languages' .json files.
    source_defs = read_json_file(os.path.join(os.curdir, args.source_lang_file))
    sorted_keys = sorted(source_defs.keys())

    # Read in synonyms file, which must be output in every language.
    synonym_defs = read_json_file(os.path.join(os.curdir, args.source_synonym_file))
    synonym_text = "\n".join(
        ["Blockly.Msg.{0} = Blockly.Msg.{1};".format(key, synonym_defs[key]) for key in synonym_defs]
    )

    # Create each output file.
    for arg_file in args.files:
        (_, filename) = os.path.split(arg_file)
        target_lang = filename[: filename.index(".")]
        if target_lang not in ("qqq", "keys", "synonyms"):
            target_defs = read_json_file(os.path.join(os.curdir, arg_file))

            # Output file.
            outname = os.path.join(os.curdir, args.output_dir, target_lang + ".js")
            with codecs.open(outname, "w", "utf-8") as outfile:
                outfile.write(
                    """// This file was automatically generated.  Do not modify.

'use strict';

goog.provide('Blockly.Msg.{0}');

goog.require('Blockly.Msg');

""".format(
                        target_lang
                    )
                )
                # For each key in the source language file, output the target value
                # if present; otherwise, output the source language value with a
                # warning comment.
                for key in sorted_keys:
                    if key in target_defs:
                        value = target_defs[key]
                        comment = ""
                        del target_defs[key]
                    else:
                        value = source_defs[key]
                        comment = "  // untranslated"
                    value = value.replace('"', '\\"')
                    outfile.write(u'Blockly.Msg.{0} = "{1}";{2}\n'.format(key, value, comment))

                # Announce any keys defined only for target language.
                if target_defs:
                    extra_keys = [key for key in target_defs if key not in synonym_defs]
                    synonym_keys = [key for key in target_defs if key in synonym_defs]
                    if extra_keys:
                        print("These extra keys appeared in {0}: {1}".format(filename, ", ".join(extra_keys)))
                    if synonym_keys:
                        print("These synonym keys appeared in {0}: {1}".format(filename, ", ".join(synonym_keys)))

                outfile.write(synonym_text)

            print("Created {0}.".format(outname))
Example #19
def main():
  """Generate .js files defining Blockly core and language messages."""

  # Process command-line arguments.
  parser = argparse.ArgumentParser(description='Convert JSON files to JS.')
  parser.add_argument('--source_lang', default='en',
                      help='ISO 639-1 source language code')
  parser.add_argument('--source_lang_file',
                      default=os.path.join('json', 'en.json'),
                      help='Path to .json file for source language')
  parser.add_argument('--source_synonym_file',
                      default=os.path.join('json', 'synonyms.json'),
                      help='Path to .json file with synonym definitions')
  parser.add_argument('--output_dir', default='js/',
                      help='relative directory for output files')
  parser.add_argument('--key_file', default='keys.json',
                      help='relative path to input keys file')
  parser.add_argument('--quiet', action='store_true', default=False,
                      help='do not write anything to standard output')
  parser.add_argument('files', nargs='+', help='input files')
  args = parser.parse_args()
  if not args.output_dir.endswith(os.path.sep):
    args.output_dir += os.path.sep

  # Read in source language .json file, which provides any values missing
  # in target languages' .json files.
  source_defs = read_json_file(os.path.join(os.curdir, args.source_lang_file))
  # Make sure the source file doesn't contain a newline or carriage return.
  for key, value in source_defs.items():
    if _NEWLINE_PATTERN.search(value):
      print('ERROR: definition of {0} in {1} contained a newline character.'.
            format(key, args.source_lang_file))
      sys.exit(1)
  sorted_keys = sorted(source_defs.keys())

  # Read in synonyms file, which must be output in every language.
  synonym_defs = read_json_file(os.path.join(
      os.curdir, args.source_synonym_file))
  synonym_text = '\n'.join(['Blockly.Msg.{0} = Blockly.Msg.{1};'.format(
      key, synonym_defs[key]) for key in synonym_defs])

  # Create each output file.
  for arg_file in args.files:
    (_, filename) = os.path.split(arg_file)
    target_lang = filename[:filename.index('.')]
    if target_lang not in ('qqq', 'keys', 'synonyms'):
      target_defs = read_json_file(os.path.join(os.curdir, arg_file))
      # If there's a '\n' or '\r', remove it and print a warning.
      for key, value in target_defs.items():
        if _NEWLINE_PATTERN.search(value):
          print('WARNING: definition of {0} in {1} contained '
                'a newline character.'.
                format(key, arg_file))
          target_defs[key] = _NEWLINE_PATTERN.sub(' ', value)

      # Output file.
      outname = os.path.join(os.curdir, args.output_dir, target_lang + '.js')
      with codecs.open(outname, 'w', 'utf-8') as outfile:
        outfile.write(
            """// This file was automatically generated.  Do not modify.

'use strict';

goog.provide('Blockly.Msg.{0}');

goog.require('Blockly.Msg');

""".format(target_lang.replace('-', '.')))
        # For each key in the source language file, output the target value
        # if present; otherwise, output the source language value with a
        # warning comment.
        for key in sorted_keys:
          if key in target_defs:
            value = target_defs[key]
            comment = ''
            del target_defs[key]
          else:
            value = source_defs[key]
            comment = '  // untranslated'
          value = value.replace('"', '\\"')
          outfile.write(u'Blockly.Msg.{0} = "{1}";{2}\n'.format(
              key, value, comment))

        # Announce any keys defined only for target language.
        if target_defs:
          extra_keys = [key for key in target_defs if key not in synonym_defs]
          synonym_keys = [key for key in target_defs if key in synonym_defs]
          if not args.quiet:
            if extra_keys:
              print('These extra keys appeared in {0}: {1}'.format(
                  filename, ', '.join(extra_keys)))
            if synonym_keys:
              print('These synonym keys appeared in {0}: {1}'.format(
                  filename, ', '.join(synonym_keys)))

        outfile.write(synonym_text)

      if not args.quiet:
        print('Created {0}.'.format(outname))
Example #20
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("json_file", help=".json file containing the lab data")
    parser.add_argument(
        '--order',
        '-o',
        help=
        'space separated string, the order of the algorithm column shown in the table',
        type=str)
    parser.add_argument('--attribute',
                        '-a',
                        help='the attribute to be computed and shown')
    parser.add_argument(
        '--stats',
        '-s',
        help=
        'the statistics to be shown. use & for multiple stats like max&avg',
        required=True)
    parser.add_argument("--domain-detail",
                        "-d",
                        help="print the detailed per domain data",
                        dest='domain',
                        action='store_true')
    parser.add_argument("--problem-detail",
                        "-p",
                        help="print the detailed per problem data",
                        dest='problem',
                        action='store_true')
    parser.add_argument(
        "--filter",
        "-f",
        help="filter the intersection domains and problems only",
        dest='filter',
        action='store_true')
    parser.add_argument("--latex",
                        "-l",
                        help="use latex output format",
                        dest='latex',
                        action='store_true')
    parser.set_defaults(domain=False, problem=False, filter=False, latex=False)
    args = parser.parse_args()

    supported = {'max': get_max_data, 'avg': get_avg_data}
    stats = args.stats.split('&')
    for s in stats:
        if s not in supported:
            print(s + ' is not supported.')
            print('the supported stats are as follows:')
            print(list(supported.keys()))
            return

    if args.order is not None:
        args.order = args.order.split(' ')

    raw_data, problems = read_json_file(args.json_file, args.filter, False,
                                        SUITE_NONTRIVIAL_UNSOLVABLE)
    data = {}
    for s, f in supported.items():
        if s in stats:
            data[s] = f(raw_data, args.attribute)
    if args.domain:
        domain_table = get_table_detail_per_domain(data, problems)
        print_data(domain_table, stats, args.order, args.latex)
    if args.problem:
        problem_table = get_table_detail_per_problem(data)
        print_data(problem_table, stats, args.order, args.latex)
    total_table = get_table_total_per_algo(data, problems)
    print_data(total_table, stats, args.order, args.latex)
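A usage sketch for this report script; the script, file, and attribute names are illustrative, but the flags match the parser above:

#   python report.py lab_data.json --stats 'max&avg' --attribute total_time \
#       --domain-detail --filter --latex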