Example #1
def build_blockly_file(dataset, destination):
    indexed_values = {index: get_values_grouped_by_index(dataset.values, index)
                      for index in dataset.indexes}
    destination += snake_case(dataset.name) + "_blockly.js"
    code = env.get_template('blockly.js').stream(dataset=dataset, indexed_values=indexed_values)
    code.dump(destination)
    return destination
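
Note: the builders in this listing rely on a module-level Jinja2 environment (env) and a snake_case helper that are not shown. The sketch below is a guess at that context, assuming a local templates/ directory and a regex-based snake_case; neither detail is taken from the corgis project itself.

import re
from jinja2 import Environment, FileSystemLoader

# Jinja2 environment behind the env.get_template(...).stream(...).dump(...) calls above.
env = Environment(loader=FileSystemLoader('templates'))  # template directory is an assumption

def snake_case(name):
    # Hypothetical helper: normalizes a dataset name, e.g. "Broadway Shows" -> "broadway_shows".
    return re.sub(r'[^a-z0-9]+', '_', name.lower()).strip('_')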
Example #2
def build_data_files(dataset, destination):
    name = snake_case(dataset.name)

    json_path = name + ".js"
    json_bar_path = name + "_bar.js"

    data = list(dataset.as_dictionary_of_lists(LANGUAGE_TYPE_NAMES).values())
    # TODO: Avoid removing outliers based on list from metadata file?
    data = json.dumps(data, indent=2)
    md = env.get_template('dataset.js').stream(dataset=dataset, data=data)
    md.dump(destination + json_path)

    # Aggregate the values grouped by each index to build the bar-chart data
    bar_data = []
    for index in dataset.indexes:
        indexed_values = get_values_grouped_by_index(dataset.values, index)
        aggregated_values = aggregate_statistics(indexed_values)
        best_indexes = get_best_indexes(aggregated_values)
        bar_data.append({
            'data': aggregated_values,
            'name': index.name,
            # 'indexes': [k.replace(',', '') for k in indexed_values.keys()],
            'indexes': [str(k) for k in indexed_values.keys()],
            'best_indexes': [str(k) for k in best_indexes],
            'pretty': index.name
        })

    bar_data = json.dumps(bar_data, indent=2)
    md = env.get_template('dataset_bar.js').stream(dataset=dataset, data=bar_data)
    md.dump(destination + json_bar_path)

    return [destination + json_path, destination + json_bar_path]
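
For reference, each entry appended to bar_data above is a plain dictionary; the sketch below shows its keys with placeholder values, since the actual output of aggregate_statistics and get_best_indexes is not part of this listing.

example_entry = {
    'data': {},                   # aggregated statistics for this index (structure not shown here)
    'name': 'Year',               # index name (placeholder value)
    'indexes': ['2000', '2001'],  # stringified keys of the grouped values (placeholders)
    'best_indexes': ['2001'],     # stringified keys picked by get_best_indexes (placeholder)
    'pretty': 'Year',             # duplicated from 'name' in the code above
}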
Example #3
def build(dataset, configuration):
    # Prelude
    start = time.time()

    # Processing
    dataset_name = snake_case(dataset.name)
    destination = configuration.destination.format(dataset=dataset_name,
                                                   format='visualizer')
    makedirs(destination, exist_ok=True)
    files = [*build_data_files(dataset, destination),
             build_image_files(dataset, configuration),
             build_website_file(dataset, destination, env, LANGUAGE_INFO)]

    # Wrap up
    duration = time.time() - start
    return BuildReport(dataset, 'visualizer', duration, files, 'success')
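
The BuildReport returned here and in the next example is not defined in this listing; a namedtuple along the following lines would match the positional arguments used, though the real class may differ.

from collections import namedtuple

# Assumed shape, inferred only from BuildReport(dataset, 'visualizer', duration, files, 'success').
BuildReport = namedtuple('BuildReport', ['dataset', 'format', 'duration', 'files', 'status'])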
Example #4
def build(dataset, configuration):
    # Prelude
    start = time.time()

    # Processing
    dataset_name = snake_case(dataset.name)
    destination = configuration.destination.format(dataset=dataset_name,
                                                   format='blockpy')
    makedirs(destination, exist_ok=True)
    files = [build_blockly_file(dataset, destination),
             build_skulpt_file(dataset, destination),
             build_complete_file(dataset, destination),
             build_slimmer_file(dataset, destination),
             build_image_files(dataset, configuration),
             build_website_file(dataset, destination, env, LANGUAGE_INFO)]

    # Wrap up
    duration = time.time() - start
    return BuildReport(dataset, 'blockpy', duration, files, 'success')
Example #5
def build_skulpt_file(dataset, destination):
    destination += snake_case(dataset.name) + "_skulpt.js"
    tifa_definitions = build_tifa_definitions(dataset)
    code = env.get_template('skulpt.js').stream(dataset=dataset, tifa_definitions=tifa_definitions)
    code.dump(destination)
    return destination
Example #6
def build_complete_file(dataset, destination):
    destination += snake_case(dataset.name) + "_complete.js"
    code = env.get_template('complete.js').stream(dataset=dataset)
    code.dump(destination)
    return destination
Example #7
def build_slimmer_file(dataset, destination):
    destination += snake_case(dataset.name) + "_dataset.js"
    data = dataset.as_dictionary_of_lists(LANGUAGE_TYPE_NAMES)
    code = env.get_template('dataset.js').stream(dataset=dataset, data=data)
    code.dump(destination)
    return destination
Example #8
def build_json_file(dataset: Dataset, destination):
    destination += snake_case(dataset.name) + ".json"
    with open(destination, 'w') as output_file:
        json.dump(dataset.nested_values, output_file, indent=2)
    return destination
Example #9
File: common.py  Project: corgis-edu/corgis
def build_website_file(dataset, destination, env, language_info):
    destination += snake_case(dataset.name) + ".md"
    md = env.get_template('index.md').stream(dataset=dataset, **language_info)
    md.dump(destination)
    return destination