def show_result(request):
    settings = request.registry.settings
    data_folder = settings['data.folder']
    tool_definition = settings['tool_definition']
    result_id = basename(request.matchdict['id'])
    result_response_folder = join(
        data_folder, 'results', result_id, 'response')
    if not exists(result_response_folder):
        raise HTTPNotFound
    result_arguments, result_properties = load_result_configuration(
        result_response_folder)
    data_type_by_suffix = get_data_type_by_suffix()
    tool_items = get_data_items(
        result_arguments, tool_definition, data_type_by_suffix)
    result_errors = get_data_items(merge_dictionaries(
        result_properties.pop('standard_errors', {}),
        result_properties.pop('type_errors', {}),
    ), tool_definition, data_type_by_suffix)
    result_items = get_data_items(
        result_properties.pop('standard_outputs', {}),
        tool_definition, data_type_by_suffix)
    result_properties = get_data_items(
        result_properties, tool_definition, data_type_by_suffix)
    return merge_dictionaries(
        get_template_variables(tool_definition, 'tool', tool_items),
        get_template_variables(tool_definition, 'result', result_items), {
            'data_types': set(x.data_type for x in tool_items + result_items),
            'tool_id': 1,
            'result_id': result_id,
            'result_errors': result_errors,
            'result_properties': result_properties,
        })
def prepare_cost_summary(cost_by_year, d, keywords, prefix):
    """
    Summarize costs using the values provided in *d*
    """
    discounted_cost = compute_discounted_cash_flow(
        cost_by_year, keywords['financing_year'],
        keywords['discount_rate_as_percent_of_cash_flow_per_year'])
    levelized_cost = divide_safely(
        discounted_cost, keywords['discounted_consumption_in_kwh'], 0)
    return merge_dictionaries(d, {
        prefix + 'cost_by_year': cost_by_year,
        prefix + 'initial_cost': sum([
            sum_by_suffix(d, '_raw_cost'),
            sum_by_suffix(d, '_installation_cost'),
        ]),
        prefix + 'recurring_fixed_cost_per_year': sum([
            sum_by_suffix(d, '_maintenance_cost_per_year'),
            sum_by_suffix(d, '_replacement_cost_per_year'),
        ]),
        prefix + 'recurring_variable_cost_per_year': sum([
            d.get('final_electricity_production_cost_per_year', 0),
        ]),
        prefix + 'discounted_cost': discounted_cost,
        prefix + 'levelized_cost_per_kwh_consumed': levelized_cost,
    })
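# Usage sketch for prepare_cost_summary (illustrative only): the cost values
# and the 'grid_' prefix below are hypothetical, and compute_discounted_cash_flow,
# divide_safely and sum_by_suffix are assumed to be the helpers this module
# already uses, with sum_by_suffix adding the values whose keys end in the
# given suffix. The summary keys are built by concatenating the prefix with a
# fixed set of suffixes, e.g. 'grid_' + 'initial_cost'.
example_costs = {
    'grid_mv_line_raw_cost': 1000.0,
    'grid_mv_line_installation_cost': 200.0,
    'grid_mv_line_maintenance_cost_per_year': 50.0,
    'grid_mv_line_replacement_cost_per_year': 25.0,
}
example_keywords = {
    'financing_year': 2016,
    'discount_rate_as_percent_of_cash_flow_per_year': 10,
    'discounted_consumption_in_kwh': 12000.0,
}
example_summary = prepare_cost_summary(
    [1275.0, 75.0, 75.0], example_costs, example_keywords, prefix='grid_')
# Under the sum_by_suffix assumption above,
# example_summary['grid_initial_cost'] == 1200.0 (raw + installation) and
# example_summary['grid_recurring_fixed_cost_per_year'] == 75.0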
def get_result_template_variables(result, result_folder):
    result_configuration = ResultConfiguration(result_folder)
    tool_definition = result_configuration.tool_definition
    result_arguments = result_configuration.result_arguments
    result_properties = result_configuration.result_properties
    tool_items = get_data_items(result_arguments, tool_definition)
    result_items = get_data_items(
        result_properties.pop('standard_outputs', {}), tool_definition)
    result_errors = get_data_items(merge_dictionaries(
        result_properties.pop('standard_errors', {}),
        result_properties.pop('type_errors', {})), tool_definition)
    result_properties = get_data_items(result_properties, tool_definition)
    tool = result.tool
    tool.title, tool.template_parts = parse_template_from(
        tool_definition, 'tool', tool_items)
    result.title, result.template_parts = parse_template_from(
        tool_definition, 'result', result_items)
    return {
        'data_types': set(x.data_type for x in tool_items + result_items),
        'tool': tool,
        'result': result,
        'result_errors': result_errors,
        'result_properties': result_properties,
    }
def get_tool_template_variables(tool, tool_definition):
    tool_arguments = get_tool_arguments(tool_definition)
    tool_items = get_data_items(tool_arguments, tool_definition)
    return merge_dictionaries(
        get_template_variables(tool_definition, 'tool', tool_items), {
            'data_types': set(x.data_type for x in tool_items),
            'tool': tool,
        })
def get_data_type_by_suffix(data_type_by_suffix=None):
    d = {}
    x_manager = ExtensionManager('crosscompute.types')
    for x in x_manager.extensions:
        data_type = x.plugin
        for suffix in data_type.suffixes:
            d[suffix] = data_type
    return merge_dictionaries(d, data_type_by_suffix or {})
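# Usage sketch for get_data_type_by_suffix (illustrative only). Stevedore's
# ExtensionManager loads every plugin registered under the 'crosscompute.types'
# entry point and each plugin advertises the file suffixes it handles; passing
# a dictionary lets the caller override or extend those defaults.
# HypotheticalTableType is a made-up stand-in, not a real plugin.
class HypotheticalTableType(object):
    suffixes = ['csv', 'tsv']

example_data_type_by_suffix = get_data_type_by_suffix({
    'csv': HypotheticalTableType})
# example_data_type_by_suffix['csv'] is HypotheticalTableType, while the other
# suffixes keep whatever plugin was registered for them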
def show_tool(request):
    settings = request.registry.settings
    data_type_by_suffix = settings['data_type_by_suffix']
    tool_definition = settings['tool_definition']
    tool_arguments = get_tool_arguments(tool_definition)
    tool_items = get_data_items(
        tool_arguments, tool_definition, data_type_by_suffix)
    return merge_dictionaries(
        get_template_variables(tool_definition, 'tool', tool_items), {
            'data_types': set(x.data_type for x in tool_items),
            'tool_id': 1,
        })
def run(main_functions, g):
    g['infrastructure_graph'] = get_graph_from_table(g['demand_point_table'])
    for f in main_functions:
        print(f.func_name)
        if '_total_' in f.func_name:
            g.update(compute(f, g))
            continue
        for node_id, node_d in g['infrastructure_graph'].cycle_nodes():
            v = merge_dictionaries(node_d, {
                'node_id': node_id,
                'local_overrides': dict(g['demand_point_table'].ix[node_id])})
            node_d.update(compute(f, v, g))
    return g
def get_result_template_variables(result, result_folder):
    result_configuration = ResultConfiguration(result_folder)
    tool_definition = result_configuration.tool_definition
    result_arguments = result_configuration.result_arguments
    result_properties = result_configuration.result_properties
    tool_items = get_data_items(result_arguments, tool_definition)
    result_errors = get_data_items(merge_dictionaries(
        result_properties.pop('standard_errors', {}),
        result_properties.pop('type_errors', {})), tool_definition)
    result_items = get_data_items(
        result_properties.pop('standard_outputs', {}), tool_definition)
    result_properties = get_data_items(result_properties, tool_definition)
    return merge_dictionaries(
        get_template_variables(tool_definition, 'tool', tool_items),
        get_template_variables(tool_definition, 'result', result_items), {
            'data_types': set(x.data_type for x in tool_items + result_items),
            'tool': result.tool,
            'result': result,
            'result_errors': result_errors,
            'result_properties': result_properties,
        })
def compute_raw(f, l, g=None):
    if not g:
        g = {}
    # If the function wants every argument, provide every argument
    argument_specification = inspect.getargspec(f)
    if argument_specification.keywords:
        return f(**merge_dictionaries(g, l))
    # Otherwise, provide only requested arguments
    keywords = {}
    for argument_name in argument_specification.args:
        argument_value = l.get(argument_name, g.get(argument_name))
        if argument_value is None:
            raise ValidationError(argument_name, 'required')
        keywords[argument_name] = argument_value
    return f(**keywords)
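# Usage sketch for compute_raw (illustrative only; add_tax and describe are
# hypothetical). A function that accepts **keywords receives the merged global
# and local dictionaries; otherwise each named argument is looked up in the
# local dictionary first, then in the global one, and a missing argument
# raises ValidationError.
def add_tax(price, tax_rate):
    return price * (1 + tax_rate)

def describe(**keywords):
    return sorted(keywords)

compute_raw(add_tax, {'price': 100.0}, {'tax_rate': 0.08})   # returns 108.0
compute_raw(describe, {'price': 100.0}, {'tax_rate': 0.08})  # gets both keys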
def run_script(
        tool_definition, result_arguments, result_folder, target_folder=None,
        environment=None):
    timestamp, environment = time.time(), environment or {}
    if 'target_folder' in tool_definition['argument_names']:
        y = make_folder(abspath(target_folder or join(result_folder, 'y')))
        result_arguments = OrderedDict(result_arguments, target_folder=y)
    # Record
    result_configuration = ResultConfiguration(result_folder)
    result_configuration.save_tool_location(tool_definition)
    result_configuration.save_result_arguments(result_arguments, environment)
    # Run
    command_terms = split_arguments(render_command(
        tool_definition['command_template'],
        result_arguments).replace('\n', ' '))
    result_properties = OrderedDict()
    try:
        with cd(tool_definition['configuration_folder']):
            command_process = subprocess.Popen(
                command_terms, stdout=subprocess.PIPE,
                stderr=subprocess.PIPE, env=merge_dictionaries(
                    environment, SCRIPT_ENVIRONMENT))
    except OSError:
        standard_output, standard_error = None, 'Command not found'
    else:
        standard_output, standard_error = [
            x.rstrip().decode('utf-8')
            for x in command_process.communicate()]
        if command_process.returncode:
            result_properties['return_code'] = command_process.returncode
    # Save
    result_properties.update(_process_streams(
        standard_output, standard_error, result_folder, tool_definition))
    result_properties['execution_time_in_seconds'] = time.time() - timestamp
    result_configuration.save_result_properties(result_properties)
    result_configuration.save_result_script(tool_definition, result_arguments)
    if 'target_folder' in tool_definition['argument_names']:
        link_path(join(result_folder, 'y'), result_arguments['target_folder'])
    return result_properties
def estimate_external_distribution_cost(
        node_id, latitude, longitude, line_length_adjustment_factor,
        grid_mv_line_cost_per_meter_by_year, infrastructure_graph,
        **keywords):
    d = defaultdict(float)
    key = 'grid_mv_line_adjusted_length_in_meters'
    relative_keys = [
        'grid_mv_line_raw_cost_per_meter',
        'grid_mv_line_installation_cost_per_meter',
        'grid_mv_line_maintenance_cost_per_meter_per_year',
        'grid_mv_line_replacement_cost_per_meter_per_year',
        'grid_mv_line_final_cost_per_meter_per_year',
        'grid_mv_line_discounted_cost_per_meter',
    ]
    # Note that node_id is real but edge_node_id can be fake
    for edge_node_id, edge_d in infrastructure_graph.edge[node_id].items():
        edge_node_d = infrastructure_graph.node[edge_node_id]
        edge_node_ll = edge_node_d['latitude'], edge_node_d['longitude']
        line_length = get_distance((latitude, longitude), edge_node_ll).meters
        if 'name' in edge_node_d:
            # If both nodes are real, then the computation will reappear when
            # we process the other node, so we halve it here
            line_length /= 2.
        line_adjusted_length = line_length * line_length_adjustment_factor
        # Aggregate over each node that is connected to the edge
        edge_d[key] = edge_d.get(key, 0) + line_adjusted_length
        d[key] += line_adjusted_length
        for relative_key in relative_keys:
            cost_per_meter = keywords[relative_key]
            x = cost_per_meter * line_adjusted_length
            k = relative_key.replace('_per_meter', '')
            edge_d[k] = edge_d.get(k, 0) + x
            d[k] += x
    line_adjusted_length = d.get(key, 0)
    return merge_dictionaries(d, {
        'external_distribution_cost_by_year':
            line_adjusted_length * grid_mv_line_cost_per_meter_by_year,
    })
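# Worked example for the halving above (illustrative numbers): if a 100 meter
# MV edge connects two real nodes and line_length_adjustment_factor is 1.25,
# each endpoint records 100 / 2 * 1.25 = 62.5 adjusted meters, so the edge
# carries the full 125 adjusted meters once both endpoints are processed; an
# edge to a fake node (one without a 'name') is charged entirely to the real
# node.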
def load_files(g):
    file_key_pattern = re.compile(r'(.*)_(\w+)_path')
    file_value_by_name = {}
    for k, v in g.items():
        try:
            key_base, key_type = file_key_pattern.match(k).groups()
        except AttributeError:
            continue
        try:
            if key_type == 'text':
                name = key_base
                value = load_text(v)
            elif key_type == 'table':
                name = key_base + '_table'
                value = load_table(v)
            elif key_type == 'geotable':
                name = key_base + '_geotable'
                value = load_geotable(v)
            else:
                continue
        except UnsupportedFormat as e:
            raise ValidationError(k, e)
        file_value_by_name[name] = value
    return merge_dictionaries(g, file_value_by_name)
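# Usage sketch for load_files (illustrative only; the paths are hypothetical
# and load_text, load_table and load_geotable are assumed to be the loaders
# this module already uses). Keys whose type part is text, table or geotable
# gain a loaded counterpart, e.g. 'demand_point_table_path' yields
# 'demand_point_table' and 'notes_text_path' yields 'notes'; every other key
# passes through unchanged.
example_g = load_files({
    'demand_point_table_path': 'demand_points.csv',
    'notes_text_path': 'notes.txt',
    'financing_year': 2016,
})
# example_g now also contains 'demand_point_table' and 'notes'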