Example #1
    def run(self):
        """
        run metric calculations and then the minimum spanning forest algorithm
        on the inputs and write output based on the configuration
        """

        # make output dir if not exists
        if not os.path.exists(self.output_directory):
            os.makedirs(self.output_directory)

        metric_config = json.load(open(
            self.config['metric_model_parameters_file']))
        # read in metrics and setup dataset_store
        demand_proj = csv_projection(self.config['demand_nodes_file'])
        target_path = os.path.join(self.output_directory, "dataset.db")
        self.store = dataset_store.create(target_path,
            self.config['demand_nodes_file'])

        metric_model = metric.getModel(self.config['metric_model'])
        metric_vbobs = self._run_metric_model(metric_model, metric_config)
        demand_nodes = self._get_demand_nodes(input_proj=demand_proj)
        existing, msf = self._build_network(demand_nodes)
        self._store_networks(msf, existing)
        metric_vbobs = self._update_metrics(metric_model, metric_vbobs)
        self._save_output(metric_vbobs, metric_config, metric_model)
Example #2
 def validateParameters(self):
     'Warn if parameters are missing or unknown'
     # Initialize
     scenarioInput = self.input
     pushWarning = lambda x: store.pushWarning(self.id, x)
     configurationPacks = [
         ('Metric', metric.getModel(scenarioInput['metric model name']), scenarioInput['metric configuration']),
         ('Network', network.getModel(scenarioInput['network model name']), scenarioInput['network configuration']),
     ]
     # For each configurationPack,
     for configurationName, configurationModel, configuration in configurationPacks:
         # Load default
         valueByOptionBySection = configurationModel.VariableStore().getValueByOptionBySection()
         # Make sure that the configuration has the same key hierarchy as the default
         for section, valueByOption in valueByOptionBySection.iteritems():
             if section not in configuration:
                 pushWarning('%s section missing: %s' % (configurationName, section))
                 continue
             for option in valueByOption:
                 if option not in configuration[section]:
                     pushWarning('%s option missing: %s > %s' % (configurationName, section, option))
         # Make sure that the input does not have unnecessary parameters
         for section, valueByOption in configuration.iteritems():
             if section not in valueByOptionBySection:
                 pushWarning('%s section unknown: %s' % (configurationName, section))
                 continue
             for option in valueByOption:
                 if option not in valueByOptionBySection[section]:
                     pushWarning('%s option unknown: %s > %s' % (configurationName, section, option))
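
The validation above is a two-pass comparison of nested section/option dictionaries: flag every default section or option the configuration is missing, then flag every configuration section or option the defaults do not recognize. Below is a minimal standalone sketch of the same idea, assuming plain dicts and a returned list of warnings instead of store.pushWarning; the function name is hypothetical.

def find_configuration_warnings(name, default_by_option_by_section, configuration):
    'Return warnings for missing or unknown sections and options (sketch)'
    warnings = []
    # Flag defaults that the configuration does not provide
    for section, value_by_option in default_by_option_by_section.items():
        if section not in configuration:
            warnings.append('%s section missing: %s' % (name, section))
            continue
        for option in value_by_option:
            if option not in configuration[section]:
                warnings.append('%s option missing: %s > %s' % (name, section, option))
    # Flag configuration entries that the defaults do not recognize
    for section, value_by_option in configuration.items():
        if section not in default_by_option_by_section:
            warnings.append('%s section unknown: %s' % (name, section))
            continue
        for option in value_by_option:
            if option not in default_by_option_by_section[section]:
                warnings.append('%s option unknown: %s > %s' % (name, section, option))
    return warnings
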
Example #3
    def run(self):
        """
        run metric calculations and then the minimum spanning forest algorithm
        on the inputs and write output based on the configuration
        """

        # make output dir if not exists
        if not os.path.exists(self.output_directory):
            os.makedirs(self.output_directory)

        metric_config = json.load(
            open(self.config['metric_model_parameters_file']))
        # read in metrics and setup dataset_store
        demand_proj = nio.read_csv_projection(self.config['demand_nodes_file'])
        target_path = os.path.join(self.output_directory, "dataset.db")
        self.store = dataset_store.create(target_path,
                                          self.config['demand_nodes_file'])

        log.info("running metric model {}".format(self.config['metric_model']))
        metric_model = metric.getModel(self.config['metric_model'])
        metric_vbobs = self._run_metric_model(metric_model, metric_config)
        demand_nodes = self._get_demand_nodes(input_proj=demand_proj)

        existing_networks = None

        if 'existing_networks' in self.config:
            existing_networks = networker_runner.load_existing_networks(
                prefix="grid-", **self.config['existing_networks'])

        network_algorithm = self.config['network_algorithm']

        min_node_count = 0
        single_network = True
        if 'network_parameters' in self.config:
            network_params = self.config['network_parameters']
            min_node_count = network_params.get('minimum_node_count', 0)
            single_network = network_params.get('single_network', True)

        header_type = VS.HEADER_TYPE_SECTION_OPTION
        if 'output_parameters' in self.config:
            output_params = self.config['output_parameters']
            header_type = output_params.get('header_type',
                                            VS.HEADER_TYPE_SECTION_OPTION)

        log.info("building network")
        msf = networker_runner.build_network(
            demand_nodes,
            existing=existing_networks,
            min_node_count=min_node_count,
            single_network=single_network,
            network_algorithm=network_algorithm,
            one_based=True)

        log.info("writing output")
        self._store_networks(msf, existing_networks)
        metric_vbobs = self._update_metrics(metric_model, metric_vbobs)
        self._save_output(metric_vbobs,
                          metric_config,
                          metric_model,
                          header_type=header_type)
Example #4
    def run(self):
        """
        run metric calculations and then the minimum spanning forest algorithm
        on the inputs and write output based on the configuration
        """

        # make output dir if not exists
        if not os.path.exists(self.output_directory):
            os.makedirs(self.output_directory)

        metric_config = json.load(open(
            self.config['metric_model_parameters_file']))
        # read in metrics and setup dataset_store
        demand_proj = nio.read_csv_projection(self.config['demand_nodes_file'])
        target_path = os.path.join(self.output_directory, "dataset.db")
        self.store = dataset_store.create(target_path,
                                          self.config['demand_nodes_file'])

        log.info("running metric model {}".format(self.config['metric_model']))
        metric_model = metric.getModel(self.config['metric_model'])
        metric_vbobs = self._run_metric_model(metric_model, metric_config)
        demand_nodes = self._get_demand_nodes(input_proj=demand_proj)

        existing_networks = None

        if 'existing_networks' in self.config:
            existing_networks = networker_runner.load_existing_networks(
                prefix="grid-",
                **self.config['existing_networks'])

        network_algorithm = self.config['network_algorithm']

        min_node_count = 0
        single_network = True
        if 'network_parameters' in self.config:
            network_params = self.config['network_parameters']
            min_node_count = network_params.get('minimum_node_count', 0)
            single_network = network_params.get('single_network', True)

        header_type = VS.HEADER_TYPE_SECTION_OPTION
        if 'output_parameters' in self.config:
            output_params = self.config['output_parameters']
            header_type = output_params.get('header_type',
                                            VS.HEADER_TYPE_SECTION_OPTION)

        log.info("building network")
        msf = networker_runner.build_network(
            demand_nodes,
            existing=existing_networks,
            min_node_count=min_node_count,
            single_network=single_network,
            network_algorithm=network_algorithm,
            one_based=True)

        log.info("writing output")
        self._store_networks(msf, existing_networks)
        metric_vbobs = self._update_metrics(metric_model, metric_vbobs)
        self._save_output(metric_vbobs, metric_config, metric_model,
                          header_type=header_type)
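
The two run() variants above are driven entirely by self.config. Here is a sketch of the kind of dict they expect, inferred from the keys they read; the paths and the algorithm name are hypothetical placeholders, and VS is assumed to be the same variable_store module imported elsewhere in these examples.

config = {
    'demand_nodes_file': 'demand_nodes.csv',                 # placeholder path
    'metric_model_parameters_file': 'metric_params.json',    # placeholder path
    'metric_model': 'mvMax5',
    'network_algorithm': 'mod_boruvka',                      # assumed algorithm name
    # 'existing_networks': {...},  # optional kwargs passed to load_existing_networks
    'network_parameters': {
        'minimum_node_count': 2,
        'single_network': True,
    },
    'output_parameters': {
        'header_type': VS.HEADER_TYPE_SECTION_OPTION,        # the default used above
    },
}
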
Example #5
 def run(self):
     # Prepare
     scenarioInput = self.input
     scenarioFolder = self.getFolder()
     expandPath = lambda x: os.path.join(scenarioFolder, x)
     # Register demographics
     print 'Registering demographics'
     nodesPath = expandPath('nodes')
     targetPath = self.getDatasetPath()
     sourcePath = expandPath(scenarioInput['demographic file name'])
     datasetStore = dataset_store.create(targetPath, sourcePath)
     datasetStore.saveNodesSHP(nodesPath)
     datasetStore.saveNodesCSV(nodesPath)
     # Apply metric
     print 'Applying metric'
     metricModel = metric.getModel(scenarioInput['metric model name'])
     metricConfiguration = scenarioInput['metric configuration']
     metricValueByOptionBySection = datasetStore.applyMetric(metricModel, metricConfiguration)
     # Build network
     print 'Building network'
     networkModel = network.getModel(scenarioInput['network model name'])
     networkConfiguration = scenarioInput['network configuration']
     networkValueByOptionBySection = datasetStore.buildNetwork(networkModel, networkConfiguration)
     # Update metric
     print 'Updating metric'
     metricValueByOptionBySection = datasetStore.updateMetric(metricModel, metricValueByOptionBySection)
     # Save output
     print 'Saving output'
     metric.saveMetricsCSV(expandPath('metrics-global'), metricModel, metricValueByOptionBySection)
     datasetStore.saveMetricsCSV(expandPath('metrics-local'), metricModel)
     datasetStore.saveSegmentsSHP(expandPath('networks-existing'), is_existing=True)
     datasetStore.saveSegmentsSHP(expandPath('networks-proposed'), is_existing=False)
     # Bundle
     store.zipFolder(scenarioFolder + '.zip', scenarioFolder)
     # Validate
     self.validateParameters()
     # Save output
     self.output = {
         'variables': { 
             'node': dict((str(x.id), dict(input=x.input, output=x.output)) for x in datasetStore.cycleNodes()),
             'metric': metricValueByOptionBySection,
             'network': networkValueByOptionBySection,
         }, 
         'statistics': { 
             'node': datasetStore.getNodeStatistics(), 
             'metric': datasetStore.getMetricStatistics(), 
             'network': datasetStore.getNetworkStatistics(), 
         }, 
         'warnings': store.popWarnings(self.id),
     }
     # Commit
     Session.commit()
Example #6
    def test_scenarioRun(self):
        'for now, just make sure it runs'
        sourcePath = os.path.join(inputDataPath, "sample_demand_nodes.csv")
        # make output dir if not exists
        if not os.path.exists(outputDataPath):
            os.makedirs(outputDataPath)

        targetPath = os.path.join(outputDataPath, "dataset.db")
        datasetStore = dataset_store.create(targetPath, sourcePath)
        
        """
        // Sample Model Parameter JSON
        metricValueByOptionBySection = {
            'demand (household)': 
                {'household unit demand per household per year': 50}
        }
        """
        metricConfigPath = os.path.join(baseDirPath, "sample_metric_params.json")
        metricConfiguration = json.load(open(metricConfigPath, 'r'))

        """
        // Sample Model Parameter JSON
        networkValueByOptionBySection = {
            'algorithm': 
                {'minimum node count per subnetwork': 2}
        }
        """
        networkConfigPath = os.path.join(baseDirPath, "network_params.json")
        networkConfiguration = json.load(open(networkConfigPath, 'r'))

        # Run metric model
        metricModel = metric.getModel("mvMax5")
        metricValueByOptionBySection = datasetStore.applyMetric(metricModel, metricConfiguration)

        # Now that metrics (mvMax in particular) have been calculated
        # we can build the network
        networkModel = network.getModel("modKruskal")
        networkValueByOptionBySection = datasetStore.buildNetwork(networkModel, networkConfiguration)

        # Now that the network's been built (and the electrification option 
        # is chosen) run the aggregate calculations
        metricValueByOptionBySection = datasetStore.updateMetric(metricModel, metricValueByOptionBySection)

        metric.saveMetricsConfigurationCSV(os.path.join(outputDataPath, 'metrics-job-input'), metricConfiguration)
        metric.saveMetricsCSV(os.path.join(outputDataPath, 'metrics-global'), metricModel, metricValueByOptionBySection)
        datasetStore.saveMetricsCSV(os.path.join(outputDataPath, 'metrics-local'), metricModel)
        datasetStore.saveSegmentsSHP(os.path.join(outputDataPath, 'networks-proposed'), is_existing=False)
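
Examples #5 and #6 follow the same core pipeline: create a dataset_store from a demand CSV, apply a metric model, build a network, then recompute the aggregate metrics and save the results. A condensed sketch of that sequence, assuming the networkplanner np.lib modules and placeholder file paths:

import json
import os

from np.lib import dataset_store, metric, network

sourcePath = 'sample_demand_nodes.csv'   # placeholder demand nodes CSV
outputPath = 'output'                    # placeholder output directory
if not os.path.exists(outputPath):
    os.makedirs(outputPath)

datasetStore = dataset_store.create(os.path.join(outputPath, 'dataset.db'), sourcePath)

# Compute per-node metrics (mvMax in particular)
metricModel = metric.getModel('mvMax5')
metricConfiguration = json.load(open('sample_metric_params.json'))
metricValueByOptionBySection = datasetStore.applyMetric(metricModel, metricConfiguration)

# Build the network from the computed metrics
networkModel = network.getModel('modKruskal')
networkConfiguration = json.load(open('network_params.json'))
networkValueByOptionBySection = datasetStore.buildNetwork(networkModel, networkConfiguration)

# Recompute aggregates now that each node's electrification option is chosen
metricValueByOptionBySection = datasetStore.updateMetric(metricModel, metricValueByOptionBySection)

metric.saveMetricsCSV(os.path.join(outputPath, 'metrics-global'),
                      metricModel, metricValueByOptionBySection)
datasetStore.saveMetricsCSV(os.path.join(outputPath, 'metrics-local'), metricModel)
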
Example #7
 def clone(self, scenarioID):
     'Show form to create a new item based on datasets and parameters from existing scenario'
     # Make sure the user is logged in
     personID = h.getPersonID()
     if not personID:
         return redirect(url('person_login', targetURL=h.encodeURL(request.path)))
     # Make sure the user has access to the scenario
     scenario = Session.query(model.Scenario).filter(model.getScopeFilter(personID)).filter(model.Scenario.id==scenarioID).first()
     if not scenario:
         return redirect(url('new_scenario'))
     # Load
     scenarioInput = scenario.input
     # Prepare
     c.scenario = scenario
     c.metricModel = metric.getModel(request.GET.get('metricModel', scenarioInput['metric model name']))
     c.metricConfiguration = scenarioInput['metric configuration']
     c.networkModel = network.getModel(request.GET.get('networkModel', scenarioInput['network model name']))
     c.networkConfiguration = scenarioInput['network configuration']
     # Return
     return render('/scenarios/new.mako')
Example #8
 def new(self, format='html'):
     'GET /scenarios/new: Show form to create a new item'
     # If the user is not logged in,
     if not h.isPerson():
         # Redirect to login
         return redirect(url('person_login', targetURL=h.encodeURL(h.url('new_scenario'))))
     # Make sure that the requested metric model exists
     metricModelNames = metric.getModelNames()
     metricModelName = request.GET.get('metricModel')
     if metricModelName not in metricModelNames:
         metricModelName = metricModelNames[0]
     c.metricModel = metric.getModel(metricModelName)
     c.metricConfiguration = {}
     # Make sure that the requested network model exists
     networkModelNames = network.getModelNames()
     networkModelName = request.GET.get('networkModel')
     if networkModelName not in networkModelNames:
         networkModelName = networkModelNames[0]
     c.networkModel = network.getModel(networkModelName)
     c.networkConfiguration = {}
     # Render form
     c.scenario = None
     return render('/scenarios/new.mako')
Example #9
# build up variable dicts
vars = []
if args.from_model:

    # check if we have the networkplanner modules available

    try:
        from np.lib import variable_store as VS
        from np.lib.variable_store import Variable as V
        from np.lib import metric
    except ImportError:
        raise ImportError(
            "Analyzing via model name requires networkplanner library")

    # import the model so that the subclasses of Variable are found
    mvModel = metric.getModel(args.model_dir_or_name)

    for var in all_subclasses(V):
        var_info = analyze_variable(var)
        var_info['dep_string'] = ";".join(var_info['dependencies'])
        vars.append(var_info)

elif os.path.isdir(args.model_dir_or_name):

    # for each file in path analyze all its variable classes
    for py_file in glob.glob(args.model_dir_or_name + "/*.py"):
        module_name = os.path.basename(py_file)[:-3]
        with open(py_file) as pyf:
            ast_module = ast.parse(pyf.read())

        for ast_node in ast_module.body:

Example #10
            node_from = graph.get_node(getName(var_from, 'id'))[0]
            node_to = graph.get_node(getName(var_to[0], 'id'))[0]
            graph.add_edge(pydot.Edge(node_from, node_to))

    return graph


if __name__ == '__main__':

    if (len(sys.argv) < 4):
        sys.stderr.write("example usage:  python model_demand_dependencies.py model variable outfile [name_type]\n")
        sys.exit()

    # setup model
    model = sys.argv[1]
    variable = sys.argv[2] 
    outfile = sys.argv[3]
    nameType = "alias"
    if len(sys.argv) == 5:
        nameType = sys.argv[4]

    mvModel = metric.getModel(model)
    modelVar = util.getSubModuleFromString(mvModel, variable)
    dependencies = VS.buildOrderedDependencies(modelVar)
    graph = buildPyDotGraph(dependencies, nameType)
    
    graph.write(outfile)
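
Only a fragment of the pydot graph-building code survives above. A minimal sketch of the same pattern, assuming dependencies maps each variable name to the names it depends on; the function name is hypothetical.

import pydot

def build_dependency_graph(dependencies):
    'Build a directed pydot graph from {name: [names it depends on]}'
    graph = pydot.Dot(graph_type='digraph')
    nodes = {}
    for name in dependencies:
        nodes[name] = pydot.Node(name)
        graph.add_node(nodes[name])
    for name, upstream_names in dependencies.items():
        for upstream in upstream_names:
            if upstream not in nodes:
                nodes[upstream] = pydot.Node(upstream)
                graph.add_node(nodes[upstream])
            # edge points from the dependency to the variable that uses it
            graph.add_edge(pydot.Edge(nodes[upstream], nodes[name]))
    return graph

# build_dependency_graph({'demand': ['population', 'household size']}).write('deps.dot')
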
    

    
Example #11
 def show(self, id, format='html'):
     'GET /scenarios/id: Show a specific item'
     # If the output format is not supported, 
     if format not in ['html', 'zip', 'geojson', 'json']: 
         return 'Unsupported output format: ' + format 
     try:
         id = int(id)
     except ValueError:
         return redirect(url('scenarios'))
     # Load
     personID = h.getPersonID()
     c.scenario = Session.query(model.Scenario).filter(model.Scenario.id==id).filter(model.getScopeFilter(personID)).first()
     # If user does not have access to the scenario,
     if not c.scenario:
         c.status = model.statusFailed
         if format == 'html':
             return render('/scenarios/show.mako')
         elif format == 'zip':
             return ''
         elif format == 'geojson':
             return geojson.dumps(geojson.FeatureCollection([]))
         elif format == 'json':
             return cjson.encode({})
     # If the scenario has an error,
     if c.scenario.status == model.statusFailed:
         c.traceback = c.scenario.output['traceback']
         c.status = model.statusFailed
         if format == 'html':
             return render('/scenarios/show.mako')
         elif format == 'zip':
             return forward(FileApp(c.scenario.getFolder() + '.zip'))
         elif format == 'geojson':
             return geojson.dumps(geojson.FeatureCollection([]))
         elif format == 'json':
             return c.scenario.exportJSON()
     # If the scenario has not been processed,
     if c.scenario.isQueued():
         c.status = model.statusPending
         if format == 'html':
             return render('/scenarios/show.mako')
         elif format == 'zip':
             return forward(FileApp(c.scenario.getFolder() + '.zip'))
         elif format == 'geojson':
             return geojson.dumps(geojson.FeatureCollection([]))
         elif format == 'json':
             return c.scenario.exportJSON()
     # Prepare
     c.status = model.statusDone
     c.scenarioInput = c.scenario.input
     c.scenarioOutput = c.scenario.output
     transform_point = geometry_store.get_transform_point(geometry_store.proj4LL, geometry_store.proj4SM)
     # If the user wants HTML,
     if format == 'html':
         # Render scenario
         c.metricModel = metric.getModel(c.scenarioInput['metric model name'])
         scenarioStatistics = c.scenarioOutput['statistics']
         nodeStatistics = scenarioStatistics['node']
         # Prepare map
         centerX, centerY = transform_point(nodeStatistics['mean longitude'], nodeStatistics['mean latitude'])
         box1X, box1Y = transform_point(nodeStatistics['minimum longitude'], nodeStatistics['maximum latitude'])
         box2X, box2Y = transform_point(nodeStatistics['maximum longitude'], nodeStatistics['minimum latitude'])
         # Render map
         datasetStore = c.scenario.getDataset()
         c.mapFeatures = datasetStore.exportGeoJSON(transform_point)
         c.mapCenter = '%s, %s' % (centerX, centerY)
         c.mapBox = '%s, %s, %s, %s' % (box1X, box1Y, box2X, box2Y)
         # Render nodes
         c.nodes = list(datasetStore.cycleNodes())
         c.populationQuartiles = scenarioStatistics['metric']['population quartiles']
         # Render scenarios
         c.scenarios = Session.query(model.Scenario).filter(model.getScopeFilter(personID)).filter(model.Scenario.status==model.statusDone).filter(model.Scenario.id!=c.scenario.id).order_by(model.Scenario.id.desc()).all()
         # Return
         return render('/scenarios/show.mako')
     elif format == 'zip':
         return forward(FileApp(c.scenario.getFolder() + '.zip'))
     elif format == 'geojson':
         return c.scenario.getDataset().exportGeoJSON(transform_point)
     elif format == 'json':
         return c.scenario.exportJSON(request.params.get('nodeID'))
Example #12
 def run(self):
     # Prepare
     scenarioInput = self.input
     scenarioFolder = self.getFolder()
     expandPath = lambda x: os.path.join(scenarioFolder, x)
     # Setup status reporting
     from time import localtime, strftime
     time_format = "%Y-%m-%d %H:%M:%S"
     
     # Register demographics
     Job.log("Registering demographics")
     print "%s Registering demographics" % strftime(time_format, localtime())
     nodesPath = expandPath('nodes')
     targetPath = self.getDatasetPath()
     sourcePath = expandPath(scenarioInput['demographic file name'])
     datasetStore = dataset_store.create(targetPath, sourcePath)
     datasetStore.saveNodesSHP(nodesPath)
     datasetStore.saveNodesCSV(nodesPath)
     # Apply metric
     Job.log("Applying metric")
     print "%s Applying metric" % strftime(time_format, localtime())
     metricModel = metric.getModel(scenarioInput['metric model name'])
     metricConfiguration = scenarioInput['metric configuration']
     metricValueByOptionBySection = datasetStore.applyMetric(metricModel, metricConfiguration)
     # Build network
     Job.log("Building network")
     print "%s Building network" % strftime(time_format, localtime())
     networkModel = network.getModel(scenarioInput['network model name'])
     networkConfiguration = scenarioInput['network configuration']
     networkValueByOptionBySection = datasetStore.buildNetwork(networkModel, networkConfiguration, jobLogger=Job)
     # Update metric
     Job.log("Updating metric")
     print "%s Updating metric" % strftime(time_format, localtime())
     metricValueByOptionBySection = datasetStore.updateMetric(metricModel, metricValueByOptionBySection)
     # Save output
     Job.log("Saving output")
     print "%s Saving output" % strftime(time_format, localtime())
     metric.saveMetricsConfigurationCSV(expandPath('metrics-job-input'), metricConfiguration)
     metric.saveMetricsCSV(expandPath('metrics-global'), metricModel, metricValueByOptionBySection)
     datasetStore.saveMetricsCSV(expandPath('metrics-local'), metricModel)
     datasetStore.saveSegmentsSHP(expandPath('networks-existing'), is_existing=True)
     datasetStore.saveSegmentsSHP(expandPath('networks-proposed'), is_existing=False)
     # Bundle
     store.zipFolder(scenarioFolder + '.zip', scenarioFolder)
     # Validate
     self.validateParameters()
     # Save output
     self.output = {
         'variables': { 
             'node': dict((str(x.id), dict(input=x.input, output=x.output)) for x in datasetStore.cycleNodes()),
             'metric': metricValueByOptionBySection,
             'network': networkValueByOptionBySection,
         }, 
         'statistics': { 
             'node': datasetStore.getNodeStatistics(), 
             'metric': datasetStore.getMetricStatistics(), 
             'network': datasetStore.getNetworkStatistics(), 
         }, 
         'warnings': store.popWarnings(self.id),
     }
     # Commit
     Session.commit()
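
The repeated Job.log / timestamped print pairs in the example above could be collapsed into one helper; a small sketch, assuming the same Job object is in scope (the helper name is hypothetical).

from time import localtime, strftime

def log_step(message, time_format='%Y-%m-%d %H:%M:%S'):
    'Record a progress message with Job and echo it with a timestamp'
    Job.log(message)
    print('%s %s' % (strftime(time_format, localtime()), message))

# log_step('Registering demographics')
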
Example #13
            # Output code
            lines.append('\n::\n\n%s\n\n\n' % '\n'.join('    ' + x for x in inspect.getsource(variable).splitlines()))
        # For each variable sorted by option,
        for variable in sorted(variables, key=lambda x: x.option):
            # Append long alias, short alias, units
            rows.append((':ref:`%s > %s <%s>`' % (variable.section, variable.option, variable_store.formatLabel(variable)), ' '.join(variable.aliases or []), variable.units))
    # Return
    return roots, lines, rows


# If we are running the script from the command-line,
if __name__ == '__main__':
    # For each metric model,
    for metricModelName in metric.getModelNames():
        # Load metric model
        metricModel = metric.getModel(metricModelName)
        metricRoots, metricLines, metricRows = generateDocumentation(metricModel)
        # Save and close
        referenceFile = open(os.path.join(script_process.basePath, 'docs/metric-%s.rst' % metricModelName), 'wt')
        referenceFile.write(formatHeader('Metric Model %s' % metricModelName, '='))
        for root in sorted(metricRoots, key=lambda x: metricModel.roots.index(x)):
            referenceFile.write('- :ref:`%s`\n' % variable_store.formatLabel(root))
        referenceFile.write('\n\n' + 'You can override the value of any variable in the model on a node-by-node basis.  To perform a node-level override, use the aliases in the following table as additional columns in your spreadsheet or fields in your shapefile.  Both long and short aliases are recognized.')
        referenceFile.write('\n\n' + formatTable(['Long alias', 'Short alias', 'Units'], metricRows))
        referenceFile.write('\n\n' + '\n'.join(metricLines))
        referenceFile.close()
    # For each network model,
    for networkModelName in network.getModelNames():
        # Load network model
        networkModel = network.getModel(networkModelName)
        networkRoots, networkLines, networkRows = generateDocumentation(networkModel)
Example #14
                        default=VS.HEADER_TYPE_ALIAS,
                        help="the output file header field name type")

    args = parser.parse_args()

    # make output dir if not exists
    outputDataPath = args.output_path
    if not os.path.exists(outputDataPath):
        os.makedirs(outputDataPath)

    targetPath = os.path.join(outputDataPath, "dataset.db")
    datasetStore = dataset_store.create(targetPath, args.input_nodes_file)

    # setup models
    metricModel = metric.getModel(args.metric_model_name)
    metricConfiguration = json.load(args.metric_model_params)
    networkModel = network.getModel(args.network_model_name)
    networkConfiguration = json.load(args.network_model_params)

    # Run metric model
    metricValueByOptionBySection = datasetStore.applyMetric(metricModel, metricConfiguration)

    # Now that metrics (mvMax in particular) have been calculated
    # we can build the network
    networkValueByOptionBySection = datasetStore.buildNetwork(networkModel, networkConfiguration)

    # Now that the network's been built (and the electrification option 
    # is chosen) run the aggregate calculations
    metricValueByOptionBySection = datasetStore.updateMetric(metricModel, metricValueByOptionBySection)

# If the user is running the script from the command-line,
if __name__ == '__main__':
    # Connect (get config and setup model from appropriate DB)
    configuration = script_process.connect()

    # get ids from stdin into a list
    ids = []
    for id in sys.stdin:
        ids.append(int(id))

    # required to know where scenarios dir is
    config['storage_path'] = configuration.get('app:main', 'storage_path')

    # Iterate through scenarios
    scenarios = Session.query(model.Scenario).\
            filter(and_(model.Scenario.id.in_(ids), 
                   (model.Scenario.status == model.statusDone))).\
            order_by(model.Scenario.id)

    for scenario in scenarios:
        scenarioFolder = scenario.getDatasetPath()
         
        datasetPath = store.replaceFileExtension(scenarioFolder, 'db')
        ds = dataset_store.load(datasetPath)
        metricModel = metric.getModel(scenario.input['metric model name'])
        vs = metricModel.VariableStore()
        specialSaveMetricsCSV(ds, "metrics-local-%s.csv" % scenario.id, metricModel)