# NOTE(review): a second definition of `evaluate` appears later in this file
# and shadows this one at import time — confirm which version is intended.
def evaluate(rometa, minim, target, purpose):
    """
    Evaluate a RO against a minimum information model for a particular
    purpose with respect to a particular target resource.

    rometa      is an ro_metadata object used to access the RO being evaluated
    minim       is a URI-reference (relative to the RO, or absolute) of the
                minim description to be used.
    target      is a URI-reference (relative to the RO, or absolute) of a
                target resource with respect to which the evaluation is
                performed.
    purpose     is a string that identifies a purpose w.r.t. the target for
                which completeness will be evaluated.

    'target' and 'purpose' are used together to select a particular minim
    Model that will be used for the evaluation.  For example, to evaluate
    whether an RO is sufficiently complete to support creation (a purpose)
    of a specified output file (a target).

    There are two main steps to the evaluation process:
    1. locate the minim model constraint for the target resource and purpose
    2. evaluate the RO against the selected model.

    The result indicates a summary and details of the analysis; e.g.
      { 'summary':       [MINIM.fullySatisfies, MINIM.nominallySatisfies, MINIM.minimallySatisfies]
      , 'missingMust':   []
      , 'missingShould': []
      , 'missingMay':    []
      , 'rouri':         rouri
      , 'minimuri':      minim
      , 'target':        target
      , 'purpose':       purpose
      , 'constrainturi': constraint['uri']
      , 'modeluri':      model['uri']
      }
    """
    # Locate the constraint model requirements
    rouri      = rometa.getRoUri()
    minimuri   = rometa.getComponentUri(minim)
    minimgraph = ro_minim.readMinimGraph(minimuri)
    constraint = ro_minim.getConstraint(minimgraph, rouri, target, purpose)
    # BUG FIX: guard *before* dereferencing `constraint`.  The original built
    # cbindings from constraint[...] first, so a missing constraint raised
    # TypeError instead of the intended assertion message.
    assert constraint is not None, (
        "Missing minim:Constraint for target %s, purpose %s" % (target, purpose))
    cbindings  = (
        { 'targetro':   constraint['targetro_actual']
        , 'targetres':  constraint['targetres_actual']
        , 'onresource': constraint['onresource_actual']
        })
    model = ro_minim.getModel(minimgraph, constraint['model'])
    assert model is not None, (
        "Missing minim:Model for target %s, purpose %s" % (target, purpose))
    requirements = ro_minim.getRequirements(minimgraph, model['uri'])
    # Evaluate the individual model requirements.
    # Each entry of reqeval is (requirement, satisfied, bindings).
    reqeval = []
    for r in requirements:
        log.info("evaluate: %s %s" % (r['level'], str(r['uri'])))
        if 'datarule' in r:
            # @@TODO: factor to separate function?
            # (This is a deprecated form, as it locks the rule to a particular resource)
            satisfied = rometa.roManifestContains(
                (rouri, ORE.aggregates, r['datarule']['aggregates']))
            reqeval.append((r, satisfied, {}))
            log.debug("- %s: %s" % (
                repr((rouri, ORE.aggregates, r['datarule']['aggregates'])), satisfied))
        elif 'softwarerule' in r:
            # @@TODO: factor to separate function
            # Run the configured command and test its output against a regex.
            cmnd = r['softwarerule']['command']
            resp = r['softwarerule']['response']
            log.debug("softwarerule: %s -> %s" % (cmnd, resp))
            out = unicode(subprocess.check_output(cmnd.split(), stderr=subprocess.STDOUT))
            exp = re.compile(resp)
            # Match object (truthy) or None — used as the satisfied flag.
            satisfied = exp.match(out)
            reqeval.append((r, satisfied, {}))
            log.debug("- Software %s: response %s, satisfied %s" % (
                cmnd, resp, "OK" if satisfied else "Fail"))
        elif 'contentmatchrule' in r:
            (satisfied, bindings) = evalContentMatch(rometa, r['contentmatchrule'], cbindings)
            reqeval.append((r, satisfied, bindings))
            log.debug("- ContentMatch: rule %s, bindings %s, satisfied %s" % (
                repr(r['contentmatchrule']), repr(bindings), "OK" if satisfied else "Fail"))
        else:
            raise ValueError("Unrecognized requirement rule: %s" % repr(r.keys()))
    # Evaluate overall satisfaction of model: start by assuming every level
    # is achieved, then knock levels out as failed requirements are seen.
    sat_levels = (
        { 'MUST':   MINIM.minimallySatisfies
        , 'SHOULD': MINIM.nominallySatisfies
        , 'MAY':    MINIM.fullySatisfies
        })
    eval_result = (
        { 'summary':       []
        , 'missingMust':   []
        , 'missingShould': []
        , 'missingMay':    []
        , 'satisfied':     []
        , 'rouri':         rouri
        , 'minimuri':      minimuri
        , 'target':        target
        , 'purpose':       purpose
        , 'constrainturi': constraint['uri']
        , 'modeluri':      model['uri']
        })
    for (r, satisfied, binding) in reqeval:
        if satisfied:
            eval_result['satisfied'].append((r, binding))
        else:
            # A failed MUST voids all levels; SHOULD voids SHOULD and MAY;
            # MAY voids only MAY.
            if r['level'] == "MUST":
                eval_result['missingMust'].append((r, binding))
                sat_levels['MUST']   = None
                sat_levels['SHOULD'] = None
                sat_levels['MAY']    = None
            elif r['level'] == "SHOULD":
                eval_result['missingShould'].append((r, binding))
                sat_levels['SHOULD'] = None
                sat_levels['MAY']    = None
            elif r['level'] == "MAY":
                eval_result['missingMay'].append((r, binding))
                sat_levels['MAY'] = None
    eval_result['summary'] = [sat_levels[k] for k in sat_levels if sat_levels[k]]
    return (minimgraph, eval_result)
def evaluate(rometa, minim, target, purpose):
    """
    Evaluate a RO against a minimum information model for a particular
    purpose with respect to a particular target resource.

    rometa      is an ro_metadata object used to access the RO being evaluated
    minim       is a URI-reference (relative to the RO, or absolute) of the
                minim description to be used.
    target      is a URI-reference (relative to the RO, or absolute) of a
                target resource with respect to which the evaluation is
                performed.
    purpose     is a string that identifies a purpose w.r.t. the target for
                which completeness will be evaluated.

    'target' and 'purpose' are used together to select a particular minim
    Model that will be used for the evaluation.  For example, to evaluate
    whether an RO is sufficiently complete to support creation (a purpose)
    of a specified output file (a target).

    There are two main steps to the evaluation process:
    1. locate the minim model constraint for the target resource and purpose
    2. evaluate the RO against the selected model.

    The function returns a pair of values (minimgraph, evalresult)

    minimgraph is a copy of the minim graph on which the evaluation was based.

    The evalresult indicates a summary and details of the analysis; e.g.
      { 'summary':       [MINIM.fullySatisfies, MINIM.nominallySatisfies, MINIM.minimallySatisfies]
      , 'missingMust':   []
      , 'missingShould': []
      , 'missingMay':    []
      , 'rouri':         rouri
      , 'roid':          roid
      , 'description':   rodesc
      , 'minimuri':      minim
      , 'target':        target
      , 'targetlabel':   targetlabel
      , 'purpose':       purpose
      , 'constrainturi': constraint['uri']
      , 'modeluri':      model['uri']
      }
    """
    # Locate the constraint model requirements
    rouri = rometa.getRoUri()
    (roid, rotitle) = getIdLabel(rometa, rouri)
    rodesc = rometa.getAnnotationValue(rouri, DCTERMS.description) or rotitle
    minimuri   = rometa.getComponentUri(minim)
    minimgraph = ro_minim.readMinimGraph(minimuri)
    constraint = ro_minim.getConstraint(minimgraph, rouri, target, purpose)
    assert constraint is not None, (
        "Missing minim:Constraint for target %s, purpose %s" % (target, purpose))
    (targetid, targetlabel) = getIdLabel(rometa, constraint['targetres_actual'])
    # Variable bindings made available to content-match and query-test rules.
    cbindings = (
        { 'targetro':    constraint['targetro_actual']
        , 'targetres':   constraint['targetres_actual']
        , 'targetid':    targetid
        , 'targetlabel': targetlabel
        })
    model = ro_minim.getModel(minimgraph, constraint['model'])
    assert model is not None, (
        "Missing minim:Model for target %s, purpose %s" % (target, purpose))
    requirements = ro_minim.getRequirements(minimgraph, model['uri'])
    # Evaluate the individual model requirements.
    # Each entry of reqeval is (requirement, satisfied, bindings).
    reqeval = []
    for r in requirements:
        if 'datarule' in r:
            # @@TODO: factor to separate function?
            # (This is a deprecated form, as it locks the rule to a particular resource)
            satisfied = rometa.roManifestContains(
                (rouri, ORE.aggregates, r['datarule']['aggregates']))
            reqeval.append((r, satisfied, {}))
            log.debug("- %s: %s" % (
                repr((rouri, ORE.aggregates, r['datarule']['aggregates'])), satisfied))
        elif 'softwarerule' in r:
            # @@TODO: factor to separate function
            # Run the configured command and test its output against a regex.
            cmnd = r['softwarerule']['command']
            resp = r['softwarerule']['response']
            log.debug("softwarerule: %s -> %s" % (cmnd, resp))
            out = unicode(subprocess.check_output(cmnd.split(), stderr=subprocess.STDOUT))
            exp = re.compile(resp)
            # Match object (truthy) or None — used as the satisfied flag.
            satisfied = exp.match(out)
            reqeval.append((r, satisfied, {}))
            log.debug("- Software %s: response %s, satisfied %s" % (
                cmnd, resp, "OK" if satisfied else "Fail"))
        elif 'contentmatchrule' in r:
            (satisfied, bindings) = evalContentMatch(rometa, r['contentmatchrule'], cbindings)
            reqeval.append((r, satisfied, bindings))
            log.debug("- ContentMatch: rule %s, bindings %s, satisfied %s" % (
                repr(r['contentmatchrule']), repr(bindings), "OK" if satisfied else "Fail"))
        elif 'querytestrule' in r:
            (satisfied, bindings, msg) = evalQueryTest(rometa, r['querytestrule'], cbindings)
            reqeval.append((r, satisfied, bindings))
            log.debug("- QueryTest: rule %s, bindings %s, satisfied %s" % (
                repr(r['querytestrule']), repr(bindings), "OK" if satisfied else "Fail"))
        else:
            raise ValueError("Unrecognized requirement rule: %s" % repr(r.keys()))
        log.info("evaluate: [%s] %s %s (%s)" % (
            r['seq'][:10], r['level'], str(r['ruleuri']), "pass" if satisfied else "fail"))
    # Evaluate overall satisfaction of model
    eval_result = (
        { 'summary':       []
        , 'missingMust':   []
        , 'missingShould': []
        , 'missingMay':    []
        , 'satisfied':     []
        , 'rouri':         rouri
        , 'roid':          roid
        , 'title':         rotitle
        , 'description':   rodesc
        , 'minimuri':      minimuri
        , 'target':        target
        , 'targetid':      targetid
        , 'targetlabel':   targetlabel
        , 'purpose':       purpose
        , 'constrainturi': constraint['uri']
        , 'modeluri':      model['uri']
        })
    # sat_levels initially assume all requirements pass, then reset levels
    # achieved as individual requirements are examined.
    sat_levels = (
        { 'MUST':   MINIM.minimallySatisfies
        , 'SHOULD': MINIM.nominallySatisfies
        , 'MAY':    MINIM.fullySatisfies
        })
    for (r, satisfied, binding) in reqeval:
        if satisfied:
            eval_result['satisfied'].append((r, binding))
        else:
            # A failed MUST voids all levels; SHOULD voids SHOULD and MAY;
            # MAY voids only MAY.
            if r['level'] == "MUST":
                eval_result['missingMust'].append((r, binding))
                sat_levels['MUST']   = None
                sat_levels['SHOULD'] = None
                sat_levels['MAY']    = None
            elif r['level'] == "SHOULD":
                eval_result['missingShould'].append((r, binding))
                sat_levels['SHOULD'] = None
                sat_levels['MAY']    = None
            elif r['level'] == "MAY":
                eval_result['missingMay'].append((r, binding))
                sat_levels['MAY'] = None
    eval_result['summary'] = [sat_levels[k] for k in sat_levels if sat_levels[k]]
    return (minimgraph, eval_result)