def summary_data(self):
    """Return (title, rich-text) pairs for the configured summary fields.

    Reads the most recently saved assessment data from the context and,
    for each ``(name, title)`` entry in ``self.summary_fields``, looks up
    the value stored under the ``'<article>_<name>'`` key and converts it
    to rich text with ``t2rt``.
    """
    stored = self.context.saved_assessment_data.last()

    return [
        (title,
         t2rt(stored.get('{}_{}'.format(self.article, name), None)))
        for name, title in self.summary_fields
    ]
def _get_article_data(self, region_code, descriptor, assess_data, article):
    """Build the regional-descriptor summary row for one article.

    Combines the coherence score (taken from the regional descriptor
    assessments), the derived 2018 overall score/conclusion, the saved
    rich-text summaries and the 2012 regional assessment into a
    DESCRIPTOR_SUMMARY namedtuple.  As a side effect, records the 2018
    and 2012 overall scores in ``self.overall_scores`` keyed by
    (region_code, descriptor, article, year).

    :param region_code: e.g. 'BAL'
    :param descriptor: e.g. 'D1.1'
    :param assess_data: saved assessment data dictionary
    :param article: e.g. 'Art9'
    :return: DESCRIPTOR_SUMMARY namedtuple
    """
    scores = OverallScores(ARTICLE_WEIGHTS)

    # Coherence comes from the regional-descriptor assessments
    scores.coherence = self.get_coherence_data(
        region_code, descriptor, article)

    coherence_score, coherence_concl = scores.coherence['conclusion']
    coherence = (
        "{} ({})".format(coherence_concl, coherence_score),
        scores.coherence['color'],
    )

    # Overall 2018 score/conclusion (regional flavour: is_national=False)
    overall_value, _score = scores.get_overall_score(
        article, is_national=False)
    overall_concl = self.get_conclusion(overall_value)
    overall_score_2018 = (
        "{} ({})".format(overall_concl, overall_value),
        self.get_color_for_score(overall_value),
    )

    # Saved rich-text fields; fall back to '-' when nothing was filled in
    assessment_summary = t2rt(
        assess_data.get('{}_assessment_summary'.format(article)) or '-')
    progress_assessment = t2rt(
        assess_data.get('{}_progress'.format(article)) or '-')
    recommendations = t2rt(
        assess_data.get('{}_recommendations'.format(article)) or '-')

    self.overall_scores[(region_code, descriptor, article, '2018')] = \
        overall_score_2018

    regional_2012 = self.get_reg_assessments_data_2012(
        article, region_code, descriptor)

    coherence_2012 = ('-', '0')
    coherence_change_since_2012 = '-'

    if regional_2012:
        score_2012 = int(regional_2012[0].overall_score)
        coherence_2012 = (
            "{} ({})".format(regional_2012[0].conclusion, score_2012),
            self.get_color_for_score(score_2012),
        )
        # '-' means "not scored"; treat it as 0 for the change computation
        if coherence_score == '-':
            coherence_score = 0
        coherence_change_since_2012 = int(coherence_score - score_2012)

    self.overall_scores[(region_code, descriptor, article, '2012')] = \
        coherence_2012

    # Regional rows leave the national-only columns (adequacy,
    # consistency, 2012 overall, change since 2012) blank
    return DESCRIPTOR_SUMMARY(
        assessment_summary, progress_assessment, recommendations,
        "", "", coherence, overall_score_2018, "", "",
        coherence_2012, coherence_change_since_2012,
    )
def _get_article_data(self, region_code, country_name, descriptor,
                      assess_data, article):
    """ Given the result from '_setup_phase_overall_scores' method
        return a DESCRIPTOR_SUMMARY namedtuple with summaries,
        adequacy/consistency/coherence scores, 2012 scores, conclusions,
        score changes

    Side effect: stores the 2018 and 2012 overall scores in
    ``self.overall_scores`` keyed by (region_code, descriptor, article,
    year).

    :param region_code: 'BAL'
    :param country_name: 'Finland'
    :param descriptor: 'D1.1'
    :param assess_data: saved_assessment_data dictionary
    :param article: Art9
    :return: DESCRIPTOR_SUMMARY namedtuple
    """
    phase_overall_scores = OverallScores(ARTICLE_WEIGHTS)

    # Get the adequacy, consistency scores from national descriptors
    phase_overall_scores = self._setup_phase_overall_scores(
        phase_overall_scores, assess_data, article)

    # Get the coherence scores from regional descriptors
    phase_overall_scores.coherence = self.get_coherence_data(
        region_code, descriptor, article
    )

    # Each phase's 'conclusion' entry unpacks as (score_value, text);
    # the displayed cell is a ("text (score)", color) pair
    adequacy_score_val, conclusion = \
        phase_overall_scores.adequacy['conclusion']
    # score = phase_overall_scores.get_score_for_phase('adequacy')
    adequacy = ("{} ({})".format(conclusion, adequacy_score_val),
                phase_overall_scores.adequacy['color'])

    score_val, conclusion = phase_overall_scores.consistency['conclusion']
    # score = phase_overall_scores.get_score_for_phase('consistency')
    consistency = ("{} ({})".format(conclusion, score_val),
                   phase_overall_scores.consistency['color'])

    cscore_val, conclusion = phase_overall_scores.coherence['conclusion']
    # score = phase_overall_scores.get_score_for_phase('coherence')
    coherence = ("{} ({})".format(conclusion, cscore_val),
                 phase_overall_scores.coherence['color'])

    # Overall 2018 score/conclusion for the whole article
    overallscore_val, score = phase_overall_scores.get_overall_score(
        article
    )
    conclusion = self.get_conclusion(overallscore_val)
    overall_score_2018 = (
        "{} ({})".format(conclusion, overallscore_val),
        self.get_color_for_score(overallscore_val)
    )

    # Saved rich-text fields; '-' when nothing was filled in
    assessment_summary = t2rt(
        assess_data.get('{}_assessment_summary'.format(article)) or '-'
    )
    progress_assessment = t2rt(
        assess_data.get('{}_progress'.format(article)) or '-'
    )
    recommendations = t2rt(
        assess_data.get('{}_recommendations'.format(article)) or '-'
    )

    score_2012, conclusion_2012 = self.get_assessment_data_2012(
        region_code, country_name, descriptor, article
    )
    overall_score_2012 = ("{} ({})".format(conclusion_2012, score_2012),
                          self.get_color_for_score(score_2012))

    __key_2018 = (region_code, descriptor, article, '2018')
    __key_2012 = (region_code, descriptor, article, '2012')
    self.overall_scores[__key_2012] = overall_score_2012
    self.overall_scores[__key_2018] = overall_score_2018

    if adequacy_score_val == '-':
        # if adequacy is not relevant
        change_since_2012 = 'Not relevant (-)'
    else:
        # NOTE(review): assumes adequacy_score_val and score_2012 are
        # numeric here — verify get_assessment_data_2012 never returns
        # '-' for score_2012
        change_since_2012 = int(adequacy_score_val - score_2012)

    reg_assess_2012 = self.get_reg_assessments_data_2012(
        article, region_code, descriptor
    )
    coherence_2012 = ('Not scored', '0')
    coherence_change_since_2012 = 'Not relevant (-)'

    if reg_assess_2012:
        __score = float(reg_assess_2012[0].overall_score)
        coherence_2012 = ("{} ({})".format(reg_assess_2012[0].conclusion,
                                           int(__score)),
                          self.get_color_for_score(__score))
        # '-' means coherence was not scored; treat as 0 for the change
        if cscore_val == '-':
            cscore_val = 0
        coherence_change_since_2012 = int(cscore_val - __score)

    res = DESCRIPTOR_SUMMARY(
        assessment_summary, progress_assessment, recommendations,
        adequacy, consistency, coherence, overall_score_2018,
        overall_score_2012, change_since_2012, coherence_2012,
        coherence_change_since_2012
    )

    return res
def t2rt(self, text):
    """Convert *text* to rich text via the module-level ``t2rt`` helper.

    Presumably exposed as a method so page templates can reach the
    helper through the view — TODO confirm against the templates.
    """
    # The method name shadows the module helper; inside the body the
    # name resolves to the module-level function, not this method.
    rich = t2rt(text)

    return rich
def filter_assessment_data_2012(data, region_code, descriptor_criterions):
    """ Filters and formats the raw db data for 2012 assessment data

    :param data: iterable of DB result rows (each exposes ``_fields``)
    :param region_code: region code used to match subregions, e.g. 'BAL'
    :param descriptor_criterions: criteria objects; only their ids are used
    :return: {country_name: Assessment2012} dict
    """
    gescomponents = [c.id for c in descriptor_criterions]

    assessments = {}
    criterias = []

    for row in data:
        fields = row._fields

        def col(name):
            # Positional row access by column name
            return row[fields.index(name)]

        country = col('Country')

        # The 2012 assessment data have the region in the country name
        # For example: United Kingdom (North East Atlantic)
        # When we display the assessment data (which we do, right now, based
        # on subregion), we want to match the data according to the "big"
        # region
        if '(' in country:
            region = REGION_RE.match(country).groupdict()['region']
            if region not in SUBREGIONS_TO_REGIONS[region_code]:
                continue

        summary = col('Conclusions')
        score = col('OverallScore')
        overall_ass = col('OverallAssessment')
        criteria = Criteria(
            col('AssessmentCriteria'),
            t2rt(col('Assessment'))
        )
        concl_crit = t2rt(col('Criteria'))

        # TODO test for other countries beside LV
        # Condition changed because of LV report, where score is 0
        # (so truthiness alone cannot distinguish "no score" from 0)
        if score is None:
            criterias.append(criteria)
        elif country not in assessments:
            criterias.insert(0, criteria)

            if round(float(score)) == float(score):
                # score is int like 2
                score = int(score)
            else:
                # score is float like 1.5
                score = float(score)

            assessment = Assessment2012(
                gescomponents,
                criterias,
                summary,
                concl_crit,
                overall_ass,
                score,
            )
            assessments[country] = assessment
        else:
            assessments[country].criteria.append(criteria)

    if not assessments and criterias:
        # Fallback: every processed row had a None score, so no per-country
        # entry was created above.  Build a single assessment (keyed by the
        # last processed row's country) from the accumulated criteria.
        # Guarding on ``criterias`` avoids a NameError on the loop variables
        # (summary, score, ...) when ``data`` is empty or every row was
        # filtered out by the region check.
        assessment = Assessment2012(
            gescomponents,
            criterias,
            summary,
            concl_crit,
            overall_ass,
            score,
        )
        assessments[country] = assessment

    return assessments
def format_assessment_data(article, elements, questions, muids, data,
                           descriptor, article_weights, self):
    """ Builds a data structure suitable for display in a template

    This is used to generate the assessment data overview table for 2018

    Returns an ``Assessment`` namedtuple with one ``AssessmentRow`` per
    question, the per-phase scores/conclusions/colors accumulated in
    ``phase_overall_scores``, and the overall 2018 score/conclusion.

    :param article: e.g. 'Art9'
    :param elements: elements to iterate for per-criteria questions
    :param questions: question definitions (answers, scores, weights, klass)
    :param muids: unused here — kept for the caller's signature
    :param data: saved assessment data dictionary
    :param descriptor: descriptor object or id string
    :param article_weights: {article: {phase: weight}} mapping
    :param self: the calling view (oddly passed last) — provides section,
        descriptor and coherence lookup for the override below

    TODO: this is doing too much. Need to be simplified and refactored.
    """
    answers = []
    # NOTE(review): relies on Python 2 dict API — .values() is not
    # indexable on Python 3
    phases = article_weights.values()[0].keys()
    phase_overall_scores = OverallScores(article_weights)
    # descriptor may be an object (use its id) or already a plain id
    descr_id = hasattr(descriptor, 'id') and descriptor.id or descriptor

    for question in questions:
        values = []
        choices = dict(enumerate(question.answers))
        q_scores = question.scores
        q_klass = question.klass

        if question.use_criteria == 'none':
            # Single answer for the whole question, no per-element split
            field_name = '{}_{}'.format(article, question.id)
            color_index = 0
            label = 'Not filled in'

            v = data.get(field_name, None)

            if v is not None:
                label = choices[v]
                color_index = ANSWERS_COLOR_TABLE[q_scores[v]]

            value = (label, color_index, u'All criteria')
            values.append(value)
        else:
            # One answer per element (criteria)
            for element in elements:
                field_name = '{}_{}_{}'.format(
                    article, question.id, element.id)
                color_index = 0
                label = u'{}: Not filled in'.format(element.title)

                v = data.get(field_name, None)

                if v is not None:
                    label = u'{}: {}'.format(element.title, choices[v])

                    try:
                        color_index = ANSWERS_COLOR_TABLE[q_scores[v]]
                    except Exception:
                        logger.exception('Invalid color table')
                        color_index = 0
                        # label = 'Invalid color table'

                value = (label, color_index,
                         get_crit_val(question, element, descriptor))
                values.append(value)

        summary_title = '{}_{}_Summary'.format(article, question.id)
        summary = t2rt(data.get(summary_title) or '')

        # The stored score object (if any) carries conclusion, score_value,
        # final_score, weight and is_not_relevant; defaults cover the
        # not-yet-scored case
        sn = '{}_{}_Score'.format(article, question.id)
        score = data.get(sn, {})
        conclusion = getattr(score, 'conclusion', '')
        score_value = getattr(score, 'score_value', 0)
        conclusion_color = CONCLUSION_COLOR_TABLE[score_value]

        weighted_score = getattr(score, 'final_score', 0)
        # Fall back to the question's configured weight for this descriptor
        q_weight = getattr(score, 'weight',
                           float(question.score_weights.get(descr_id, 0)))
        max_weighted_score = q_weight
        is_not_relevant = getattr(score, 'is_not_relevant', False)

        # is_not_relevant is True if all answered options are 'Not relevant'
        # maximum overall score is incremented if the is_not_relevant is
        # False
        if not is_not_relevant:
            p_score = getattr(phase_overall_scores, q_klass)
            p_score['score'] += weighted_score
            p_score['max_score'] += max_weighted_score

        qr = AssessmentRow(question.definition, summary, conclusion,
                           conclusion_color, score, values)
        answers.append(qr)

    # assessment summary and recommendations
    assess_sum = data.get('%s_assessment_summary' % article)
    recommend = data.get('%s_recommendations' % article)

    for phase in phases:
        # set the conclusion and color based on the score for each phase
        phase_scores = getattr(phase_overall_scores, phase)
        phase_score = phase_overall_scores.get_score_for_phase(phase)

        # Art9 has no consistency phase; max_score 0 means no scored
        # questions contributed to this phase
        if (phase == 'consistency' and article == 'Art9'
                or phase_scores['max_score'] == 0):
            phase_scores['conclusion'] = ('-', 'Not relevant')
            phase_scores['color'] = 0
            continue

        if phase == 'consistency' and phase_scores['score'] == 0:
            phase_scores['conclusion'] = (0, 'Not consistent')
            phase_scores['color'] = 3
            continue

        phase_scores['conclusion'] = get_overall_conclusion(phase_score)
        phase_scores['color'] = \
            CONCLUSION_COLOR_TABLE[get_range_index(phase_score)]

    # for national descriptors and primary articles (Art 8, 9, 10)
    # override the coherence score with the score from regional descriptors
    if self.section == 'national-descriptors' and self.is_primary_article:
        phase_overall_scores.coherence = self.get_coherence_data(
            self.country_region_code, self.descriptor, article)

    # the overall score and conclusion for the whole article 2018
    overall_score_val, overall_score = phase_overall_scores.\
        get_overall_score(article)
    overall_conclusion = get_overall_conclusion(overall_score)
    overall_conclusion_color = CONCLUSION_COLOR_TABLE.get(overall_score_val,
                                                          0)

    assessment = Assessment(
        elements,
        answers,
        assess_sum or '-',
        recommend or '-',
        phase_overall_scores,
        overall_score,
        overall_conclusion,
        overall_conclusion_color
    )

    return assessment