def _make_application_mode_section(
        best_application_mode: Optional[job_pb2.ModePercentage],
        project: project_pb2.Project, user_id: str,
        scoring_project: scoring.ScoringProject) -> Optional[dict[str, str]]:
    """Build the template variables describing the best application mode.

    Returns None when there is no best mode or it is OTHER_CHANNELS.
    """
    if not best_application_mode or best_application_mode.mode == job_pb2.OTHER_CHANNELS:
        return None
    # Pick the advice module matching the application mode, if any.
    relevant_advice_id = ''
    if best_application_mode.mode == job_pb2.SPONTANEOUS_APPLICATION:
        relevant_advice_id = 'spontaneous-application'
    elif best_application_mode.mode == job_pb2.PERSONAL_OR_PROFESSIONAL_CONTACTS:
        relevant_advice_id = next(
            (advice.advice_id for advice in project.advices
             if advice.advice_id.startswith('network')),
            '')
    if relevant_advice_id:
        deep_link = campaign.get_deep_link_advice(user_id, project, relevant_advice_id)
    else:
        deep_link = ''
    return {
        'link': deep_link,
        'title': scoring_project.translate_static_string(
            _APPLICATION_MODES_SHORT[best_application_mode.mode]),
        'name': scoring_project.translate_static_string(
            scoring.APPLICATION_MODES[best_application_mode.mode]),
        'percent': str(round(best_application_mode.percentage)),
    }
def _make_employment_type_section(
        employment_types: Sequence[job_pb2.EmploymentTypePercentage],
        scoring_project: scoring.ScoringProject) \
        -> Optional[dict[str, Any]]:
    """Build the template variables for the most frequent employment type.

    Returns None when there is no data or the top entry has no employment type.
    """
    if not employment_types:
        return None
    best_employment_type = employment_types[0]
    employment_types = employment_types[1:]
    if not best_employment_type.employment_type:
        return None
    if not employment_types:
        # No other employment type to compare against.
        best_employment_type_ratio = 0
    else:
        # NOTE(review): the ratio is computed against the *last* remaining
        # entry, not the runner-up — confirm this is intended (assumes entries
        # are sorted by descending percentage — TODO confirm).
        # NOTE(review): raises ZeroDivisionError if that percentage is 0.
        best_employment_type_ratio = round(
            best_employment_type.percentage / employment_types[-1].percentage)
    best_employment_name = scoring_project.translate_static_string(
        jobs.EMPLOYMENT_TYPES[best_employment_type.employment_type])
    best_employment_title = scoring_project.translate_static_string(
        _EMPLOYMENT_TYPES_TITLE[best_employment_type.employment_type])
    return {
        'name': best_employment_name,
        'percent': str(round(best_employment_type.percentage)),
        'ratio': best_employment_type_ratio,
        'title': best_employment_title,
    }
def _compute_diagnostic_text(
        scoring_project: scoring.ScoringProject,
        unused_overall_score: float) \
        -> Tuple[str, List[int]]:
    """Create the left-side text of the diagnostic for a given project.

    Returns:
        A tuple containing the text,
        and a list of the orders of missing sentences (if text is empty).
    """
    sentences = []
    missing_sentences_orders = []
    # NOTE(review): itertools.groupby only groups *consecutive* items, so this
    # assumes the collection is already sorted by template.order — TODO confirm.
    templates_per_order = itertools.groupby(
        _SENTENCE_TEMPLATES.get_collection(scoring_project.database),
        key=lambda template: template.order)
    for order, templates_iterator in templates_per_order:
        templates = list(templates_iterator)
        # Keep the first template of this order whose filters match the project.
        template = next(
            scoring.filter_using_score(
                templates, lambda template: template.filters, scoring_project),
            None)
        if not template:
            # If any template for this order is optional, silently skip the order.
            if any(template.optional for template in templates):
                continue
            # TODO(pascal): Set to warning when we have theoretical complete coverage.
            logging.debug('Could not find a sentence %d for user.', order)
            missing_sentences_orders.append(order)
            continue
        translated_template = scoring_project.translate_string(
            template.sentence_template)
        sentences.append(
            scoring_project.populate_template(translated_template))
    # An incomplete text is discarded entirely: return '' with the missing orders.
    return '\n\n'.join(
        sentences) if not missing_sentences_orders else '', missing_sentences_orders
def _translate_tip(
        tip: action_pb2.ActionTemplate,
        scoring_project: scoring.ScoringProject) -> action_pb2.ActionTemplate:
    """Return a copy of the tip with its user-facing fields translated."""
    translated_tip = action_pb2.ActionTemplate()
    translated_tip.MergeFrom(tip)
    translated_tip.title = scoring_project.translate_string(
        tip.title, is_genderized=True)
    translated_tip.short_description = scoring_project.translate_string(
        tip.short_description, is_genderized=True)
    return translated_tip
def compute_actions_for_project(
        scoring_project: scoring.ScoringProject,
) -> Iterable[action_pb2.Action]:
    """Compute all actions possible for a project."""
    # NOTE(review): annotated as returning Iterable[action_pb2.Action] but the
    # body appends to scoring_project.details.actions and returns None — confirm.
    action_templates = {
        action.action_template_id: action
        for action in _ACTION_TEMPLATES.get_collection(
            scoring_project.database)
    }
    if scoring_project.user.features_enabled.all_modules:
        # Force-enable every template with the maximum score (3).
        scores: Mapping[str, float] = {key: 3 for key in action_templates}
    else:
        scores = scoring_project.score_and_explain_all(
            (key, action_template.trigger_scoring_model)
            for key, action_template in action_templates.items()).scores
    # Best-scored first; ties broken by template ID for determinism.
    sorted_action_templates = sorted(
        action_templates.values(),
        key=lambda m: (scores.get(m.action_template_id, 0), m.action_template_id),
        reverse=True)
    deployment = os.getenv('BOB_DEPLOYMENT', 'fr')
    for action_template in sorted_action_templates:
        action_id = action_template.action_template_id
        if not (score := scores.get(action_id)) or score <= 0:
            # Templates are sorted by score, so all remaining ones score 0 too.
            break
        scoring_project.details.actions.add(
            action_id=action_id,
            title=scoring_project.translate_airtable_string(
                'actionTemplates', action_id, 'title',
                hint=action_template.title, is_genderized=True),
            short_description=scoring_project.translate_airtable_string(
                'actionTemplates', action_id, 'short_description',
                hint=action_template.short_description, is_genderized=True),
            tags=[
                scoring_project.translate_airtable_string(
                    'actionTemplates', 'tags', tag)
                for tag in action_template.tags
            ],
            duration=action_template.duration,
            status=action_pb2.ACTION_UNREAD,
            advice_id=action_template.advice_id,
            resource_url=scoring_project.translate_airtable_string(
                'actionTemplates', action_id, 'resource_url',
                hint=action_template.resource_url, context=deployment),
        )
def _translate_category(
        category: diagnostic_pb2.DiagnosticCategory,
        project: scoring.ScoringProject) \
        -> diagnostic_pb2.DiagnosticCategory:
    """Return a copy of the category with user-facing strings translated.

    The feminine details variant is used when the user is feminine and the
    variant is non-empty; the scoring model and the variant field are cleared.
    """
    result = diagnostic_pb2.DiagnosticCategory()
    result.CopyFrom(category)
    result.ClearField('relevance_scoring_model')
    result.metric_title = project.translate_string(category.metric_title)
    result.ClearField('metric_details_feminine')
    if project.user_profile.gender == user_pb2.FEMININE and category.metric_details_feminine:
        details = category.metric_details_feminine
    else:
        details = category.metric_details
    result.metric_details = project.translate_string(details)
    return result
def compute_sub_diagnostic_observations(
        scoring_project: scoring.ScoringProject,
        topic: diagnostic_pb2.DiagnosticTopic) \
        -> Iterator[diagnostic_pb2.SubDiagnosticObservation]:
    """Find all relevant observations for a given sub-diagnostic topic."""
    candidates = (
        template
        for template in _SUBTOPIC_OBSERVATION_TEMPLATES.get_collection(
            scoring_project.database)
        if template.topic == topic)
    matching_templates = scoring.filter_using_score(
        candidates, lambda template: template.filters, scoring_project)
    for matched in matching_templates:
        translated = scoring_project.translate_string(matched.sentence_template)
        yield diagnostic_pb2.SubDiagnosticObservation(
            text=scoring_project.populate_template(translated),
            is_attention_needed=matched.is_attention_needed)
def _compute_diagnostic_topic_score(
        topic: 'diagnostic_pb2.DiagnosticTopic',
        scorers: Iterable[diagnostic_pb2.DiagnosticSubmetricScorer],
        scoring_project: scoring.ScoringProject) \
        -> Optional[diagnostic_pb2.SubDiagnostic]:
    """Create the score for a given diagnostic submetric on a given project.

    Args:
        topic: the diagnostic topic we wish to evaluate
        scorers: a list of scorers for the given topic, with a weight on each.
        scoring_project: the project we want to score
    Returns:
        the populated subdiagnostic protobuf, or None when no scorer could run.
    """
    sub_diagnostic = diagnostic_pb2.SubDiagnostic(topic=topic)
    weighted_sum = 0.
    total_weight = 0.
    for scorer in scorers:
        try:
            score = scoring_project.score(scorer.trigger_scoring_model)
        except scoring.NotEnoughDataException:
            # Skip scorers that cannot run on this project.
            continue
        weight = scorer.weight or 1  # Use default weight of 1.
        weighted_sum += score * weight
        total_weight += weight
    if not total_weight:
        return None
    # Normalize the 0-3 weighted average to a 0-100 score.
    sub_diagnostic.score = round(weighted_sum / total_weight * 100 / 3)
    return sub_diagnostic
def _get_find_what_you_like_relevance(
        project: scoring.ScoringProject) -> diagnostic_pb2.CategoryRelevance:
    """Decide how relevant the find-what-you-like category is for a project."""
    passionate_level = project.details.passionate_level
    if passionate_level == project_pb2.LIKEABLE_JOB:
        return diagnostic_pb2.NEUTRAL_RELEVANCE
    market_stress = project.market_stress()
    # Below-LIKEABLE passion combined with a known, low market stress is neutral.
    is_low_market_stress = bool(market_stress) and market_stress < 10 / 7
    if passionate_level < project_pb2.LIKEABLE_JOB and is_low_market_stress:
        return diagnostic_pb2.NEUTRAL_RELEVANCE
    return diagnostic_pb2.RELEVANT_AND_GOOD
def _translate_tip(
        tip: action_pb2.ActionTemplate,
        scoring_project: scoring.ScoringProject) -> action_pb2.ActionTemplate:
    """Return a translated copy of a tip, preferring non-empty feminine variants."""
    if scoring_project.user_profile.gender == user_pb2.FEMININE:
        title = tip.title_feminine or tip.title
        short_description = tip.short_description_feminine or tip.short_description
    else:
        title = tip.title
        short_description = tip.short_description
    translated = action_pb2.ActionTemplate()
    translated.MergeFrom(tip)
    # The variant fields are folded into the main ones above, so drop them.
    translated.ClearField('title_feminine')
    translated.ClearField('short_description_feminine')
    translated.title = scoring_project.translate_string(title)
    translated.short_description = scoring_project.translate_string(short_description)
    return translated
def _assert_proper_explanations(self, explanations: Iterable[str],
                                scoring_project: scoring.ScoringProject,
                                msg: str) -> None:
    """Check that explanations are a list of resolvable string templates.

    Each explanation must populate without missing variables and must not
    start with an uppercase letter.
    """
    self.assertIsInstance(explanations, list, msg=msg)
    for explanation in explanations:
        self.assertIsInstance(explanation, str, msg=msg)
        try:
            resolved_explanation = scoring_project.populate_template(
                explanation, raise_on_missing_var=True)
        except ValueError:
            # A template variable could not be resolved.
            self.fail(msg=msg)
        # Explanations are presumably inserted mid-sentence, so they must not
        # start with an uppercase letter.
        self.assertNotRegex(resolved_explanation, r'^[A-Z]', msg=msg)
def _compute_diagnostic_overall(
        project: scoring.ScoringProject,
        diagnostic: diagnostic_pb2.Diagnostic,
        category: Optional[diagnostic_pb2.DiagnosticCategory]
) -> diagnostic_pb2.Diagnostic:
    """Fill the overall fields of a diagnostic from the best matching template.

    Mutates and returns the diagnostic.
    """
    all_overalls = _DIAGNOSTIC_OVERALL.get_collection(project.database)
    restricted_overalls: Iterable[diagnostic_pb2.DiagnosticTemplate] = []
    # Prefer templates dedicated to the project's category, unless its
    # strategies are alpha-only and the user is not an alpha user.
    if category and (not category.are_strategies_for_alpha_only or project.features_enabled.alpha):
        restricted_overalls = \
            [o for o in all_overalls if o.category_id == category.category_id]
    if not restricted_overalls:
        # Fall back to templates not tied to any category.
        # NOTE(review): all_overalls may be iterated twice here — assumes
        # get_collection returns a re-iterable sequence, not a one-shot
        # iterator; confirm.
        restricted_overalls = [o for o in all_overalls if not o.category_id]
    overall_template = next((scoring.filter_using_score(
        restricted_overalls, lambda t: t.filters, project)), None)
    if not overall_template:
        # TODO(cyrille): Put a warning here once enough cases are covered with overall templates.
        return diagnostic
    diagnostic.overall_sentence = project.populate_template(
        project.translate_string(overall_template.sentence_template))
    diagnostic.text = project.populate_template(
        project.translate_string(overall_template.text_template))
    diagnostic.strategies_introduction = project.populate_template(
        project.translate_string(overall_template.strategies_introduction))
    diagnostic.overall_score = overall_template.score
    return diagnostic
def _get_relevance(
        category: diagnostic_pb2.DiagnosticCategory,
        project: scoring.ScoringProject) \
        -> Tuple['diagnostic_pb2.CategoryRelevance', Set[str]]:
    """Compute a category's relevance plus the fields missing to decide it."""
    missing_fields: Set[str] = set()
    try:
        if project.check_filters(category.filters):
            return diagnostic_pb2.NEEDS_ATTENTION, set()
    except scoring.NotEnoughDataException as err:
        # We don't have enough info about this category for the project,
        # so we let the relevance model decide.
        missing_fields = err.fields
    relevance = _get_relevance_from_its_model(category, project, bool(missing_fields))
    return relevance, missing_fields
def _compute_diagnostic_overall(
        project: scoring.ScoringProject,
        diagnostic: diagnostic_pb2.Diagnostic,
        main_challenge: diagnostic_pb2.DiagnosticMainChallenge
) -> diagnostic_pb2.Diagnostic:
    """Populate the overall fields of a diagnostic from Airtable templates.

    Picks the first overall template for the main challenge whose filters
    match the project, then fills the translated sentence, text, strategies
    introduction, score, Bob explanation and self-diagnostic response.
    Mutates and returns the diagnostic.
    """
    all_overalls = _DIAGNOSTIC_OVERALL.get_collection(project.database)
    restricted_overalls = [
        o for o in all_overalls if o.category_id == main_challenge.category_id
    ]
    try:
        overall_template = next(
            (scoring.filter_using_score(restricted_overalls, lambda t: t.filters, project)))
    except StopIteration:
        # No matching template: leave the diagnostic untouched.
        logging.warning('No overall template for project: %s', main_challenge.category_id)
        return diagnostic
    diagnostic.overall_sentence = project.populate_template(
        project.translate_airtable_string(
            'diagnosticOverall', overall_template.id, 'sentence_template',
            is_genderized=True, hint=overall_template.sentence_template))
    diagnostic.text = project.populate_template(
        project.translate_airtable_string(
            'diagnosticOverall', overall_template.id, 'text_template',
            is_genderized=True, hint=overall_template.text_template))
    diagnostic.strategies_introduction = project.populate_template(
        project.translate_airtable_string(
            'diagnosticOverall', overall_template.id, 'strategies_introduction',
            is_genderized=True, hint=overall_template.strategies_introduction))
    diagnostic.overall_score = overall_template.score
    diagnostic.bob_explanation = main_challenge.bob_explanation
    all_responses = _DIAGNOSTIC_RESPONSES.get_collection(project.database)
    self_diagnostic_category_id = project.details.original_self_diagnostic.category_id
    # Responses are keyed by "<self diagnostic category>:<main challenge category>".
    response_id = f'{self_diagnostic_category_id}:{main_challenge.category_id}'
    response_text = next(
        (response.text for response in all_responses
         if response.response_id == response_id),
        '')
    diagnostic.response = project.translate_airtable_string(
        'diagnosticResponses', response_id, 'text',
        is_genderized=True, hint=response_text)
    return diagnostic
def _compute_sub_diagnostic_text(
        scoring_project: scoring.ScoringProject,
        sub_diagnostic: diagnostic_pb2.SubDiagnostic) \
        -> str:
    """Create the sentence of the diagnostic for a given project on a given topic.

    Returns:
        The text for the diagnostic submetric, or '' when no template matches.
    """
    relevant_templates = (
        template
        for template in _SUBTOPIC_SENTENCE_TEMPLATES.get_collection(
            scoring_project.database)
        if template.topic == sub_diagnostic.topic)
    matching_template = next(
        scoring.filter_using_score(
            relevant_templates, lambda template: template.filters, scoring_project),
        None)
    if matching_template is None:
        # TODO(cyrille): Change to warning once we have theoretical complete coverage.
        logging.debug(
            'Could not find a sentence for topic %s for user.', sub_diagnostic.topic)
        return ''
    translated = scoring_project.translate_string(matching_template.sentence_template)
    return scoring_project.populate_template(translated)
def _get_relevance(
        main_challenge: diagnostic_pb2.DiagnosticMainChallenge,
        project: scoring.ScoringProject, should_be_neutral: bool) \
        -> Tuple['diagnostic_pb2.MainChallengeRelevance.V', Set[str]]:
    """Compute a main challenge's relevance plus the fields missing to decide it."""
    missing_fields: Set[str] = set()
    try:
        if project.check_filters(main_challenge.filters):
            relevance = diagnostic_pb2.NEUTRAL_RELEVANCE if should_be_neutral \
                else diagnostic_pb2.NEEDS_ATTENTION
            return relevance, set()
    except scoring.NotEnoughDataException as err:
        # We don't have enough info about this main challenge for the project,
        # so we let the relevance model decide.
        missing_fields = err.fields
    relevance = _get_relevance_from_its_model(
        main_challenge, project, bool(missing_fields), should_be_neutral)
    return relevance, missing_fields
def _get_relevance_from_its_model(
        main_challenge: diagnostic_pb2.DiagnosticMainChallenge,
        project: scoring.ScoringProject, has_missing_fields: bool,
        relevant_should_be_neutral: bool) \
        -> 'diagnostic_pb2.MainChallengeRelevance.V':
    """Let the challenge's scoring model (or defaults) decide its relevance."""
    if main_challenge.relevance_scoring_model:
        model_score = project.score(main_challenge.relevance_scoring_model)
        if model_score <= 0:
            return diagnostic_pb2.NOT_RELEVANT
        if model_score >= 3 and not relevant_should_be_neutral:
            return diagnostic_pb2.RELEVANT_AND_GOOD
        return diagnostic_pb2.NEUTRAL_RELEVANCE
    # Without a model: stay neutral when data is missing or neutrality is forced.
    if has_missing_fields or relevant_should_be_neutral:
        return diagnostic_pb2.NEUTRAL_RELEVANCE
    return diagnostic_pb2.RELEVANT_AND_GOOD
def _get_relevance_from_its_model(
        category: diagnostic_pb2.DiagnosticCategory,
        project: scoring.ScoringProject, has_missing_fields: bool) \
        -> diagnostic_pb2.CategoryRelevance:
    """Let the category's scoring model (or a custom getter) decide relevance."""
    if category.relevance_scoring_model:
        model_score = project.score(category.relevance_scoring_model)
        if model_score <= 0:
            return diagnostic_pb2.NOT_RELEVANT
        return diagnostic_pb2.RELEVANT_AND_GOOD if model_score >= 3 \
            else diagnostic_pb2.NEUTRAL_RELEVANCE
    try:
        # Some categories have a dedicated relevance getter.
        # NOTE(review): this also swallows a KeyError raised *inside* the
        # getter itself — confirm that is acceptable.
        return _CATEGORIES_RELEVANCE_GETTERS[category.category_id](project)
    except KeyError:
        return diagnostic_pb2.NEUTRAL_RELEVANCE if has_missing_fields \
            else diagnostic_pb2.RELEVANT_AND_GOOD
def translate_main_challenge(
        main_challenge: diagnostic_pb2.DiagnosticMainChallenge,
        project: scoring.ScoringProject) \
        -> diagnostic_pb2.DiagnosticMainChallenge:
    """Translate the fields of a main challenge template according to a project's preference."""
    result = diagnostic_pb2.DiagnosticMainChallenge()
    result.CopyFrom(main_challenge)
    result.ClearField('relevance_scoring_model')
    for field_name in _MAIN_CHALLENGE_TRANSLATABLE_FIELDS:
        translated_value = project.translate_airtable_string(
            'diagnosticMainChallenges', main_challenge.category_id, field_name,
            is_genderized=True, hint=getattr(main_challenge, field_name))
        setattr(result, field_name, translated_value)
    return result
def _make_departements_section(
        user_departement_id: str, best_departements: list[str],
        area_type: 'geo_pb2.AreaType.V',
        database: mongo.NoPiiMongoDatabase,
        scoring_project: scoring.ScoringProject) -> Optional[dict[str, str]]:
    """Build the template variables about the best départements for a project.

    Args:
        user_departement_id: the user's own département ID.
        best_departements: the best départements for the job group; the user's
            own département is excluded from the count and sentence (but kept
            in the title).
        area_type: the widest area the user would move to.
        database: the static MongoDB database.
        scoring_project: the user's scoring project, used for translations.
    Returns:
        the template variables, or None when the user would not move across
        the country or there is no best département.
    """
    if area_type < geo_pb2.COUNTRY or not best_departements:
        return None
    # The title lists every best département, including the user's own.
    best_departements_title = '<br />'.join(
        geo.get_departement_name(database, dep) for dep in best_departements)
    # Fix: work on a copy so the caller's list is not mutated by the remove().
    other_departements = list(best_departements)
    try:
        other_departements.remove(user_departement_id)
        is_best_departement = True
    except ValueError:
        # The user's département is not one of the best ones.
        is_best_departement = False
    best_departements_sentence = scoring_project.translate_static_string(' et ').join(
        geo.get_in_a_departement_text(database, dep) for dep in other_departements)
    return {
        'count': str(len(other_departements)),
        'isInBest': campaign.as_template_boolean(is_best_departement),
        'title': best_departements_title,
        'sentence': best_departements_sentence,
    }
def _make_strategy(
        project: scoring.ScoringProject,
        module: strategy_pb2.StrategyModule,
        advice_scores: dict[str, float]) -> Optional[strategy_pb2.Strategy]:
    """Build and attach a strategy to the project, if the module triggers.

    Returns None when the module does not apply (alpha-only, zero trigger
    score, or a required advice is missing); otherwise the strategy that was
    added to project.details.strategies.
    """
    if module.is_for_alpha and not project.features_enabled.alpha:
        return None
    score = project.score(module.trigger_scoring_model)
    if not score:
        return None
    # Scale the 0-3 score to 0-100, capped so diagnostic + strategy <= 100.
    score = min(score * 100 / 3, 100 - project.details.diagnostic.overall_score)
    pieces_of_advice = []
    for advice in module.pieces_of_advice:
        # Match the user's scored advice IDs by prefix.
        user_advice_id = next((a for a in advice_scores if a.startswith(advice.advice_id)), None)
        if not user_advice_id:
            if advice.is_required:
                # A necessary advice is missing, we drop everything.
                return None
            continue
        pieces_of_advice.append(strategy_pb2.StrategyAdvice(
            advice_id=user_advice_id,
            teaser=project.populate_template(project.translate_string(advice.teaser_template)),
            why=project.populate_template(project.translate_string(advice.why_template))))
    if _SPECIFIC_TO_JOB_ADVICE_ID in advice_scores:
        specific_to_job_config = project.specific_to_job_advice_config()
        if specific_to_job_config and module.strategy_id in specific_to_job_config.strategy_ids:
            pieces_of_advice.append(strategy_pb2.StrategyAdvice(
                advice_id=_SPECIFIC_TO_JOB_ADVICE_ID))
    if not pieces_of_advice and not module.external_url_template:
        # Don't want to show a strategy without any advice modules.
        return None
    strategy = project.details.strategies.add(
        description=project.populate_template(project.translate_airtable_string(
            'strategyModules', module.strategy_id, 'description_template',
            hint=module.description_template)),
        score=int(score),
        is_secondary=score <= 10,
        title=project.translate_airtable_string(
            'strategyModules', module.strategy_id, 'title', hint=module.title),
        header=project.populate_template(project.translate_airtable_string(
            'strategyModules', module.strategy_id, 'header_template',
            hint=module.header_template)),
        strategy_id=module.strategy_id,
        external_url=project.populate_template(module.external_url_template),
        infinitive_title=project.translate_airtable_string(
            'strategyModules', module.strategy_id, 'infinitive_title',
            hint=module.infinitive_title),
        action_ids=module.action_ids,
        description_speech=project.populate_template(project.translate_airtable_string(
            'strategyModules', module.strategy_id, 'description_speech',
            hint=module.description_speech, is_genderized=True)),
    )
    # External URL and advice pieces are mutually exclusive by design.
    if strategy.external_url and pieces_of_advice:
        logging.error(
            'Strategy %s has both an external URL and some pieces of advice:\n%s',
            strategy.strategy_id, ', '.join(a.advice_id for a in pieces_of_advice))
    # Sort found pieces of advice by descending score.
    pieces_of_advice.sort(key=lambda a: advice_scores[a.advice_id], reverse=True)
    strategy.pieces_of_advice.extend(pieces_of_advice)
    return strategy
def _compute_available_methods(
        scoring_project: scoring.ScoringProject,
        method_modules: Iterable[advisor_pb2.AdviceModule],
        scoring_timeout_seconds: float
) -> Generator[project_pb2.Advice, None, Set[str]]:
    """Score all advice modules and yield the relevant pieces of advice.

    Yields advice protos by descending score; the generator's return value
    (StopIteration.value) is the set of user fields that were missing while
    scoring.
    """
    # NOTE(review): method_modules is iterated several times below — assumes
    # it is a re-iterable sequence, not a one-shot iterator; confirm.
    scores: Dict[str, float] = {}
    reasons: Dict[str, List[str]] = {}
    missing_fields: Set[str] = set()
    for module in method_modules:
        if not module.is_ready_for_prod and not scoring_project.features_enabled.alpha:
            continue
        scoring_model = scoring.get_scoring_model(module.trigger_scoring_model)
        if scoring_model is None:
            logging.warning(
                'Not able to score advice "%s", the scoring model "%s" is unknown.',
                module.advice_id, module.trigger_scoring_model)
            continue
        if scoring_project.user.features_enabled.all_modules:
            # Force-enable every module with the maximum score.
            scores[module.advice_id] = 3
        else:
            # Score in a worker thread so a slow model can be abandoned.
            # NOTE(review): a timed-out thread keeps running in the background
            # and may still mutate scores/reasons later — confirm this is OK.
            thread = threading.Thread(
                target=_compute_score_and_reasons,
                args=(scores, reasons, module, scoring_model, scoring_project, missing_fields))
            thread.start()
            # TODO(pascal): Consider scoring different models in parallel.
            thread.join(timeout=scoring_timeout_seconds)
            if thread.is_alive():
                logging.warning(
                    'Timeout while scoring advice "%s" for:\n%s',
                    module.trigger_scoring_model, scoring_project)
    # Best-scored first; ties broken by advice ID for determinism.
    modules = sorted(
        method_modules,
        key=lambda m: (scores.get(m.advice_id, 0), m.advice_id), reverse=True)
    incompatible_modules: Set[str] = set()
    has_module = False
    for module in modules:
        score = scores.get(module.advice_id)
        if not score:
            # We can break as others will have 0 score as well.
            break
        if module.airtable_id in incompatible_modules and \
                not scoring_project.user.features_enabled.all_modules:
            continue
        piece_of_advice = project_pb2.Advice(
            advice_id=module.advice_id,
            num_stars=score,
            is_for_alpha_only=not module.is_ready_for_prod)
        piece_of_advice.explanations.extend(
            scoring_project.populate_template(reason)
            for reason in reasons.get(module.advice_id, []))
        # Once an advice is selected, its incompatible siblings are skipped.
        incompatible_modules.update(module.incompatible_advice_ids)
        _maybe_override_advice_data(piece_of_advice, module, scoring_project)
        has_module = True
        yield piece_of_advice
    if not has_module and method_modules:
        logging.warning(
            'We could not find *any* advice for a project:\nModules tried:\n"%s"\nProject:\n%s',
            '", "'.join(m.advice_id for m in method_modules),
            scoring_project)
    return missing_fields
def compute_available_methods(
        scoring_project: scoring.ScoringProject,
        method_modules: Iterable[advisor_pb2.AdviceModule],
        scoring_timeout_seconds: float = 3) \
        -> Iterator[Tuple[project_pb2.Advice, frozenset[str]]]:
    """Advise on a user project.

    Args:
        scoring_project: the user's data.
        advice_modules: a list of modules, from which we want to derive the advices.
        scoring_timeout_seconds: how long we wait to compute each advice scoring model.
    Returns:
        an Iterator of recommendations, each with a list of fields that would help
        improve the process.
    """
    # NOTE(review): method_modules is iterated several times below — assumes
    # it is a re-iterable sequence, not a one-shot iterator; confirm.
    ready_modules = {
        module.advice_id: module.trigger_scoring_model
        for module in method_modules
        if module.is_ready_for_prod or scoring_project.features_enabled.alpha
    }
    scores: Mapping[str, float] = {}
    reasons: Mapping[str, tuple[str, ...]] = {}
    missing_fields: Mapping[str, frozenset[str]] = {}
    if scoring_project.user.features_enabled.all_modules:
        # Force-enable every ready module with the maximum score.
        scores = {key: 3 for key in ready_modules}
    else:
        scores, reasons, missing_fields = scoring_project.score_and_explain_all(
            ready_modules.items(), scoring_timeout_seconds=scoring_timeout_seconds)
    # Best-scored first; ties broken by advice ID for determinism.
    modules = sorted(
        method_modules,
        key=lambda m: (scores.get(m.advice_id, 0), m.advice_id), reverse=True)
    incompatible_modules: Set[str] = set()
    has_module = False
    for module in modules:
        score = scores.get(module.advice_id)
        if not score:
            # We can break as others will have 0 score as well.
            break
        if module.airtable_id in incompatible_modules and \
                not scoring_project.user.features_enabled.all_modules:
            continue
        piece_of_advice = project_pb2.Advice(
            advice_id=module.advice_id,
            num_stars=score,
            is_for_alpha_only=not module.is_ready_for_prod)
        piece_of_advice.explanations.extend(
            scoring_project.populate_template(reason)
            for reason in reasons.get(module.advice_id, []))
        # Once an advice is selected, its incompatible siblings are skipped.
        incompatible_modules.update(module.incompatible_advice_ids)
        _maybe_override_advice_data(piece_of_advice, module, scoring_project)
        has_module = True
        yield piece_of_advice, missing_fields.get(module.advice_id, frozenset())
    if not has_module and method_modules:
        logging.warning(
            'We could not find *any* advice for a project:\nModules tried:\n"%s"\nProject:\n%s',
            '", "'.join(m.advice_id for m in method_modules),
            scoring_project)
def _get_stuck_market_relevance(
        project: scoring.ScoringProject) -> diagnostic_pb2.CategoryRelevance:
    """The stuck-market category is only relevant when market stress is known."""
    has_market_stress = project.market_stress() is not None
    return diagnostic_pb2.RELEVANT_AND_GOOD if has_market_stress \
        else diagnostic_pb2.NEUTRAL_RELEVANCE
def _make_strategy(
        project: scoring.ScoringProject,
        module: strategy_pb2.StrategyModule,
        advice_scores: Dict[str, float]) -> Optional[strategy_pb2.Strategy]:
    """Build and attach a strategy to the project, if the module triggers.

    Returns None when the module's scoring model does not trigger, a required
    advice is missing, or no advice applies; otherwise the strategy that was
    added to project.details.strategies.
    """
    score = project.score(module.trigger_scoring_model)
    if not score:
        return None
    # Scale the 0-3 score to 0-100, capped so diagnostic + strategy <= 100.
    score = min(score * 100 / 3, 100 - project.details.diagnostic.overall_score)
    pieces_of_advice = []
    for advice in module.pieces_of_advice:
        # Match the user's scored advice IDs by prefix.
        user_advice_id = next(
            (a for a in advice_scores if a.startswith(advice.advice_id)), None)
        if not user_advice_id:
            if advice.is_required:
                # A necessary advice is missing, we drop everything.
                return None
            continue
        pieces_of_advice.append(
            strategy_pb2.StrategyAdvice(
                advice_id=user_advice_id,
                teaser=project.populate_template(
                    project.translate_string(advice.teaser_template)),
                header=project.populate_template(
                    project.translate_string(advice.header_template))))
    if _SPECIFIC_TO_JOB_ADVICE_ID in advice_scores:
        specific_to_job_config = project.specific_to_job_advice_config()
        if specific_to_job_config and module.strategy_id in specific_to_job_config.strategy_ids:
            pieces_of_advice.append(
                strategy_pb2.StrategyAdvice(
                    advice_id=_SPECIFIC_TO_JOB_ADVICE_ID))
    if not pieces_of_advice:
        # Don't want to show a strategy without any advice modules.
        return None
    strategy = project.details.strategies.add(
        description=project.populate_template(
            project.translate_string(module.description_template)),
        score=int(score),
        is_secondary=score <= 10,
        title=project.translate_string(module.title),
        header=project.populate_template(
            project.translate_string(module.header_template)),
        strategy_id=module.strategy_id)
    # Sort found pieces of advice by descending score.
    pieces_of_advice.sort(key=lambda a: advice_scores[a.advice_id], reverse=True)
    strategy.pieces_of_advice.extend(pieces_of_advice)
    return strategy
def _get_enhance_methods_relevance(
        project: scoring.ScoringProject) -> diagnostic_pb2.CategoryRelevance:
    """Method enhancement is neutral when the search length is unknown (< 0)."""
    if project.get_search_length_at_creation() >= 0:
        return diagnostic_pb2.RELEVANT_AND_GOOD
    return diagnostic_pb2.NEUTRAL_RELEVANCE