def get_winner_assessment_for_conformance(experiment_resource: ExperimentResource):
    """
    Get winner assessment using experiment resource for Conformance.

    In a Conformance experiment only the baseline version exists; it is declared
    the winner when it satisfies all objectives (or when there are no objectives
    to satisfy).
    """
    assessment = WinnerAssessmentAnalysis()
    baseline = experiment_resource.spec.versionInfo.baseline
    candidates = [baseline]

    va_data = experiment_resource.status.analysis.version_assessments.data
    if not va_data:
        # No version-assessment data means the experiment has no objectives,
        # so every version is trivially feasible.
        feasible = candidates
    else:
        # A version is feasible only if it satisfies every objective.
        feasible = [ver for ver in candidates if all(va_data[ver.name])]

    feasible_names = [ver.name for ver in feasible]

    if baseline.name in feasible_names:
        assessment.data = WinnerAssessmentData(winnerFound=True,
                                               winner=baseline.name,
                                               bestVersions=[baseline.name])
        assessment.message = Message.join_messages(
            [Message(MessageLevel.INFO, "baseline satisfies all objectives")])
    return assessment
def get_winner_assessment_for_conformance(experiment_resource: ExperimentResource):
    """
    Get winner assessment using experiment resource for Conformance.

    The sole (baseline) version wins when it is feasible, i.e. when it
    satisfies all of the experiment's objectives.
    """
    assessment = WinnerAssessmentAnalysis()
    baseline = experiment_resource.spec.versionInfo.baseline
    versions = [baseline]

    # Names of versions that satisfy all objectives.
    feasible_names = [
        ver.name for ver in get_feasible_versions(experiment_resource, versions)
    ]

    if baseline.name in feasible_names:
        assessment.data = WinnerAssessmentData(winnerFound=True,
                                               winner=baseline.name,
                                               bestVersions=[baseline.name])
        assessment.message = Message.join_messages(
            [Message(MessageLevel.INFO, "baseline satisfies all objectives")])
    return assessment
def get_winner_assessment_for_canarybg(experiment_resource: ExperimentResource):
    """
    Get winner assessment using experiment resource for Canary or BlueGreen experiments.

    The (first) candidate wins if it satisfies all objectives; otherwise the
    baseline wins if it does. If neither is feasible, no winner is declared.
    """
    assessment = WinnerAssessmentAnalysis()
    version_info = experiment_resource.spec.versionInfo
    versions = [version_info.baseline, *version_info.candidates]

    feasible = get_feasible_versions(experiment_resource, versions)
    # Names of versions that satisfy all objectives.
    feasible_names = [ver.name for ver in feasible]

    baseline_name = versions[0].name
    candidate_name = versions[1].name

    if candidate_name in feasible_names:
        # Candidate takes precedence over baseline.
        assessment.data = WinnerAssessmentData(winnerFound=True,
                                               winner=candidate_name,
                                               bestVersions=[candidate_name])
        assessment.message = Message.join_messages(
            [Message(MessageLevel.INFO, "candidate satisfies all objectives")])
    elif baseline_name in feasible_names:
        assessment.data = WinnerAssessmentData(winnerFound=True,
                                               winner=baseline_name,
                                               bestVersions=[baseline_name])
        assessment.message = Message.join_messages(
            [Message(MessageLevel.INFO,
                     "baseline satisfies all objectives; candidate does not")])
    return assessment
def get_winner_assessment_for_abn(experiment_resource: ExperimentResource):
    """
    Get winner assessment using experiment resource for ab or abn experiments.

    Among the versions that satisfy all objectives (feasible versions), the
    one(s) with the best value of the first reward metric are selected as
    best versions; a unique best version is declared the winner. If the
    experiment has no reward metric, or the reward metric has no aggregated
    data, only a warning message is produced.
    """
    was = WinnerAssessmentAnalysis()
    versions = [experiment_resource.spec.versionInfo.baseline]
    versions += experiment_resource.spec.versionInfo.candidates
    logger.info("Versions: %s", versions)
    feasible_versions = get_feasible_versions(experiment_resource, versions)
    logger.info("Feasible versions: %s", feasible_versions)
    # names of feasible versions
    fvn = list(map(lambda version: version.name, feasible_versions))

    def get_inf_reward(reward: Reward):
        """Return the worst possible reward value for the reward's preferred direction."""
        if reward.preferredDirection == PreferredDirection.HIGH:
            return -math.inf
        return math.inf

    def first_better_than_second(first: float, second: float, \
        preferred_direction: PreferredDirection):
        """
        Return (is_better, error). is_better is True if first is better than
        second; error is a message string when no preferred direction is set.
        """
        if preferred_direction is None:
            err = "Metrics cannot be compared without preferred direction"
            logger.error(err)
            return False, err
        if preferred_direction is PreferredDirection.HIGH:
            return (first > second), None
        return (first < second), None

    aggregated_metric_data = experiment_resource.status.analysis.aggregated_metrics.data
    if experiment_resource.spec.criteria.rewards is not None:
        # only the first reward is used for winner assessment
        reward = experiment_resource.spec.criteria.rewards[0]
        reward_metric = reward.metric
        if reward_metric in aggregated_metric_data:
            reward_metric_data = aggregated_metric_data[reward_metric].data

            # start from the worst possible reward so any real value beats it
            (top_reward, best_versions) = (get_inf_reward(reward), [])

            messages = []

            if not fvn:
                messages.append(
                    Message(MessageLevel.INFO, "no version satisfies all objectives"))

            for fver in fvn:  # for each feasible version
                # found a feasible version without reward value
                if fver not in reward_metric_data or \
                    reward_metric_data[fver].value is None:
                    messages.append(Message(MessageLevel.WARNING, \
                        f"reward value for feasible version {fver} is not available"))
                    continue
                if reward_metric_data[fver].value == top_reward:
                    # tie with the current best; add to best versions
                    best_versions.append(fver)
                else:  # this reward not equal to top reward
                    is_better, err = first_better_than_second(\
                        float(reward_metric_data[fver].value), float(top_reward), \
                        reward.preferredDirection)
                    if err is None:
                        if is_better:
                            (top_reward, best_versions) = \
                                (reward_metric_data[fver].value, [fver])
                    else:  # there is an error in comparison
                        # BUG FIX: join_messages takes a list of messages,
                        # as at every other call site in this module
                        was.message = Message.join_messages([Message(MessageLevel.ERROR, \
                            str(err))])
                        return was

            was.data.bestVersions = best_versions

            if len(best_versions) == 1:
                was.data.winnerFound = True
                was.data.winner = best_versions[0]
                messages.append(Message(MessageLevel.INFO, "found unique winner"))
            elif len(best_versions) > 1:
                messages.append(Message(MessageLevel.INFO, \
                    "no unique winner; two or more feasible versions with same reward value"))

            was.message = Message.join_messages(messages)
        else:  # reward metric values are not available
            was.message = Message.join_messages([Message(MessageLevel.WARNING, \
                "reward metric values are not available")])
    else:  # ab or abn experiment without reward metric
        was.message = Message.join_messages([Message(MessageLevel.WARNING, \
            "No reward metric in experiment. Winner assessment cannot be computed for ab or abn experiments without reward metric.")])
    return was
def test_experiment_abn_response_objects():
    """Smoke test: each A/B/n response fixture parses into its analysis model."""
    fixtures = (
        (AggregatedMetricsAnalysis, abn_am_response),
        (VersionAssessmentsAnalysis, abn_va_response),
        (WinnerAssessmentAnalysis, abn_wa_response),
        (WeightsAnalysis, abn_w_response),
    )
    for model_cls, payload in fixtures:
        model_cls(**payload)