import numpy as np

# NOTE: calculate_regret, max_regret, FullFactorialSampler, and EMAlogging
# are assumed to be provided by the surrounding EMA workbench module; they
# are used below but not defined here.


def perform_regret_analysis(results, policyOfInterest, uncertainty1,
                            uncertainty2, resolution, outcomeNames=None):
    '''
    perform a RAND-style regret analysis. That is, calculate regret across
    all runs. Regret is here understood as the regret of the policy of
    interest as compared to the best performing other policy. Identify the
    case in which the regret is maximized. Show a 2-d slice across two
    specified uncertainties, which contains the case where the regret is
    maximized. So, in this slice all the uncertainties apart from the 2
    specified are equal to their value in the case where the regret is
    maximized.

    Function requires a full factorial sampling as the experimental design
    to work.

    input:
    results           default returnValue from modelEnsemble.runExperiments()
    policyOfInterest  name of the policy for which you want to calculate
                      the regret
    uncertainty1      the first uncertainty across which you want to slice
    uncertainty2      the second uncertainty across which you want to slice
    resolution        resolution used in generating the full factorial
    outcomeNames      if provided, this should be a list of names of
                      outcomes where high is bad; the normalized results
                      for these outcomes will be inverted

    NOTE: please provide the actual uncertainty objects, not their names

    returns:
    regretPlot        a 2-d array containing the regret of the policy of
                      interest across the specified slice
    '''
    if outcomeNames is None:
        outcomeNames = []

    def getIndex(bounds, resolution, value):
        '''
        helper function to transform a case value into an index in
        regretPlot
        '''
        return ((resolution - 1) * (value - bounds[0])) / (bounds[1] - bounds[0])

    regret, cases, uncertainties = calculate_regret(results,
                                                    policyOfInterest,
                                                    outcomeNames)

    # transform regret into a dictionary for quick lookup
    regretDict = dict(zip(cases, regret))

    # identify the maximum regret case
    maximumRegret, case = max_regret(regret, cases)

    # generate the cases that should be in the slice
    #
    # by generating the cases we need for the slice here and combining it
    # with the dict structure, we can fill the slice up quickly
    #
    # an alternative approach would be to filter the available cases based
    # on the case that maximizes the regret, allowing only the specified
    # uncertainties to vary. This, however, would require us to go over the
    # entire list of cases, which can potentially become very slow
    sampler = FullFactorialSampler()
    designs = sampler.generate_design([uncertainty1, uncertainty2],
                                      resolution)[0]
    designs = list(designs)

    # get the indices of the uncertainties; we use the max regret case and
    # only modify the entries for the uncertainties across which we slice
    index1 = uncertainties.index(uncertainty1.name)
    index2 = uncertainties.index(uncertainty2.name)

    # deduce the shape of the slice; the full factorial can contain fewer
    # designs than resolution**2 when an uncertainty yields fewer distinct
    # values than the requested resolution
    if len(designs) < resolution**2:
        resolution1 = len(set(np.asarray(designs)[:, 0]))
        resolution2 = len(set(np.asarray(designs)[:, 1]))
        shape = (resolution1, resolution2)
    else:
        shape = (resolution, resolution)
    regretPlot = np.zeros(shape)

    case = list(case)
    for design in designs:
        case[index1] = design[0]
        case[index2] = design[1]

        # map case values back to indices in regretPlot
        i = int(round(getIndex(uncertainty1.get_values(),
                               regretPlot.shape[0], design[0])))
        j = int(round(getIndex(uncertainty2.get_values(),
                               regretPlot.shape[1], design[1])))

        # retrieve the regret for this particular case; indexing (rather
        # than dict.get) ensures a missing case actually raises a KeyError
        try:
            a = regretDict[tuple(case)]
            regretPlot[i, j] = np.max(a)
        except KeyError:
            EMAlogging.exception('case not found')
            raise
    return regretPlot
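
# ----------------------------------------------------------------------
# Example usage (a minimal sketch, not part of the module proper). It
# assumes an already configured `ensemble` (a modelEnsemble set up with a
# FullFactorialSampler, as this function requires), two uncertainty
# *objects* `unc1` and `unc2`, and hypothetical policy/outcome names
# ('adaptive policy', 'costs'); adapt all of these to your own model.
#
#     import matplotlib.pyplot as plt
#
#     results = ensemble.runExperiments()
#     regret_slice = perform_regret_analysis(results,
#                                            policyOfInterest='adaptive policy',
#                                            uncertainty1=unc1,
#                                            uncertainty2=unc2,
#                                            resolution=10,
#                                            outcomeNames=['costs'])
#
#     # rows of the slice follow uncertainty1, columns follow uncertainty2
#     plt.imshow(regret_slice, origin='lower', aspect='auto')
#     plt.colorbar(label='regret')
#     plt.xlabel(unc2.name)
#     plt.ylabel(unc1.name)
#     plt.show()
# ----------------------------------------------------------------------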