Example #1
 def testWrongNbOfSignals(self):
     """
     Tests the case where the number of SRs fed to the module doesn't match the number of SRs in the json
     """
     # A single json but too many signals
     ws = self.simpleJson([0.9], [10])
     data = PyhfData([[0.9, 0.5]], [ws])
     ulcomputer = PyhfUpperLimitComputer(data)
     ul1 = ulcomputer.ulSigma()
     # Two jsons but only one signal
     ws = [self.simpleJson([0.9], [10]), self.simpleJson([0.8], [9])]
     data = PyhfData([[0.5]], ws)
     ulcomputer = PyhfUpperLimitComputer(data)
     ul2 = ulcomputer.ulSigma(workspace_index=0)
     self.assertIsNone(ul1)
     self.assertIsNone(ul2)
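For contrast, a minimal sketch (not part of the original test) of a correctly matched configuration, assuming the same simpleJson helper: each workspace receives exactly one signal list whose length equals its number of SRs.

     # Hypothetical well-matched inputs: two jsons, one signal value per SR in each
     ws = [self.simpleJson([0.9], [10]), self.simpleJson([0.8], [9])]
     data = PyhfData([[0.5], [0.3]], ws)
     ulcomputer = PyhfUpperLimitComputer(data)
     # With matching inputs, ulSigma is expected to return a number instead of None
     ul = ulcomputer.ulSigma(workspace_index=0)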
Example #2
 def testWSindex(self):
     """
     Tests how the module reacts when given several jsons without specifying for which one the UL should be computed
     """
     ws = [self.simpleJson([0.9], [10]), self.simpleJson([0.8], [9])]
     data = PyhfData([[0.1], [0.2]], ws)
     ulcomputer = PyhfUpperLimitComputer(data)
     ul = ulcomputer.ulSigma()
     self.assertIsNone(ul)
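A sketch of how the ambiguity could be resolved, using the workspace_index argument already seen in Example #1; the signal values here are illustrative only.

     ws = [self.simpleJson([0.9], [10]), self.simpleJson([0.8], [9])]
     data = PyhfData([[0.1], [0.2]], ws)
     ulcomputer = PyhfUpperLimitComputer(data)
     # Selecting each workspace explicitly avoids the ambiguous call above
     ul0 = ulcomputer.ulSigma(workspace_index=0)
     ul1 = ulcomputer.ulSigma(workspace_index=1)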
Example #3
 def testNoSignal(self):
     """
     Tests the case where the signal is zero in all SRs
     """
     ws = self.simpleJson([0.9], [10])
     data = PyhfData([[0]], [ws])
     ulcomputer = PyhfUpperLimitComputer(data)
     ul = ulcomputer.ulSigma()
     self.assertIsNone(ul)
Example #4
 def testFullPyhfModule2(self):
     """
     Same as previous but with two SRs
     """
     bkg = self.simpleJson([0.8, 0.9], [10, 11])
     signals = [0.4, 0.2]
     # Make the patch by hand
     patch = [
         dict(op='add',
              path='/channels/0/samples/0',
              value=dict(name='sig',
                         data=signals,
                         modifiers=[
                             dict(name='lumi', type='lumi', data=None),
                             dict(name='mu_SIG',
                                  type='normfactor',
                                  data=None)
                         ]))
     ]
     llhdSpec = jsonpatch.apply_patch(bkg, patch)
     # Computing the upper limit with the SModelS/pyhf interface
     data = PyhfData([signals], [bkg])
     ulcomputer = PyhfUpperLimitComputer(data)
     ul = ulcomputer.ulSigma()
     # Computing CLs outside of SModelS with POI = ul; at the upper limit CLs should be 0.05 (i.e. a 95% CL exclusion)
     msettings = {
         'normsys': {
             'interpcode': 'code4'
         },
         'histosys': {
             'interpcode': 'code4p'
         }
     }
     workspace = pyhf.Workspace(llhdSpec)
     model = workspace.model(modifier_settings=msettings)
     bounds = model.config.suggested_bounds()
     bounds[model.config.poi_index] = [0, 100]
     args = {"return_expected": False}
     pver = float(pyhf.__version__[:3])  # e.g. '0.6.1' -> 0.6; the test-statistic keyword changed in pyhf 0.6
     if pver < 0.6:
         args["qtilde"] = True
     else:
         args["test_stat"] = "qtilde"
     result = pyhf.infer.hypotest(ul,
                                  workspace.data(model),
                                  model,
                                  par_bounds=bounds,
                                  **args)
     try:
         CLs = float(result[0])
     except IndexError:
         CLs = float(result)
     self.assertAlmostEqual(CLs, 0.05, 2)
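The tests above assume module-level imports of pyhf and jsonpatch and rely on a simpleJson(bkg, obs) helper that is not shown in this listing. A minimal sketch of what such a helper might look like, assuming it builds a single-channel, background-only pyhf workspace dictionary with the same structure as the one spelled out in Example #6:

 def simpleJson(self, bkg, obs):
     """ Hypothetical helper: build a background-only pyhf workspace spec
     with one channel, background yields bkg and observed counts obs """
     modifiers = [dict(data=None, type='lumi', name='lumi')]
     samples = [dict(name='bkg', data=bkg, modifiers=modifiers)]
     channels = [dict(name='SR1', samples=samples)]
     config = dict(poi='mu_SIG',
                   parameters=[dict(auxdata=[1],
                                    bounds=[[0.915, 1.085]],
                                    inits=[1],
                                    sigmas=[0.017],
                                    name='lumi')])
     measurements = [dict(name='BasicMeasurement', config=config)]
     observations = [dict(name='SR1', data=obs)]
     return dict(channels=channels,
                 measurements=measurements,
                 observations=observations,
                 version='1.0.0')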
Example #5
 def getPyhfComputer(self, nsig):
     """ Create the pyhf ul computer object
     :returns: pyhf upper limit computer, and combinations of signal regions
     """
     # Getting the path to the json files
     jsonFiles = [js for js in self.globalInfo.jsonFiles]
     combinations = [os.path.splitext(os.path.basename(js))[0] for js in jsonFiles]
     jsons = self.globalInfo.jsons.copy()
     datasets = [ds.getID() for ds in self._datasets]
     total = sum(nsig)
     nsig = [s/total for s in nsig] # Normalising signals to get an upper limit on the event count
     # Filtering the json files by looking at the available datasets
     for jsName in self.globalInfo.jsonFiles:
         if all([ds not in self.globalInfo.jsonFiles[jsName] for ds in datasets]):
             # No datasets found for this json combination
             jsIndex = jsonFiles.index(jsName)
             jsonFiles.pop(jsIndex)
             jsons.pop(jsIndex)
             continue
         if not all([ds in datasets for ds in self.globalInfo.jsonFiles[jsName]]):
             # Some SRs are missing for this json combination
             logger.error("Wrong json definition in globalInfo.jsonFiles for json: %s" % jsName)
     logger.debug("list of datasets: {}".format(datasets))
     logger.debug("jsonFiles after filtering: {}".format(jsonFiles))
     # Constructing the list of signals with subsignals matching each json
     nsignals = list()
     for jsName in jsonFiles:
         subSig = list()
         for srName in self.globalInfo.jsonFiles[jsName]:
             try:
                 index = datasets.index(srName)
             except ValueError:
                 logger.error("%s signal region provided in globalInfo is not in the list of datasets" % srName)
             sig = nsig[index]
             subSig.append(sig)
         nsignals.append(subSig)
     # Loading the jsonFiles, unless we already have them (because we pickled)
     from smodels.tools.pyhfInterface import PyhfData, PyhfUpperLimitComputer
     data = PyhfData(nsignals, jsons)
     if data.errorFlag:
         return None
     ulcomputer = PyhfUpperLimitComputer(data)
     return ulcomputer, combinations
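A usage sketch, assuming a caller object that provides the same globalInfo and _datasets attributes; the nsig values are purely illustrative:

     # Hypothetical caller: one signal yield per dataset/SR
     result = self.getPyhfComputer([0.2, 0.7, 0.1])
     if result is not None:
         ulcomputer, combinations = result
         ul = ulcomputer.ulSigma()  # upper limit on the normalised event count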
Example #6
 def testCorruptJson1Signal(self):
     """
     Tests how the module handles corrupted json files
     It may be necessary to test different types of corruption
     """
     # Defining the channels
     modifiers = []
     modifiers.append(dict(data=None, type='lumi', name='lumi'))
     samples = [dict(name='bkg', data=[10], modifiers=modifiers)]
     channels = [dict(name='SR1', samples=samples)]
     # Defining the measurements
     config = dict(poi='mu_SIG',
                   parameters=[
                       dict(auxdata=[1],
                            bounds=[[0.915, 1.085]],
                            inits=[1],
                            sigmas=[0.017],
                            name='lumi')
                   ])
     measurements = [dict(name='BasicMeasurement', config=config)]
     # Defining the observations
     observations = [dict(name='SR1', data=[0.9])]
     # Missing channels
     ws = dict(
         #channels=channels,
         measurements=measurements,
         observations=observations,
         version='1.0.0')
     data = PyhfData([[0.1]], [ws])
     ulcomputer = PyhfUpperLimitComputer(data)
     ul = ulcomputer.ulSigma()
     self.assertIsNone(ulcomputer.workspaces)
     self.assertIsNone(ul)
     # Missing measurements
     ws = dict(
         channels=channels,
         #measurements=measurements,
         observations=observations,
         version='1.0.0')
     data = PyhfData([[0.1]], [ws])
     ulcomputer = PyhfUpperLimitComputer(data)
     ul = ulcomputer.ulSigma()
     self.assertIsNone(ulcomputer.workspaces)
     self.assertIsNone(ul)
     # Missing observations
     ws = dict(
         channels=channels,
         measurements=measurements,
         #observations=observations,
         version='1.0.0')
     data = PyhfData([[0.1]], [ws])
     ulcomputer = PyhfUpperLimitComputer(data)
     ul = ulcomputer.ulSigma()
     self.assertIsNone(ulcomputer.workspaces)
     self.assertIsNone(ul)
     # Missing version
     ws = dict(
         channels=channels,
         measurements=measurements,
         observations=observations,
         #version='1.0.0'
     )
     data = PyhfData([[0.1]], [ws])
     ulcomputer = PyhfUpperLimitComputer(data)
     ul = ulcomputer.ulSigma()
     self.assertIsNone(ulcomputer.workspaces)
     self.assertIsNone(ul)