def meanTableFit(tables):
    """Combine many tables into one by fitting a constant (pol0) per cell.

    For each (row, column) cell the counts from all tables are collected and
    fed to utilities.leastSquareFitPoly0.

    Arguments:
    tables -- list of CounterTable-like objects (must be non-empty)

    Returns a (valueTable, chi2Table) pair: valueTable holds the fitted mean
    and its uncertainty per cell, chi2Table holds (chi2, ndof) per cell.
    Cells where the fit fails are set to None in both tables.

    Raises Exception if the table list is empty.
    """
    if len(tables) == 0:
        raise Exception("Got 0 tables")

    # Work on copies restricted to the rows/columns common to all tables.
    tablesCopy = removeColumnsRowsNotInAll(tables)
    valueTable = tablesCopy[0].clone()
    chi2Table = valueTable.clone()
    nrows = valueTable.getNrows()
    ncolumns = valueTable.getNcolumns()
    for iRow in xrange(nrows):
        for iCol in xrange(ncolumns):
            # Collect the non-missing counts for this cell across all tables.
            values = []
            for t in tablesCopy:
                count = t.getCount(iRow, iCol)
                if count is not None:
                    values.append(count)
            # BUG FIX: the uncertainty list used to read "v" while iterating
            # "b" ([v.uncertainty() for b in values]); under Python 2
            # comprehension-variable leakage every entry was the uncertainty
            # of the leaked "v" from the previous comprehension, so the fit
            # weights were wrong.  The sibling meanRowFit() has the correct
            # form, which is restored here.
            (m, dm, chi2, ndof) = utilities.leastSquareFitPoly0(
                [v.value() for v in values],
                [v.uncertainty() for v in values])
            if m is not None:
                valueTable.setCount(iRow, iCol, dataset.Count(m, dm))
                chi2Table.setCount(iRow, iCol, dataset.Count(chi2, ndof))
            else:
                # Fit failed for this cell; mark it as missing.
                valueTable.setCount(iRow, iCol, None)
                chi2Table.setCount(iRow, iCol, None)
    return (valueTable, chi2Table)
def meanRow(table, uncertaintyByAverage=False):
    """Return a single row holding the per-column mean of *table*'s rows.

    Arguments:
    table                -- CounterTable-like object to average over rows
    uncertaintyByAverage -- if True, uncertainties are summed linearly and
                            divided by N; otherwise Count.add() combines them

    Columns whose accumulated count is None are left untouched.
    """
    # Step 1: accumulate the rows into a single "sum" row.
    if uncertaintyByAverage:
        def combine(a, b):
            return dataset.Count(a.value() + b.value(),
                                 a.uncertainty() + b.uncertainty())
    else:
        def combine(a, b):
            summed = a.clone()
            summed.add(b)
            return summed
    result = accumulateRow(table, combine)
    result.setName("Mean")

    # Step 2: divide each accumulated count by the number of rows.
    nRows = table.getNrows()
    for column in xrange(result.getNcolumns()):
        total = result.getCount(column)
        if total is not None:
            result.setCount(column,
                            dataset.Count(total.value() / nRows,
                                          total.uncertainty() / nRows))
    return result
def meanTable(tables, uncertaintyByAverage=False):
    """Return the element-wise mean of several tables.

    Arguments:
    tables               -- list of CounterTable-like objects (non-empty)
    uncertaintyByAverage -- if True, uncertainties are summed linearly and
                            divided by N; otherwise Count.add() combines them

    The tables are first restricted to their common rows/columns; a cell
    missing (None) in any table is None in the result.

    Raises Exception if the table list is empty.
    """
    if len(tables) == 0:
        raise Exception("Got 0 tables")

    commonTables = removeColumnsRowsNotInAll(tables)
    # The first copied table doubles as the accumulator and the result.
    result = commonTables[0]
    nRows = result.getNrows()
    nCols = result.getNcolumns()

    # Phase 1: accumulate the cell-wise sums into "result".
    for other in commonTables[1:]:
        for r in xrange(nRows):
            for c in xrange(nCols):
                accumulated = result.getCount(r, c)
                addend = other.getCount(r, c)
                if accumulated is None or addend is None:
                    # Any missing contribution poisons the cell.
                    result.setCount(r, c, None)
                    continue
                if uncertaintyByAverage:
                    summed = dataset.Count(
                        accumulated.value() + addend.value(),
                        accumulated.uncertainty() + addend.uncertainty())
                else:
                    summed = accumulated.clone()
                    summed.add(addend)
                result.setCount(r, c, summed)

    # Phase 2: divide every surviving sum by the number of tables.
    nTables = len(commonTables)
    for r in xrange(nRows):
        for c in xrange(nCols):
            summed = result.getCount(r, c)
            if summed is not None:
                result.setCount(r, c,
                                dataset.Count(summed.value() / nTables,
                                              summed.uncertainty() / nTables))
    return result
def meanRowFit(table):
    """Fit a constant (pol0) to each column of *table* over its rows.

    Returns a (valueRow, chi2Row) pair: valueRow ("Fit") holds the fitted
    mean and uncertainty per column, chi2Row ("Chi2/ndof") holds (chi2, ndof).
    Columns where the fit fails are set to None in both rows.
    """
    fitRow = table.getRow(0).clone()
    fitRow.setName("Fit")
    chi2Row = fitRow.clone()
    chi2Row.setName("Chi2/ndof")

    for col in xrange(table.getNcolumns()):
        # Gather the non-missing counts of this column.
        points = [table.getCount(row, col)
                  for row in xrange(table.getNrows())]
        points = [p for p in points if p is not None]
        (mean, meanUnc, chi2, ndof) = utilities.leastSquareFitPoly0(
            [p.value() for p in points],
            [p.uncertainty() for p in points])
        if mean is None:
            # Fit failed; mark the column as missing.
            fitRow.setCount(col, None)
            chi2Row.setCount(col, None)
        else:
            fitRow.setCount(col, dataset.Count(mean, meanUnc))
            chi2Row.setCount(col, dataset.Count(chi2, ndof))
    return (fitRow, chi2Row)
def counterEfficiency(counterTable):
    """Create a new counter table with the counter efficiencies.

    Each cell becomes count(row) / count(row-1) within its column (no
    uncertainty).  The first row of each column, cells with a missing
    neighbour, and divisions by zero all yield None.
    """
    effTable = counterTable.clone()
    for col in xrange(counterTable.getNcolumns()):
        previous = None
        for row in xrange(counterTable.getNrows()):
            current = counterTable.getCount(row, col)
            ratio = None
            if current is not None and previous is not None:
                try:
                    ratio = dataset.Count(
                        current.value() / previous.value(), None)
                except ZeroDivisionError:
                    # Previous count was zero; leave the cell empty.
                    pass
            previous = current
            effTable.setCount(row, col, ratio)
    return effTable
def sumColumn(name, columns):
    """Create a new CounterColumn as the sum of the columns.

    Rows not present (with a value) in every column are dropped before
    summing; the remaining rows are added up cell-wise with Count.add().
    """
    # Assemble the columns into one table and keep only complete rows.
    combined = CounterTable()
    for column in columns:
        combined.appendColumn(column)
    combined.removeNonFullRows()

    # Sum each surviving row across all columns.
    sums = []
    for irow in xrange(combined.getNrows()):
        total = dataset.Count(0, 0)
        for icol in xrange(combined.getNcolumns()):
            total.add(combined.getCount(irow, icol))
        sums.append(total)
    return CounterColumn(name, combined.getRowNames(), sums)
def efficiencyColumnNormalApproximation(name, column):
    """Build an efficiency column using the normal approximation.

    Efficiency of each row is count / previous-count; the uncertainty is
    approximated as uncertainty(count) / previous-count (i.e. assuming the
    efficiency is small, sigma = sqrt(Npassed)/Ntotal).  Rows without a valid
    ratio (first row, missing counts, division by zero) are dropped.
    """
    origRownames = column.getRowNames()
    keptCounts = []
    keptNames = []
    previous = None
    for irow in xrange(column.getNrows()):
        current = column.getCount(irow)
        efficiency = None
        if current is not None and previous is not None:
            try:
                efficiency = dataset.Count(
                    current.value() / previous.value(),
                    current.uncertainty() / previous.value())
            except ZeroDivisionError:
                # Previous count was zero; skip this row.
                pass
        previous = current
        if efficiency is not None:
            keptCounts.append(efficiency)
            keptNames.append(origRownames[irow])
    return CounterColumn(name, keptNames, keptCounts)
def efficiencyColumnErrorPropagation(name, column):
    """Build an efficiency column using full error propagation.

    Efficiency of each row is count / previous-count; its uncertainty is
    eff * sqrt((dcount/count)^2 + (dprev/prev)^2).  Rows without a valid
    ratio (first row, missing counts, any zero denominator) are dropped.
    """
    origRownames = column.getRowNames()
    keptCounts = []
    keptNames = []
    previous = None
    for irow in xrange(column.getNrows()):
        current = column.getCount(irow)
        efficiency = None
        if current is not None and previous is not None:
            try:
                ratio = current.value() / previous.value()
                relUnc = math.sqrt(
                    (current.uncertainty() / current.value()) ** 2 +
                    (previous.uncertainty() / previous.value()) ** 2)
                efficiency = dataset.Count(ratio, ratio * relUnc)
            except ZeroDivisionError:
                # A zero count makes the relative uncertainty undefined;
                # skip this row.
                pass
        previous = current
        if efficiency is not None:
            keptCounts.append(efficiency)
            keptNames.append(origRownames[irow])
    return CounterColumn(name, keptNames, keptCounts)
def multiply(self, value, uncertainty=0):
    """Multiply every stored count in place by value +- uncertainty."""
    factor = dataset.Count(value, uncertainty)
    for count in self.values:
        count.multiply(factor)