Example No. 1
def generalStatistics(trainingData, testData, protectedIndex, protectedValue):
    print('Size of training data:', len(trainingData))
    print('Size of testing data:', len(testData))

    print("SP of training data: %f" %
          statisticalParity(trainingData, protectedIndex, protectedValue))
    print("SP of test data: %f" %
          statisticalParity(testData, protectedIndex, protectedValue))
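
The statisticalParity helper that these snippets call is not shown on this page. A minimal sketch of what such a function could look like, assuming data is a list of (point, label) pairs, labels are 0/1, and the optional h argument substitutes a hypothesis's predictions for the recorded labels (all of this inferred from the calls above, not taken from the original module):

def statisticalParity(data, protectedIndex, protectedValue, h=None):
    # use the hypothesis's predictions if one is given, otherwise the recorded labels
    outcome = (lambda x, y: h(x)) if h is not None else (lambda x, y: y)

    protected = [(x, y) for (x, y) in data if x[protectedIndex] == protectedValue]
    others = [(x, y) for (x, y) in data if x[protectedIndex] != protectedValue]

    # positive-outcome rate within each group
    protectedRate = sum(1 for (x, y) in protected if outcome(x, y) == 1) / len(protected)
    othersRate = sum(1 for (x, y) in others if outcome(x, y) == 1) / len(others)

    # statistical parity: the gap between the two positive-outcome rates
    return abs(protectedRate - othersRate)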
Example No. 2
def statistics(trainingData, testData, learner, protectedIndex, protectedValue):
    h = learner(trainingData)

    trainingError = labelError(trainingData, h)
    testError = labelError(testData, h)
    sp = statisticalParity(testData, protectedIndex, protectedValue, h)
    UBIF = individualFairness(trainingData, learner, FLIP_PROPORTION)

    return trainingError, testError, sp, UBIF
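
labelError is likewise external to these snippets; under the same (point, label) assumption, a plausible minimal version is simply the misclassification rate of the hypothesis:

def labelError(data, h):
    # fraction of examples whose recorded label disagrees with the hypothesis
    return sum(1 for (x, y) in data if h(x) != y) / len(data)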
Example No. 3
def runBaseline(trainingData, testData, learner, protectedIndex, protectedValue):
    h = learner(trainingData)

    print("Training error: %f" % labelError(trainingData, h))
    print("Test error: %f" % labelError(testData, h))
    print("SP of the hypothesis: %f" % statisticalParity(testData, protectedIndex, protectedValue, h))

    UBIF = individualFairness(trainingData, learner, FLIP_PROPORTION)
    print("UBIF of the hypothesis on training: %f" % UBIF)
Example No. 4
def statistics(trainingData, testData, learner, protectedIndex,
               protectedValue):
    h = learner(trainingData)

    trainingError = labelError(trainingData, h)
    testError = labelError(testData, h)
    sp = statisticalParity(testData, protectedIndex, protectedValue, h)
    UBIF = individualFairness(trainingData, learner, FLIP_PROPORTION)

    return trainingError, testError, sp, UBIF
Example No. 5
def makeErrorFunction(protectedIndex, protectedValue, spWeight):
    '''
    Build the error function used when running boosting on a decision stump
    finder: a linear combination of statistical parity and label error,

        w * statisticalParity + (1-w) * labelError
    '''
    sp = lambda data, h: ef.statisticalParity(data, protectedIndex, protectedValue, h=h)
    le = ef.minLabelErrorOfHypothesisAndNegation
    return ef.makeLinearCombination(sp, le, spWeight)
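
ef here is presumably an error-functions module; its makeLinearCombination is not shown. Going by the docstring above, a minimal sketch of it (an assumption, not the original ef code) is:

def makeLinearCombination(f1, f2, weight):
    # blend two error functions of (data, h) as  weight * f1 + (1 - weight) * f2
    return lambda data, h: weight * f1(data, h) + (1 - weight) * f2(data, h)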
Example No. 6
def runBaseline(trainingData, testData, learner, protectedIndex,
                protectedValue):
    h = learner(trainingData)

    print("Training error: %f" % labelError(trainingData, h))
    print("Test error: %f" % labelError(testData, h))
    print("SP of the hypothesis: %f" %
          statisticalParity(testData, protectedIndex, protectedValue, h))

    UBIF = individualFairness(trainingData, learner, FLIP_PROPORTION)
    print("UBIF of the hypothesis on training: %f" % UBIF)
Example No. 7
def makeErrorFunction(protectedIndex, protectedValue, spWeight):
    sp = lambda data, h: ef.statisticalParity(data, protectedIndex, protectedValue, h=h)
    le = ef.minLabelErrorOfHypothesisAndNegation
    return ef.makeLinearCombination(sp, le, spWeight)
Example No. 8
def makeErrorFunction(protectedIndex, protectedValue, spWeight):
    sp = lambda data, h: ef.statisticalParity(
        data, protectedIndex, protectedValue, h=h)
    le = ef.minLabelErrorOfHypothesisAndNegation
    return ef.makeLinearCombination(sp, le, spWeight)
Example No. 9
def generalStatistics(trainingData, testData, protectedIndex, protectedValue):
    print('Size of training data:', len(trainingData))
    print('Size of testing data:', len(testData))

    print("SP of training data: %f" % statisticalParity(trainingData, protectedIndex, protectedValue))
    print("SP of test data: %f" % statisticalParity(testData, protectedIndex, protectedValue))