コード例 #1
0
def indFairnessStats(train, learner):
    """Return the individual-fairness (UBIF) score of *learner* on *train*.

    Uses a fixed flip proportion of 0.2 and passes protected attributes
    through to the learner.
    """
    print("Computing UBIF")
    fairnessScore = individualFairness(
        train, learner, flipProportion=0.2, passProtected=True)
    return fairnessScore
コード例 #2
0
def statistics(train, test, protectedIndex, protectedValue, learner):
    """Train a hypothesis and return (label error, signed bias, UBIF).

    Error and bias are measured on *test*; UBIF is measured on *train*.
    """
    hypothesis = learner(train, protectedIndex, protectedValue)

    print("Computing error")
    testError = labelError(test, hypothesis)

    print("Computing bias")
    parityBias = signedStatisticalParity(
        test, protectedIndex, protectedValue, hypothesis)

    print("Computing UBIF")
    fairness = individualFairness(train, learner, 0.2, passProtected=True)

    return testError, parityBias, fairness
コード例 #3
0
ファイル: experiment-RR.py プロジェクト: j2kun/fkl-SDM16
def statistics(train, test, protectedIndex, protectedValue, learner):
    """Evaluate *learner*: return (label error, signed bias, UBIF).

    The first two metrics use *test*; UBIF is computed from *train*.
    """
    model = learner(train, protectedIndex, protectedValue)
    print("Computing error")
    err = labelError(test, model)
    print("Computing bias")
    bias = signedStatisticalParity(test, protectedIndex, protectedValue, model)
    print("Computing UBIF")
    fairness = individualFairness(
        train, learner, flipProportion=0.2, passProtected=True)
    return err, bias, fairness
コード例 #4
0
ファイル: baseline.py プロジェクト: j2kun/archival-papers
def statistics(trainingData, testData, learner, protectedIndex, protectedValue):
    """Fit *learner* on *trainingData* and return a 4-tuple of metrics:
    (training error, test error, statistical parity, UBIF).
    """
    model = learner(trainingData)

    trainErr = labelError(trainingData, model)
    testErr = labelError(testData, model)
    parity = statisticalParity(testData, protectedIndex, protectedValue, model)
    fairness = individualFairness(trainingData, learner, FLIP_PROPORTION)

    return trainErr, testErr, parity, fairness
コード例 #5
0
ファイル: baseline.py プロジェクト: j2kun/archival-papers
def runBaseline(trainingData, testData, learner, protectedIndex, protectedValue):
    """Train the baseline learner and print its diagnostics.

    Prints training error, test error, statistical parity on the test set,
    and the UBIF of the learner on the training set.  Returns nothing.
    """
    model = learner(trainingData)

    trainMsg = "Training error: %f" % labelError(trainingData, model)
    print(trainMsg)
    testMsg = "Test error: %f" % labelError(testData, model)
    print(testMsg)
    spMsg = "SP of the hypothesis: %f" % statisticalParity(
        testData, protectedIndex, protectedValue, model)
    print(spMsg)

    fairness = individualFairness(trainingData, learner, FLIP_PROPORTION)
    print("UBIF of the hypothesis on training: %f" % fairness)
コード例 #6
0
def statistics(massager, trainingData, testData, protectedIndex, protectedValue,
               learner, flipProportion=0.2):
    """Massage the training data, fit *learner* on it, and return
    (test label error, signed bias on test, UBIF on training).

    NOTE(review): UBIF is computed from the ORIGINAL (unmassaged) training
    data — presumably deliberate, so the fairness measure is not affected
    by the massaging step; confirm against callers.
    """
    preprocessed = massager(trainingData, protectedIndex, protectedValue)
    model = learner(preprocessed)

    err = labelError(testData, model)
    parityBias = signedStatisticalParity(
        testData, protectedIndex, protectedValue, model)
    fairness = individualFairness(trainingData, learner, flipProportion)

    return err, parityBias, fairness
コード例 #7
0
def statistics(trainingData, testData, learner, protectedIndex,
               protectedValue):
    """Train once and collect (training error, test error, SP, UBIF).

    SP is statistical parity on the test set; UBIF is individual fairness
    of the learner on the training set.
    """
    hypothesis = learner(trainingData)

    errOnTrain = labelError(trainingData, hypothesis)
    errOnTest = labelError(testData, hypothesis)
    sp = statisticalParity(testData, protectedIndex, protectedValue,
                           hypothesis)
    fairness = individualFairness(trainingData, learner, FLIP_PROPORTION)

    return errOnTrain, errOnTest, sp, fairness
コード例 #8
0
def runBaseline(trainingData, testData, learner, protectedIndex,
                protectedValue):
    """Run the baseline experiment and print its metrics.

    Reports training/test label error, statistical parity on the test set,
    and the learner's UBIF on the training set.  Returns nothing.
    """
    hypothesis = learner(trainingData)

    trainingError = labelError(trainingData, hypothesis)
    print("Training error: %f" % trainingError)
    testError = labelError(testData, hypothesis)
    print("Test error: %f" % testError)
    sp = statisticalParity(testData, protectedIndex, protectedValue,
                           hypothesis)
    print("SP of the hypothesis: %f" % sp)

    fairness = individualFairness(trainingData, learner, FLIP_PROPORTION)
    print("UBIF of the hypothesis on training: %f" % fairness)
コード例 #9
0
ファイル: experiment-FWL.py プロジェクト: afcarl/fkl-SDM16
def statistics(train, test, protectedIndex, protectedValue, numRounds=20):
    """Boost fairness-weighted decision stumps and return
    (label error, signed bias, UBIF) on the resulting hypothesis.

    ``numRounds`` is accepted for interface compatibility but is not used
    in this body.
    """
    weight = 0.5
    flipProportion = 0.2

    # The stump builder closes over this fairness-weighted error function.
    weightedError = makeErrorFunction(protectedIndex, protectedValue, weight)

    def stumpLearner(draw):
        return buildDecisionStump(draw, errorFunction=weightedError)

    hypothesis = boosting.boost(train, weakLearner=stumpLearner)

    bias = ef.signedStatisticalParity(test, protectedIndex, protectedValue,
                                      hypothesis)
    labelErr = ef.labelError(test, hypothesis)
    # UBIF is measured with the plain booster (default weak learner).
    ubif = ef.individualFairness(train, boosting.boost, flipProportion)

    return labelErr, bias, ubif
コード例 #10
0
def statistics(train, test, protectedIndex, protectedValue, numRounds=20):
    """Evaluate boosted, fairness-weighted stumps.

    Returns (label error, signed bias, UBIF).  The ``numRounds`` parameter
    is kept for interface compatibility; this body does not read it.
    """
    weight = 0.5
    flipProportion = 0.2

    # Error function that penalizes bias against the protected group.
    biasAwareError = makeErrorFunction(protectedIndex, protectedValue, weight)
    stumpBuilder = lambda draw: buildDecisionStump(
        draw, errorFunction=biasAwareError)

    hypothesis = boosting.boost(train, weakLearner=stumpBuilder)

    signedBias = ef.signedStatisticalParity(test, protectedIndex,
                                            protectedValue, hypothesis)
    hypothesisError = ef.labelError(test, hypothesis)
    fairness = ef.individualFairness(train, boosting.boost, flipProportion)

    return hypothesisError, signedBias, fairness
コード例 #11
0
ファイル: experiment-RR.py プロジェクト: j2kun/fkl-SDM16
def indFairnessStats(train, learner):
    """Compute and return *learner*'s UBIF on *train* (flip proportion 0.2,
    protected attributes passed through)."""
    print("Computing UBIF")
    score = individualFairness(
        train, learner, flipProportion=0.2, passProtected=True)
    return score
コード例 #12
0
def indFairnessStats(trainingData, learner):
    """Return the UBIF of *learner* on *trainingData* using the module-level
    FLIP_PROPORTION constant."""
    return individualFairness(trainingData, learner, FLIP_PROPORTION)
コード例 #13
0
ファイル: baseline.py プロジェクト: j2kun/archival-papers
def indFairnessStats(trainingData, learner):
    """Compute the individual-fairness (UBIF) metric for *learner* on
    *trainingData*; the flip proportion comes from FLIP_PROPORTION."""
    fairness = individualFairness(trainingData, learner, FLIP_PROPORTION)
    return fairness