Example #1
# sklearn.metrics provides accuracy_score, f1_score, and classification_report.
from sklearn import metrics

# toolbox, kernelCalc, and the instances*/labels* lists are defined elsewhere in the script.
def finalTest(individual):
    # Compile the evolved GP individual into a callable kernel function.
    func = toolbox.lambdify(expr=individual)

    # Kernel output for every training instance, split by class label.
    trainingResult = [ kernelCalc(func, t) for t in instancesTraining ]
    d0 = [ r for (r, l) in zip(trainingResult, labelsTraining) if l == 0 ]
    d1 = [ r for (r, l) in zip(trainingResult, labelsTraining) if l == 1 ]

    # Find the decision threshold on the training data.
    from findOptimum import linearSearch, bruteForce
    v = linearSearch(d0, d1)
    print "Training: ", v
    value = bruteForce(d0, d1)
    print "Optimum Value = ", value

    #TODO: if the result is the other way around, check that case here as well
    # Classify the test set: below the threshold -> class 0, otherwise class 1.
    test = [ kernelCalc(func, t) for t in instancesTest ]
    result = [ 0 if t < value else 1 for t in test ]

    # A prediction is correct when it matches the 0/1 label (XNOR);
    # cast to float so the division below does not truncate under Python 2.
    correct = float(sum( not (a ^ b) for (a, b) in zip(result, labelsTest) ))
    total = len(instancesTest)

    print "Accuracy = ", correct / total
    print "Accuracy (scikit) = ", metrics.accuracy_score(labelsTest, result)
    print "F1-score (micro)    = ", metrics.f1_score(labelsTest, result, average='micro')
    print "F1-score (macro)    = ", metrics.f1_score(labelsTest, result, average='macro')
    print "F1-score (weighted) = ", metrics.f1_score(labelsTest, result, average='weighted')
    print metrics.classification_report(labelsTest, result)

    fitness = correct / total
    return fitness,  # len(individual) kept as a possible second objective
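
Both examples rely on linearSearch and bruteForce from a local findOptimum module that is not shown. The snippet below is only a minimal sketch of what such single-threshold search helpers could look like, assuming linearSearch returns the best achievable split accuracy and bruteForce returns the threshold itself (which matches how the two functions here use them); the real module may be implemented differently.

# Hypothetical sketch of findOptimum.py -- NOT the original module.
def _accuracy_at(threshold, d0, d1):
    # Assume class 0 falls below the threshold and class 1 at or above it.
    correct = sum(1 for r in d0 if r < threshold) + sum(1 for r in d1 if r >= threshold)
    return correct / float(len(d0) + len(d1))

def bruteForce(d0, d1):
    # Try every observed kernel output as a candidate cutoff; return the best cutoff.
    candidates = sorted(d0 + d1)
    return max(candidates, key=lambda c: _accuracy_at(c, d0, d1))

def linearSearch(d0, d1):
    # Return the accuracy of the best single-threshold split.
    candidates = sorted(d0 + d1)
    return max(_accuracy_at(c, d0, d1) for c in candidates)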
Example #2
def evaluate(individual, metric_):

    #TODO: check how I can separate the two datasets...

    # Compile the evolved GP individual into a callable kernel function.
    func = toolbox.lambdify(expr=individual)

    # Kernel output for every training instance, split by class label.
    result = [ kernelCalc(func, t) for t in instancesTraining ]
    d0 = [ r for (r, l) in zip(result, labelsTraining) if l == 0 ]
    d1 = [ r for (r, l) in zip(result, labelsTraining) if l == 1 ]

    # Fitness is the class separability found by the threshold search,
    # minus a quadratic size penalty to discourage bloated trees.
    from findOptimum import linearSearch
    fitness = linearSearch(d0, d1)
    fitness -= (len(individual) ** 2) / 100.0

    # metric_ is currently unused; alternative fitness measures were drafted here:
    #if metric_ == "f1":
    #    fitness = metrics.f1_score(labelsTraining, result)
    #elif metric_ == "acc":
    #    total = len(instancesTraining)
    #    fitness = correct / total

    return fitness,  # len(individual) kept as a possible second objective
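
The trailing comma in both return statements makes each function return a one-element tuple, which is the fitness format DEAP expects. The snippet below is a hypothetical sketch of how evaluate might be wired into a DEAP toolbox; the fitness weights, the creator classes, and the metric_="acc" argument are assumptions, not taken from the original script (the original's toolbox.lambdify also suggests an older DEAP release, where newer ones use gp.compile).

# Hypothetical wiring -- assumes a DEAP genetic-programming setup.
from deap import base, creator, gp

# Single-objective maximisation matches the one-element fitness tuples above.
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMax)

toolbox = base.Toolbox()
# metric_ is partially applied, so DEAP can call toolbox.evaluate(individual).
toolbox.register("evaluate", evaluate, metric_="acc")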