Example #1
def test_error(test_activites, hmms):
    """Return the overall misclassification rate over the held-out activities."""
    total = 0.
    incorrect = 0.

    y_test = []
    y_pred = []
    # Per-activity error and sample counts, indexed by activity label.
    activity_error = [0] * (len(hmms) + 1)
    activity_count = [0] * (len(hmms) + 1)
    for a in test_activites:
        for x in test_activites[a]:
            total += 1
            activity_count[a] += 1
            y_test.append(a)
            pred = classify_activity(x, hmms)
            y_pred.append(pred)
            if a != pred:
                incorrect += 1
                activity_error[a] += 1

    # Report the per-activity error rate.
    for a in test_activites:
        print("Activity " + str(a) + ": " + str(float(activity_error[a]) / activity_count[a]))

    utils.show_confusion_matrix(y_test, y_pred)

    return incorrect / total
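
# Note: `utils.show_confusion_matrix` is not defined in this snippet. The sketch
# below is one plausible implementation, assuming the helper takes the true and
# predicted label lists and plots the matrix with scikit-learn and matplotlib;
# the signature and styling in the original `utils` module may differ.
import matplotlib.pyplot as plt
from sklearn import metrics


def show_confusion_matrix(y_test, y_pred):
    """Plot a confusion matrix for the given true/predicted label sequences."""
    cm = metrics.confusion_matrix(y_test, y_pred)
    fig, ax = plt.subplots()
    im = ax.imshow(cm, cmap='Blues')
    fig.colorbar(im, ax=ax)
    ax.set_xlabel('Predicted label')
    ax.set_ylabel('True label')
    # Write each cell count on top of the image.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, str(cm[i, j]), ha='center', va='center')
    plt.show()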
Example #2
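# Note: the snippet below begins after four classifiers have already been fitted
# and three of their accuracies computed. The setup sketched here is an assumed
# reconstruction that makes the example self-contained: the estimator classes are
# guessed from the variable names, and the iris data and `labels` list are
# stand-ins rather than the original author's data.
from sklearn import metrics
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB

# Stand-in dataset and class labels (assumption).
X, Y = load_iris(return_X_y=True)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=0)
labels = ['setosa', 'versicolor', 'virginica']

# One classifier per variable name used below.
treeClf = DecisionTreeClassifier().fit(X_train, Y_train)
neighborsClf = KNeighborsClassifier().fit(X_train, Y_train)
logClf = LogisticRegression(max_iter=1000).fit(X_train, Y_train)
nbClf = GaussianNB().fit(X_train, Y_train)

# Accuracies printed later in the snippet (nbAccuracy is computed below).
treeAccuracy = treeClf.score(X_test, Y_test)
neighborsAccuracy = neighborsClf.score(X_test, Y_test)
logAccuracy = logClf.score(X_test, Y_test)
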
nbAccuracy = nbClf.score(X_test, Y_test)

# Predictions
treePrediction = treeClf.predict(X_test)
neighborsPrediction = neighborsClf.predict(X_test)
logPrediction = logClf.predict(X_test)
nbPrediction = nbClf.predict(X_test)

# Confusion matrices (sklearn's confusion_matrix expects (y_true, y_pred))
treeCm = metrics.confusion_matrix(Y_test, treePrediction)
neighborsCm = metrics.confusion_matrix(Y_test, neighborsPrediction)
logCm = metrics.confusion_matrix(Y_test, logPrediction)
nbCm = metrics.confusion_matrix(Y_test, nbPrediction)

# Matplotlib Images
show_confusion_matrix('Tree', treeCm, labels)
show_confusion_matrix('K-Neighbors', neighborsCm, labels)
show_confusion_matrix('Logistic Regression', logCm, labels)
show_confusion_matrix('Naive Bayes', nbCm, labels)

# Terminal Outputs ...
# Accuracies
print('Tree accuracy: ', treeAccuracy)
print('Neighbors accuracy: ', neighborsAccuracy)
print('Logistic regression accuracy: ', logAccuracy)
print('Naive Bayes accuracy: ', nbAccuracy)

# Predictions
print('Tree prediction: ', treePrediction)
print('Neighbors prediction: ', neighborsPrediction)
print('Logistic regression prediction: ', logPrediction)
Example #3
    # Optionally resume from a checkpoint saved earlier with torch.save.
    if args.load:
        net = torch.load(args.load)

    optimizer = optim.SGD(net.parameters(),
                          lr=args.lr,
                          momentum=0.9,
                          weight_decay=5e-4)
    criterion = nn.CrossEntropyLoss()

    model = Model(net, optimizer, criterion)

    if args.trainable:
        train_history = model.train(train_loader,
                                    epochs=args.epochs,
                                    val_loader=test_loader)
        show_history(train_history,
                     f'history_{args.net}_{args.pretrained}.jpg')

        with open(f'score_{args.net}_{args.pretrained}.txt',
                  'w') as score_file:
            print(train_history['accuracy'], file=score_file)
            print(train_history['val_accuracy'], file=score_file)

    if args.save:
        model.save(args.save)

    # Evaluate on the test set and plot the resulting confusion matrix.
    test_history = model.test(test_loader)
    show_confusion_matrix(
        test_history['truth'], test_history['predict'],
        f'confusion_matrix_{args.net}_{args.pretrained}.jpg')
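
# Note: the snippet above reads an `args` namespace plus a `net`, `train_loader`,
# and `test_loader` built earlier in the script. The parser below reconstructs
# only the flags the snippet actually uses (net, pretrained, load, save,
# trainable, lr, epochs); the types and defaults here are assumptions, not the
# original author's values.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--net', default='resnet18', help='backbone name, used in output file names')
parser.add_argument('--pretrained', action='store_true', help='start from pretrained weights')
parser.add_argument('--load', default=None, help='checkpoint path passed to torch.load')
parser.add_argument('--save', default=None, help='path where the trained model is saved')
parser.add_argument('--trainable', action='store_true', help='run training before testing')
parser.add_argument('--lr', type=float, default=0.01, help='SGD learning rate')
parser.add_argument('--epochs', type=int, default=10, help='number of training epochs')
args = parser.parse_args()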
Example #4
def run_method(clf, X, y):
    # Hold out 40% of the data, fit the classifier, and return its test-set
    # predictions together with the training and testing error rates.
    X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.4, random_state=0)

    clf.fit(X_train, y_train)

    return clf.predict(X_test), y_test, (1 - clf.score(X_train, y_train)), (1 - clf.score(X_test, y_test))


if __name__ == "__main__":

    methods = {"random_forest": RandomForestClassifier, "svm": svm.SVC}

    sources = {"time": data.load_time, "fourier": data.load_fourier, "raw": data.load_raw}

    method = "random_forest"  # default classifier; the original left `method` unset without this argument
    if len(sys.argv) > 1:
        method = sys.argv[1]

    source = "time"
    if len(sys.argv) > 2:
        source = sys.argv[2]

    X, y = sources[source]()

    y_pred, y_test, train, test = run_method(methods[method](), X, y)

    utils.show_confusion_matrix(y_test, y_pred)

    print "training error:  %f" % (train)
    print "testing error:  %f" % (test)
Example #5
        1 - clf.score(X_test, y_test))


if __name__ == "__main__":

    methods = {
        "random_forest": RandomForestClassifier,
        "svm": svm.SVC,
    }

    sources = {
        'time': data.load_time,
        'fourier': data.load_fourier,
        'raw': data.load_raw
    }

    method = "random_forest"  # default classifier; the original left `method` unset without this argument
    if len(sys.argv) > 1:
        method = sys.argv[1]

    source = "time"
    if len(sys.argv) > 2:
        source = sys.argv[2]

    X, y = sources[source]()

    y_pred, y_test, train, test = run_method(methods[method](), X, y)

    utils.show_confusion_matrix(y_test, y_pred)

    print "training error:  %f" % (train)
    print "testing error:  %f" % (test)