Code Example #1
def main():
    # In this example, we have three types of samples: class 0, 1, or 2.  That
    # is, each of our sample vectors falls into one of three classes.  To keep
    # this example very simple, each sample vector is zero everywhere except at
    # one place.  The non-zero dimension of each vector determines the class of
    # the vector.  So for example, the first element of samples has a class of 1
    # because samples[0][1] is the only non-zero element of samples[0].
    samples = [[0, 2, 0], [1, 0, 0], [0, 4, 0], [0, 0, 3]]
    # Since we want to use a machine learning method to learn a 3-class
    # classifier we need to record the labels of our samples.  Here samples[i]
    # has a class label of labels[i].
    labels = [1, 0, 1, 2]

    # Now that we have some training data we can tell the structural SVM to
    # learn the parameters of our 3-class classifier model.  The details of this
    # will be explained later.  For now, just note that it finds the weights
    # (i.e. a vector of real valued parameters) such that predict_label(weights,
    # sample) always returns the correct label for a sample vector.
    problem = ThreeClassClassifierProblem(samples, labels)
    weights = dlib.solve_structural_svm_problem(problem)

    # Print the weights and then evaluate predict_label() on each of our
    # training samples. Note that the correct label is predicted for each
    # sample.
    print(weights)
    for k, s in enumerate(samples):
        print("Predicted label for sample[{0}]: {1}".format(
            k, predict_label(weights, s)))
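
The example calls predict_label() without defining it. A minimal sketch of what it might look like, assuming the learned weight vector is laid out as one block of len(sample) weights per class (an assumption made here, not something shown in this snippet):

def predict_label(weights, sample):
    # Assumed layout: weights = [w_class0 | w_class1 | w_class2], one block of
    # len(sample) weights per class.  The predicted label is the class whose
    # weight block scores highest against the sample.
    dims = len(sample)
    scores = [sum(weights[c * dims + j] * sample[j] for j in range(dims))
              for c in range(3)]
    return scores.index(max(scores))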
Code Example #2
File: svm_struct.py  Project: Abai/dlib
def main():
    # In this example, we have three types of samples: class 0, 1, or 2.  That
    # is, each of our sample vectors falls into one of three classes.  To keep
    # this example very simple, each sample vector is zero everywhere except at
    # one place.  The non-zero dimension of each vector determines the class of
    # the vector.  So for example, the first element of samples has a class of 1
    # because samples[0][1] is the only non-zero element of samples[0].
    samples = [[0, 2, 0], [1, 0, 0], [0, 4, 0], [0, 0, 3]]
    # Since we want to use a machine learning method to learn a 3-class
    # classifier we need to record the labels of our samples.  Here samples[i]
    # has a class label of labels[i].
    labels = [1, 0, 1, 2]

    # Now that we have some training data we can tell the structural SVM to
    # learn the parameters of our 3-class classifier model.  The details of this
    # will be explained later.  For now, just note that it finds the weights
    # (i.e. a vector of real valued parameters) such that predict_label(weights,
    # sample) always returns the correct label for a sample vector.
    problem = ThreeClassClassifierProblem(samples, labels)
    weights = dlib.solve_structural_svm_problem(problem)

    # Print the weights and then evaluate predict_label() on each of our
    # training samples. Note that the correct label is predicted for each
    # sample.
    print(weights)
    for k, s in enumerate(samples):
        print("Predicted label for sample[{0}]: {1}".format(
            k, predict_label(weights, s)))
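
Neither example shows ThreeClassClassifierProblem itself. dlib.solve_structural_svm_problem works with an object exposing C, num_samples, num_dimensions, get_truth_joint_feature_vector() and separation_oracle(); below is a condensed sketch in the spirit of dlib's own svm_struct.py example. The bodies are simplified, so treat it as illustrative rather than as the exact class these projects use.

import dlib

class ThreeClassClassifierProblem:
    # Regularization parameter read by the solver; larger C fits the training
    # data more tightly.
    C = 1

    def __init__(self, samples, labels):
        self.num_samples = len(samples)
        self.num_dimensions = len(samples[0]) * 3  # one weight block per class
        self.samples = samples
        self.labels = labels

    def make_psi(self, x, label):
        # Joint feature vector PSI(x, label): copy x into the block of the
        # long weight vector that corresponds to the given label.
        psi = dlib.vector()
        psi.resize(self.num_dimensions)
        dims = len(x)
        for i in range(dims):
            psi[label * dims + i] = x[i]
        return psi

    def get_truth_joint_feature_vector(self, idx):
        return self.make_psi(self.samples[idx], self.labels[idx])

    def separation_oracle(self, idx, current_solution):
        # Find the most margin-violating label for sample idx: the label that
        # maximizes score plus loss, where the loss is 1 for a wrong label.
        samp = self.samples[idx]
        best_score, best_label = None, None
        for label in range(3):
            score = dlib.dot(current_solution, self.make_psi(samp, label))
            if label != self.labels[idx]:
                score += 1  # loss augmentation for predicting the wrong label
            if best_score is None or score > best_score:
                best_score, best_label = score, label
        loss = 0 if best_label == self.labels[idx] else 1
        return loss, self.make_psi(samp, best_label)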
Code Example #3
File: svm_struct.py  Project: zhouqing1990/dlib
def main():
    # In this example, we have three types of samples: class 0, 1, or 2.  That is, each of
    # our sample vectors falls into one of three classes.  To keep this example very
    # simple, each sample vector is zero everywhere except at one place.  The non-zero
    # dimension of each vector determines the class of the vector.  So for example, the
    # first element of samples has a class of 1 because samples[0][1] is the only non-zero
    # element of samples[0].
    samples = [[0, 2, 0], [1, 0, 0], [0, 4, 0], [0, 0, 3]]
    # Since we want to use a machine learning method to learn a 3-class classifier we need
    # to record the labels of our samples.  Here samples[i] has a class label of labels[i].
    labels = [1, 0, 1, 2]

    # Now that we have some training data we can tell the structural SVM to learn the
    # parameters of our 3-class classifier model.  The details of this will be explained
    # later.  For now, just note that it finds the weights (i.e. a vector of real valued
    # parameters) such that predict_label(weights, sample) always returns the correct label
    # for a sample vector.
    problem = three_class_classifier_problem(samples, labels)
    weights = dlib.solve_structural_svm_problem(problem)

    # Print the weights and then evaluate predict_label() on each of our training samples.
    # Note that the correct label is predicted for each sample.
    print(weights)
    for i in range(len(samples)):
        print("predicted label for sample[{0}]: {1}".format(
            i, predict_label(weights, samples[i])))
Code Example #4
def main():
    # In this example, we have three types of samples: class 0, 1, or 2.  That is, each of
    # our sample vectors falls into one of three classes.  To keep this example very
    # simple, each sample vector is zero everywhere except at one place.  The non-zero
    # dimension of each vector determines the class of the vector.  So for example, the
    # first element of samples has a class of 1 because samples[0][1] is the only non-zero
    # element of samples[0].   
    samples = [[0, 2, 0], [1, 0, 0], [0, 4, 0], [0, 0, 3]]
    # Since we want to use a machine learning method to learn a 3-class classifier we need
    # to record the labels of our samples.  Here samples[i] has a class label of labels[i].
    labels = [1, 0, 1, 2]

    # Now that we have some training data we can tell the structural SVM to learn the
    # parameters of our 3-class classifier model.  The details of this will be explained
    # later.  For now, just note that it finds the weights (i.e. a vector of real valued
    # parameters) such that predict_label(weights, sample) always returns the correct label
    # for a sample vector. 
    problem = three_class_classifier_problem(samples, labels)
    weights = dlib.solve_structural_svm_problem(problem)

    # Print the weights and then evaluate predict_label() on each of our training samples.
    # Note that the correct label is predicted for each sample.
    print(weights)
    for i in range(len(samples)):
        print("predicted label for sample[{0}]: {1}".format(
            i, predict_label(weights, samples[i])))
Code Example #5
File: task3.py  Project: RussellXie7/COGS-185
def main():

    # For quick testing, I recommend trying training_sample['feature'][:30],
    # testing_sample['feature'][:30], training_sample['labels'][:30] and
    # testing_sample['labels'][:30]; this should give a high training accuracy
    # and a low testing accuracy.

    # We need to read 4001 + 1001 = 5002 samples.
    dataset1 = read_OCR('OCRdataset/letter.data', 5002, 128)

    (training_sample, testing_sample) = recompose_data(dataset1)

    print "total training samples: ", len(training_sample['words'])

    print "total testing samples: ", len(testing_sample['words'])

    # problem = MultiClassClassifierProblem(training_sample['feature'][:30],training_sample['labels'][:30])
    problem = MultiClassClassifierProblem(training_sample['feature'],
                                          training_sample['labels'])
    weights = dlib.solve_structural_svm_problem(problem)

    # Get training accuracy.  Each label sequence is decoded greedily: starting
    # from an all-zero labelling, sweep over every position in the window, try
    # all 27 letter classes at that position, and keep the change whenever the
    # joint feature vector of the modified labelling scores higher than the
    # best score seen so far for this sample.
    predictions = []

    # for samp in training_sample['feature'][:30]:
    for samp in training_sample['feature']:
        prediction = [0] * window_size

        Nither = 4  # number of full sweeps over the window
        max1 = 0    # best score seen so far for this sample

        for k in range(Nither):
            for iL in range(window_size):
                for i in range(27):
                    temp_label = list(prediction)
                    temp_label[iL] = i
                    psi1 = problem.make_psi(samp, temp_label)
                    score1 = dlib.dot(weights, psi1)

                    if max1 < score1:
                        max1 = score1
                        prediction[iL] = i

        predictions.append(prediction)

    # print("weights", weights)
    print predictions
    # print "training accuracy=", accuracy_score(predictions, training_sample['labels'][:30])
    print "training accuracy=", accuracy_score(predictions,
                                               training_sample['labels'])

    # Get testing accuracy using the same greedy decoding as above.
    te_predictions = []
    # for samp in testing_sample['feature'][:30]:
    for samp in testing_sample['feature']:
        te_prediction = [0] * window_size
        Nither = 4
        max1 = 0

        for k in range(Nither):
            for iL in range(window_size):
                for i in range(27):
                    temp_label = list(te_prediction)
                    temp_label[iL] = i
                    psi1 = problem.make_psi(samp, temp_label)
                    score1 = dlib.dot(weights, psi1)

                    if max1 < score1:
                        max1 = score1
                        te_prediction[iL] = i
        te_predictions.append(te_prediction)

    # print(te_labels)
    # print(te_predictions)
    # print("test accuracy=", accuracy_score(te_predictions, testing_sample['labels'][:30]))
    print("test accuracy=", accuracy_score(te_predictions,
                                           testing_sample['labels']))
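
accuracy_score() in this last example is not shown and is not a dlib function; it is presumably a helper defined elsewhere in the project. A hypothetical per-letter version, assuming both arguments are lists of equal-length label lists:

def accuracy_score(predicted, truth):
    # Hypothetical helper: fraction of individual letter labels predicted
    # correctly across all sequences.
    correct = sum(p == t
                  for pred_seq, true_seq in zip(predicted, truth)
                  for p, t in zip(pred_seq, true_seq))
    total = sum(len(true_seq) for true_seq in truth)
    return float(correct) / total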