Example #1
import numpy as np
import milk
from milk import defaultclassifier
from milksets.wine import load  # assumed source of load(); it returns (features, labels)

def test_default_small():
    features, labels = load()
    # keep every example of classes 0 and 1, but only six examples of class 2
    selected = np.concatenate([np.where(labels < 2)[0], np.where(labels == 2)[0][:6]])
    features = features[selected]
    labels = labels[selected]
    learner = defaultclassifier('fast')
    # In version 0.3.8, the line below raised an error
    milk.nfoldcrossvalidation(features, labels, classifier=learner)
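# A minimal follow-up sketch (not part of the original test): the same call
# can double as an accuracy check, since nfoldcrossvalidation returns a
# confusion matrix and the label names.
def test_default_small_accuracy():
    features, labels = load()
    learner = defaultclassifier('fast')
    cmat, names = milk.nfoldcrossvalidation(features, labels, classifier=learner)
    assert cmat.trace() / float(cmat.sum()) > 0.5  # sanity bound; the threshold is an assumption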
Example #2
import os

import numpy as np
import cv2 as cv
import milk

def trainMe(directory):
    classes = []
    labels = []
    features = []
    print('[+] Reading files')
    # each subdirectory of `directory` holds the images for one gesture class
    gestures = sorted(os.listdir(directory))
    print('[+] Extracting features')
    for gesture in gestures:
        classes.append(gesture)
        gesture_dir = directory + '/' + gesture
        for filename in sorted(os.listdir(gesture_dir)):
            file_dir = gesture_dir + '/' + filename
            labels.append(len(classes))  # 1-based index into `classes`
            image = cv.imread(file_dir)
            image = resizeImage(image, width=500)  # helper defined elsewhere in the source
            features.append(getFVector(image))     # helper defined elsewhere in the source
    features = np.array(features)
    labels = np.array(labels)

    print('[+] Training')
    classifier = milk.defaultclassifier()
    model = classifier.train(features, labels)

    print('[+] Cross validation')
    confusion_matrix, names = milk.nfoldcrossvalidation(features, labels, learner=classifier)
    print('[+] Accuracy %.2f' % (float(confusion_matrix.trace()) / float(confusion_matrix.sum())))

    return model, classes
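# A hypothetical usage sketch (predictGesture is not in the original): apply
# trainMe's return values to a new image, using the same helper functions.
def predictGesture(model, classes, image_path):
    image = cv.imread(image_path)
    image = resizeImage(image, width=500)
    label = model.apply(getFVector(image))
    return classes[label - 1]  # labels were stored 1-based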
Example #3

from sklearn.decomposition import KernelPCA
from timeit import default_timer as timer
import milk

# Apply Kernel PCA for dimensionality reduction: project each image's
# feature vector onto its 440 leading components
kpca = KernelPCA(n_components=440, kernel='rbf')
Features = kpca.fit_transform(Features)  # Features is built earlier in the original script

labels = [1] * len(positives) + [0] * len(negatives)  # positives/negatives are the two image sets loaded earlier

# create the classifier (milk's default learner is an SVM-based pipeline)
learner = milk.defaultclassifier()
#model = learner.train(Features, labels)

start = timer()
cm, names, preds = milk.nfoldcrossvalidation(Features,
                                             labels,
                                             nfolds=10,
                                             classifier=learner,
                                             return_predictions=True)
end = timer()
print(end - start)  # time taken to perform the cross-validation

TP = cm[0][0]  # class Spliced, predicted as Spliced
TN = cm[1][1]  # class Authentic, predicted as Authentic
FN = cm[0][1]  # class Spliced, predicted as Authentic
FP = cm[1][0]  # class Authentic, predicted as Spliced

print("Accuracy=", ((TN + TP) / float(TN + TP + FP + FN)) * 100.00)
print("Precision=", (TP / float(TP + FP)) * 100.00)
print("Recall=", (TP / float(TP + FN)) * 100.00)
"""
#Xgboost with KFold-cross Validation
import pickle
import sys

import milk
import milk.supervised
import milk.supervised.adaboost
import milk.supervised.multi

assert len(sys.argv) > 1, 'expected a pickled features file as the first argument'
features = pickle.load(open(sys.argv[1], 'rb'))  # binary mode is required for pickle

#learner = milk.supervised.tree_learner()
#learner = milk.supervised.adaboost.boost_learner(weak)
#learner = milk.supervised.multi.one_against_one(learner)
learner = milk.defaultlearner(mode='really-slow')
model = learner.train(*features)


pickle.dump(model, open('trainer.pik', 'wb'))
cmat, names, preds = milk.nfoldcrossvalidation(*features,
                                               classifier=learner,
                                               return_predictions=True)

print(cmat)
print(names)
print(preds)
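# The pickled model can be loaded back later and applied to new feature
# vectors (a sketch, not in the original script):
with open('trainer.pik', 'rb') as f:
    model = pickle.load(f)
# model.apply(feature_vector) then returns the predicted label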
Example #5
			no_y_total = 0.0
			hh = 0.0
			aa = 0.0
			learner_mode = 0
			for feature_ii in range(0,slope_mode_cases[slope_mode][1]):
				len_train = len(features[feature_ii])
				if len_train < 5:
					continue
				if learner_mode == 0:
					weak = milk.supervised.tree.stump_learner()
					learner = milk.supervised.adaboost.boost_learner(weak)
					learner = milk.supervised.multi.one_against_one(learner)
				else:
					learner = milk.defaultclassifier()
				cmat, names, predictions = milk.nfoldcrossvalidation(
					features[feature_ii][:len_train // 2],
					labels[feature_ii][:len_train // 2],
					classifier=learner,
					return_predictions=True)
				colors = "rgby"
				codes = "xo"
				T0 = []
				T1 = []
				L0 = []
				for kk in range(0,len(features[feature_ii])):
					kk0,kk1 = features[feature_ii][kk]
					L0.append(labels[feature_ii][kk])
					T0.append(kk0)
					T1.append(kk1)
				for y,x,r,p in zip(T0,T1,labels[feature_ii],predictions):
					if ( r + p < 2 or ( r == 1 and p == 1)) or \
					   ( r + p > 4 or ( r == 2 and p == 2)):
						code = codes[1]
					else:
Example #6
 def __call__(self, i):
     # value() resolves lazily-computed arguments; this fragment looks like
     # part of a jug-based pipeline (value would be jug.value)
     return milk.nfoldcrossvalidation(value(self.features), value(self.labels), folds=[i], **value(self.kwargs))
Example #7
 def __call__(self, i):
     # run only fold i of the cross-validation (milk's `folds` argument
     # restricts evaluation to the listed fold indices)
     return milk.nfoldcrossvalidation(
                 self.features,
                 self.labels,
                 folds=[i],
                 **self.kwargs)
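# A sketch of the surrounding wiring (the class name OneFold and this setup
# are assumptions, not the original source): each call runs one fold, and
# the per-fold confusion matrices add up to the full cross-validation result.
import milk

class OneFold(object):
    def __init__(self, features, labels, **kwargs):
        self.features = features
        self.labels = labels
        self.kwargs = kwargs

    def __call__(self, i):
        return milk.nfoldcrossvalidation(self.features, self.labels,
                                         folds=[i], **self.kwargs)

# one_fold = OneFold(features, labels, nfolds=10)
# cmat = sum(c for c, _ in (one_fold(i) for i in range(10)))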
Example #8
# https://pypi.python.org/pypi/milk/

import numpy as np
import milk

# prepare data
# 2d array of features: 100 examples of 10 features each
features = np.random.rand(100, 10)
labels = np.zeros(100)
features[50:] += .5
labels[50:] = 1

confusion_matrix, names = milk.nfoldcrossvalidation(features, labels)
print('Accuracy:', confusion_matrix.trace() / float(confusion_matrix.sum()))

# train
learner = milk.defaultclassifier()
model = learner.train(features, labels)

# Now you can use the model on new examples:
example = np.random.rand(10)
print(model.apply(example))

example2 = np.random.rand(10)
example2 += .5
print(model.apply(example2))
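# defaultclassifier takes a mode argument trading training time for accuracy;
# 'fast' is the mode Example #1 uses (treat other mode names as assumptions)
fast_learner = milk.defaultclassifier('fast')
cmat, names = milk.nfoldcrossvalidation(features, labels, classifier=fast_learner)
print('Fast-mode accuracy:', cmat.trace() / float(cmat.sum()))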