Example #1
import numpy as np  # np.argmax is used below

# NOTE: loadImages, constructModel and predict are assumed to be project helpers
# defined elsewhere; the other examples import similar functions from helper.
def main():
    print('loading model')
    model = constructModel()
    model.load_weights('weights.h5')
    print('loading images')
    imagesT, imagesPubV, imagesPriV, labelsT, labelsPubV, labelsPriV = loadImages()

    for i in range(10):
        print(
            str(predict(imagesPriV[i], model)) + ' ' +
            str(np.argmax(labelsPriV[i])))
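
# A minimal entry point, assuming this example is meant to run as a standalone
# script (it is not part of the original listing):
if __name__ == '__main__':
    main()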

Example #2
import matplotlib.pyplot as plt  # needed for the accuracy/loss plots below

def main():
    print('loading images')
    imagesT, imagesPubV, imagesPriV, labelsT, labelsPubV, labelsPriV = loadImages()

    print('done loading')
    print('constructing model')
    model = constructModel()
    print('model constructed')
    print('training model')
    history = model.fit(imagesT,
                        labelsT,
                        epochs=20,
                        validation_data=(imagesPubV, labelsPubV))
    print('model trained')
    print('saving weights')
    model.save_weights('weights.h5')
    print('saved!')

    # Generate Accuracy plot
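    # (with TensorFlow 2 / newer Keras the history keys are 'accuracy' and
    # 'val_accuracy' instead of 'acc' and 'val_acc')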
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('Accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['Training', 'Public Validation'])
    plt.savefig('Accuracy.png')

    plt.clf()
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['Training', 'Public Validation'])
    plt.savefig('Loss.png')

Example #3
import cv2
import numpy
import joblib  # sklearn.externals.joblib was removed from scikit-learn; import joblib directly
from sklearn import svm
from sklearn.metrics import confusion_matrix
from helper import loadImages, getHog, detect, detectMultiScale, mergeRectangles, cropImages

#load images
positiveImages=loadImages('Pos.txt')
posDescriptors=getHog(positiveImages)
negativeImages=loadImages('Neg.txt')
negDescriptors=getHog(negativeImages)


#create training data
nPosSamples=posDescriptors.shape[0]
nNegSamples=negDescriptors.shape[0]

posLabels = 1.0 * numpy.ones(nPosSamples, dtype='float')   # labels must be 1-D, not numpy.ones((nPosSamples, 1))
negLabels = -1.0 * numpy.ones(nNegSamples, dtype='float')
labels = numpy.concatenate((posLabels, negLabels))
descriptors = numpy.concatenate((posDescriptors, negDescriptors))


#train
C = 0.01  # SVM regularization parameter (smaller C = stronger regularization)
svc = svm.SVC(kernel='linear', tol=1e-11, probability=True, C=C)
print(descriptors.shape, labels.shape)
trained = svc.fit(descriptors, labels)

#test
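
# The listing is cut off after '#test'; a minimal sketch of how the test step
# could continue, assuming the fitted SVM is evaluated on the same descriptors
# for illustration and persisted with joblib (the .pkl filename is hypothetical):
predictions = svc.predict(descriptors)
print(confusion_matrix(labels, predictions))
joblib.dump(svc, 'pedestrian_svm.pkl')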

Example #4
import cv2
import numpy
import joblib  # sklearn.externals.joblib was removed from scikit-learn; import joblib directly
from sklearn import svm
from sklearn.metrics import confusion_matrix
from helper import loadImages, getHog, detect, detectMultiScale, mergeRectangles, cropImages
			
#cropImages('/home/juned/Code/computervisiontutorial/database/negative/image_001115.jpg','/home/juned/Code/computervisiontutorial/database/negative/64x128/')
#exit()

#pedestrian images
positiveImages=loadImages('pos.txt')
posDescriptors=getHog(positiveImages)

#random patches
negativeImages=loadImages('neg.txt')
negDescriptors=getHog(negativeImages)


#prepare data
nPosSamples=posDescriptors.shape[0]
nNegSamples=negDescriptors.shape[0]
posLabels = 1.0 * numpy.ones(nPosSamples, dtype='float')   # labels must be 1-D, not numpy.ones((nPosSamples, 1))
negLabels = -1.0 * numpy.ones(nNegSamples, dtype='float')
labels = numpy.concatenate((posLabels, negLabels))
descriptors = numpy.concatenate((posDescriptors, negDescriptors))



C = 0.1  # SVM regularization parameter
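
# The example is cut off here; a minimal sketch of a plausible continuation,
# assuming the goal is to train and evaluate a linear SVM on the HOG descriptors
# prepared above (train_test_split here is scikit-learn's):
from sklearn.model_selection import train_test_split

xTrain, xTest, yTrain, yTest = train_test_split(descriptors, labels, test_size=0.25)
svc = svm.SVC(kernel='linear', C=C, probability=True)
svc.fit(xTrain, yTrain)
print(confusion_matrix(yTest, svc.predict(xTest)))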

Example #5
import cv2
import numpy
import pylab as pl
from sklearn import svm
from sklearn.metrics import confusion_matrix
from helper import loadImages, getFeatures, train_test_split
#load positive images
posImage=loadImages('1.txt')

nPosSamples=len(posImage)

#load negative images
negImages=[]
negImgFileList=['0.txt','2.txt','3.txt','4.txt','5.txt','6.txt','7.txt','8.txt','9.txt']
limit=20
for negfile in negImgFileList:
	negImages=negImages+loadImages(negfile,limit)
nNegSamples=len(negImages)

#prepare dataset


#positive
posFeatures=getFeatures(posImage)
posLabels=1.0* numpy.ones((nPosSamples),dtype='float')

#negatives
negFeatures=getFeatures(negImages)
negLabels=-1.0* numpy.ones((nNegSamples),dtype='float')
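
# The example is truncated here; a minimal sketch of a plausible continuation,
# assuming helper's train_test_split behaves like scikit-learn's and the aim is
# to train a binary SVM separating the positive digit from the others:
features = numpy.concatenate((posFeatures, negFeatures))
labels = numpy.concatenate((posLabels, negLabels))

xTrain, xTest, yTrain, yTest = train_test_split(features, labels, test_size=0.25)
classifier = svm.SVC(kernel='linear', C=1.0)
classifier.fit(xTrain, yTrain)
print(confusion_matrix(yTest, classifier.predict(xTest)))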