コード例 #1
0
def feature_hog_desc(img):
    """Compute an OpenCV HOG descriptor for *img* and return it.

    The detection window is taken from the module-level globals
    ``img_rows``/``img_cols``; all other HOG parameters are fixed below.
    The descriptor and its shape are printed for debugging before return.
    """
    # Geometry: window from globals, 112x112 blocks stepped by 7 px,
    # 56x56 cells, 9 signed-gradient bins.
    win_size = (img_rows, img_cols)
    block_size = (112, 112)
    block_stride = (7, 7)
    cell_size = (56, 56)

    n_bins = 9
    deriv_aperture = 1
    win_sigma = -1.
    histogram_norm_type = 0
    l2_hys_threshold = 0.2
    gamma_correction = 0
    n_levels = 64
    use_signed_gradients = True

    descriptor_engine = cv2.HOGDescriptor(
        win_size, block_size, block_stride, cell_size, n_bins,
        deriv_aperture, win_sigma, histogram_norm_type,
        l2_hys_threshold, gamma_correction, n_levels,
        use_signed_gradients)

    descriptor = descriptor_engine.compute(img)
    print(descriptor)
    print(descriptor.shape)

    return descriptor
コード例 #2
0
ファイル: hog.py プロジェクト: poi890poi/a-projects
    def hog_compute(img, p):
        """Compute the dense HOG descriptor of *img*.

        *p* is a parameter object whose attributes map 1:1 onto the
        cv2.HOGDescriptor constructor and compute() arguments:
        w (winSize), b (blockSize), b_stride (blockStride), c (cellSize),
        nbins, aperture, sigma, norm, t (L2-Hys threshold), g (gamma
        correction), nlevels, w_stride (winStride) and padding.
        The configured descriptor is also dumped to "hog.xml".

        Returns the raw histogram array from HOGDescriptor.compute().
        """
        # NOTE(review): numpy image shapes are (rows, cols, channels);
        # these names look transposed, but the values are unused below.
        width, height, depth = img.shape

        win_size = p.w  # Decrease length of output
        block_size = p.b  # In pixels
        block_stride = p.b_stride  # In pixels
        cell_size = p.c  # In pixels
        nbins = p.nbins
        deriv_aperture = p.aperture
        win_sigma = p.sigma
        histogram_norm_type = p.norm
        threshold = p.t
        gamma_correction = p.g
        nlevels = p.nlevels
        hog = cv2.HOGDescriptor(win_size, block_size, block_stride, cell_size,
                                nbins, deriv_aperture, win_sigma,
                                histogram_norm_type, threshold,
                                gamma_correction, nlevels)
        # Persist the descriptor configuration for inspection / reuse.
        hog.save("hog.xml")

        win_stride = p.w_stride  # Not affecting length of output
        padding = p.padding
        hist = hog.compute(img, win_stride,
                           padding)  # Omit locations to compute whole image
        return hist
コード例 #3
0
def train():
    """Train a polynomial-kernel SVM on HOG features of the Training set.

    Walks every folder returned by getFolderList("Training"), computes a
    HOG descriptor per image and labels each sample with the class id
    stored in the folder's first property row.  The trained model is
    saved to "Models/svm.dat" and returned.
    """
    hog = getHOG()
    dataset = []
    datalabels = []
    folderCount = 0
    folderList = getFolderList("Training")
    dataCount = 0

    while folderCount < len(folderList):
        imageCount = 0
        imageList, prop = loadImages(folderList[folderCount])
        # Class id is the last field of the first property row; every
        # image in the folder shares it.
        classId = prop[0][-1]
        while imageCount < len(imageList):
            new_img = cv2.imread(folderList[folderCount] + prop[imageCount][0])
            new_img = resize(new_img, None)
            des = hog.compute(new_img)
            dataset.append(des)
            datalabels.append(int(classId))
            # print(int(classId))
            #             cv2.imshow('frame', new_img)
            #             if cv2.waitKey(1) & 0xFF == ord('q'):
            #                 break
            imageCount = imageCount + 1
            dataCount = dataCount + 1
            # print(imageCount)
        folderCount = folderCount + 1
        cv2.destroyAllWindows()


#     print('dataset', len(dataset))

# Set up SVM for OpenCV 3
    svm = cv2.ml.SVM_create()

    # Set SVM type
    svm.setType(cv2.ml.SVM_C_SVC)

    # Set SVM kernel: polynomial (a previous revision used RBF)
    svm.setKernel(cv2.ml.SVM_POLY)

    # Set polynomial degree
    svm.setDegree(2.5)

    # Set parameter C
    svm.setC(12.5)

    # Set parameter Gamma
    svm.setGamma(0.030625)

    # Train SVM on training data
    dataset = np.squeeze(np.array(dataset))
    print(dataset.shape)
    datalabels = np.array(datalabels)
    svm.train(dataset, cv2.ml.ROW_SAMPLE, datalabels)

    # Save trained model
    svm.save("Models/svm.dat")
    print("Training Done")
    return svm
コード例 #4
0
def gaindata(filepath):
    """Load the face images under *filepath* and HOG-encode them.

    The label of each image is parsed from the last two characters of its
    directory name (presumably a subject id — verify against the dataset
    layout).  Images whose pixel count is not 32256 (192*168) are resized
    in place first.

    Returns (label, data): a flat list of int labels and a list (one entry
    per directory) of per-image HOG feature lists.
    """
    label = []
    data = []
    # 168x192 window, 16x16 blocks stepped by 8 px, 8x8 cells, 9 bins.
    winSize = (168, 192)
    blockSize = (16, 16)
    blockStride = (8, 8)
    cellSize = (8, 8)
    nbins = 9
    hog = cv2.HOGDescriptor(winSize, blockSize, blockStride, cellSize, nbins)
    #    hog = cv2.HOGDescriptor()
    for root, dirs, files in os.walk(filepath):
        print(root)
        datasingle = []
        # Skip the dataset root itself; only sub-directories hold images.
        if root == "/home/dzd/dzd/labwork/face/yaleBExtData":
            continue
        for file in files:
            hog_data = []
            img = cv2.imread(os.path.join(root, file), cv2.IMREAD_GRAYSCALE)
            label.append(int(root[-2:]))
            # 32256 == 192 * 168: normalize any off-sized image in place.
            if (img.size != 32256):
                img.resize(192, 168)
#            img.resize(48,42)
            hog_descriptor = hog.compute(img)
            # Flatten the (N, 1) descriptor into a plain list of floats.
            for i in range(len(hog_descriptor)):
                hog_data.append(hog_descriptor[i][0])


            datasingle.append(hog_data)
        data.append(datasingle)
    return label, data
コード例 #5
0
def test(svm):
    """Evaluate a trained *svm* on the "Testing" image folders.

    Mirrors train(): per image — load, resize, HOG-encode — then batch
    predict with *svm* and compare against the class id from the folder's
    property rows.  Prints every mismatch and returns the accuracy in
    percent.
    """
    hog = getHOG()
    dataset = []
    datalabels = []
    folderCount = 0
    folderList = getFolderList("Testing")
    dataCount = 0
    imageCount = 0
    '''
    ## Dark image test case --------------------------------------------------------------
    a = np.zeros((64,64,3), dtype = np.uint8)
    b = []
    for i in range(3):

        new_img = resize(a[0], None)
        des = hog.compute(new_img)
        b.append(des)
     '''#----------------------------------------------------------------------------------

    while folderCount < len(folderList):
        print('folder count', folderCount)
        imageCount = 0
        imageList, prop = loadImages(folderList[folderCount])
        # Skip folders with no images.
        if not imageList:
            folderCount = folderCount + 1
            continue
        classId = prop[0][-1]

        while imageCount < len(imageList):
            new_img = cv2.imread(folderList[folderCount] + prop[imageCount][0])
            new_img = resize(new_img, prop[imageCount])
            des = hog.compute(new_img)

            dataset.append(des)
            datalabels.append(int(classId))
            cv2.imshow('frame', new_img)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            imageCount = imageCount + 1
            dataCount = dataCount + 1

        folderCount = folderCount + 1
        cv2.destroyAllWindows()

    dataset = np.squeeze(np.array(dataset))
    testResponse = svm.predict(dataset)[1].ravel()
    count = 0
    # Count and report misclassifications.
    for i in range(len(testResponse)):
        if (testResponse[i] - datalabels[i]) != 0.0:
            print('Test Value:', testResponse[i])
            print('Actual Value:', datalabels[i])
            count = count + 1

    print(np.array(testResponse))
    print(datalabels)
    percentage = float(len(datalabels)-count)/(len(datalabels))*100
    print('Percentage: ', float(len(datalabels)-count)/(len(datalabels))*100)

    print('unique responses: ',np.unique(testResponse))
    return percentage
コード例 #6
0
def getDescriptor(sample,expected_shape=(128,64) ,descr_open_cv=False,name='LBPH'):
    """Extract a feature descriptor from *sample* after pre-processing.

    With descr_open_cv=True, *name* selects an OpenCV extractor ('ORB',
    'HOG_OPENCV'); otherwise it selects a skimage one ('HOG_SKIMAGE',
    'LBPH').  Returns a 1-D descriptor array, or None for an unknown name.

    Bug fixed: the original bound ``hog = cv2.HOGDescriptor()`` locally,
    which made ``hog`` local to the whole function and raised
    UnboundLocalError when the 'HOG_SKIMAGE' branch called the
    module-level skimage ``hog``.  The OpenCV descriptor is now used
    inline so the module-level name stays visible.
    """
    prepared = pre_process(sample, expected_shape)
    descr = None
    if descr_open_cv:
        if name == 'ORB':
            descr_sz = 64
            # Fixed-length output; max size chosen experimentally.
            descr = np.zeros(descr_sz)
            orb = cv2.ORB_create(nfeatures=descr_sz)
            keypoints = orb.detect(prepared, None)
            keypoints, raw = orb.compute(prepared, keypoints)
            if raw is not None:
                flat = raw.ravel()
                n = min(flat.shape[0], descr_sz)
                descr[:n] = flat[:n]
        elif name == 'HOG_OPENCV':
            # OpenCV HOG with default parameters, flattened to 1-D.
            descr = np.squeeze(cv2.HOGDescriptor().compute(prepared))
    else:
        if name == 'HOG_SKIMAGE':
            descr = hog(prepared, orientations=8, pixels_per_cell=(4, 4),
                        cells_per_block=(1, 1), visualize=False,
                        multichannel=False)  # skimage hog
        elif name == 'LBPH':
            lbp = local_binary_pattern(image=prepared, P=8, R=1,
                                       method='default').ravel()
            # Normalized 255-bin histogram of the LBP codes.
            descr, _ = np.histogram(lbp, bins=np.arange(256), density=True)
    return descr
コード例 #7
0
ファイル: hog_svm.py プロジェクト: zkmale/GaborGlcmHogLbpSvm
def get_hogcv_feature(img, label):
    """Compute a dense OpenCV HOG feature vector for *img*, appending *label*.

    Uses a 12x12 window with 6x6 blocks and 3x3 strides/cells; sampling
    locations come from the project train_tool helper.  Returns the
    flattened features concatenated with [label].
    """
    winsize = 12
    half = (int(winsize / 2), int(winsize / 2))
    quarter = (int(winsize / 4), int(winsize / 4))

    hog = cv2.HOGDescriptor(
        (winsize, winsize),  # winSize
        half,                # blockSize
        quarter,             # blockStride
        quarter,             # cellSize
        10,                  # nbins
        3,                   # derivAperture
        4.,                  # winSigma
        0,                   # histogramNormType
        0.5,                 # L2HysThreshold
        0,                   # gammaCorrection
        72,                  # nlevels
    )

    locations = train_tool.windingSystemTrainTool_class().getHogLocations(
        img.shape[1], img.shape[0], winsize)
    # winStride and padding both equal winsize/4, matching the original.
    features = hog.compute(img, quarter, quarter, locations).flatten()
    return np.concatenate((features, [label]))
コード例 #8
0
def extract_hog(samples):
    """Extract HOG features from the training data set.

    :param samples: iterable of training-image file paths
    :return: float32 array of HOG features, resized to (len(samples), 3780)
    """
    train = []
    print("正在提取HOG特征......")
    num = 0.
    total = len(samples)
    # HOG parameters: winSize, blockSize, blockStride, cellSize, nbins.
    # The descriptor is loop-invariant, so build it once instead of once
    # per image as the original did.
    hog = cv2.HOGDescriptor((64, 128), (16, 16), (8, 8), (8, 8), 9)
    for f in samples:
        num += 1.
        print('正在处理{} {:2.1f}%'.format(f, num / total * 100))
        img = cv2.imread(f, -1)
        img = cv2.resize(img, (64, 128))
        train.append(hog.compute(img))

    train = np.float32(train)
    # 3780 is the descriptor length for a 64x128 window with these
    # parameters; flatten each (N, 1) descriptor into one row.
    train = np.resize(train, (total, 3780))

    return train
コード例 #9
0
def hog_descriptor_opencv(image, shape):
    """Compute an OpenCV HOG descriptor over the whole *image*.

    The detection window equals *shape*; block, stride and cell are all
    square with side shape[0] / 8, using 9 orientation bins.
    """
    eighth = (int(shape[0] / 8), int(shape[0] / 8))
    descriptor = cv2.HOGDescriptor(shape, eighth, eighth, eighth, 9)
    return descriptor.compute(image)
コード例 #10
0
def train_traffic_signs(name):
    """Train a one-class polynomial-kernel SVM on HOG features.

    Every sample is labelled 1 (one-class training).  The trained model
    is saved to "Models/<name>.dat" and returned.

    NOTE(review): folderCount starts at 5, so the first five Training
    folders are skipped — confirm this offset is intentional.
    """

    hog = getHOG()
    dataset = []
    datalabels = []
    folderCount = 5
    folderList = getFolderList("Training")
    dataCount = 0
    while folderCount < len(folderList):
        imageCount = 0
        imageList, prop = loadImages(folderList[folderCount])
        classId = prop[0][-1]
        while imageCount < len(imageList):
            new_img = cv2.imread(folderList[folderCount] + prop[imageCount][0])
            new_img = resize(new_img, prop[imageCount])
            des = hog.compute(new_img)
            dataset.append(des)
            datalabels.append(int(1))

            cv2.imshow('frame', new_img)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            imageCount = imageCount + 1
            dataCount = dataCount + 1
        folderCount = folderCount + 1
        cv2.destroyAllWindows()

    # Set up SVM for OpenCV 3
    svm = cv2.ml.SVM_create()

    # Set SVM type: one-class
    svm.setType(cv2.ml.SVM_ONE_CLASS)

    # Set SVM kernel: polynomial (a previous revision used RBF)
    svm.setKernel(cv2.ml.SVM_POLY)

    # Set polynomial degree
    svm.setDegree(2.5)

    # Set parameter C
    svm.setC(3)
    svm.setP(0)
    svm.setCoef0(0)
    svm.setNu(.5)

    # Set parameter Gamma
    svm.setGamma(1)

    # Train SVM on training data
    dataset = np.squeeze(np.array(dataset))
    datalabels = np.array(datalabels)
    svm.train(dataset, cv2.ml.ROW_SAMPLE, datalabels)

    # Save trained model
    svm.save('Models/' + name + '.dat')
    print("Training Done")
    return svm
def HOG(image):
    """Return the OpenCV HOG descriptor of *image*.

    64x128 detection window, 16x16 blocks with 8x8 stride, 8x8 cells,
    9 orientation bins.
    """
    descriptor = cv2.HOGDescriptor((64, 128), (16, 16), (8, 8), (8, 8), 9)
    return descriptor.compute(image)
コード例 #12
0
def feature_extract(img, mask):
    """HOG-encode *img* after masking and contour annotation.

    *mask* is cleaned with a 4x4 morphological opening and three median
    blurs, its external contours are found, the image is masked, the
    contours are drawn in green, and the HOG descriptor of the result is
    returned.

    NOTE(review): the three-value unpacking of cv.findContours assumes
    OpenCV 3.x; OpenCV 4 returns only (contours, hierarchy) — confirm the
    pinned OpenCV version.
    """
    hog = cv.HOGDescriptor((640, 480), (16, 16), (8, 8), (8, 8), 9, 4, 0.2, 1,
                           64)
    mask = cv.morphologyEx(mask,
                           cv.MORPH_OPEN,
                           kernel=cv.getStructuringElement(
                               cv.MORPH_RECT, (4, 4)))
    # Successive median blurs with shrinking kernels to despeckle the mask.
    mask = cv.medianBlur(mask, 7)
    mask = cv.medianBlur(mask, 5)
    mask = cv.medianBlur(mask, 3)
    mask, conts, _ = cv.findContours(mask, cv.RETR_EXTERNAL,
                                     cv.CHAIN_APPROX_SIMPLE)
    # Broadcast the single-channel mask over the image channels.
    mask = img * mask[:, :, None].astype(img.dtype)
    mask = cv.drawContours(mask, conts, -1, (0, 255, 0), 3)
    return hog.compute(mask)
コード例 #13
0
ファイル: rodaVJ.py プロジェクト: Rodrigump/EP2-IA
def rodaHOG(original_image):
    """Compute and print HOG features of *original_image*.

    Builds a signed-gradient HOG descriptor (20x20 window, 10x10 blocks,
    5 px stride, 2x2 cells, 9 bins), computes the features, transposes
    the column vector and prints it.  Returns None — the features are
    only printed.

    NOTE(review): winStride=(64, 128) is far larger than the 20x20
    window — verify this is intended.
    """
    # https://www.learnopencv.com/histogram-of-oriented-gradients/
    # https://www.learnopencv.com/handwritten-digits-classification-an-opencv-c-python-tutorial/

    winSize = (20, 20)
    blockSize = (10, 10)
    blockStride = (5, 5)
    cellSize = (2, 2)
    nbins = 9
    derivAperture = 1
    winSigma = -1.
    histogramNormType = 0
    L2HysThreshold = 0.2
    gammaCorrection = 1
    nlevels = 64
    signedGradients = True

    # rule for (height/width) values: (winSize - blockSize) % blockStride == 0

    # Create the descriptor
    hog = cv.HOGDescriptor(winSize, blockSize, blockStride, cellSize, nbins,
                           derivAperture, winSigma, histogramNormType,
                           L2HysThreshold, gammaCorrection, nlevels,
                           signedGradients)

    array = np.array([])  # empty array for storing all the features

    h = hog.compute(original_image, winStride=(64, 128),
                    padding=(0, 0))  # storing HOG features as column vector
    h_trans = h.transpose()  # transposing the column vector

    arrayHOG = np.vstack(h_trans)  # appending it to the array
    print("HOG features of label 1")
    print(arrayHOG)
コード例 #14
0
ファイル: BOVW_functions.py プロジェクト: eglrp/MCV-M5-1
def extractHOGfeatures(img, detector):
    """Compute HOG descriptors of *img* at the keypoints found by *detector*.

    Keypoint coordinates are rounded to ints and passed as explicit
    locations to HOGDescriptor.compute().

    NOTE(review): compute's signature is (img, winStride, padding,
    locations) — here hog.blockStride is passed as winStride and
    hog.cellSize as padding; verify this is intended rather than a
    positional mix-up.
    """
    winSize = (64, 64)
    blockSize = (16, 16)
    blockStride = (8, 8)
    cellSize = (8, 8)
    nbins = 9
    hog = cv2.HOGDescriptor(winSize, blockSize, blockStride, cellSize, nbins)
    kpts = detector.detect(img)
    # Integer (x, y) locations required by HOGDescriptor.compute().
    loc = [(int(x.pt[0]), int(x.pt[1])) for x in kpts]
    loc = tuple(loc)
    fd = hog.compute(img, hog.blockStride, hog.cellSize, loc)
    return fd
コード例 #15
0
def validateBox(image,corners, mode):
    """Classify the ROI delimited by *corners* and annotate *image*.

    corners = (xmin, ymin, xmax, ymax).  The ROI is resized to 64x64,
    HOG-encoded and classified via computeClass(); when the response is
    not -1, the box and predicted class are drawn on *image* in green.
    Returns the (possibly annotated) image.
    """
    frame = np.copy(image)
    xmin, ymin, xmax, ymax = corners[0], corners[1], corners[2], corners[3]
    roi = frame[ymin:ymax, xmin:xmax]
    descriptor_engine = getHOG()
    resized = cv2.resize(roi, (64, 64), interpolation=cv2.INTER_AREA)
    features = descriptor_engine.compute(resized)
    sample = np.squeeze(np.array(features)).reshape((1, -1))
    response = computeClass(sample, mode)
    print(response)
    if response != -1:
        label = str(response)
        cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)
        cv2.putText(image, label, (xmin, ymax + 25),
                    cv2.FONT_HERSHEY_SCRIPT_SIMPLEX, 1.0, (0, 255, 0), 2)
    return image
コード例 #16
0
ファイル: tests.py プロジェクト: acarcher/monitoring
def testcv2hog():
    """Smoke-test cv2.HOGDescriptor on a sample frame.

    Loads data/video_1_59.jpg, converts it to grayscale, computes a HOG
    descriptor with a 256x256 window (16x16 blocks, 8x8 stride and cells,
    9 bins) and prints the number of features.
    """
    frame = cv2.imread("data/video_1_59.jpg")
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    descriptor = cv2.HOGDescriptor(_winSize=(256, 256),
                                   _blockSize=(16, 16),
                                   _blockStride=(8, 8),
                                   _cellSize=(8, 8),
                                   _nbins=9)
    features = descriptor.compute(gray)
    print(len(features))


# testskimagehog()
# testcv2hog()
#testhaar('svmdata/yalefaces_jpg')
    def hogCV(self, image, param):
        """Compute an OpenCV HOG descriptor of *image*.

        Window, block and cell geometry, bin count and gradient sign come
        from the *param* object; the remaining descriptor settings are
        fixed defaults below.
        """
        win = (param.resize_width, param.resize_height)
        block = (param.block_size, param.block_size)
        stride = (param.cell_size, param.cell_size)  # stride == cell size
        cell = (param.cell_size, param.cell_size)
        bins = param.bins
        signed = param.signed_gradient

        # Fixed defaults.
        deriv_aperture = 1
        win_sigma = 4.
        norm_type = 0
        l2_hys_threshold = 0.2
        gamma_correction = 0
        nlevels = 64

        descriptor = cv2.HOGDescriptor(win, block, stride, cell, bins,
                                       deriv_aperture, win_sigma, norm_type,
                                       l2_hys_threshold, gamma_correction,
                                       nlevels, signed)
        return descriptor.compute(image)
コード例 #18
0
def classifier(image, hog, model):
    """Predict the class of *image* with *model* using HOG features.

    The image is converted to grayscale, resized to 64x64, described
    with *hog*, and the transposed feature vector is fed to
    model.predict().  Returns the first prediction.
    """
    training_size = (64, 64)

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.resize(gray, training_size, interpolation=cv2.INTER_AREA)

    # Transpose the (N, 1) column descriptor into one sample row.
    features = hog.compute(gray).T
    return model.predict(features)[0]
コード例 #19
0
def extractHOG(img,
               orient,
               pix_per_cell,
               cell_per_block,
               vis=False,
               feature_vec=True):
    """Extract HOG features from *img*.

    With vis=True, uses the module-level skimage ``hog`` function and
    returns (features, hog_image).  Otherwise a hard-coded
    cv2.HOGDescriptor (64x64 window, 8x8 blocks/stride/cells, 9 bins) is
    used and the flattened feature vector is returned.

    Bug fixed: the original assigned ``hog = cv2.HOGDescriptor(...)`` in
    the else branch, making ``hog`` local to the whole function, so the
    vis=True branch raised UnboundLocalError when calling the skimage
    ``hog``.  The local is renamed to ``hog_cv``.
    """
    if vis == True:
        features, hog_image = hog(img,
                                  orientations=orient,
                                  pixels_per_cell=(pix_per_cell, pix_per_cell),
                                  cells_per_block=(cell_per_block,
                                                   cell_per_block),
                                  transform_sqrt=False,
                                  visualise=True,
                                  feature_vector=False)
        return features, hog_image
    else:
        # Parameters hardcoded for the final version.
        hog_cv = cv2.HOGDescriptor((64, 64), (8, 8), (8, 8), (8, 8), 9)
        features = hog_cv.compute(img)
        return np.ravel(features)
コード例 #20
0
ファイル: recognition.py プロジェクト: sarfata/meterreader
    def hog_of_digit(self, image):
        """Return the HOG descriptor of a 20x20 digit image.

        10x10 blocks stepped by 5 px, 10x10 cells, 9 bins with signed
        gradients.
        """
        descriptor = cv2.HOGDescriptor(
            (20, 20),   # winSize
            (10, 10),   # blockSize
            (5, 5),     # blockStride
            (10, 10),   # cellSize
            9,          # nbins
            1,          # derivAperture
            -1.,        # winSigma
            0,          # histogramNormType
            0.2,        # L2HysThreshold
            1,          # gammaCorrection
            64,         # nlevels
            True,       # useSignedGradients
        )
        return descriptor.compute(image)
コード例 #21
0
def hog_descriptor_2(digit):
    """Compute a signed-gradient HOG descriptor for a 20x20 digit image."""
    params = (
        (20, 20),   # winSize
        (10, 10),   # blockSize
        (5, 5),     # blockStride
        (10, 10),   # cellSize
        9,          # nbins
        1,          # derivAperture
        -1.,        # winSigma
        0,          # histogramNormType
        0.2,        # L2HysThreshold
        1,          # gammaCorrection
        64,         # nlevels
        True,       # signedGradients
    )
    return cv.HOGDescriptor(*params).compute(digit)
コード例 #22
0
def hog2(image):
    """HOG descriptor for a tiny 4x4 window (2x2 blocks and cells, 1 px
    stride, signed gradients).  Prints the descriptor shape before
    returning it.
    """
    descriptor_engine = cv2.HOGDescriptor(
        (4, 4),  # winSize
        (2, 2),  # blockSize
        (1, 1),  # blockStride
        (2, 2),  # cellSize
        9,       # nbins
        1,       # derivAperture
        -1.,     # winSigma
        0,       # histogramNormType
        0.2,     # L2HysThreshold
        1,       # gammaCorrection
        4,       # nlevels
        True,    # signedGradients
    )
    descriptor = descriptor_engine.compute(image)
    print('tam:', descriptor.shape)
    return descriptor
コード例 #23
0
def hog(l1):
    """Compute HOG descriptors for each image in the sequence *l1*.

    Each image is converted to uint8 and described at the fixed location
    (10, 20) with an 8x8 window stride and 8x8 padding.  Returns a list
    with one descriptor per image.
    """
    descriptor = cv2.HOGDescriptor(
        (64, 64),   # winSize
        (16, 16),   # blockSize
        (8, 8),     # blockStride
        (8, 8),     # cellSize
        9,          # nbins
        1,          # derivAperture
        4.,         # winSigma
        0,          # histogramNormType
        0.2,        # L2HysThreshold
        0,          # gammaCorrection
        64,         # nlevels
    )
    # compute(img[, winStride[, padding[, locations]]]) -> descriptors
    win_stride = (8, 8)
    pad = (8, 8)
    locations = ((10, 20), )
    images = np.array(l1, dtype='uint8')
    return [descriptor.compute(im, win_stride, pad, locations)
            for im in images]
#Step 1: Sample P positive samples from your training data of 
#the object(s) you want to detect and extract HOG descriptors from these samples.

import matplotlib.pyplot as plt

from skimage.feature import hog
from skimage import data, color, exposure
from config import roadtemp, road1, road2, road3, road4, road5, road6, road7, road8, road9, road10
from PIL import Image
import numpy as np
import cv2

im_list = [roadtemp, road1, road2, road3, road4, road5, road6, road7, road8]
hog_image_list = []
test_image_list = [road9, road10]
test_hog_image_list = []

# The default cv2.HOGDescriptor is configuration-only, so create it once
# instead of once per image as the original loops did.  NOTE: this
# rebinds the name ``hog``, shadowing the skimage ``hog`` imported above,
# exactly as the original assignments inside the loops already did.
hog = cv2.HOGDescriptor()

# HOG-encode the training images.
for element in im_list:
    im = cv2.imread(element)
    hog_image_list.append(hog.compute(im))

# HOG-encode the test images.
for element2 in test_image_list:
    im = cv2.imread(element2)
    test_hog_image_list.append(hog.compute(im))

コード例 #25
0
        # load training image
        image = cv2.imread(imagePath)
        # convert to grayscale
        trainImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # resize
        trainImage = cv2.resize(trainImage,
                                (TRAINING_IMAGE_SIZE_X, TRAINING_IMAGE_SIZE_Y),
                                interpolation=cv2.INTER_AREA)
        # showImage(trainImage)

        # We have to tune these

        # fd = hog.compute(trainImage,winStride,padding,locations)
        fd = hog.compute(trainImage)
        fd = fd.T
        if nImageCounter == 1:
            trainingFeaturesArr = fd
            trainingLabelsArr = np.array(1)
        else:
            trainingFeaturesArr = np.vstack((trainingFeaturesArr, fd))
            trainingLabelsArr = np.append(trainingLabelsArr, 1)
        if nImageCounter >= nImagesCutoff:
            break
    if nImageCounter >= nImagesCutoff:
        break

nPostiveImages = nImageCounter

nImageCounter = 0
コード例 #26
0
ファイル: projeto02_MLP.py プロジェクト: iuriigarashi/rpi
                                                    cv2.CV_16S,
                                                    0,
                                                    1,
                                                    ksize=3,
                                                    scale=1,
                                                    delta=0),
                                          size=(TAMANHO_X, TAMANHO_Y))
        sobeldXY = image_to_feature_vector(cv2.Sobel(image,
                                                     cv2.CV_16S,
                                                     1,
                                                     1,
                                                     ksize=3,
                                                     scale=1,
                                                     delta=0),
                                           size=(TAMANHO_X, TAMANHO_Y))
        hogimage = image_to_feature_vector(hog.compute(image),
                                           size=(TAMANHO_X, TAMANHO_Y))

        rawImages.append(pixels)
        descHist.append(histogram)
        descEdges.append(edges)
        descSobelX.append(sobeldx)
        descSobelY.append(sobeldy)
        descSobelXY.append(sobeldXY)
        descHOG.append(hogimage)
        if i % 250 == 0:
            print('Processed {} of {}'.format(i, count))
    pickelObject(rawImages, 'rawImagesPickel')
    pickelObject(descHist, 'descHistPickel')
    pickelObject(descEdges, 'descEdgesPickel')
    pickelObject(descSobelX, 'descSobelXPickel')
コード例 #27
0
    cellSize = (16, 16)
    nbins = 9
    derivAperture = 1
    winSigma = 4.
    histogramNormType = 0
    L2HysThreshold = 2.0000000000000001e-01
    gammaCorrection = 0
    nlevels = 64
    hog = cv2.HOGDescriptor(winSize, blockSize, blockStride, cellSize, nbins,
                            derivAperture, winSigma, histogramNormType,
                            L2HysThreshold, gammaCorrection, nlevels)
    #compute(img[, winStride[, padding[, locations]]]) -> descriptors
    winStride = (8, 8)
    padding = (8, 8)
    locations = ((10, 20), )
    hist = hog.compute(image)  #,winStride,padding,locations)

    print("histogram computed")
    print("type of hist is: " + str(type(hist)))
    print("size of hist is: " + str(hist.size))
    print("shape of hist is: " + str(hist.shape))

    hog_im = ord_hist(hist, nbins)
    hog_im = vis_hist(hog_im, image, 16, 16)
    #hog_im = exposure.rescale_intensity(hog_im, in_range=(0, 100))
    hog_im = shrink_image(45, hog_im)
    cv2.imshow("image gradients", hog_im)
    cv2.waitKey(0)

    #cv2.destroyAllWindows()
コード例 #28
0
        #image is the current frame
        success, image = vidcapture.read()
        imagePrev = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        imagePrev = cv2.resize(imagePrev, (64, 128))
        #count of frames captured
        framesCount = 0

        #success will be false when there's no frames left to read
        success = True
        while success:
            success, image2 = vidcapture.read()
            #Skipping 3 frames
            if framesCount % 6 == 0 and success == True:
                imageNext = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
                imageNext = cv2.resize(imageNext, (64, 128))
                videoFrames.append(hog.compute(imageNext))
                listOfFrameLabels.append(folderIndex)
                #creating a 9-valued bin for current cell
                cellBin = [0, 0, 0, 0, 0, 0, 0, 0, 0]

                for windowHeight in range(0, imageNext.shape[0],
                                          windowsize_width):
                    for windowWidth in range(0, imageNext.shape[1],
                                             windowsize_height):
                        windowPrev = imagePrev[windowHeight:windowHeight +
                                               windowsize_height,
                                               windowWidth:windowWidth +
                                               windowsize_width]
                        windowNext = imageNext[windowHeight:windowHeight +
                                               windowsize_height,
                                               windowWidth:windowWidth +
コード例 #29
0
# Dense HOG over an HLS-converted image, then regroup the overlapping
# block-level histograms back onto the per-cell grid.
img = cv2.cvtColor(im1, cv2.COLOR_RGB2HLS)

cell_size = (8, 8)  # h x w in pixels
block_size = (2, 2)  # h x w in cells
nbins = 9  # number of orientation bins

# winSize is the size of the image cropped to a multiple of the cell size
hog = cv2.HOGDescriptor(_winSize=(img.shape[1] // cell_size[1] * cell_size[1],
                                  img.shape[0] // cell_size[0] * cell_size[0]),
                        _blockSize=(block_size[1] * cell_size[1],
                                    block_size[0] * cell_size[0]),
                        _blockStride=(cell_size[1], cell_size[0]),
                        _cellSize=(cell_size[1], cell_size[0]),
                        _nbins=nbins)

# Descriptor of channel 1 only (computed here but not reused below).
h = hog.compute(img[:, :, 1])

n_cells = (img.shape[0] // cell_size[0], img.shape[1] // cell_size[1])
# Reshape the flat descriptor into (blocks_x, blocks_y, cell_rows,
# cell_cols, bins), then swap to row-major block indexing.
hog_feats = hog.compute(img)\
               .reshape(n_cells[1] - block_size[1] + 1,
                        n_cells[0] - block_size[0] + 1,
                        block_size[0], block_size[1], nbins) \
               .transpose((1, 0, 2, 3, 4))  # index blocks by rows first
gradients = np.zeros((n_cells[0], n_cells[1], nbins))

# count cells (border cells appear less often across overlapping groups)
cell_count = np.full((n_cells[0], n_cells[1], 1), 0, dtype=int)
for off_y in range(block_size[0]):
    for off_x in range(block_size[1]):
        gradients[off_y:n_cells[0] - block_size[0] + off_y + 1,
                  off_x:n_cells[1] - block_size[1] + off_x + 1] += \
# Sliding-window scan over an image pyramid, scoring each layer with a
# linear SVM (lin_svc) on HOG features and drawing the current window.
false_positives = []
# loop over the image pyramid
for resized in pyramid(image, scale=1.5):
	# loop over the sliding window for each layer of the pyramid
	for (x, y, window) in sliding_window(resized, stepSize=32, windowSize=(winW, winH)):
		# if the window does not meet our desired window size, ignore it
		if window.shape[0] != winH or window.shape[1] != winW:
			# print window.shape[0], window.shape[1]
			continue

		# THIS IS WHERE YOU WOULD PROCESS YOUR WINDOW, SUCH AS APPLYING A
		# MACHINE LEARNING CLASSIFIER TO CLASSIFY THE CONTENTS OF THE
		# WINDOW

		# NOTE(review): the descriptor is computed on the whole resized
		# layer, not on the current window — verify that is intended.
		hog = cv2.HOGDescriptor()
		h = hog.compute(resized)
		print(h)

		prediciton = lin_svc.predict(h.reshape(1,-1))

		print(prediciton)

		# threshold = 0.4
		# loc = np.where( h >= threshold)
		# print(loc)

		# # since we do not have a classifier, we'll just draw the window
		clone = resized.copy()
		cv2.rectangle(clone, (x,y), (x + 130, y + 130), (0, 255, 0), 2)
		cv2.imshow("Window", clone)
		cv2.waitKey(1)
コード例 #31
0
ファイル: kane.py プロジェクト: vakhil/MLT-Project
def training(path,train,labels):
	"""Recursively walk *path*, HOG-encode every .jp* image into *train*.

	Labels are derived from the 8th character of each file name
	(B=1, M=2, C=3, P=4, N=5) and appended to *labels* in place; each
	image's flattened HOG histogram is appended to *train*.

	Returns the number of images processed at this directory level.

	NOTE(review): ``return tame`` sits inside the os.walk loop, so only
	the first walk iteration runs; ``names`` is an undeclared outer-scope
	name — verify both are intentional.
	"""
	tame = 0
	winSize = (64,64)
	blockSize = (16,16)
	blockStride = (8,8)
	cellSize = (8,8)
	nbins = 9
	derivAperture = 1
	winSigma = 4.
	histogramNormType = 0
	L2HysThreshold = 2.0000000000000001e-01
	gammaCorrection = 0
	nlevels = 64
	winStride = (8,8)
	padding = (8,8)
	locations = ((10,20),)
	hog = cv2.HOGDescriptor(winSize,blockSize,blockStride,cellSize,nbins,derivAperture,winSigma, histogramNormType,L2HysThreshold,gammaCorrection,nlevels)


	for root, dir_names, file_names in os.walk(path):
		
		# Recurse into sub-directories first.
		for path in dir_names:
			training(os.path.join(root, path),train,labels)
		for file_name in file_names:
			file_path = os.path.join(root, file_name)
			
			# Only process files not seen before whose extension starts
			# with 'jp' (jpg/jpeg).
			if not file_path in names and file_name[-3:-1] =='jp': 
				if file_name[-3:-1] == 'jp' :
					
					# Class letter is the 8th character of the file name.
					if file_name[7] == 'B':
						labels.append(1)
					if file_name[7] == 'C' :
						labels.append(3)
					if file_name[7] == 'P':
						labels.append(4)
					if file_name[7] == 'N' :
						labels.append(5)
					if file_name[7] == 'M' :
						labels.append(2)


					# Read as grayscale.
					image = cv2.imread(file_path,0)
					
					
					lists = []
					
					hist = hog.compute(image,winStride,padding,locations)
					
					# Flatten the (N, 1) histogram into a plain list.
					for i in range(0, len(hist)) :
						
						lists.append( hist[i][0])

					train.append(lists)
					tame = tame + 1
					
		return tame
コード例 #32
0
ファイル: kane.py プロジェクト: vakhil/MLT-Project
# Script: HOG-encode one image with the same descriptor configuration as
# training(), then fit and pickle the classifier.
winSize = (64,64)
blockSize = (16,16)
blockStride = (8,8)
cellSize = (8,8)
nbins = 9
derivAperture = 1
winSigma = 4.
histogramNormType = 0
L2HysThreshold = 2.0000000000000001e-01
gammaCorrection = 0
nlevels = 64
winStride = (8,8)
padding = (8,8)
locations = ((10,20),)
hog = cv2.HOGDescriptor(winSize,blockSize,blockStride,cellSize,nbins,derivAperture,winSigma, histogramNormType,L2HysThreshold,gammaCorrection,nlevels)
hist = hog.compute(image,winStride,padding,locations)



# Flatten the (N, 1) histogram into a plain list of floats.
lists = []

for i in range(0, len(hist)) :

	lists.append( hist[i][0])
# NOTE(review): ``finale`` is built but never used; the classifier is
# fitted on the pre-existing ``train``/``labels`` — verify intent.
finale = []
finale.append(lists)
clf.fit(train,labels)
# Persist the fitted classifier.
with open('kane.pkl', 'wb') as f:
    pickle.dump(clf, f)
コード例 #33
0
def calculate_hog(hog, img):
    """Compute the HOG descriptor of *img* and return it as a flat float array.

    Parameters:
        hog: object with a ``compute(img)`` method returning an (N, 1)
            column vector (e.g. cv2.HOGDescriptor).
        img: image passed straight through to ``hog.compute``.

    Returns:
        1-D numpy.ndarray of dtype float64 with the N descriptor values.
    """
    h = hog.compute(img)
    # ``np.float`` was removed in NumPy 1.20; the builtin ``float``
    # (float64) is the documented replacement.  ravel() replaces the
    # original element-by-element copy loop.
    return np.asarray(h, dtype=float).ravel()