Example #1
import numpy as np
# videoExplorer and feat (the HOG implementation) are assumed to be importable
# from the surrounding project; their module paths are not shown in this snippet.

def extractHOGfromVideo(path):
    """Compute a HOG descriptor for every frame and save the stack next to the video."""
    vE = videoExplorer()
    vE.setVideoStream(path, info=False, frameMode='RGB')

    feats = []
    for frame in vE:
        feats.append(feat.hog(frame))

    f = np.asarray(feats)
    np.save(path.split('.avi')[0] + '.feat.{ending}.npy'.format(ending='hog-8'), f)
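
For reference, a minimal usage sketch (the clip path is hypothetical; NumPy and the function above are assumed):

extractHOGfromVideo('clips/session01.avi')           # hypothetical video path
feats = np.load('clips/session01.feat.hog-8.npy')    # array saved by the function above
print(feats.shape)                                   # one HOG descriptor per frame
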
Example #2
# Assumes the surrounding script provides: allBoxesRot (a list of nine rotated
# depth boxes as 2-D arrays), numpy as np, features.hog, HOGpicture, and
# pylab's figure/imshow.
for k in range(9):
    print(k)
    tmpBoxRot = allBoxesRot[k]
    # tmpBoxRot = np.nan_to_num(tmpBoxRot)
    # Shift/scale the non-zero depth values into [0, 1].
    tmpBoxRot[tmpBoxRot > 0] -= tmpBoxRot[tmpBoxRot > 0].min()
    tmpBoxRot[tmpBoxRot > 0] /= tmpBoxRot.max()
    # tmpBoxRot[tmpBoxRot>0] = np.log(tmpBoxRot[tmpBoxRot>0])
    # Replicate the single channel so features.hog sees a 3-channel image.
    tmpBoxD = np.dstack([tmpBoxRot, tmpBoxRot, tmpBoxRot])
    f = features.hog(tmpBoxD, 4)

    # figure(1); subplot(3,3,k+1)
    figure(1); imshow(tmpBoxRot)

    # figure(2); subplot(3,3,k+1)
    # imshow(f[:,:,i-1])

    # Render the HOG cells as an image for visual inspection.
    im = HOGpicture(f, 4)
    figure(2); imshow(im, interpolation='nearest')


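The normalize-then-stack pattern above also recurs in the depth examples below, so it can be wrapped in a small helper. A minimal sketch, assuming NumPy and the same features.hog call; the name depth_to_hog is hypothetical:

import numpy as np
# `features` (providing hog) is assumed to be imported as in the surrounding examples.

def depth_to_hog(depth_box, sbin=4):
    """Hypothetical helper: scale a single-channel depth patch into [0, 1],
    replicate it to three channels, and return its HOG descriptor."""
    box = np.asarray(depth_box, dtype=float).copy()
    nonzero = box > 0
    if nonzero.any():
        box[nonzero] -= box[nonzero].min()
        if box.max() > 0:
            box[nonzero] /= box.max()
    return features.hog(np.dstack([box, box, box]), sbin)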
Example #3
from vision import features
from PIL import Image
from scipy.io import savemat as savematlab

im = Image.open("/scratch/vatic/syn-bounce-level/0/0/0.jpg")
f = features.hog(im)
# Pass the filename directly; handing savemat a file opened in text mode breaks on Python 3.
savematlab("features.mat", {"py": f})
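
To verify the export, the .mat file can be read back with scipy; a quick sketch (the "py" key matches the dictionary used above):

from scipy.io import loadmat

saved = loadmat("features.mat")
print(saved["py"].shape)   # the HOG array saved above
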
Example #4
                # Snippet from inside a larger tracking loop: pos, size, b, boxSize,
                # im, calcHOG, bodyPartDepths and bodyPartFeatures are all defined
                # by the enclosing code.
                pos[b] = (pos[b][0] + int(size[b][0] / 2) - boxSize / 2,
                          pos[b][1] + int(size[b][1] / 2) - boxSize / 2)
                # cv2.rectangle(im, pos[b], (pos[b][0]+size[b][0], pos[b][1]+size[b][1]), [200,100,100])
                # Crop a patch (channel 2) around the re-centred body part position.
                box = im[pos[b][1]:pos[b][1] + size[b][1],
                         pos[b][0]:pos[b][0] + size[b][0], 2]
                # boxMean = int(box.mean())
                # boxMax = box.max()
                # box[box<boxMean] = box.max()
                # box[box>=boxMean] -= box[box>0].min()
                bodyPartDepths[b].append(deepcopy(box))
                # Only compute HOG when the crop is exactly boxSize x boxSize.
                if (calcHOG and box.shape[0] > 0 and box.shape[1] > 0
                        and box.shape[0] == boxSize and box.shape[1] == boxSize):
                    # tmpBox = np.dstack([box[:,:,2], box[:,:,2], box[:,:,2]])
                    tmpBoxD = np.dstack([box, box, box])
                    f = features.hog(tmpBoxD, 4)
                    bodyPartFeatures[b].append(deepcopy(f))

                    # im2 = HOGpicture(f, 4)
                    # figure(2); imshow(im, interpolation='nearest')

                    # for i in range(5):
                    # 	posNew = [pos[b][0]+random.randint(30,50), pos[b][1]+random.randint(30,50)]
                    # 	box = im[pos[b][1]:pos[b][1]+size[b][1], pos[b][0]:pos[b][0]+size[b][0], 2]
                    # 	if calcHOG and box.shape[0] > 0 and box.shape[1] > 0 and box.shape[0] == 30 and box.shape[1] == 30:
                    # 		otherDepths.append(deepcopy(box))
                    # 		tmpBoxD = np.dstack([box,box,box])
                    # 		f = features.hog(tmpBoxD, 4)
                    # 		otherFeatures.append(deepcopy(f))

        # figure(1); subplot(2,4,b)
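
After the loop, each bodyPartFeatures[b] holds a list of per-frame HOG arrays. A short sketch (assuming NumPy, and that the descriptors for a given body part all share one shape) of flattening them into one row per frame for later training:

import numpy as np

# Hypothetical post-processing for one body part b: one flattened HOG descriptor per row.
partFeats = np.vstack([f.flatten() for f in bodyPartFeatures[b]])
print(partFeats.shape)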
Example #7
    # Snippet from a training script: np, features, trainingDepths, hogs,
    # part/bodypart and hogRes are defined by the surrounding code.
    for i in range(len(trainingDepths[bodypart]) - 1):
        tmpImg = np.asarray(trainingDepths[bodypart][i][0], dtype=float)
        ## Normalize the image: clamp the values to within about one standard
        ## deviation of the mean to help suppress the background.
        if not np.all(tmpImg == 0):
            imMean = tmpImg.mean()
            imSTD = tmpImg.std()
            min_ = tmpImg[tmpImg > imMean - 1 * imSTD].min()
            tmpImg -= min_
            max_ = tmpImg[tmpImg < imMean - min_ + 1 * imSTD].max()
            tmpImg[tmpImg > max_] = max_
            tmpImg[tmpImg < 0] = max_
            tmpImg /= float(max_ / 255.0)  # np.float was removed in recent NumPy
            tmpImg = np.asarray(tmpImg, dtype=np.uint8)

            # Replicate the single channel so features.hog sees a 3-channel image.
            tmp = np.dstack([tmpImg, tmpImg, tmpImg])
            hogs[part].append(features.hog(tmp, hogRes))


from pyKinectTools.utils.HOGUtils import *

# Stack the face and left-hand descriptors into one matrix: one flattened HOG per row.
hogList = list(hogs['face'])
for p in hogs['l_hand']:
    hogList.append(p)
# hogList.append(hogs['l_hand'])
# hogList = np.array(hogList)
hogVals = np.empty([len(hogList), len(hogList[0].flatten())])
for i in range(len(hogList)):
    # hogList[i] = hogList[i].flatten()
    hogVals[i, :] = hogList[i].flatten()  # the original indexed [0]...[0], which broadcast a single value
hogLabels = np.zeros(len(hogs['face']) + len(hogs['l_hand']))
# hogLabels[0:len(hogs['face'])]+=1
hogLabels[len(hogs['face']):] += 1  # faces get label 0, left hands label 1
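
hogVals and hogLabels now form a small binary training set (faces labelled 0, left hands 1). A minimal sketch of fitting a classifier on it; scikit-learn is an assumption here, not part of the original project:

from sklearn.svm import LinearSVC

# Hypothetical follow-up: train a linear SVM on the flattened HOG descriptors.
clf = LinearSVC()
clf.fit(hogVals, hogLabels)
print(clf.score(hogVals, hogLabels))  # training accuracy, as a quick sanity check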