Code Example #1
    def do_POST(self):
        # Read the POST body; its length comes from the Content-Length header.
        content_length = int(self.headers['Content-Length'])
        body = self.rfile.read(content_length)
        self.send_response(200)
        self.end_headers()
        # Run detectFace on the decoded payload and send its result back.
        response = BytesIO()
        response.write(detectFace(body.decode('utf-8')).encode())
        self.wfile.write(response.getvalue())
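
do_POST here is a method of an http.server request handler and is not runnable on its own. A minimal wiring sketch, assuming the method above is pasted into the class body (the FaceHandler name and the port are hypothetical; detectFace is the project's own detector):

from http.server import BaseHTTPRequestHandler, HTTPServer
from io import BytesIO
from detectFace import detectFace

class FaceHandler(BaseHTTPRequestHandler):
    pass  # paste the do_POST method shown above into this class body

if __name__ == '__main__':
    HTTPServer(('localhost', 8000), FaceHandler).serve_forever()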
Code Example #2
import imageio
import numpy as np
import cv2
from detectFace import detectFace
from getFeatures import getFeatures
from estimateAllTranslation import estimateAllTranslation
from applyGeometricTransformation import applyGeometricTransformation

def test_detect(vidfname, outfname):
    #vid1 = imageio.get_reader("shifted.mp4", 'ffmpeg')
    vid1 = imageio.get_reader(vidfname, 'ffmpeg')

    prevframe = None
    prevbboxes = None
    prevXs, prevYs = np.zeros((1, 1)), np.zeros((1, 1))
    allimg = []
    for i, frame in enumerate(vid1):
        print(i)
        if i % 30 == 0 or count_minfeats(prevXs) < 20:
            print "RECALCULATING"
            try:
                newbboxes = detectFace(frame)
            except AttributeError:
                print "NO NEW FACE FOUND! Trying again on next frame"
                continue
            greyframe = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            Xs, Ys = getFeatures(greyframe, newbboxes)
            print(Xs.shape)
            prevXs, prevYs = Xs, Ys
        else:
            Xs, Ys = estimateAllTranslation(prevXs, prevYs, prevframe, frame)
            Xs, Ys, newbboxes = applyGeometricTransformation(
                prevXs, prevYs, Xs, Ys, prevbboxes)
            #plot_all(frame, newbboxes, Xs, Ys, display=True)

        allimg.append(plot_all(frame, newbboxes, Xs, Ys, display=False))
        # if i==0 or (i > 9 and not i % 10):
        # plot_features(frame, Xs, Ys, display=True)
        #     print Xs-prevXs

        prevbboxes = newbboxes
        prevframe = frame
        prevXs, prevYs = Xs.copy(), Ys.copy()


#        if i > 60:
#            print "Quitting early for testing purposes"
#            break

    imageio.mimsave(outfname, allimg)
    return
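
count_minfeats (like plot_all and plot_features above) is a project helper that is not shown. A minimal sketch under the assumption that Xs is an N-by-F array of per-face feature coordinates padded with zeros for lost tracks, so the loop re-detects once any face falls below 20 live features:

def count_minfeats(Xs):
    # Minimum count of nonzero (still-tracked) features across faces;
    # the zero-padding convention is an assumption taken from the
    # other snippets on this page.
    return int(np.count_nonzero(Xs, axis=0).min())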
Code Example #3
import numpy as np
import cv2
from detectFace import detectFace
from getFeatures import getFeatures
from estimateAllTranslation import estimateAllTranslation
from applyGeometricTransformation import applyGeometricTransformation
from helper import rgb2gray

def faceTracking(rawVideo):
    #TODO: Your code here

    ind = 0
    trackedVideo = []
    nFrames = len(rawVideo)

    # detect faces
    bbox = detectFace(rawVideo[0])

    # detect features
    gray_im = rgb2gray(rawVideo[0])
    x, y = getFeatures(gray_im, bbox)

    for f in range(0, x.shape[1]):
        x[:, f] = x[:, f] + bbox[f][0][1]  # row coords
        y[:, f] = y[:, f] + bbox[f][0][0]  # col coords

    for i in range(0, nFrames - 1):

        if x.shape[0] < 15:
            # detect faces
            bbox = detectFace(rawVideo[0])

            # detect features
            gray_im = rgb2gray(rawVideo[0])
            x, y = getFeatures(gray_im, bbox)

            for f in range(0, x.shape[1]):
                x[:, f] = x[:, f] + bbox[f][0][1]  # row coords
                y[:, f] = y[:, f] + bbox[f][0][0]  # col coords

        # get frames
        img1 = rawVideo[ind]
        img2 = rawVideo[ind + 1]

        # track features from first frame to second using KLT procedure

        newX, newY = estimateAllTranslation(x, y, img1, img2)

        # apply resulting transformation
        newXs, newYs, bbox = applyGeometricTransformation(
            x, y, newX, newY, bbox)
        # apply tracked features and bounding box to frames, update output array
        for f in range(0, newXs.shape[1]):
            im = cv2.rectangle(img2, (int(bbox[f][0][0]), int(bbox[f][0][1])),
                               (int(bbox[f][2][0]), int(bbox[f][2][1])),
                               (0, 0, 0), 3)
            for j in range(0, len(newXs)):
                im = cv2.circle(im, (int(newYs[j][f]), int(newXs[j][f])), 1,
                                (0, 0, 255), 2)

        trackedVideo.append(im)

        ind = ind + 1

        x = newXs
        y = newYs

    trackedVideo = np.array(trackedVideo)

    return trackedVideo
Code Example #4
    #     plt.plot(newbox.T[1], newbox.T[0], 'ro')
    #     plt.axis('off')
    # plt.show()

    return Xs, Ys, newbbox


if __name__ == '__main__':
    cap = cv2.VideoCapture("./Datasets/Difficult/StrangerThings.mp4")
    ret, img1 = cap.read()
    ret, img2 = cap.read()
    cap.release()
    tmpimg1 = img1.copy()
    tmpimg2 = img2.copy()

    bbox = detectFace(img1)
    startXs, startYs = getFeatures(img1, bbox)
    newXs, newYs = estimateAllTranslation(startXs, startYs, img1, img2)
    Xs, Ys, newbbox = applyGeometricTransformation(startXs, startYs, newXs,
                                                   newYs, bbox)

    for box in bbox:
        cv2.rectangle(tmpimg1, (int(box[0][1]), int(box[0][0])),
                      (int(box[-1][1]), int(box[-1][0])), (0, 255, 0), 3)
    plt.figure()
    plt.imshow(tmpimg1)
    for j in range(len(startYs)):
        plt.plot(startYs[j], startXs[j], 'w+')
    plt.axis('off')
    plt.show()
Code Example #5
import numpy as np
import cv2
import imageio
from detectFace import detectFace
from getFeatures import getFeatures
from estimateAllTranslation import estimateAllTranslation
from applyGeometricTransformation import applyGeometricTransformation

def faceTracking(rawVideo):

    #process the video here, convert into frames of images
    #assuming that 'rawVideo' is a video path
    cap = cv2.VideoCapture(rawVideo)
    #ie, test with rawVideo = 'Data/Easy/TheMartian.mp4'
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    #create an array that holds all the frames in the video, format: frame_number x width x height
    frames = np.zeros([num_frames, frame_height, frame_width, 3], np.uint8)
    f = 0

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        #cv2.imshow('frame')
        frames[f, :, :, :] = frame

        f += 1
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        if f == num_frames - 1:
            break

    cap.release()
    cv2.destroyAllWindows()

    #need to detect face only on the first frame of the video
    #if "good" face not found, then try other frames, until a good face is found
    face_found = False
    f = 0
    face = None
    while not face_found:
        #face is a Fx4x2 bounding box
        face = detectFace(frames[f, :, :, :])
        f += 1
        if face is not None:
            face_found = True
            f -= 1

    #find the start features
    init_frame = f
    init_img = frames[init_frame, :, :, :]
    init_img_gray = cv2.cvtColor(init_img, cv2.COLOR_BGR2GRAY)
    #there will always be 1000 xy's, because we have padded with (0,0)
    #make sure to ignore the (0,0) points later in the code
    startXs, startYs = getFeatures(init_img_gray, face)

    #initialize the output matrix of tracked images
    outputMatrix = np.zeros((num_frames - f, frame_height, frame_width, 3),
                            np.uint8)

    #draw rectangles of all the faces on the current image
    initImgWithBBox = init_img
    [numFaces, _, _] = face.shape
    for i in range(0, numFaces):
        bboxOfCurrFace = face[i, :, :]
        # get the position of the corners of the bounding box for the current face
        first = bboxOfCurrFace[0, :].astype(int)
        second = bboxOfCurrFace[3, :].astype(int)
        # add a bounding box to the initial image
        cv2.rectangle(initImgWithBBox, (first[0], first[1]),
                      (second[0], second[1]), (255, 0, 0))
        initImgWithBBox = plotPoints(initImgWithBBox, startYs[:, i],
                                     startXs[:, i])

    #add the initial image as the first image
    outputMatrix[0, :, :, :] = initImgWithBBox

    #actually do the transform and find the new bounding box
    for frame in range(f, num_frames - 1):  #this should probably not be -1
        #get the two consecutive frames at the index
        img1 = frames[frame, :, :, :]
        img2 = frames[frame + 1, :, :, :]

        #find new feature points every 10th frame
        if (frame % 10 == 0):
            faceCurr = detectFace(img1)
            if faceCurr is not None:
                face = faceCurr
            #else
            #just use the last face since it couldn't detect a new face

            #convert first image to grey
            img1grey = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
            #find the starting features on the first image
            startXs, startYs = getFeatures(img1grey, face)

        [newXs, newYs] = estimateAllTranslation(startXs, startYs, img1, img2)
        [Xs, Ys,
         newbbox] = applyGeometricTransformation(startXs, startYs, newXs,
                                                 newYs, face)
        #print newXs
        #now add a rectangle of newbbox to img2 and add the feature points
        img2WithBoundingBox = img2
        for facei in range(0, numFaces):
            #get the bounding box for the current face
            bboxOfCurrFace = newbbox[facei, :, :]
            #get the positions of the two corners for the bounding box of the current face
            first = bboxOfCurrFace[0, :].astype(int)
            second = bboxOfCurrFace[3, :].astype(int)
            #draw the bounding box
            img2WithBoundingBox = cv2.rectangle(img2WithBoundingBox,
                                                (first[0], first[1]),
                                                (second[0], second[1]),
                                                (255, 0, 0))
            #draw the feature points
            if numFaces == 1:
                img2WithBoundingBox = plotPoints(img2WithBoundingBox, Ys, Xs)
            else:
                img2WithBoundingBox = plotPoints(img2WithBoundingBox,
                                                 Ys[:, facei], Xs[:, facei])

        #add img2 to the output matrix
        outputMatrix[frame + 1, :, :, :] = img2WithBoundingBox
        #set the new bbox to the face for the next iteration
        face = newbbox

        #set the xs and ys of the features for the new features
        startXs = newXs
        startYs = newYs

    #output the final video
    imageio.mimwrite('finalVideo.avi', outputMatrix, fps=30)
    trackedVideo = []
    return trackedVideo
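
plotPoints is not defined in this snippet. A plausible sketch, assuming Xs hold row and Ys hold column coordinates (as in the other snippets) and that the (0, 0) padding entries noted above should be skipped:

def plotPoints(img, ys, xs):
    # Mark each tracked feature with a small circle; the (row, col)
    # interpretation of xs/ys and the padding skip are assumptions.
    out = img.copy()
    for x, y in zip(np.ravel(xs), np.ravel(ys)):
        if x == 0 and y == 0:
            continue
        cv2.circle(out, (int(y), int(x)), 1, (0, 0, 255), 2)
    return out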
Code Example #6
from detectFace import detectFace
from getFeatures import getFeatures
from estimateAllTranslation import estimateAllTranslation
from applyGeometricTransformation import applyGeometricTransformation
from helper import rgb2gray
import numpy as np
import cv2

rawVideo = loadVid(inputName)

ind = 0
trackedVideo = []
nFrames = len(rawVideo)

# detect faces
bbox = detectFace(rawVideo[0])

# detect features
gray_im = rgb2gray(rawVideo[0])
x, y = getFeatures(gray_im, bbox)

for f in range(0, x.shape[1]):
    x[:, f] = x[:, f] + bbox[f][0][1]  # row coords
    y[:, f] = y[:, f] + bbox[f][0][0]  # col coords

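
loadVid is not shown in this snippet. A minimal sketch, assuming it simply reads every frame of the video at inputName into a list of BGR arrays:

def loadVid(path):
    # Read all frames from the video file at `path` into a list
    # (a sketch, not the original helper).
    cap = cv2.VideoCapture(path)
    frames = []
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frames.append(frame)
    cap.release()
    return frames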
Code Example #7
def faceTracking(rawVideo):
    #TODO: Your code here
    import numpy as np
    import cv2
    import scipy
    import matplotlib.pyplot as plt
    import matplotlib
    from detectFace import detectFace
    from getFeatures import getFeatures
    from estimateAllTranslation import estimateAllTranslation
    from applyGeometricTransformation import applyGeometricTransformation
    frameSet = []  #list to store all frames of the input video
    newFrameSet = []  #list to store all frames of the output video
    tf = True
    plt.ioff()
    while tf:  #read each frame in the input video
        tf, frame = rawVideo.read()
        frameSet.append(frame)
    frameSet = frameSet[:-1]

    bbox = detectFace(frameSet[0])  #detect bounding box in the first frame
    gray = cv2.cvtColor(
        frameSet[0],
        cv2.COLOR_BGR2GRAY)  #convert first frame to gray scale image
    x, y = getFeatures(
        gray,
        bbox)  #extract feature points from gray scale image and bounding box

    #drawing
    plt.imshow(cv2.cvtColor(frameSet[0], cv2.COLOR_BGR2RGB))
    [r1b, c1b, d1b] = np.asarray(bbox.shape)
    for i in range(r1b):  #plot bounding box and feature points
        b = bbox[i, :, :]
        xloc = x[:, i]
        yloc = y[:, i]
        facebb = matplotlib.patches.Polygon(b, closed=True, fill=False)
        #      facebb.set_edgecolor('w')#uncomment if bounding box color blends with black background
        features = plt.plot(xloc, yloc, 'w.', ms=1)
        plt.gca().add_patch(facebb)
    plt.axis('off')
    plt.savefig("temp.png", dpi=300, bbox_inches="tight")
    img = cv2.imread("temp.png")
    plt.close()
    newFrameSet.append(img)

    #getting features and transforming
    for k in range(
            1, len(frameSet)):  #iterate through all frames of the input video
        newXs, newYs = estimateAllTranslation(x, y, frameSet[k - 1],
                                              frameSet[k])
        Xs, Ys, newbbox = applyGeometricTransformation(x, y, newXs, newYs,
                                                       bbox)
        plt.imshow(cv2.cvtColor(frameSet[k], cv2.COLOR_BGR2RGB))
        print(len(Xs))
        for j in range(r1b):
            b = newbbox[j, :, :]
            xloc = Xs[:, j]
            yloc = Ys[:, j]
            facebb = matplotlib.patches.Polygon(b, closed=True, fill=False)
            #          facebb.set_edgecolor('w')#uncomment if bounding box color blends with black background
            features = plt.plot(xloc, yloc, 'w.', ms=1)
            plt.gca().add_patch(facebb)
        plt.axis('off')
        plt.savefig("temp.png", dpi=300, bbox_inches="tight")
        img = cv2.imread("temp.png")
        plt.close()
        newFrameSet.append(img)
        x = Xs
        y = Ys
        bbox = newbbox

    [height, width, layer] = np.asarray(newFrameSet[0].shape)
    trackedVideo = cv2.VideoWriter(
        'output.mp4', cv2.VideoWriter_fourcc(*'MP4V'), 30,
        (width, height))  #write frames to a video in mp4 format and 30fps
    for m in range(len(newFrameSet)):
        trackedVideo.write(newFrameSet[m].astype('uint8'))
    cv2.destroyAllWindows()
    trackedVideo.release()

    return trackedVideo
Code Example #8
import numpy as np
import cv2
from detectFace import detectFace
from getFeatures import getFeatures
from estimateAllTranslation import estimateAllTranslation
from applyGeometricTransformation import applyGeometricTransformation

def faceTracking(rawVideo):
    cap = cv2.VideoCapture(rawVideo)
    output = None
    pre_img = None

    # first frame
    ret, cur_img = cap.read()
    bbox = detectFace(cur_img)
    startXs, startYs = getFeatures(cur_img, bbox)

    # initialize video writer
    h, w, l = cur_img.shape
    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')

    # change tracked_video name for each run
    tracked_video = './Output_Video/tracked_video.avi'
    output = cv2.VideoWriter(tracked_video, fourcc, 20, (w, h), True)

    # draw box on first frame
    imgwbox = drawBox(cur_img, bbox)
    output.write(imgwbox)
    pre_img = cur_img

    count = 0
    while (cap.isOpened()):
        ret, cur_img = cap.read()

        if not ret:
            break

        newXs, newYs = estimateAllTranslation(startXs, startYs, pre_img,
                                              cur_img)
        Xs, Ys, newbbox = applyGeometricTransformation(startXs, startYs, newXs,
                                                       newYs, bbox)

        box_features = np.array([])
        for i in range(len(Xs)):
            box_features = np.append(box_features, len(Xs[i]))

        print(sum(box_features))
        if sum(box_features) < 10:
            newbbox = detectFace(cur_img)
            Xs, Ys = getFeatures(cur_img, newbbox)

        pre_img = cur_img
        startXs = Xs
        startYs = Ys
        bbox = newbbox

        imgwbox = drawBox(cur_img, bbox)
        output.write(imgwbox)

        # print video record
        print('{} frame finished'.format(count))
        count += 1

    # close video writer
    cv2.destroyAllWindows()
    cap.release()
    output.release()

    return tracked_video
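
drawBox is not defined here. A plausible sketch, assuming each face's bounding box stores opposite corners at indices 0 and 3 in (x, y) pixel order, matching the corner indexing used elsewhere on this page:

def drawBox(img, bbox):
    # Draw every face's bounding box; the corner layout is an assumption.
    out = img.copy()
    for box in bbox:
        p1 = (int(box[0][0]), int(box[0][1]))
        p2 = (int(box[3][0]), int(box[3][1]))
        cv2.rectangle(out, p1, p2, (0, 255, 0), 2)
    return out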
Code Example #9
from detectFace import detectFace
from getFeatures import getFeatures
from estimateAllTranslation import estimateAllTranslation
from applyGeometricTransformation import applyGeometricTransformation
from helper import rgb2gray, overlay_points
import matplotlib.pyplot as plt
import cv2

#create all the images
color_img1 = plt.imread('./data/easy/TheMartian0.jpg')
img1 = rgb2gray(color_img1)

color_img2 = plt.imread('./data/easy/TheMartian130.jpg')
img2 = rgb2gray(color_img2)

#find the bounding boxes
bbox = detectFace(color_img1)

#find the positions of all the features
startXs, startYs = getFeatures(img1, bbox)
#overlay_points(img1, startXs, startYs, 'postGetFeatures_TheMartian1')

#draw the bounding boxes
first = bbox[0, 0, :]
second = bbox[0, 3, :]
cv2.rectangle(img1, (int(first[0]), int(first[1])),
              (int(second[0]), int(second[1])), color=(0, 255, 0))
plt.imshow(img1)

#estimate the translation
newXs, newYs = estimateAllTranslation(startXs, startYs, color_img1, color_img2)

Xs, Ys, newbbox = applyGeometricTransformation(startXs, startYs, newXs, newYs, bbox)