Example 1
0
def onShowOverlays(self, event):
    """Let the user toggle video overlays via ShowOverlaysDialog and
    rebuild self.videoPanel.overlays from the chosen options.

    Each availability flag (bounds / tracks / recognize) starts False and
    is switched on only when the corresponding data source exists; after
    the dialog, the same variable is reused to hold the checkbox value.
    """
    bounds = False
    tracks = False
    recognize = False  # recognition overlay has no data source here; stays False

    video = vision.video.CvVideo(self.currentVideo)
    dbVideo = stream.models.Video.get(video.file_path)

    # Face bounds are optional: a failed load just leaves the option off.
    # (Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit pass.)
    try:
        video.loadFaceBounds()
        bounds = True
    except Exception:
        pass

    faceTracks = dbVideo.getTracks()
    if faceTracks:
        tracks = True

    d = widgets.ShowOverlaysDialog(self, wx.ID_ANY, bounds, tracks, recognize)
    try:
        if d.ShowModal() != wx.ID_OK:
            return
        # Only read a checkbox when its data source exists; otherwise the
        # availability flag (still False) doubles as "overlay off".
        if bounds:
            bounds = d.faceCheck.GetValue()
        if faceTracks:
            tracks = d.trackCheck.GetValue()
        if recognize:
            recognize = d.extractCheck.GetValue()
    finally:
        # Destroy the dialog on every path (OK, cancel, or exception).
        # Previously Destroy() was duplicated in both branches and skipped
        # entirely if reading a checkbox raised.
        d.Destroy()

    self.videoPanel.overlays = []

    if bounds:
        overlay = widgets.video_overlays.overlayFromFaceBounds(video.face_bounds)
        self.videoPanel.overlays.append(overlay)
    if tracks:
        overlay = widgets.video_overlays.overlayFromTracks(faceTracks, video.face_bounds)
        self.videoPanel.overlays.append(overlay)

    video = None
Example 2
0
def onAnalyze(self, event):
    """Run the analysis stages selected in an AnalyzeDialog over
    self.currentVideo: face finding, face tracking, and face extraction.

    The recognize flag and its params are read here too; presumably the
    recognition stage follows below this excerpt -- TODO confirm.
    """
    d = widgets.AnalyzeDialog(self, wx.ID_ANY)
    # NOTE(review): if the dialog is cancelled, d.Destroy() is never called
    # (no else branch) -- the dialog leaks. Also, eval() on free-text dialog
    # fields executes arbitrary code; ast.literal_eval would be safer,
    # assuming the fields only ever hold Python literals -- verify.
    if d.ShowModal() == wx.ID_OK:
        # Stage 1 settings: face finding (optional scale + finder kwargs).
        faceFind = d.faceCheck.GetValue()
        if faceFind:
            faceScale = eval(d.faceScale.GetValue())
            faceParams = eval(d.faceParams.GetValue())

        # Stage 2 settings: track building kwargs.
        faceTrack = d.trackCheck.GetValue()
        if faceTrack:
            trackParams = eval(d.trackParams.GetValue())

        # Stage 3 settings: face-extraction kwargs (must include 'scaleTo').
        faceExtract = d.extractCheck.GetValue()
        if faceExtract:
            extractParams = eval(d.extractParams.GetValue())

        # Stage 4 settings: recognition kwargs (used further on -- not here).
        faceRecognize = d.recognizeCheck.GetValue()
        if faceRecognize:
            recognizeParams = eval(d.recognizeParams.GetValue())

        d.Destroy()

        if faceFind:
            # Detect face bounding boxes frame-by-frame and persist them
            # alongside the video via writeFaceBounds().
            video = vision.video.CvVideo(self.currentVideo)
            if faceScale:
                finder = vision.finder.ObjectFinder(scaleTo=faceScale)
            else:
                finder = vision.finder.ObjectFinder()
            progDialog = wx.ProgressDialog("Extracting Face Boundaries", "Working...", maximum=1000, parent=self, style=wx.PD_CAN_ABORT)
            video.face_bounds = finder.findInVideo(video, progDialog=progDialog, **faceParams)
            video.writeFaceBounds()
            progDialog.Destroy()
            video = None

        if faceTrack:
            video = vision.video.CvVideo(self.currentVideo)
            dbVideo = stream.models.Video.get(video.file_path)
            # Replace any previous appearances for this video before
            # re-deriving tracks from the stored face bounds.
            stream.models.PersonAppearance.query.filter_by(video=dbVideo).delete()
            self.story.commit()

            video.loadFaceBounds()
            tracker = vision.tracker.ObjectTracker(**trackParams)
            face_tracks = tracker.extractAndInerpolateTracks(video.face_bounds)
            unrecognized = self.story.getUnrecognizedPerson()
            dbVideo = stream.models.Video.get(video.file_path)
            for track in face_tracks:
                # Tracks are stored pickled; every new track starts out
                # attached to the shared "unrecognized" person.
                track = cPickle.dumps(track)
                stream.models.PersonAppearance(track=track, person=unrecognized, video=dbVideo)
            video = None
            self.story.commit()

        if faceExtract:
            video = vision.video.CvVideo(self.currentVideo)
            video.loadFaceBounds()
            dbVideo = stream.models.Video.get(video.file_path)
            # Clear any previously extracted face clips before regenerating.
            for x in stream.models.PersonAppearance.query.filter_by(video=dbVideo).all():
                x.faces = None
            self.story.commit()

            # Unpickle each stored track and keep an id -> db-row map so the
            # output filename can be written back per track below.
            dbTracks = dbVideo.getDbTracks()
            tracks = [(x.id, cPickle.loads(str(x.track))) for x in dbTracks]
            idToDb = dict((x.id, x) for x in dbTracks)

            video.calcDuration()
            progDialog = wx.ProgressDialog("Extracting Faces", "Working...", maximum=video.getNormalizedFrameCount(), parent=self, style=wx.PD_CAN_ABORT)
            faceTracks, numFaces = vision.tracker.ObjectTracker.getFacesFromTracks(video, tracks, progDialog)
            progDialog.Destroy()

            progDialog = wx.ProgressDialog("Saving Faces", "Working...", maximum=numFaces, parent=self, style=wx.PD_CAN_ABORT)
            prog = 0
            # One .avi per track, written into a per-video directory under
            # the unrecognized-people root (wiped first so stale clips go).
            root = os.path.join(self.story.getUnrecognizedPeopleDir(), video.creation)
            util.filesystem.ensureDirectoryExists(root)
            for fl in os.listdir(root):
                os.remove(os.path.join(root, fl))

            for id, faces in faceTracks.iteritems():
                filename = root + "/" + str(id) + ".avi"
                writer = cv.CreateVideoWriter(filename, cv.CV_FOURCC('P', 'I', 'M', '1'), video.getFps(), extractParams['scaleTo'], True)
                for face in faces:
                    cont, _ = progDialog.Update(prog, "Saving Faces")
                    if not cont:
                        # User aborted: bail out mid-save.
                        # NOTE(review): pending db changes are not committed
                        # and the current writer is dropped unreleased here.
                        progDialog.Destroy()
                        video = None
                        return
                    # Normalize every face frame to the configured size
                    # before appending it to the track's clip.
                    scaled = cv.CreateImage(extractParams['scaleTo'], face.depth, face.nChannels)
                    cv.Resize(face, scaled, cv.CV_INTER_LINEAR)
                    cv.WriteFrame(writer, scaled)
                    prog += 1
                # Record where this track's face clip landed.
                idToDb[id].faces = filename

            self.story.commit()
            progDialog.Destroy()
            video = None