Code example #1
 def readFromVideoAndFrames(self,
                            videoName,
                            frames,
                            category,
                            compression=0.16,
                            feature="HOG"):
     import numpy
     from egovision import Video
     from egovision.features import FeatureController
     featureController = FeatureController(compression)
     video = Video(videoName)
     nf = 0
     # "frames" is an ascending list of frame numbers to sample.
     while frames:
         success = video.grab()
         if not success:
             break  # end of video: avoid looping forever on missing frames
         if int(frames[0]) == nf:
             success, frame = video.retrieve()
             desc = featureController.getFeature(frame, feature)
             self.headers.append("".join([videoName, "_", str(nf)]))
             self.attributes.append(desc)
             self.categories.append(category)
             frames.pop(0)
         nf += 1
     # Stack the per-frame descriptors into a single 2-D feature matrix.
     self.attributes = numpy.vstack(self.attributes)
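
A minimal sketch of the container class this method assumes: DataManager and its three lists are illustrative names, not part of the egovision API; readFromVideoAndFrames only requires that self.headers, self.attributes and self.categories exist as lists before it is called.

class DataManager(object):
    def __init__(self):
        self.headers = []     # one "<videoName>_<frameNumber>" id per sample
        self.attributes = []  # one feature descriptor per sample
        self.categories = []  # one category label per sample

    # readFromVideoAndFrames (above) would be attached to this class.

# Hypothetical usage: sample frames 10, 20 and 30 as category 1.
# dm = DataManager()
# dm.readFromVideoAndFrames("EV_Video1.MP4", [10, 20, 30], 1)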
Code example #2
 def frameSize(self):
     videoName = VIDEO_EXAMPLE_PATH.format(self.videoname)
     video = Video(videoName)
     success, frame = video.read()
     video.release()
     # Test frame dimensions (height, width, channels)
     self.assertEqual(frame.matrix.shape, (720, 1280, 3),
                      msg="Dimensions of the frame do not match")
Code example #3
 def videoRead(self):
     videoname = VIDEO_EXAMPLE_PATH.format(self.videoname)
     video = Video(videoname)
     success, frame = video.read()
     video.release()
     # Test that reading succeeded
     self.assertTrue(success, msg="Video could not be read")
     # Test frame type
     self.assertIsInstance(frame, Frame, msg="Reader does not return a Frame")
Code example #4
 def frameExportImport(self):
     videoname = VIDEO_EXAMPLE_PATH.format(self.videoname)
     video = Video(videoname)
     success, frame = video.read()
     video.release()
     filename = VIDEO_EXAMPLE_PATH.format("frameMatrix.pk")
     ObjectPickler.save(frame, filename)
     frame2 = ObjectPickler.load(Frame, filename)
     self.assertEqual(frame,
                      frame2,
                      msg="Loaded frame differs from the saved frame")
Code example #5
    def testVideoFeatureCreator(self):
        outputfile = GROUNDTRUTH_VIDEOFEATURE_PATH.format(
            self.videoname, self.feature)
        videoname = VIDEO_EXAMPLE_PATH.format("".join(
            [self.videoname, self.extension]))
        video = Video(videoname)
        success, featureVideo = self.featureController.getFeatures(video)
        self.assertTrue(success, msg="Impossible to process the features")
        self.assertIsInstance(
            featureVideo.features,
            numpy.ndarray,
            msg="The video reader is not returning an ndarray")

        if createGroundTruths:
            print "[Feature Creator] Ground Truth Created"
            print outputfile
            if not os.path.exists(os.path.split(outputfile)[0]):
                os.makedirs(os.path.split(outputfile)[0])
            success = ObjectPickler.save(featureVideo, outputfile)
            self.assertTrue(success, msg="Impossible to save the features")
Code example #6
    def readDataset(self, datasetFolder, compressionWidth, feature):
        """

        This method reads the folder structure of the dataset and initializes
        the attributes of the data manager. In general the folder structure is
        divided into three parts: i) Videos: contains the raw video sequences,
        ii) Positives: contains the masks of the positive samples, iii)
        Negatives: contains the masks of the negative samples. For
        illustrative purposes, let's name our dataset "EV" and define its
        root folder as "EV/". The folder structure is briefly summarized in
        the next table:

        .. list-table::
           :widths: 10 20 60
           :header-rows: 1
           
           * - Path
             - Content
             - Description 
           * - <dataset>/Videos
             - Full video files
             - Original video sequences. Each video may contain positive as
               well as negative frames. Each video should be named as
               <dataset>_<videoid>.<extension>. For example, the full path of a
               video in the EV dataset could be "EV/Videos/EV_Video1.MP4".
           * - <dataset>/Positives
             - Folders
             - <dataset>/Positives contains a folder per video that is going to
               be used to extract positive frames (with hands). For example,
               let's assume that frames 10, 20 and 30 of
               "EV/Videos/EV_Video1.MP4" are going to be used as positive
               samples in the training stage; then the positives folder should
               contain these files::
               
                    "EV/Positives/EV_Video1/mask10.jpg",
                    "EV/Positives/EV_Video1/mask20.jpg",
                    "EV/Positives/EV_Video1/mask30.jpg"

               respectively. In practice the mask files could be empty,
               because they are used only to guide the scanning of the video.
               However, as a way to validate the frames used, we suggest
               storing compressed snapshots of the real frames.
           * - <dataset>/Negatives
             - Folders
             - <dataset>/Negatives contains a folder per video that is going to
               be used to extract negative frames (without hands). For example,
               let's assume that frames 30, 100 and 120 of
               "EV/Videos/EV_Video2.MP4" are going to be used as negative
               samples in the training stage; then the negatives folder should
               contain these files::

                    "EV/Positives/EV_Video1/mask30.jpg",
                    "EV/Positives/EV_Video2/mask100.jpg",
                    "EV/Positives/EV_Video2/mask120.jpg" 
            
               respectively. In practice the mask files could be empty,
               because they are used only to guide the scanning of the video.
               However, as a way to validate the frames used, we suggest
               storing compressed snapshots of the real frames.

        Finally, following the previous examples, the full folder structure is::

            EV/Videos/EV_Video1.MP4
            EV/Videos/EV_Video2.MP4
            EV/Positives/EV_Video1/mask10.jpg
            EV/Positives/EV_Video1/mask20.jpg
            EV/Positives/EV_Video1/mask30.jpg
            EV/Negatives/EV_Video2/mask30.jpg
            EV/Negatives/EV_Video2/mask100.jpg
            EV/Negatives/EV_Video2/mask120.jpg


        Example 1: How to read the dataset folder with egovision::
        
            from egovision.handDetection import HandDetectionDataManager
            from egovision.values.paths import DATASET_PATH
            feature = "HOG"
            dataset = "UNIGEmin"
            datasetFolder = DATASET_PATH.format(dataset)
            dm = HandDetectionDataManager()
            dm.readDataset(datasetFolder, 200, feature)

        """
        from datetime import datetime
        from egovision import Video
        from egovision.features import FeatureController
        self.datasetFolder = datasetFolder
        self.compressionWidth = compressionWidth
        self.feature = feature
        categories = ["Negatives", "Positives"]
        featureController = FeatureController(compressionWidth, feature)
        for nc, cat in enumerate(categories):
            categoryFolder = "".join([datasetFolder, "/", cat, "/"])
            videoNames = os.listdir(categoryFolder)
            for videoName in videoNames:
                masks = os.listdir("".join([categoryFolder, videoName]))
                # Sort mask files numerically by frame number ("mask<N>.jpg")
                masks.sort(key=lambda x: int(x[4:-4]))
                fVideoName = "".join([datasetFolder, "/Videos/", videoName])
                fVideoName = fullVideoName(fVideoName)
                video = Video(fVideoName)
                for mask in masks:
                    sys.stdout.flush()
                    # Extract the frame number from "mask<N>.jpg"
                    fmNumber = int(mask[4:-4])
                    t0 = datetime.now()
                    success, frame = video.readFrame(fmNumber)
                    t1 = datetime.now()
                    success, desc = featureController.getFeatures(frame)
                    t2 = datetime.now()
                    # sysout = "\r{0}: - {1} - {2} - {3}".format(videoName, mask, t2-t1,t1-t0)
                    # sys.stdout.write(sysout)
                    self.headers.append("".join(
                        [fVideoName, "_", str(fmNumber)]))
                    self.attributes.append(next(desc))
                    self.categories.append(nc)
        self.attributes = numpy.vstack(self.attributes)
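
As a complement to the docstring above, the sketch below scaffolds the described "EV" layout with empty placeholder mask files. The helper scaffoldDataset is illustrative and not part of egovision; the folder names and frame numbers come from the docstring's example.

import os

def scaffoldDataset(root="EV"):
    layout = {
        "Positives/EV_Video1": [10, 20, 30],
        "Negatives/EV_Video2": [30, 100, 120],
    }
    # The raw video files themselves are copied into <root>/Videos by hand.
    os.makedirs(os.path.join(root, "Videos"))
    for folder, frames in layout.items():
        path = os.path.join(root, folder)
        os.makedirs(path)
        for n in frames:
            # Empty placeholder masks: only the file names guide the scan.
            open(os.path.join(path, "mask{0}.jpg".format(n)), "w").close()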
Code example #7
modelRecommender.train(allFrames[:N_MODELS])

# TRAINING THE MULTIMODEL PIXEL BY PIXEL
hs = PixelByPixelMultiHandSegmenter(FEATURE, COMPRESSION_WIDTH, CLASSIFIER,
                                    STEP)
dataManagerList = hs.trainClassifier(allMasks[:N_MODELS], modelRecommender)

# DEFINING THE HAND2HAND OCCLUSION DETECTOR
occDetector = SuperpixelsOcclusionDetector(COMPRESSION_WIDTH)
# occDetector.tuneSuperpixelAlgorithm(SIGMA, dataManagerList, hs)
occDetector.setSuperpixelAlgorithm(
    nSegments=250, compactness=7,
    sigma=SIGMA)  # obtained from the optimization

# LOADING TESTING VIDEO
video = Video(DATASET_VIDEOS_PATH.format(DATASET, TESTINGVIDEO) + ".mp4")
fFrame = 0
video.readFrame(fFrame)
writer = VideoWriter(
    RESULTS_PATH.format(DATASET, TESTINGVIDEO) + "_segmented.avi")
writer.setParametersFromVideo(video)

# leftState = []
# leftFilter = EllipseTracker(COMPRESSION_WIDTH, 1/60.0, modelId = 0)
# rightFilter = EllipseTracker(COMPRESSION_WIDTH, 1/60.0, modelId = 1)

# PER MASK
fout = open(RESULTS_PATH.format(DATASET, TESTINGVIDEO) + ".csv", "w")
for frameNumber, frame in enumerate(video):
    print(fFrame + frameNumber)
    binaryShape = (frame.matrix.shape[0], frame.matrix.shape[1], 1)