    def test_link_substitute_masks(self):
        import os
        import numpy as np
        from mock import Mock
        from maskgen import scenario_model, video_tools

        def imageName(x):
            return None, x

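        # Build two equal-length random clips to diff, plus a shorter third
        # clip that supplies the substitute masks.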
        createMovie(
            'test_link1.m4v',
            lambda: np.random.randint(0, 255,
                                      (720, 480, 3), dtype='uint8'), 40)
        self.addFileToRemove('test_link1.m4v')
        createMovie(
            'test_link2.m4v',
            lambda: np.random.randint(0, 255,
                                      (720, 480, 3), dtype='uint8'), 40)
        self.addFileToRemove('test_link2.m4v')
        masks = video_tools.formMaskDiff('test_link1.m4v', 'test_link2.m4v',
                                         'link_1_2_cmp', '-')
        createMovie(
            'test_link3.m4v',
            lambda: np.random.randint(0, 255,
                                      (720, 480, 3), dtype='uint8'), 30)
        self.addFileToRemove('test_link3.m4v')

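        # Stub only what addSubstituteMasks needs from the scenario model:
        # an image/name lookup and the graph's working directory.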
        link_tool = scenario_model.VideoVideoLinkTool()
        model = Mock()
        model.getImageAndName = imageName
        model.G = Mock()
        model.G.dir = '.'
        subs = link_tool.addSubstituteMasks('test_link1.m4v',
                                            'test_link2.m4v',
                                            model,
                                            '-',
                                            arguments={'Start Time': '11'},
                                            filename='test_link3.m4v')
        self.assertEqual(1, len(subs))
        self.assertEqual(30, video_tools.get_frames_from_segment(subs[0]))
        self.assertEqual(11, video_tools.get_start_frame_from_segment(subs[0]))
        self.assertTrue(
            os.path.exists(video_tools.get_file_from_segment(subs[0])))
        self.addFileToRemove(video_tools.get_file_from_segment(subs[0]))
Example #2
def cleanup_temporary_files(probes=[], scModel=None):
    files_to_remove = []

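    # Gather every mask and HDF5 segment file still referenced by a graph
    # edge; seeding the sets with '' means probes that have no file (and thus
    # an empty basename) are never scheduled for removal.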
    used_hdf5 = ['']
    used_masks = ['']
    for frm, to in scModel.G.get_edges():
        edge = scModel.G.get_edge(frm, to)
        input_mask = getValue(edge, 'inputmaskname', '')
        video_input_mask = getValue(edge, 'arguments.videomaskname', '')
        subs = getValue(edge, 'substitute videomasks', [])
        for sub in subs:
            hdf5 = get_file_from_segment(sub)
            used_hdf5.append(hdf5)
        mask = getValue(edge, 'maskname', '')
        used_masks.append(input_mask)
        used_masks.append(video_input_mask)
        used_masks.append(mask)
        videomasks = getValue(edge, 'videomasks', [])
        for mask in videomasks:
            hdf5 = get_file_from_segment(mask)
            used_hdf5.append(hdf5)
    used_hdf5 = set(used_hdf5)
    used_masks = set(used_masks)

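    # Any probe-produced mask or segment file whose basename is not
    # referenced by an edge is a leftover and is scheduled for removal.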
    for probe in probes:
        mask = probe.targetMaskFileName if probe.targetMaskFileName is not None else ''
        if os.path.basename(mask) not in used_masks:
            files_to_remove.append(mask)
        if probe.targetVideoSegments is not None:
            for segment in probe.targetVideoSegments:
                hdf5 = segment.filename if segment.filename is not None else ''
                if os.path.basename(hdf5) not in used_hdf5:
                    files_to_remove.append(hdf5)

    files_to_remove = set(files_to_remove)

    for _file in files_to_remove:
        try:
            os.remove(os.path.join(scModel.get_dir(), _file))
        except OSError:
            pass
Example #3
    def read_masks(self, mask_set):
        from maskgen import tool_set, video_tools
        r = []
        for segment in mask_set:
            reader = tool_set.GrayBlockReader(
                video_tools.get_file_from_segment(segment))
            r.append({
                'start_time': reader.current_frame_time(),
                'start_frame': reader.current_frame(),
                'frames': reader.length()
            })
        return r
    def test_video_video_link_tool(self):
        from maskgen.scenario_model import VideoVideoLinkTool
        from maskgen.software_loader import Operation
        from maskgen.video_tools import get_end_frame_from_segment, get_file_from_segment
        from maskgen.image_wrap import ImageWrapper
        import os
        import numpy as np

        def create_zero(h, w):
            return ImageWrapper(np.zeros((h, w), dtype='uint8'))

        vida = self.locateFile('videos/sample1.mov')
        vidb = self.locateFile('videos/sample1_swap.mov')
        image_values = {
            'a': (create_zero(300, 300), vida),
            'b': (create_zero(300, 300), vidb)
        }

        def get_image(arg):
            return image_values[arg]

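        # Minimal scenario-model stub: the link tool only needs the operation
        # loader, the graph directory, and an image/name lookup.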
        tool = VideoVideoLinkTool()
        scModel = Mock()
        scModel.gopLoader = Mock()
        scModel.G.dir = '.'
        scModel.gopLoader.getOperationWithGroups = Mock(
            return_value=Operation(name='test', category='test'))
        scModel.getImageAndName = get_image
        mask, analysis, errors = tool.compareImages('a',
                                                    'b',
                                                    scModel,
                                                    'Normalization',
                                                    arguments={
                                                        'aggregate': 'sum',
                                                        'minimum threshold': 1
                                                    },
                                                    analysis_params={})
        self.assertEqual(0, len(errors))
        self.assertEqual((640, 480), mask.size)
        self.assertEqual(1, len(analysis['videomasks']))
        self.assertEqual(803,
                         get_end_frame_from_segment(analysis['videomasks'][0]))
        self.assertTrue(
            os.path.exists(get_file_from_segment(analysis['videomasks'][0])))
    def test_zip_zip_link_tool(self):
        from maskgen.scenario_model import ZipZipLinkTool
        from maskgen.software_loader import Operation
        from maskgen.video_tools import get_end_frame_from_segment, get_file_from_segment
        from maskgen.tool_set import ZipWriter
        from maskgen.image_wrap import ImageWrapper
        import os
        import numpy as np

        def create_zero(h, w):
            return ImageWrapper(np.zeros((h, w), dtype='uint8'))

        w = ZipWriter('v1.zip')
        for i in range(2):
            w.write(np.random.randint(0, 255, (300, 300, 3)))
        w.release()
        self.addFileToRemove('v1.zip')

        w = ZipWriter('v2.zip')
        for i in range(2):
            w.write(np.random.randint(0, 255, (300, 300, 3)))
        w.release()
        self.addFileToRemove('v2.zip')

        image_values = {
            'a': (create_zero(300, 300), 'v1.zip'),
            'b': (create_zero(300, 300), 'v2.zip')
        }

        def get_image(arg):
            return image_values[arg]

        tool = ZipZipLinkTool()
        scModel = Mock()
        scModel.gopLoader = Mock()
        scModel.G.dir = '.'
        scModel.gopLoader.getOperationWithGroups = Mock(
            return_value=Operation(name='test', category='test'))
        scModel.getImageAndName = get_image
        mask, analysis, errors = tool.compareImages('a',
                                                    'b',
                                                    scModel,
                                                    'Normalization',
                                                    arguments={},
                                                    analysis_params={})
        self.assertEqual(0, len(errors))
        self.assertEqual((300, 300), mask.size)
        self.assertEqual(1, len(analysis['videomasks']))
        self.assertEqual(2,
                         get_end_frame_from_segment(analysis['videomasks'][0]))
        self.assertTrue(
            os.path.exists(get_file_from_segment(analysis['videomasks'][0])))

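        # Repeat with mismatched frame sizes (300x300 vs 400x400) to exercise
        # the TransformResize path and its 'shape change' analysis.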
        w = ZipWriter('v1.zip')
        for i in range(2):
            w.write(np.random.randint(0, 255, (300, 300, 3)))
        w.release()
        self.addFileToRemove('v1.zip')

        w = ZipWriter('v2.zip')
        for i in range(2):
            w.write(np.random.randint(0, 255, (400, 400, 3)))
        w.release()
        self.addFileToRemove('v2.zip')

        image_values = {
            'a': (create_zero(300, 300), 'v1.zip'),
            'b': (create_zero(400, 400), 'v2.zip')
        }

        def get_image(arg):
            return image_values[arg]

        tool = ZipZipLinkTool()
        scModel = Mock()
        scModel.gopLoader = Mock()
        scModel.G.dir = '.'
        scModel.gopLoader.getOperationWithGroups = Mock(
            return_value=Operation(name='test', category='test'))
        scModel.getImageAndName = get_image
        mask, analysis, errors = tool.compareImages('a',
                                                    'b',
                                                    scModel,
                                                    'TransformResize',
                                                    arguments={},
                                                    analysis_params={})
        self.assertEqual(0, len(errors))
        self.assertEqual('(100, 100)', analysis['shape change'])
        self.assertEqual(1, len(analysis['videomasks']))
        self.assertEqual(2,
                         get_end_frame_from_segment(analysis['videomasks'][0]))
Example #6
    def warpMask(self,
                 video_masks,
                 source,
                 target,
                 expectedType='video',
                 inverse=False,
                 useFFMPEG=False):
        """
        Tranform masks when the frame rate has changed.
        :param video_masks: ithe set of video masks to walk through and transform
        :param expectedType:
        :param video_masks:
        :return: new set of video masks
        """
        edge = self.graph.get_edge(source, target)
        meta_i, frames_i = self.getVideoMeta(source,
                                             show_streams=True,
                                             media_types=[expectedType])
        meta_o, frames_o = self.getVideoMeta(target,
                                             show_streams=True,
                                             media_types=[expectedType])
        indices_i = ffmpeg_api.get_stream_indices_of_type(meta_i, expectedType)
        indices_o = ffmpeg_api.get_stream_indices_of_type(meta_o, expectedType)
        if not indices_i or not indices_o:
            return video_masks
        index_i = indices_i[0]
        index_o = indices_o[0]
        isVFR = ffmpeg_api.is_vfr(meta_i[index_i]) or ffmpeg_api.is_vfr(
            meta_o[index_o])

        result = self.getChangeInFrames(edge,
                                        meta_i[index_i],
                                        meta_o[index_o],
                                        source,
                                        target,
                                        expectedType=expectedType)

        if result is None:
            return video_masks

        sourceFrames, sourceTime, targetFrames, targetTime, sourceRate, targetRate = result

        if sourceFrames == targetFrames and int(sourceTime * 100) == int(
                targetTime * 100):
            return video_masks

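        # Approximate number of source frames between dropped (or inserted)
        # frames; when the counts match, use a value beyond the clip length so
        # no frame is treated as dropped.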
        dropRate = sourceFrames / float(
            sourceFrames -
            targetFrames) if sourceFrames != targetFrames else sourceFrames + 1

        #if sourceFrames > targetFrames else targetFrames / float(sourceFrames - targetFrames)

        def apply_change(existing_value,
                         orig_rate,
                         final_rate,
                         inverse=False,
                         round_value=True,
                         min_value=0,
                         upper_bound=False):
            # if round_value, return a tuple of value plus rounding error
            import math
            multiplier = -1.0 if inverse else 1.0
            adjustment = existing_value * math.pow(final_rate / orig_rate,
                                                   multiplier)
            if round_value:
                v = max(
                    min(round(adjustment), final_rate)
                    if upper_bound else round(adjustment), min_value)
                e = abs(adjustment - v)
                return int(v), e
            return max(
                min(adjustment, final_rate) if upper_bound else adjustment,
                min_value)

        def adjustPositionsFFMPEG(meta, video_frames, hits):
            rate = ffmpeg_api.get_video_frame_rate_from_meta([meta],
                                                             [video_frames])
            aptime = 0
            lasttime = 0
            hitspos = 0
            start_mask = None
            for pos in range(0, len(video_frames)):
                aptime = get_frame_time(video_frames[pos], aptime, rate)
                while hitspos < len(hits) and aptime > hits[hitspos][0]:
                    mask = hits[hitspos][2]
                    element = hits[hitspos][1]
                    error = abs(aptime - hits[hitspos][0])
                    if element == 'starttime':
                        update_segment(mask,
                                       starttime=lasttime,
                                       startframe=pos,
                                       error=error +
                                       get_error_from_segment(mask))
                        start_mask = mask
                    else:
                        # for error, only record the error if not recorded
                        update_segment(
                            mask,
                            endtime=lasttime,
                            endframe=pos,
                            error=(error if start_mask != mask else 0) +
                            get_error_from_segment(mask))
                    hitspos += 1
                lasttime = aptime
            return mask

        def adjustPositions(video_file, hits):
            # used if variable frame rate
            frmcnt = 0
            hitspos = 0
            last = 0
            cap = cv2api_delegate.videoCapture(video_file)
            try:
                while cap.grab() and hitspos < len(hits):
                    frmcnt += 1
                    aptime = cap.get(cv2api_delegate.prop_pos_msec)
                    while hitspos < len(hits) and aptime > hits[hitspos][0]:
                        mask = hits[hitspos][2]
                        element = hits[hitspos][1]
                        error = max(abs(last - hits[hitspos][0]),
                                    abs(aptime - hits[hitspos][0]))
                        if element == 'starttime':
                            update_segment(mask,
                                           starttime=last,
                                           startframe=frmcnt,
                                           error=error)
                        else:
                            update_segment(mask,
                                           endtime=last,
                                           endframe=frmcnt,
                                           error=max(
                                               error,
                                               get_error_from_segment(mask)))
                        hitspos += 1
                    last = aptime
            finally:
                cap.release()
            return mask

        new_mask_set = []
        hits = []
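        # Each hit is a (time, 'starttime'|'endtime', segment) tuple; a later
        # pass over the actual frames refines these positions for VFR media.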
        # First adjust all the frame and time references by the total change in the video.
        # In most cases, the length of the video in time changes by a small amount which is distributed
        # across all the masks
        for mask_set in video_masks:
            if 'type' in mask_set and mask_set['type'] != expectedType:
                new_mask_set.append(mask_set)
                continue
            #these are initial estimates
            startframe, error_start = apply_change(
                get_start_frame_from_segment(mask_set),
                float(sourceFrames),
                float(targetFrames),
                inverse=inverse,
                round_value=True,
                min_value=1)
            endframe, error_end = apply_change(
                get_end_frame_from_segment(mask_set),
                float(sourceFrames),
                float(targetFrames),
                inverse=inverse,
                min_value=1,
                round_value=True,
                upper_bound=True)
            endtime = apply_change(get_end_time_from_segment(mask_set),
                                   float(sourceTime),
                                   targetTime,
                                   inverse=inverse,
                                   round_value=False)
            starttime = apply_change(get_start_time_from_segment(mask_set),
                                     sourceTime,
                                     targetTime,
                                     inverse=inverse,
                                     round_value=False,
                                     upper_bound=True)

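            # Clamp the projected segment end to the target clip, preferring
            # the container's reported duration; log when the estimate
            # overshoots the target length.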
            try:
                if endframe == int(getValue(meta_o[index_o], 'nb_frames', 0)) and \
                                float(getValue(meta_o[index_o], 'duration', 0)) > 0:
                    endtime = float(getValue(meta_o[index_o], 'duration',
                                             0)) * 1000.0 - (1000.0 /
                                                             targetRate)
                elif endtime > targetTime and endframe > targetFrames:
                    message = '{} exceeded target time of {} for {}'.format(
                        sourceTime, targetTime, target)
                    if (endtime - targetTime) > 300:
                        logging.getLogger('maskgen').error(message)
                    else:
                        logging.getLogger('maskgen').warn(message)
                    endtime = targetTime - (1000.0 / targetRate)
                    endframe = targetFrames
            except:
                pass
            change = create_segment(
                rate=sourceRate if inverse else targetRate,
                type=get_type_of_segment(mask_set),
                starttime=starttime,
                startframe=startframe,
                error=get_error_from_segment(mask_set) +
                (max(error_start, error_end) / targetRate * 1000.0),
                endtime=endtime,
                endframe=endframe,
                videosegment=get_file_from_segment(mask_set))
            if not isVFR:
                # in this case, we trust the time invariance, updating frames
                recalculate_frames_for_segment(change)
                # then we reupdate time to match the frames
                recalculate_times_for_segment(change)
            new_mask_set.append(change)
            hits.append(
                (get_start_time_from_segment(change), 'starttime', change))
        hits.append((get_end_time_from_segment(change), 'endtime', change))

        # only required when one of the two videos is variable rate
        hits = sorted(hits)

        if isVFR:
            if useFFMPEG:
                meta_r, frames_r = self.getVideoMeta(
                    source if inverse else target,
                    show_streams=True,
                    with_frames=True,
                    media_types=[expectedType])
                index_r = ffmpeg_api.get_stream_indices_of_type(
                    meta_o, expectedType)[0]
                adjustPositionsFFMPEG(meta_r[index_r], frames_r[index_r], hits)
            else:
                adjustPositions(
                    self.getNodeFile(source)
                    if inverse else self.getNodeFile(target), hits)

        transfer_masks(video_masks,
                       new_mask_set,
                       dropRate,
                       frame_time_function=lambda x, y: y +
                       (1000.0 / targetRate),
                       frame_count_function=lambda x, y: y + 1)
        return new_mask_set
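
A minimal, standalone sketch of the frame re-projection idea behind apply_change above (plain Python, no maskgen imports; project_frame and the numbers are illustrative only):

import math

def project_frame(frame, source_frames, target_frames, inverse=False):
    # Scale a frame index by the ratio of target to source frame counts,
    # mirroring apply_change: the inverse flag flips the direction.
    multiplier = -1.0 if inverse else 1.0
    adjusted = frame * math.pow(float(target_frames) / source_frames, multiplier)
    value = max(int(round(adjusted)), 1)
    return value, abs(adjusted - value)  # rounded frame plus rounding error

# A mask starting at frame 100 of a 400-frame source lands on frame 75 of a
# 300-frame target: project_frame(100, 400, 300) == (75, 0.0)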