Example #1
0
 def test_Audio_to_Video(self):
     """create_video_for_audio should append a video-associate mask per audio mask.

     Checks both a constant-rate source (sample1.mov) and a second source
     (Sample1_slow.mov), verifying frame/rate/time mapping of the derived mask.
     """
     source = self.locateFile('tests/videos/sample1.mov')
     extractor = MetaDataExtractor(GraphProxy(source, 'b'))
     masks = [video_tools.create_segment(endframe=2618367,
                                         rate=44100,
                                         starttime=0.0,
                                         frames=2618367,
                                         startframe=1,
                                         endtime=59373.424,
                                         type='audio')]
     newMasks = extractor.create_video_for_audio(source, masks=masks)
     # assertGreater/assertEqual report both operands on failure,
     # unlike assertTrue(a == b) which only reports 'False is not true'.
     self.assertGreater(len(newMasks), len(masks))
     self.assertEqual(1, video_tools.get_start_frame_from_segment(newMasks[1]))
     self.assertEqual(803, video_tools.get_end_frame_from_segment(newMasks[1]))
     self.assertEqual(28.25, video_tools.get_rate_from_segment(newMasks[1]))
     self.assertEqual(59348.333, video_tools.get_end_time_from_segment(newMasks[1]))
     source = self.locateFile('tests/videos/Sample1_slow.mov')
     masks = [video_tools.create_segment(endframe=441000,
                                         rate=44100,
                                         starttime=1000.0,
                                         frames=396901,
                                         startframe=44100,
                                         endtime=10000.0,
                                         type='audio')]
     newMasks = extractor.create_video_for_audio(source, masks=masks)
     self.assertGreater(len(newMasks), len(masks))
     self.assertEqual(10.0, video_tools.get_rate_from_segment(newMasks[1]))
     self.assertEqual(11, video_tools.get_start_frame_from_segment(newMasks[1]))
     self.assertEqual(100, video_tools.get_end_frame_from_segment(newMasks[1]))
Example #2
0
    def test_audio_zip_donor(self):
        """AudioZipDonor over a zipped WAV: default Start Time and segment bounds."""
        predecessor_map = {'b': ['a'], 'e': ['d']}
        edge_map = {
            ('a', 'b'): {'op': 'NoSelect'},
            ('e', 'f'): {'op': 'SelectSomething',
                         'arguments': {'Start Time': "00:00:00.000000"}},
        }
        graph = Mock()
        graph.predecessors = lambda node: predecessor_map[node]
        graph.get_edge = lambda u, v: edge_map[(u, v)]
        graph.dir = '.'

        donor = AudioZipDonor(
            graph, 'e', 'f', 'x',
            (None, self.locateFile('tests/zips/test.wav.zip')),
            (None, self.locateFile('tests/videos/sample1.mov')))
        self.assertEqual("00:00:00.000000",
                         donor.arguments()['Start Time']['defaultvalue'])

        # an explicit time window maps to the matching sample range
        segments = donor.create(
            arguments={
                'Start Time': "00:00:09.11",
                'End Time': "00:00:16.32",
                'sample rate': 44100
            })
        for segment in segments:
            self.assertEqual(401752, get_start_frame_from_segment(segment))
            self.assertEqual(719713, get_end_frame_from_segment(segment))
            self.assertAlmostEqual(9110,
                                   get_start_time_from_segment(segment),
                                   places=1)
            self.assertEqual(16320.0, int(get_end_time_from_segment(segment)))

        # a zero-length window selects the entire stream
        segments = donor.create(
            arguments={
                'Start Time': "00:00:00.00",
                'End Time': "00:00:00.00",
                'sample rate': 44100
            })
        for segment in segments:
            self.assertEqual(1, get_start_frame_from_segment(segment))
            self.assertEqual(1572865, get_end_frame_from_segment(segment))
            self.assertAlmostEqual(0.0,
                                   get_start_time_from_segment(segment),
                                   places=1)
            self.assertEqual(35665, int(get_end_time_from_segment(segment)))
Example #3
0
    def create_video_for_audio(self, source, masks):
        """
        Make a mask in video time for each audio mask in masks.

        In the VFR case, uses ffmpeg per-frame metadata to find the nearest
        frame at-or-before each timestamp; otherwise frames are derived from
        the constant frame rate.
        :param source: video file
        :param masks: list of segment dictionaries
        :return: new set of masks (the audio masks plus one 'video-associate'
                 segment per audio mask), or masks unchanged when the source
                 has no video stream or already contains video masks
        """
        from video_tools import get_frame_rate

        def _get_frame_time(frame):
            # prefer presentation timestamp; fall back to decode timestamp
            if 'pkt_pts_time' in frame.keys() and frame['pkt_pts_time'] != 'N/A':
                return float(frame['pkt_pts_time']) * 1000
            else:
                return float(frame['pkt_dts_time']) * 1000

        def _frame_distance(time_a, time_b):
            # only frames at or before the target time are candidates;
            # frames after it are treated as infinitely far away
            dist = time_a - time_b
            return abs(dist) if dist <= 0 else float('inf')

        meta_and_frames = self.getVideoMeta(source, show_streams=True, with_frames=False, media_types=['video'])
        hasVideo = ffmpeg_api.get_stream_indices_of_type(meta_and_frames[0], 'video')
        meta = meta_and_frames[0][0]
        isVFR = ffmpeg_api.is_vfr(meta)
        video_masks = [mask for mask in masks if get_type_of_segment(mask) == 'video']
        audio_masks = [mask for mask in masks if get_type_of_segment(mask) == 'audio']
        if len(video_masks) == 0 and hasVideo:
            entire_mask = getMaskSetForEntireVideoForTuples(self.getMetaDataLocator(source), media_types=['video'])[0]
            upper_bounds = (get_end_frame_from_segment(entire_mask), get_end_time_from_segment(entire_mask))
            new_masks = list(audio_masks)
            # per-frame metadata is expensive to pull; fetch lazily, once,
            # and reuse across all VFR lookups
            video_frames = None
            for mask in audio_masks:
                end_time = min(get_end_time_from_segment(mask), upper_bounds[1])
                new_mask = mask.copy()
                rate = get_frame_rate(self.getMetaDataLocator(source))
                if not isVFR:
                    start_frame = int(get_start_time_from_segment(mask) * rate / 1000.0) + 1
                    end_frame = int(end_time * rate / 1000.0)
                else:
                    if video_frames is None:
                        video_frames = \
                            self.getVideoMeta(source, show_streams=True, with_frames=True, media_types=['video'])[1][0]
                    start_frame = video_frames.index(
                        min(video_frames, key=lambda x: _frame_distance(_get_frame_time(x), get_start_time_from_segment(mask)))) + 1
                    end_frame = video_frames.index(
                        min(video_frames, key=lambda x: _frame_distance(_get_frame_time(x), end_time))) + 1
                end_frame = min(end_frame, upper_bounds[0])
                update_segment(new_mask,
                               type='video-associate',
                               rate=rate,
                               endtime=end_time,
                               startframe=start_frame,
                               endframe=end_frame)
                new_masks.append(new_mask)
            return new_masks
        else:
            return masks
    def test_video_video_link_tool(self):
        """VideoVideoLinkTool comparison of two sample videos yields one mask set."""
        from maskgen.scenario_model import VideoVideoLinkTool
        from maskgen.software_loader import Operation
        from maskgen.video_tools import get_end_frame_from_segment, get_file_from_segment
        from maskgen.image_wrap import ImageWrapper
        import os
        import numpy as np

        def blank_image(height, width):
            # all-zero grayscale proxy image
            return ImageWrapper(np.zeros((height, width), dtype='uint8'))

        video_a = self.locateFile('videos/sample1.mov')
        video_b = self.locateFile('videos/sample1_swap.mov')
        lookup = {
            'a': (blank_image(300, 300), video_a),
            'b': (blank_image(300, 300), video_b)
        }

        tool = VideoVideoLinkTool()
        scModel = Mock()
        scModel.gopLoader = Mock()
        scModel.G.dir = '.'
        scModel.gopLoader.getOperationWithGroups = Mock(
            return_value=Operation(name='test', category='test'))
        scModel.getImageAndName = lookup.__getitem__
        mask, analysis, errors = tool.compareImages(
            'a',
            'b',
            scModel,
            'Normalization',
            arguments={
                'aggregate': 'sum',
                'minimum threshold': 1
            },
            analysis_params={})
        self.assertEqual(0, len(errors))
        self.assertEqual((640, 480), mask.size)
        self.assertEqual(1, len(analysis['videomasks']))
        first_segment = analysis['videomasks'][0]
        self.assertEqual(803, get_end_frame_from_segment(first_segment))
        self.assertTrue(os.path.exists(get_file_from_segment(first_segment)))
    def test_zip_zip_link_tool(self):
        """ZipZipLinkTool over two 2-frame zips: same-size compare, then a resize.

        The zip-writing and mock-model boilerplate was duplicated four and two
        times respectively; extracted into local helpers.
        """
        from maskgen.scenario_model import ZipZipLinkTool
        from maskgen.software_loader import Operation
        from maskgen.video_tools import get_end_frame_from_segment, get_file_from_segment
        from maskgen.tool_set import ZipWriter
        from maskgen.image_wrap import ImageWrapper
        import os
        import numpy as np

        def create_zero(h, w):
            # all-zero grayscale proxy image
            return ImageWrapper(np.zeros((h, w), dtype='uint8'))

        def write_zip(path, shape):
            # build a 2-frame zip of random images and register it for cleanup
            writer = ZipWriter(path)
            for _ in range(2):
                writer.write(np.random.randint(0, 255, shape))
            writer.release()
            self.addFileToRemove(path)

        def make_model(image_values):
            # mock scenario model serving images/paths from the given dict
            model = Mock()
            model.gopLoader = Mock()
            model.G.dir = '.'
            model.gopLoader.getOperationWithGroups = Mock(
                return_value=Operation(name='test', category='test'))
            model.getImageAndName = image_values.__getitem__
            return model

        write_zip('v1.zip', (300, 300, 3))
        write_zip('v2.zip', (300, 300, 3))
        scModel = make_model({
            'a': (create_zero(300, 300), 'v1.zip'),
            'b': (create_zero(300, 300), 'v2.zip')
        })
        tool = ZipZipLinkTool()
        mask, analysis, errors = tool.compareImages('a',
                                                    'b',
                                                    scModel,
                                                    'Normalization',
                                                    arguments={},
                                                    analysis_params={})
        self.assertEqual(0, len(errors))
        self.assertEqual((300, 300), mask.size)
        self.assertEqual(1, len(analysis['videomasks']))
        self.assertEqual(2,
                         get_end_frame_from_segment(analysis['videomasks'][0]))
        self.assertTrue(
            os.path.exists(get_file_from_segment(analysis['videomasks'][0])))

        # second pass: the donor zip is 400x400, exercising TransformResize
        write_zip('v1.zip', (300, 300, 3))
        write_zip('v2.zip', (400, 400, 3))
        scModel = make_model({
            'a': (create_zero(300, 300), 'v1.zip'),
            'b': (create_zero(400, 400), 'v2.zip')
        })
        tool = ZipZipLinkTool()
        mask, analysis, errors = tool.compareImages('a',
                                                    'b',
                                                    scModel,
                                                    'TransformResize',
                                                    arguments={},
                                                    analysis_params={})
        self.assertEqual(0, len(errors))
        self.assertEqual('(100, 100)', analysis['shape change'])
        self.assertEqual(1, len(analysis['videomasks']))
        self.assertEqual(2,
                         get_end_frame_from_segment(analysis['videomasks'][0]))
    def test_audiozip_zip_link_tool(self):
        """ZipAudioLinkTool and AudioZipLinkTool between a WAV zip and a video.

        The mock-model setup and the duration-diff int coercion were each
        duplicated; extracted into local helpers.
        """
        from maskgen.scenario_model import ZipAudioLinkTool, AudioZipLinkTool
        from maskgen.software_loader import Operation
        from maskgen.image_wrap import ImageWrapper
        from maskgen.support import getValue
        from maskgen.video_tools import get_end_frame_from_segment, get_frames_from_segment
        import os
        import numpy as np

        def create_zero(h, w):
            # all-zero grayscale proxy image
            return ImageWrapper(np.zeros((h, w), dtype='uint8'))

        vida = self.locateFile('tests/zips/test.wav.zip')
        vidb = self.locateFile('videos/sample1.mov')
        image_values = {
            'a': (create_zero(300, 300), vida),
            'b': (create_zero(300, 300), vidb)
        }

        class SillyGraph:
            def get_node(self, name):
                return {'a': {}, 'b': {}}[name]

        def make_model():
            # mock scenario model shared by both link tools
            model = Mock()
            model.gopLoader = Mock()
            model.G.dir = '.'
            model.gopLoader.getOperationWithGroups = Mock(
                return_value=Operation(name='test', category='test'))
            model.getImageAndName = image_values.__getitem__
            model.getGraph = Mock(return_value=SillyGraph())
            return model

        def duration_diff(analysis):
            # ['change', old, new] with the two durations truncated to ints
            x = getValue(analysis, 'metadatadiff.audio.duration')
            x[1] = int(x[1])
            x[2] = int(x[2])
            return x

        tool = ZipAudioLinkTool()
        scModel = make_model()
        mask, analysis, errors = tool.compareImages('a',
                                                    'b',
                                                    scModel,
                                                    'Normalization',
                                                    arguments={},
                                                    analysis_params={})

        self.assertEqual(3, len(analysis['videomasks']))
        self.assertEqual(['change', 35665, 59348], duration_diff(analysis))
        self.assertEqual(
            2617263, get_end_frame_from_segment(analysis['videomasks'][-1]))

        tool = AudioZipLinkTool()
        scModel = make_model()
        mask, analysis, errors = tool.compareImages('b',
                                                    'a',
                                                    scModel,
                                                    'Normalization',
                                                    arguments={},
                                                    analysis_params={})

        self.assertEqual(1, len(analysis['videomasks']))
        self.assertEqual(['change', 59348, 35665], duration_diff(analysis))
        self.assertEqual(
            1572865, get_end_frame_from_segment(analysis['videomasks'][-1]))
        self.assertEqual(1572865,
                         get_frames_from_segment(analysis['videomasks'][-1]))

        # a non-zero Start Time trims the frame count but not the end frame
        mask, analysis, errors = tool.compareImages(
            'b',
            'a',
            scModel,
            'Normalization',
            arguments={'Start Time': '00:00:01.000000'},
            analysis_params={})

        self.assertEqual(1, len(analysis['videomasks']))
        self.assertEqual(['change', 59348, 35665], duration_diff(analysis))
        self.assertEqual(
            1572865, get_end_frame_from_segment(analysis['videomasks'][-1]))
        self.assertEqual(1528766,
                         get_frames_from_segment(analysis['videomasks'][-1]))
Example #7
0
    def test_audio_donor(self):
        """AudioDonor / AllStreamDonor / AllAudioStreamDonor over sample1.mov."""
        predecessor_map = {'b': ['a'], 'e': ['d']}
        edge_map = {
            ('a', 'b'): {'op': 'NoSelect'},
            ('d', 'e'): {'op': 'SelectSomething',
                         'arguments': {'Start Time': 20, 'End Time': 100}},
        }
        graph = Mock()
        graph.predecessors = lambda node: predecessor_map[node]
        graph.get_edge = lambda u, v: edge_map[(u, v)]
        graph.dir = '.'

        sample = self.locateFile('tests/videos/sample1.mov')
        donor = AudioDonor(graph, 'e', 'f', 'x', (None, sample), (None, sample))
        args = donor.arguments()
        self.assertEqual("00:00:00.000000", args['Start Time']['defaultvalue'])
        self.assertEqual("00:00:00.000000", args['End Time']['defaultvalue'])
        segments = donor.create(arguments={
            'Start Time': "00:00:01.11",
            'End Time': "00:00:01.32"
        })
        for segment in segments:
            self.assertEqual(48951, get_start_frame_from_segment(segment))
            self.assertEqual(58212, get_end_frame_from_segment(segment))
            self.assertAlmostEqual(1109.97,
                                   get_start_time_from_segment(segment),
                                   places=1)
            self.assertEqual(1320.0, int(get_end_time_from_segment(segment)))

        donor = AllStreamDonor(graph, 'e', 'f', 'y', (None, sample),
                               (None, sample))
        self.assertEqual(0, len(donor.arguments()))
        segments = donor.create(arguments={})
        seen_types = set()
        for segment in segments:
            segment_type = get_type_of_segment(segment)
            seen_types.add(segment_type)
            # audio and video streams end at different frame counts but
            # share the same time bounds
            if segment_type == 'audio':
                self.assertEqual(1, get_start_frame_from_segment(segment))
                self.assertEqual(2617262, get_end_frame_from_segment(segment))
            else:
                self.assertEqual(1, get_start_frame_from_segment(segment))
                self.assertEqual(803, get_end_frame_from_segment(segment))
            self.assertAlmostEqual(0,
                                   get_start_time_from_segment(segment),
                                   places=1)
            self.assertAlmostEqual(59348,
                                   int(get_end_time_from_segment(segment)))
        self.assertEqual(2, len(seen_types))

        donor = AllAudioStreamDonor(graph, 'e', 'f', 'y', (None, sample),
                                    (None, sample))
        self.assertEqual(0, len(donor.arguments()))
        self.assertEqual(['audio'], donor.media_types())
Example #8
0
    def test_video_donor(self):
        """VideoDonor: argument defaults come from the select edge (or fall back),
        and created segments honor the Start/End Time arguments.

        The identical assertion loop was duplicated verbatim; extracted into
        a local helper.
        """
        graph = Mock()

        def lkup_preds(x):
            return {'b': ['a'], 'e': ['d']}[x]

        def lkup_edge(x, y):
            return {
                'ab': {
                    'op': 'NoSelect'
                },
                'de': {
                    'op': 'SelectSomething',
                    'arguments': {
                        'Start Time': 20,
                        'End Time': 100
                    }
                }
            }[x + y]

        graph.predecessors = lkup_preds
        graph.get_edge = lkup_edge
        graph.dir = '.'

        def assert_expected_segments(segments):
            # shared assertions for segments created with
            # Start Time 30 / End Time 150 and audio included
            for segment in segments:
                if get_type_of_segment(segment) == 'audio':
                    self.assertEqual(115542,
                                     get_start_frame_from_segment(segment))
                    self.assertEqual(509061,
                                     get_end_frame_from_segment(segment))
                else:
                    self.assertEqual(30, get_start_frame_from_segment(segment))
                    self.assertEqual(150, get_end_frame_from_segment(segment))
                self.assertEqual(2620.0, get_start_time_from_segment(segment))
                self.assertEqual(11543, int(get_end_time_from_segment(segment)))

        donor = VideoDonor(graph, 'e', 'f', 'x',
                           (None, self.locateFile('tests/videos/sample1.mov')),
                           (None, self.locateFile('tests/videos/sample1.mov')))
        args = donor.arguments()
        # defaults picked up from the 'de' select edge
        self.assertEqual(20, args['Start Time']['defaultvalue'])
        self.assertEqual(100, args['End Time']['defaultvalue'])
        assert_expected_segments(donor.create(arguments={
            'include audio': 'yes',
            'Start Time': 30,
            'End Time': 150
        }))

        donor = VideoDonor(graph, 'b', 'c', 'x',
                           (None, self.locateFile('tests/videos/sample1.mov')),
                           (None, self.locateFile('tests/videos/sample1.mov')))
        args = donor.arguments()
        # no select edge on this path: fall back to frame defaults
        self.assertEqual(1, args['Start Time']['defaultvalue'])
        self.assertEqual(0, args['End Time']['defaultvalue'])
        assert_expected_segments(donor.create(arguments={
            'include audio': 'yes',
            'Start Time': 30,
            'End Time': 150
        }))

        segments = donor.create(arguments={
            'include audio': 'no',
            'Start Time': 30,
            'End Time': 150
        })
        self.assertEqual(
            0,
            len([
                segment for segment in segments
                if get_type_of_segment(segment) == 'audio'
            ]))

        donor = VideoDonorWithoutAudio(
            graph, 'b', 'c', 'x',
            (None, self.locateFile('tests/videos/sample1.mov')),
            (None, self.locateFile('tests/videos/sample1.mov')))
        self.assertNotIn('include audio', donor.arguments())
Example #9
0
    def warpMask(self,
                 video_masks,
                 source,
                 target,
                 expectedType='video',
                 inverse=False,
                 useFFMPEG=False):
        """
        Transform masks when the frame rate has changed between source and target.

        Each mask's frame numbers and times are first rescaled by the ratio of
        target to source frame counts/durations; for variable-frame-rate media
        the positions are then snapped to actual frame boundaries read from the
        media itself.

        :param video_masks: the set of video masks to walk through and transform
        :param source: source node of the edge
        :param target: target node of the edge
        :param expectedType: stream type to transform ('video' or 'audio');
                             masks of other types pass through unchanged
        :param inverse: if True, map from target time back into source time
        :param useFFMPEG: for VFR media, snap positions using ffmpeg frame
                          metadata instead of the OpenCV capture API
        :return: new set of video masks
        """
        edge = self.graph.get_edge(source, target)
        meta_i, frames_i = self.getVideoMeta(source,
                                             show_streams=True,
                                             media_types=[expectedType])
        meta_o, frames_o = self.getVideoMeta(target,
                                             show_streams=True,
                                             media_types=[expectedType])
        indices_i = ffmpeg_api.get_stream_indices_of_type(meta_i, expectedType)
        indices_o = ffmpeg_api.get_stream_indices_of_type(meta_o, expectedType)
        # nothing to transform if either side lacks a stream of this type
        if not indices_i or not indices_o:
            return video_masks
        # only the first stream of the expected type is considered
        index_i = indices_i[0]
        index_o = indices_o[0]
        isVFR = ffmpeg_api.is_vfr(meta_i[index_i]) or ffmpeg_api.is_vfr(
            meta_o[index_o])

        result = self.getChangeInFrames(edge,
                                        meta_i[index_i],
                                        meta_o[index_o],
                                        source,
                                        target,
                                        expectedType=expectedType)

        if result is None:
            return video_masks

        sourceFrames, sourceTime, targetFrames, targetTime, sourceRate, targetRate = result

        # same frame count and duration equal to the centisecond:
        # the masks can be reused as-is
        if sourceFrames == targetFrames and int(sourceTime * 100) == int(
                targetTime * 100):
            return video_masks

        # ratio handed to transfer_masks to decide which frames were dropped;
        # sourceFrames + 1 effectively means 'no frame is ever dropped'
        dropRate = sourceFrames / float(
            sourceFrames -
            targetFrames) if sourceFrames != targetFrames else sourceFrames + 1

        def apply_change(existing_value,
                         orig_rate,
                         final_rate,
                         inverse=False,
                         round_value=True,
                         min_value=0,
                         upper_bound=False):
            # Scale existing_value by final_rate/orig_rate (reciprocal when
            # inverse), clamped below by min_value and, when upper_bound is
            # set, above by final_rate.
            # if round_value, return a tuple of value plus rounding error
            import math
            multiplier = -1.0 if inverse else 1.0
            adjustment = existing_value * math.pow(final_rate / orig_rate,
                                                   multiplier)
            if round_value:
                v = max(
                    min(round(adjustment), final_rate)
                    if upper_bound else round(adjustment), min_value)
                e = abs(adjustment - v)
                return int(v), e
            return max(
                min(adjustment, final_rate) if upper_bound else adjustment,
                min_value)

        def adjustPositionsFFMPEG(meta, video_frames, hits):
            # Walk ffmpeg per-frame metadata in order, snapping each hit
            # (time, element, mask) to the last frame before its time and
            # accumulating the snap error on the mask.
            rate = ffmpeg_api.get_video_frame_rate_from_meta([meta],
                                                             [video_frames])
            aptime = 0
            lasttime = 0
            hitspos = 0
            start_mask = None
            for pos in range(0, len(video_frames)):
                aptime = get_frame_time(video_frames[pos], aptime, rate)
                while hitspos < len(hits) and aptime > hits[hitspos][0]:
                    mask = hits[hitspos][2]
                    element = hits[hitspos][1]
                    error = abs(aptime - hits[hitspos][0])
                    if element == 'starttime':
                        update_segment(mask,
                                       starttime=lasttime,
                                       startframe=pos,
                                       error=error +
                                       get_error_from_segment(mask))
                        start_mask = mask
                    else:
                        # for error, only record the error if not recorded
                        update_segment(
                            mask,
                            endtime=lasttime,
                            endframe=pos,
                            error=(error if start_mask != mask else 0) +
                            get_error_from_segment(mask))
                    hitspos += 1
                lasttime = aptime
            return mask

        def adjustPositions(video_file, hits):
            # used if variable frame rate
            # Same snapping as adjustPositionsFFMPEG, but reads timestamps by
            # grabbing frames through the OpenCV capture delegate.
            frmcnt = 0
            hitspos = 0
            last = 0
            cap = cv2api_delegate.videoCapture(video_file)
            try:
                while cap.grab() and hitspos < len(hits):
                    frmcnt += 1
                    aptime = cap.get(cv2api_delegate.prop_pos_msec)
                    while hitspos < len(hits) and aptime > hits[hitspos][0]:
                        mask = hits[hitspos][2]
                        element = hits[hitspos][1]
                        error = max(abs(last - hits[hitspos][0]),
                                    abs(aptime - hits[hitspos][0]))
                        if element == 'starttime':
                            update_segment(mask,
                                           starttime=last,
                                           startframe=frmcnt,
                                           error=error)
                        else:
                            update_segment(mask,
                                           endtime=last,
                                           endframe=frmcnt,
                                           error=max(
                                               error,
                                               get_error_from_segment(mask)))
                        hitspos += 1
                    last = aptime
            finally:
                cap.release()
            return mask

        new_mask_set = []
        hits = []
        # First adjust all the frame and time references by the total change in the video.
        # In most cases, the length of the video in time changes by a small amount which is distributed
        # across all the masks
        for mask_set in video_masks:
            # masks for other stream types pass through untouched
            if 'type' in mask_set and mask_set['type'] != expectedType:
                new_mask_set.append(mask_set)
                continue
            #these are initial estimates
            startframe, error_start = apply_change(
                get_start_frame_from_segment(mask_set),
                float(sourceFrames),
                float(targetFrames),
                inverse=inverse,
                round_value=True,
                min_value=1)
            endframe, error_end = apply_change(
                get_end_frame_from_segment(mask_set),
                float(sourceFrames),
                float(targetFrames),
                inverse=inverse,
                min_value=1,
                round_value=True,
                upper_bound=True)
            endtime = apply_change(get_end_time_from_segment(mask_set),
                                   float(sourceTime),
                                   targetTime,
                                   inverse=inverse,
                                   round_value=False)
            starttime = apply_change(get_start_time_from_segment(mask_set),
                                     sourceTime,
                                     targetTime,
                                     inverse=inverse,
                                     round_value=False,
                                     upper_bound=True)

            try:
                # If the mask ends at the container's recorded last frame,
                # trust the container duration (minus one frame interval)
                # for the end time instead of the scaled estimate.
                if endframe == int(getValue(meta_o[index_o], 'nb_frames', 0)) and \
                                float(getValue(meta_o[index_o], 'duration', 0)) > 0:
                    endtime = float(getValue(meta_o[index_o], 'duration',
                                             0)) * 1000.0 - (1000.0 /
                                                             targetRate)
                elif endtime > targetTime and endframe > targetFrames:
                    # NOTE(review): the format arguments look out of order —
                    # this reads '<sourceTime> exceeded target time of
                    # <target> for <targetTime>'; confirm intended message.
                    message = '{} exceeded target time of {} for {}'.format(
                        sourceTime, target, targetTime)
                    # large overshoots are errors; small ones only warnings
                    if (endtime - targetTime) > 300:
                        logging.getLogger('maskgen').error(message)
                    else:
                        logging.getLogger('maskgen').warn(message)
                    endtime = targetTime - (1000.0 / targetRate)
                    endframe = targetFrames
            except:
                # metadata values may be missing or non-numeric; keep estimates
                pass
            change = create_segment(
                rate=sourceRate if inverse else targetRate,
                type=get_type_of_segment(mask_set),
                starttime=starttime,
                startframe=startframe,
                error=get_error_from_segment(mask_set) +
                (max(error_start, error_end) / targetRate * 1000.0),
                endtime=endtime,
                endframe=endframe,
                videosegment=get_file_from_segment(mask_set))
            if not isVFR:
                # in this case, we trust the time invariance, updating frames
                recalculate_frames_for_segment(change)
                # then we reupdate time to match the frames
                recalculate_times_for_segment(change)
            new_mask_set.append(change)
            hits.append(
                (get_start_time_from_segment(change), 'starttime', change))
            # NOTE: 'endime' (sic) is a long-standing typo; harmless, since
            # the snapping code only tests for the exact string 'starttime'.
            hits.append((get_end_time_from_segment(change), 'endime', change))

        # only required when one of the two videos is variable rate
        hits = sorted(hits)

        if isVFR:
            if useFFMPEG:
                meta_r, frames_r = self.getVideoMeta(
                    source if inverse else target,
                    show_streams=True,
                    with_frames=True,
                    media_types=[expectedType])
                index_r = ffmpeg_api.get_stream_indices_of_type(
                    meta_o, expectedType)[0]
                adjustPositionsFFMPEG(meta_r[index_r], frames_r[index_r], hits)
            else:
                adjustPositions(
                    self.getNodeFile(source)
                    if inverse else self.getNodeFile(target), hits)

        # redistribute the per-frame mask data onto the adjusted segments
        transfer_masks(video_masks,
                       new_mask_set,
                       dropRate,
                       frame_time_function=lambda x, y: y +
                       (1000.0 / targetRate),
                       frame_count_function=lambda x, y: y + 1)
        return new_mask_set