Exemple #1
0
 def drop(self, firstFrametoDrop, lastFrametoDrop, framesToAdd, flows, matches):
     """Drop the frame range via the base strategy, then export the matching audio span as a .wav."""
     # Base strategy performs the video-frame drop first.
     super(StratAudioSep, self).drop(firstFrametoDrop, lastFrametoDrop, framesToAdd, flows, matches)
     # Convert the frame endpoints to millisecond offsets for the audio cut.
     start_ms = tool_set.getMilliSecondsAndFrameCount(int(firstFrametoDrop), self.fps)[0]
     end_ms = tool_set.getMilliSecondsAndFrameCount(int(lastFrametoDrop), self.fps)[0]
     stitched = maskgen_audio.cut_n_stitch(self.sp.original, [int(start_ms), int(end_ms)], 100)
     wav_name = os.path.splitext(self.out_file)[0] + '.wav'
     maskgen_audio.export_audio(stitched, wav_name)
     return firstFrametoDrop, lastFrametoDrop, framesToAdd
Exemple #2
0
def transformAdd(img,source,target,**kwargs):
    """Insert optical-flow interpolated frames into *source*, writing *target*.

    Returns a (result-arguments dict, None) pair describing the insertion and
    the fixed Farneback parameters used.
    """
    start_time = (0, 1)
    if 'Start Time' in kwargs:
        start_time = getMilliSecondsAndFrameCount(kwargs['Start Time'])
    end_time = None
    if 'End Time' in kwargs:
        end_time = getMilliSecondsAndFrameCount(kwargs['End Time'])
    frames_add = None
    if 'Frames to Add' in kwargs:
        frames_add = int(kwargs['Frames to Add'])
    if frames_add is not None:
        # An explicit frame count overrides any supplied end time.
        end_time = (start_time[0], start_time[1] + frames_add - 1)
    codec = kwargs.get('codec', 'XVID')
    direction = kwargs.get('Direction', 'forward')
    add_frames, end_time_millis = maskgen.algorithms.optical_flow.smartAddFrames(
        source, target, start_time, end_time, codec=codec, direction=direction)

    # Report the end point as a duration string when the start had a millisecond
    # component, otherwise as a frame number.
    if start_time[0] > 0:
        et = getDurationStringFromMilliseconds(end_time_millis)
    else:
        et = str(int(start_time[1]) + int(add_frames))

    return {'Start Time': str(kwargs['Start Time']),
            'End Time': et,
            'Frames to Add': int(add_frames),
            'Method': 'Pixel Motion',
            'Algorithm': 'Farneback',
            'scale': 0.8,
            'levels': 7,
            'winsize': 15,
            'iterations': 3,
            'poly_n': 7,
            'poly_sigma': 1.5,
            'Vector Detail': 100}, None
Exemple #3
0
 def create(self, arguments={}, invert=False):
     """Build a single audio segment spanning [Start Time, End Time] of the source file.

     When the end time does not exceed the start time, the segment runs to the
     end of the audio.
     """
     from maskgen.zip_tools import AudioPositions
     from maskgen.tool_set import getMilliSecondsAndFrameCount
     from maskgen.video_tools import FileMetaDataLocator, create_segment
     sample_rate = float(getValue(arguments, 'sample rate', 0))
     # AudioPositions supplies both the effective rate and the total duration.
     source_name = FileMetaDataLocator(self.startFileName).get_filename()
     positions = AudioPositions(source_name, fps=sample_rate)
     duration = positions.get_total_duration()
     rate = positions.fps
     start_tuple = getMilliSecondsAndFrameCount(
         getValue(arguments, 'Start Time', '00:00:00'))
     end_tuple = getMilliSecondsAndFrameCount(
         getValue(arguments, 'End Time', "00:00:00"))
     if end_tuple[0] <= start_tuple[0]:
         # No usable end time: span to the end of the audio.
         end_tuple = (duration, 0)
     segment = create_segment(
         starttime=float(start_tuple[0]),
         startframe=int(start_tuple[0] * rate / 1000.0) + 1,
         endtime=float(end_tuple[0]),
         endframe=int(end_tuple[0] * rate / 1000.0) + 1,
         type='audio',
         rate=rate)
     return [segment]
Exemple #4
0
def transform(img, source, target, **kwargs):
    """Smart-drop roughly 'seconds to drop' of frames from *source*, writing *target*.

    Returns ({'Start Time', 'End Time', 'Frames Dropped', 'Frames to Add'}, None).
    """
    start_time = getMilliSecondsAndFrameCount(str(
        kwargs['Start Time'])) if 'Start Time' in kwargs else (0, 1)
    end_time = getMilliSecondsAndFrameCount(str(
        kwargs['End Time'])) if 'End Time' in kwargs else None
    seconds_to_drop = float(
        kwargs['seconds to drop']) if 'seconds to drop' in kwargs else 1.0
    save_histograms = (kwargs['save histograms']
                       == 'yes') if 'save histograms' in kwargs else False
    # BUG FIX: the original tested for the key 'drop at start time' but then read
    # kwargs['drop'], raising KeyError whenever only 'drop at start time' was
    # supplied.  Read the same key that is tested.
    # NOTE(review): this flag is currently unused below -- confirm whether it
    # should be forwarded to smartDropFrames.
    drop = (kwargs['drop at start time']
            == 'yes') if 'drop at start time' in kwargs else True
    audio = (kwargs['Audio'] == 'yes') if 'Audio' in kwargs else False
    codec = (kwargs['codec']) if 'codec' in kwargs else 'XVID'
    start, stop, frames_to_add = smartDropFrames(
        source,
        target,
        start_time,
        end_time,
        seconds_to_drop,
        savehistograms=save_histograms,
        codec=codec,
        audio=audio)
    return {
        'Start Time': str(start),
        'End Time': str(stop),
        'Frames Dropped': str(stop - start + 1),
        'Frames to Add': frames_to_add
    }, None
Exemple #5
0
 def value(self, frame1, frame2):
     """Score a frame pair: 1 when both endpoints fall in visually-silent audio,
     otherwise a large penalty (1000000)."""
     st = tool_set.getMilliSecondsAndFrameCount(self.offset + frame1,
                                                self.fps)[0]
     en = tool_set.getMilliSecondsAndFrameCount(self.offset + frame2,
                                                self.fps)[0]
     # BUG FIX: the body indexes visualsilence[en + 1], so the bound must cover
     # en + 1; the original guard (en < vlen) allowed an IndexError at
     # en == vlen - 1.  Assumes self.vlen == len(self.visualsilence) -- confirm.
     if int(st) < self.vlen and int(en) + 1 < self.vlen and \
             self.visualsilence[int(st)] == 1 and self.visualsilence[int(en) + 1] == 1:
         return 1
     return 1000000
    def test_timeparse(self):
        """Exercise getMilliSecondsAndFrameCount / validateTimeString parsing and
        VidTimeManager window stepping."""
        # 'HH:MM:SS' with no frame suffix parses to 0 ms and defaults to frame 1.
        t, f = tool_set.getMilliSecondsAndFrameCount('00:00:00')
        self.assertEqual(1, f)
        self.assertEqual(0, t)
        # A bare integer is treated as a frame count, not a time.
        t, f = tool_set.getMilliSecondsAndFrameCount('1')
        self.assertEqual(1, f)
        self.assertEqual(0, t)
        # Fractional seconds fold into the millisecond part; frame count becomes 0.
        self.assertTrue(tool_set.validateTimeString('03:10:10.434'))
        t, f = tool_set.getMilliSecondsAndFrameCount('03:10:10.434')
        self.assertEqual(0, f)
        self.assertEqual(1690434, t)
        # A fourth ':NN' field parses as a frame count even though the string
        # fails validateTimeString.
        t, f = tool_set.getMilliSecondsAndFrameCount('03:10:10:23')
        self.assertFalse(tool_set.validateTimeString('03:10:10:23'))
        self.assertEqual(23, f)
        self.assertEqual(1690000, t)
        # Unparseable input falls back to the supplied defaultValue.
        t, f = tool_set.getMilliSecondsAndFrameCount('03:10:10:A',
                                                     defaultValue=(0, 0))
        self.assertFalse(tool_set.validateTimeString('03:10:10:A'))
        self.assertEqual((0, 0), (t, f))
        # Bounded window (start 1000ms+2 frames, stop 1003ms+4 frames): step the
        # clock forward and check before/inside/past transitions.
        time_manager = tool_set.VidTimeManager(startTimeandFrame=(1000, 2),
                                               stopTimeandFrame=(1003, 4))
        time_manager.updateToNow(999)
        self.assertTrue(time_manager.isBeforeTime())
        time_manager.updateToNow(1000)
        self.assertTrue(time_manager.isBeforeTime())
        time_manager.updateToNow(1001)
        self.assertTrue(time_manager.isBeforeTime())
        time_manager.updateToNow(1002)
        self.assertFalse(time_manager.isBeforeTime())
        self.assertFalse(time_manager.isPastTime())
        time_manager.updateToNow(1003)
        self.assertFalse(time_manager.isPastTime())
        time_manager.updateToNow(1004)
        self.assertFalse(time_manager.isPastTime())
        time_manager.updateToNow(1005)
        self.assertFalse(time_manager.isPastTime())
        time_manager.updateToNow(1006)
        self.assertFalse(time_manager.isPastTime())
        time_manager.updateToNow(1007)
        self.assertFalse(time_manager.isPastTime())
        time_manager.updateToNow(1008)
        self.assertTrue(time_manager.isPastTime())
        self.assertEqual(9, time_manager.getEndFrame())
        self.assertEqual(4, time_manager.getStartFrame())

        # Open-ended window (no stop time): only the start transition applies.
        time_manager = tool_set.VidTimeManager(startTimeandFrame=(999, 2),
                                               stopTimeandFrame=None)
        time_manager.updateToNow(999)
        self.assertTrue(time_manager.isBeforeTime())
        time_manager.updateToNow(1000)
        self.assertTrue(time_manager.isBeforeTime())
        time_manager.updateToNow(1001)
        self.assertFalse(time_manager.isBeforeTime())
        self.assertEqual(3, time_manager.getEndFrame())
        self.assertEqual(3, time_manager.getStartFrame())
Exemple #7
0
 def create(self, arguments={}, invert=False):
     """Produce the mask set for the whole video over the requested time window."""
     from maskgen.video_tools import getMaskSetForEntireVideoForTuples, FileMetaDataLocator
     from maskgen.tool_set import getMilliSecondsAndFrameCount
     start_tuple = getMilliSecondsAndFrameCount(
         getValue(arguments, 'Start Time', '00:00:00'))
     end_tuple = getMilliSecondsAndFrameCount(
         getValue(arguments, 'End Time', "00:00:00"))
     # A zero-millisecond end time means "no end": pass None to cover the rest.
     chosen_end = end_tuple if end_tuple[0] > 0 else None
     return getMaskSetForEntireVideoForTuples(
         FileMetaDataLocator(self.startFileName),
         start_time_tuple=start_tuple,
         end_time_tuple=chosen_end,
         media_types=self.media_types())
Exemple #8
0
def transform(img, source, target, **kwargs):
    """Drop the frames between Start Time and End Time from *source*, writing *target*."""
    if 'Start Time' in kwargs:
        begin = getMilliSecondsAndFrameCount(str(kwargs['Start Time']))
    else:
        begin = (0, 1)
    if 'End Time' in kwargs:
        finish = getMilliSecondsAndFrameCount(str(kwargs['End Time']))
    else:
        finish = None
    start, stop, frames_dropped = dropFrames(source, target, begin, finish,
                                             codec=kwargs.get('codec', 'XVID'))
    return {'Start Time': str(start),
            'End Time': str(stop),
            'Frames Dropped': frames_dropped}, None
Exemple #9
0
def transform(img, source, target, **kwargs):
    """Copy a run of frames from the selection point and insert it at the paste point."""
    if 'Select Start Time' in kwargs:
        select_start = getMilliSecondsAndFrameCount(kwargs['Select Start Time'])
    else:
        select_start = (0, 1)
    frame_count = int(kwargs['Number of Frames'])
    select_end = addFrame(select_start, frame_count - 1)
    paste_at = getMilliSecondsAndFrameCount(kwargs['Dest Paste Time'])
    codec = kwargs.get('codec', 'XVID')
    # +1 presumably converts copyFrames' position to a one-based frame number.
    first_frame = copyFrames(
        source, target, select_start, select_end, paste_at, codec=codec) + 1
    return {'Start Time': str(first_frame),
            'End Time': str(first_frame + frame_count - 1),
            'add type': 'insert'}, None
Exemple #10
0
def transformDrop(img,source,target,**kwargs):
    """Drop frames from *source* into *target*, via smartDropFrames (default) or
    a best-frame-pair search.

    Returns ({'Start Time', 'End Time', 'Frames Dropped', 'Frames to Add'}, None)
    for the video strategies; the audio strategy returns get_best_frame_pairs'
    raw result (see note below).
    """
    start_time = getMilliSecondsAndFrameCount(str(kwargs['Start Time'])) if 'Start Time' in kwargs else (0, 1)
    end_time = getMilliSecondsAndFrameCount(str(kwargs['End Time'])) if 'End Time' in kwargs else None
    seconds_to_drop = float(kwargs['seconds to drop']) if 'seconds to drop' in kwargs else 1.0
    save_histograms = (kwargs['save histograms'] == 'yes') if 'save histograms' in kwargs else False
    # BUG FIX: the original used the raw kwarg values as booleans, so the string
    # 'no' (truthy) enabled these options.  Accept True or 'yes', matching the
    # 'yes'/'no' convention used by the sibling transforms in this module.
    drop = (kwargs['drop'] in (True, 'yes')) if 'drop' in kwargs else True
    codec = (kwargs['codec']) if 'codec' in kwargs else 'XVID'
    audio = (kwargs['audio'] in (True, 'yes')) if 'audio' in kwargs else False
    if drop:
        start, stop, frames_to_add = smartDropFrames(source, target,
                                                     start_time,
                                                     end_time,
                                                     seconds_to_drop,
                                                     savehistograms=save_histograms,
                                                     codec=codec,
                                                     audio=audio)
        return {'Start Time': str(start),
                'End Time': str(stop),
                'Frames Dropped': stop - start + 1,
                'Frames to Add': frames_to_add}, None
    if audio:
        # NOTE(review): this branch returns the raw result rather than the
        # ({...}, None) shape used elsewhere -- confirm callers expect that.
        return get_best_frame_pairs(source, target,
                                    start_time,
                                    end_time,
                                    seconds_to_drop,
                                    savehistograms=save_histograms,
                                    codec=codec,
                                    strategy=dropStrats.StratTopFrames)
    start, stop, frames_to_add = get_best_frame_pairs(source, target,
                                                      start_time,
                                                      end_time,
                                                      seconds_to_drop,
                                                      savehistograms=save_histograms,
                                                      codec=codec,
                                                      strategy=dropStrats.StratTopFramesVideo)
    return {'Start Time': str(start),
            'End Time': str(stop),
            'Frames Dropped': stop - start + 1,
            'Frames to Add': frames_to_add}, None
Exemple #11
0
def transform(img, source, target, **kwargs):
    """Shift the selection window back by 'Number of Frames' and pass the
    remaining arguments through unchanged."""
    # NOTE(review): the original bound this to a variable named end_time; it
    # actually holds the parsed 'Select Start Time' (or (0, 1) when absent).
    if 'Select Start Time' in kwargs:
        selected = getMilliSecondsAndFrameCount(kwargs['Select Start Time'])
    else:
        selected = (0, 1)
    new_start_frame = selected[1] - int(kwargs['Number of Frames'])
    return {'Select Start Time': str(new_start_frame),
            'Number of Frames': kwargs['Number of Frames'],
            'Dest Paste Time': kwargs['Dest Paste Time']}, None
Exemple #12
0
    def getMasksFromEdge(self,
                         source,
                         target,
                         media_types,
                         channel=0,
                         startTime=None,
                         endTime=None):
        """
        Return the video masks recorded on the (source, target) edge or, when the
        edge has none, a mask set synthesized over the edge's recorded
        Start/End Time arguments.

        Recorded masks take priority over entered times.  This seems appropriate.
        Adjust the software to produce masks consistent with recorded change.
        :param source: source node of the edge
        :param target: target node of the edge
        :param media_types: mask types to keep (e.g. 'video', 'audio')
        :param channel: channel forwarded to the mask-set builder
        :param startTime: optional (ms, frame) tuple overriding the edge's Start Time
        :param endTime: optional (ms, frame) tuple overriding the edge's End Time
        :return: list of masks, or None when nothing could be produced
        """

        edge = self.graph.get_edge(source, target)
        if 'videomasks' in edge and \
                        edge['videomasks'] is not None and \
                        len(edge['videomasks']) > 0:
            # Prefer the masks already recorded on the edge, filtered by type.
            return [
                mask for mask in edge['videomasks']
                if mask['type'] in media_types
            ]
        else:
            st = getMilliSecondsAndFrameCount(getValue(
                edge, 'arguments.Start Time', defaultValue='00:00:00.000'),
                                              defaultValue=(0, 1))
            et = getValue(edge, 'arguments.End Time', None)
            # Both '0' and absence mean "no end time".
            et = getMilliSecondsAndFrameCount(et) if et not in [None, '0'
                                                                ] else None
            result = self.getMetaDataLocator(
                source).getMaskSetForEntireVideoForTuples(
                    start_time_tuple=st if startTime is None else startTime,
                    end_time_tuple=et if endTime is None else endTime,
                    media_types=media_types,
                    channel=channel)
            if result is None or len(result) == 0:
                return None
        return result
Exemple #13
0
 def test_zip(self):
     """Open a frame from a raw.zip archive, then condense the zip and verify its contents."""
     import os
     filename = self.locateFile('tests/zips/raw.zip')
     # Presumably openImage leaves a raw.png snapshot beside the zip -- schedule
     # its removal (TODO confirm).
     self.addFileToRemove(os.path.join(os.path.dirname(filename), 'raw.png'))
     img = tool_set.openImage(filename,tool_set.getMilliSecondsAndFrameCount('2'),preserveSnapshot=True)
     self.assertEqual((5796, 3870),img.size)
     # condenseZip(keep=1) writes raw_c.zip; exactly two .cr2 entries must survive.
     tool_set.condenseZip(filename,keep=1)
     self.addFileToRemove(os.path.join(os.path.dirname(filename),'raw_c.zip'))
     contents = tool_set.getContentsOfZip(os.path.join(os.path.dirname(filename),'raw_c.zip'))
     self.assertTrue('59487443539401a4d83512edaab3c1b2.cr2' in contents)
     self.assertTrue('7d1800a38ca7a22021bd94e71b6e0f42.cr2' in contents)
     self.assertTrue(len(contents) == 2)
Exemple #14
0
def selectSilence(matches, fps, sp, offset):
    """Filter frame-pair *matches*, keeping only pairs whose endpoints fall in
    silent audio.

    :param matches: iterable of (start_frame, end_frame, ...) candidates, or None
    :param fps: frame rate used to convert frame numbers to milliseconds
    :param sp: audio processor exposing detect_silence/processed/fade_length
    :param offset: frame offset added to each match endpoint
    :return: numpy array of the surviving matches (possibly empty)
    """
    audio_matches = []

    silences = sp.detect_silence(sp.processed,
                                 min_silence_len=sp.fade_length,
                                 silence_thresh=-21,
                                 seek_step=1)
    # ROBUSTNESS: with no detected silence, silences[-1] below would raise and
    # no match could qualify anyway.
    if len(silences) == 0:
        return np.asarray(audio_matches)

    # Millisecond-indexed mask: 1 where the audio is silent.
    visualsilence = np.zeros(silences[-1][1])
    for s in silences:
        visualsilence[s[0]:s[1]] = 1
    if matches is not None:
        for m in matches:
            st = tool_set.getMilliSecondsAndFrameCount(offset + int(m[0]),
                                                       fps)[0]
            en = tool_set.getMilliSecondsAndFrameCount(offset + int(m[1]),
                                                       fps)[0]
            # BUG FIX: the body reads visualsilence[en + 1], so the bound must
            # cover en + 1; the original guard (en < len) allowed an IndexError
            # at the last index.
            if int(st) < len(visualsilence) and int(en) + 1 < len(visualsilence) and \
                    visualsilence[int(st)] == 1 and visualsilence[int(en) + 1] == 1:
                audio_matches.append(m)
    return np.asarray(audio_matches)
Exemple #15
0
    def create(self, arguments={}, invert=False):
        """Build the mask set for the whole video over [Start Time, End Time],
        optionally re-timing audio segments to the first video segment's span."""
        from maskgen.tool_set import getMilliSecondsAndFrameCount
        media_types = ['video', 'audio'] if getValue(
            arguments, 'include audio', 'no') == 'yes' else ['video']

        from maskgen.video_tools import FileMetaDataLocator
        end_time_tuple = getMilliSecondsAndFrameCount(
            getValue(arguments, 'End Time', "00:00:00"))
        start_time_tuple = getMilliSecondsAndFrameCount(
            getValue(arguments, 'Start Time', '00:00:00'))
        # NOTE(review): this comparison uses tuple index [1] (frame count) while
        # the sibling create() implementations compare index [0] (milliseconds)
        # -- confirm which field is intended here.
        video_set = FileMetaDataLocator(
            self.startFileName).getMaskSetForEntireVideoForTuples(
                start_time_tuple=start_time_tuple,
                end_time_tuple=end_time_tuple
                if end_time_tuple[1] > start_time_tuple[1] else None,
                media_types=media_types)
        audio_segments = [
            x for x in video_set if get_type_of_segment(x) == 'audio'
        ]
        video_segments = [
            x for x in video_set if get_type_of_segment(x) == 'video'
        ]

        if getValue(arguments, 'include audio', 'no') == 'yes':
            # Re-time each audio segment to the first video segment's span,
            # converting milliseconds to audio frames via the audio rate.
            for audio_segment in audio_segments:
                video_segment = video_segments[0] if len(
                    video_segments) > 0 else audio_segment
                update_segment(
                    audio_segment,
                    type='audio',
                    starttime=get_start_time_from_segment(video_segment),
                    endtime=get_end_time_from_segment(video_segment),
                    startframe=int(
                        get_start_time_from_segment(video_segment) *
                        get_rate_from_segment(audio_segment) / 1000.0),
                    endframe=int(
                        get_end_time_from_segment(video_segment) *
                        get_rate_from_segment(audio_segment) / 1000.0) + 1)
        return video_set
    def test_gray_writing(self):
        """Round-trip random masks through GrayBlockWriter/GrayBlockReader, then
        convert the block file to a video and spot-check it.

        Python 2 test (uses the print statement and time.clock()).
        """
        import os
        import sys
        import time
        s = time.clock()
        writer = tool_set.GrayBlockWriter('test_ts_gw', 29.97002997)
        mask_set = list()
        # Write two runs of frames -- 1..255 and 301..350 -- leaving a gap.
        for i in range(255):
            mask = np.random.randint(255, size=(1090, 1920)).astype('uint8')
            mask_set.append(mask)
            writer.write(mask, 33.3666666667 * i, i + 1)
        for i in range(300, 350):
            mask = np.random.randint(255, size=(1090, 1920)).astype('uint8')
            mask_set.append(mask)
            writer.write(mask, 33.3666666667 * i, i + 1)
        writer.close()
        fn = writer.get_file_name()
        # Read everything back up to end_frame=305; each mask must match what
        # was written (mask.size equal pixels means identical arrays).
        reader = tool_set.GrayBlockReader(fn, end_frame=305)
        pos = 0
        while True:
            mask = reader.read()
            if mask is None:
                break
            compare = mask == mask_set[pos]
            self.assertEqual(mask.size, sum(sum(compare)))
            if pos == 255:
                # First mask after the gap resumes at frame 301.
                self.assertEqual(301, reader.current_frame() - 1)
            pos += 1

        reader.close()
        # 255 + 50 frames were written in total.
        self.assertEqual(305, pos)
        print time.clock() - s
        # Windows converts to avi rather than m4v.
        suffix = 'm4v'
        if sys.platform.startswith('win'):
            suffix = 'avi'
        filename = tool_set.convertToVideo(fn)
        self.assertEquals('test_ts_gw_mask_0.0.' + suffix, filename)
        self.assertTrue(os.path.exists(filename))

        # The produced video frame size is (width, height) = (1920, 1090).
        size = tool_set.openImage(
            filename, tool_set.getMilliSecondsAndFrameCount('00:00:01')).size
        self.assertTrue(size == (1920, 1090))
        os.remove(filename)
        os.remove(fn)
Exemple #17
0
 def drop(self, firstFrametoDrop, lastFrametoDrop, framesToAdd, flows, matches):
     """Resolve the drop range to millisecond offsets and echo the inputs back.

     Returns (first_frame, first_ms, last_frame, last_ms, framesToAdd); performs
     no editing itself.
     """
     first_ms = tool_set.getMilliSecondsAndFrameCount(int(firstFrametoDrop), self.fps)[0]
     last_ms = tool_set.getMilliSecondsAndFrameCount(int(lastFrametoDrop), self.fps)[0]
     return firstFrametoDrop, first_ms, lastFrametoDrop, last_ms, framesToAdd