Example #1
    def probe_video(self, video_file):
        if os.path.exists(self.job.source_file):
            success('source probe info exists')
            return
        info('probing video...')
        probe = ffmpeg.probe(video_file)
        store_json(probe, self.job.source_file)
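The helpers used throughout these examples (info, success, store_json, and friends) are not part of the listing. A minimal sketch of what store_json presumably does, assuming it simply serializes the probe dict to disk:

import json

def store_json(data, path):
    # hypothetical helper: write any JSON-serializable object to the given path
    with open(path, 'w') as handle:
        json.dump(data, handle, indent=2)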
Example #2
    def ir(self, frames, subjects):
        # search for subjects in frames
        matches = []
        create_dir(self.job.matches_dir)

        info('searching for ' + str(len(subjects)) + ' subjects in ' + str(len(frames)) + ' frames...')

        frame_index = 0
        subject_index = 0
        total = len(subjects) * len(frames)

        for frame in frames:
            frame_index = frame_index + 1
            for subject in subjects:
                progress(subject_index, total)
                subject_index = subject_index + 1
                match = self.search_frame_ir(frame, subject, frame_index)
                if match:
                    matches.append(match)
        progress(subject_index, total)

        for result in matches:
            highlight('found subject ' + result.get('subject') + ' (' + str(
                result.get('subject_size')) + 'px) in frame ' + result.get('frame'))

        if len(matches) == 0:
            warn('found nothing :/')

        return matches
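search_frame_ir is not shown in this listing. A plausible sketch of such a matcher, assuming OpenCV template matching and the match keys used above (subject, frame, subject_size); the threshold and grayscale handling are illustrative assumptions, not the original logic:

import os
import cv2

def search_frame_ir(frame_path, subject_path, frame_index, threshold=0.8):
    # hypothetical matcher: look for the subject icon inside the frame image
    frame = cv2.imread(frame_path, cv2.IMREAD_GRAYSCALE)
    subject = cv2.imread(subject_path, cv2.IMREAD_GRAYSCALE)
    result = cv2.matchTemplate(frame, subject, cv2.TM_CCOEFF_NORMED)
    _, max_val, _, _ = cv2.minMaxLoc(result)
    if max_val < threshold:
        return None
    return {
        'subject': os.path.basename(subject_path),
        'subject_size': subject.shape[0],
        'frame': str(frame_index),
    }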
Example #3
    def publish_html(self):
        info('generating html...')
        video_id = self.job.video_source_name
        # video_type = self.job.video_type
        # force it to use the local template, even for youtube videos
        video_type = 'local'
        copyfile('templates/template-' + video_type + '.html', self.job.result_html_file)
        if video_type == 'youtube':
            video_id = YouTube(self.job.video_url).video_id
        subjects = Path(self.job.subjects_file).read_text()
        matches = Path(self.job.matches_file).read_text()
        results = Path(self.job.results_file).read_text()
        with fileinput.FileInput(self.job.result_html_file, inplace=True) as file:
            for line in file:
                print(
                    line.replace('%frame_interval_seconds%', str(self.job.frame_interval_seconds))
                        .replace('%video_id%', video_id)
                        .replace('%video_url_mp4%', self.job.video_source_path)
                        .replace('%video_poster_url%', self.job.video_poster_path)
                        .replace('%subjects%', subjects)
                        .replace('%matches%', matches)
                        .replace('%results%', results)
                    , end='')
        success('stored to ' + self.job.result_html_file)
        webbrowser.open('file:///' + self.job.result_html_file, new=2)
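The template substitution relies on fileinput.FileInput(..., inplace=True), which redirects print() back into the file being read. A self-contained sketch of the same technique with a hypothetical placeholders dict:

import fileinput

def fill_placeholders(path, placeholders):
    # rewrite the file in place, replacing each %key% token with its value
    with fileinput.FileInput(path, inplace=True) as handle:
        for line in handle:
            for key, value in placeholders.items():
                line = line.replace('%' + key + '%', str(value))
            print(line, end='')

# e.g. fill_placeholders('index.html', {'video_id': 'abc123', 'subjects': '[]'})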
Example #4
    def extract_frames(self, area_key, output_dir):
        # extract frames
        info('extracting frames from ' + area_key + ' area ...')

        # area definitions
        crop_areas = {
            'rt': ('(in_w/4)*3', '0', 'in_w/4', 'in_h/3'),
            'rb': ('(in_w/4)*3', '(in_h/3)*2', 'in_w/4', 'in_h/3'),
            'lt': ('0', '0', 'in_w/4', 'in_h/3'),
            'lb': ('0', '(in_h/3)*2', 'in_w/4', 'in_h/3'),
        }

        crop_area = crop_areas.get(area_key)
        create_dir(output_dir)

        # for area_key, crop_area in crop_areas.items():
        try:
            (
                ffmpeg
                    .input(self.job.video_source_path)
                    .crop(*crop_area)
                    .filter('fps', '1/' + str(self.job.frame_interval_seconds))
                    .output(os.path.join(output_dir, '%05d.jpg'))
                    .run(quiet=True, capture_stderr=True)
            )
        except ffmpeg.Error as err:
            # decode error message and print to stdout
            # from: https://stackoverflow.com/a/37059682/580651
            error(codecs.escape_decode(err.stderr)[0].decode("utf-8"))
        success('stored ' + str(len(glob.glob(os.path.join(output_dir, '*.jpg')))) + ' frames to ' + output_dir)
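The crop expressions use ffmpeg's in_w/in_h variables, so each tuple selects a quarter-width, third-height corner of the picture. A stripped-down, standalone version of the same ffmpeg-python chain (file names and the 5-second interval are placeholders):

import os
import ffmpeg

os.makedirs('frames', exist_ok=True)
# crop the top-right corner, then keep one frame every 5 seconds
(
    ffmpeg
    .input('source.mp4')
    .crop('(in_w/4)*3', '0', 'in_w/4', 'in_h/3')
    .filter('fps', '1/5')
    .output('frames/%05d.jpg')
    .run(quiet=True)
)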
Example #5
    def download_subjects(self, job):
        # download subjects
        info('downloading subjects definition from "' + self.xml_stats_url +
             '"...')

        # download xml
        stats_xml = os.path.join(job.job_dir, 'subjects.xml')
        urllib.request.urlretrieve(self.xml_stats_url, stats_xml)

        # parse xml to json
        root = ET.parse(stats_xml).getroot()
        subjects = parker.data(
            root.find("./achievements"),
            preserve_root=True).get('achievements').get('achievement')
        store_json(subjects, job.subjects_file)

        # download icons
        info('downloading subjects...')
        i = 0
        total = len(subjects)
        progress(i, total)
        for subject in subjects:
            subject_url = subject.get('iconClosed')
            urllib.request.urlretrieve(
                subject_url,
                os.path.join(job.subjects_dir, ntpath.basename(subject_url)))
            i = i + 1
            progress(i, total)
        success('stored ' +
                str(len(glob.glob(os.path.join(job.subjects_dir, "*.jpg")))) +
                ' subjects to ' + job.subjects_dir)
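parker here is the Parker convention converter from the xmljson package (presumably imported as from xmljson import parker); parker.data() turns an ElementTree element into plain dicts and lists. A small self-contained sketch of that conversion step, with a made-up XML snippet:

import xml.etree.ElementTree as ET
from xmljson import parker

xml = ('<achievements>'
       '<achievement><name>a</name></achievement>'
       '<achievement><name>b</name></achievement>'
       '</achievements>')
root = ET.fromstring(xml)
# preserve_root=True keeps the 'achievements' key in the resulting mapping
subjects = parker.data(root, preserve_root=True).get('achievements').get('achievement')
print(subjects)  # a list with one mapping per <achievement> element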
Example #6
    def transcode_video(self, video_file, output):
        if os.path.exists(output):
            return
        info('transcoding video to ' + output + '...')
        try:
            ffmpeg.input(video_file).output(output).run(quiet=False, capture_stderr=True)
        except ffmpeg.Error as err:
            error(codecs.escape_decode(err.stderr)[0].decode("utf-8"))
Example #7
    def prepare_video(self):
        self.transcode_video(self.job.video_url, self.job.video_source_path)
        # self.transcode_video(self.job.video_source_path, self.job.video_source_webm_path)
        video_probe = self.get_video_probe()
        width = int(video_probe['width'])
        height = int(video_probe['height'])
        duration = float(video_probe['duration'])
        info('duration: ' + str(duration), 'dimensions: ' + str(width) + 'x' + str(height))
        poster_time = int(float(video_probe['duration']) / 3)
        self.extract_poster(self.job.video_source_path, self.job.video_poster_path, poster_time)
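get_video_probe is not shown; given the keys read here (width, height, duration), it presumably loads the probe JSON stored by probe_video and returns the video stream entry. A hedged sketch of such a helper (the file argument and stream selection are assumptions):

import json

def get_video_probe(probe_file):
    # hypothetical helper: load the stored ffprobe output and pick the video stream;
    # depending on the container, 'duration' may instead live under probe['format']
    with open(probe_file) as handle:
        probe = json.load(handle)
    return next(s for s in probe['streams'] if s['codec_type'] == 'video')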
Example #8
    def extract_poster(self, video_file, output, time):
        if os.path.exists(self.job.video_poster_path):
            success('poster exists')
            return
        info('extracting poster at ' + str(int(time)) + '...')
        try:
            (
                ffmpeg.input(video_file, ss=time)
                    .output(output, vframes=1)
                    .overwrite_output()
                    .run(quiet=True, capture_stderr=True)
            )
        except ffmpeg.Error as err:
            error(codecs.escape_decode(err.stderr)[0].decode("utf-8"))
Example #9
    def download_video(self):
        if os.path.isfile(self.job.video_source_path):
            success('source video exists')
            return

        info('downloading "' + self.job.video_url + '"...')
        create_dir(self.job.source_dir)

        if self.job.video_type == 'youtube':
            # from: https://stackoverflow.com/a/46562895/580651
            YouTube(self.job.video_url).streams.first() \
                .download(output_path=self.job.source_dir, filename=self.job.job_id)

        if self.job.video_type == 'local':
            extension = Path(self.job.video_url).suffix
            copy(self.job.video_url, os.path.join(self.job.source_dir, self.job.video_source_name + extension))

        success('stored to ' + self.job.source_dir)
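streams.first() takes whatever stream pytube lists first, which is not necessarily the best quality. Assuming the YouTube class comes from pytube, a common refinement (shown as a suggestion, not the original behavior) is to pick the highest-resolution progressive stream:

from pytube import YouTube

stream = (
    YouTube('https://www.youtube.com/watch?v=...')
    .streams
    .filter(progressive=True, file_extension='mp4')
    .order_by('resolution')
    .desc()
    .first()
)
stream.download(output_path='jobs/example/source', filename='example')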
Example #10
    def search_area(self, area_key):
        info('searching in ' + area_key + ' area ...')
        area_dir = os.path.join(self.job.frames_dir, area_key)
        if not os.path.exists(area_dir):
            self.extract_frames(area_key, area_dir)

        frames = glob.glob(os.path.join(area_dir, "*.jpg"))

        if len(frames) == 0:
            raise ValueError('No frames available. Video too short?')

        # sort frames in reverse - assuming unlocks usually happen later in a video
        def file_basename(s):
            return int(os.path.basename(s)[:-4]) * -1

        frames.sort(key=file_basename)

        subjects = glob.glob(os.path.join(self.job.subjects_dir, "*.jpg"))
        return self.ir(frames, subjects)
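The negated value returned by file_basename is just a trick to get a descending numeric sort of the frame filenames. An equivalent, arguably clearer form of the same sort:

import os

frames = ['frames/rt/00002.jpg', 'frames/rt/00010.jpg', 'frames/rt/00001.jpg']
# sort by the numeric stem, highest frame number (latest point in the video) first
frames.sort(key=lambda path: int(os.path.basename(path)[:-4]), reverse=True)
print(frames)  # ['frames/rt/00010.jpg', 'frames/rt/00002.jpg', 'frames/rt/00001.jpg']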
Example #11
    def __init__(self,
                 video_url,
                 handler,
                 frame_interval_seconds=5,
                 icon_on_screen_size=64):
        self.video_url = video_url
        self.job_id = hashlib.sha1(self.video_url.encode('utf-8')).hexdigest()
        self.job_dir = os.path.join(os.getcwd(), 'jobs', self.job_id)

        self.video_type = self.determine_video_type(video_url)
        self.video_source_name = self.job_id
        self.handler = handler
        self.frame_interval_seconds = frame_interval_seconds
        self.icon_on_screen_size = icon_on_screen_size
        self.source_dir = os.path.join(self.job_dir, source_folder_name)
        self.frames_dir = os.path.join(self.job_dir, frames_folder_name)
        self.subjects_dir = os.path.join(self.job_dir, subjects_folder_name)
        self.matches_dir = os.path.join(self.job_dir, matches_folder_name)

        self.source_file = os.path.join(self.job_dir, 'source.json')
        self.subjects_file = os.path.join(self.job_dir, 'subjects.json')
        self.matches_file = os.path.join(self.job_dir, 'matches.json')
        self.results_file = os.path.join(self.job_dir, 'results.json')
        self.result_html_file = os.path.join(self.job_dir, 'index.html')

        self.video_source_path = os.path.join(self.source_dir,
                                              self.video_source_name + '.mp4')
        self.video_source_webm_path = os.path.join(
            self.source_dir, self.video_source_name + '.webm')
        self.video_poster_path = os.path.join(self.source_dir,
                                              self.video_source_name + '.jpg')

        create_dir(self.job_dir)
        section('[job]')
        info('id: ' + self.job_id)
        info('handler: ' + self.handler.type)
        info('video: ' + self.video_type)
        section_end()
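Put together, the constructor implies a per-job directory layout along these lines (the actual folder names come from the *_folder_name constants, which are not part of this listing):

jobs/<sha1 of video_url>/
    <source_folder_name>/      # <job_id>.mp4, <job_id>.webm, <job_id>.jpg
    <frames_folder_name>/      # one subfolder per crop area (rt, rb, lt, lb)
    <subjects_folder_name>/    # downloaded subject icons
    <matches_folder_name>/
    source.json
    subjects.json
    matches.json
    results.json
    index.html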
Example #12
    def collect_results(self):
        info('collecting results...')
        # TODO: merge into results.json: matches.json, subjects.json, ocr.json
        # warn('skipping for now')
        store_json('', self.job.results_file)
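The TODO is left open here. One way the merge could eventually look, assuming results.json should simply bundle the intermediate files (a sketch, not the project's actual plan; ocr.json is skipped because the Job constructor above defines no path for it):

import json

def collect_results(job):
    # hypothetical merge for the TODO: bundle matches.json and subjects.json into results.json
    results = {}
    for key, path in (('matches', job.matches_file), ('subjects', job.subjects_file)):
        with open(path) as handle:
            results[key] = json.load(handle)
    with open(job.results_file, 'w') as handle:
        json.dump(results, handle, indent=2)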
Example #13
    def ocr(self, matches):
        info('running ocr for matches...')
        warn('skipping for now')
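The OCR pass is stubbed out. If it were implemented with Tesseract (an assumption; the listing does not say which OCR engine is intended), the core call per matched frame might look like:

from PIL import Image
import pytesseract

def ocr_frame(frame_path):
    # hypothetical OCR pass over a matched frame; requires the tesseract binary to be installed
    return pytesseract.image_to_string(Image.open(frame_path))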