def get_synced_timings(self, slide_data):
    """Re-sync slide timings against the original video's audio track.

    slide_data -- slide timing data to be synced (passed through to the
        global/local sync strategy selected by ``self.global_method``).
    Returns a non-empty list of (time, image) tuples, normalised so that
    playback starts at time 0.
    """
    # Use lazy %-args for logging, consistent with the other calls below.
    logger.debug("Loading files %s and %s",
                 self.original_video_file, self.slide_video_file)
    # Load audio files first
    original_audio, original_file_sr = utils.get_audio_from_file(
        self.original_video_file)
    logger.debug("Loaded original file %s, preprocessing...",
                 self.original_video_file)
    original_audio, original_sr = sync.preprocess_audio(
        original_audio, original_file_sr, bandpass=False)
    logger.debug("Done.")
    slide_audio, original_slide_sr = utils.get_audio_from_file(
        self.slide_video_file)
    logger.debug("Loaded slide file %s, preprocessing...",
                 self.slide_video_file)
    slide_audio, slide_sr = sync.preprocess_audio(
        slide_audio, original_slide_sr, bandpass=False)
    logger.debug("Done.")
    # Correlation is only meaningful when both files share a sample rate.
    assert original_file_sr == original_slide_sr
    logger.debug("Load done, calculating sync timings...")
    if self.global_method:
        updated_slides = self._get_synced_timings_global(
            slide_data, original_audio, slide_audio, slide_sr)
    else:
        updated_slides = self._get_synced_timings_local(
            slide_data, original_audio, slide_audio, slide_sr)
    assert len(updated_slides) > 0
    if updated_slides[0][0] > 0:
        # Pull down first slide to 0 if all slides are positive
        updated_slides[0] = (0, updated_slides[0][1])
    elif updated_slides[0][0] < 0:
        # Pull up incoming slide if there are negatively timed slides.
        # (Guarding on < 0 avoids a spurious assertion failure when the
        # first slide already sits exactly at 0: bisect_left would return
        # 0 and the assert below would fire even though nothing needs
        # adjusting.)
        zero_index = bisect.bisect_left(
            [time for time, image in updated_slides], 0)
        assert zero_index > 0
        updated_slides[zero_index - 1] = (0, updated_slides[zero_index - 1][1])
    return updated_slides
def work(f1, f2): print "Loading first audio..." file1_audio, file1_orig_sr = utils.get_audio_from_file(f1) print "Preprocessing first audio..." file1_audio, file1_sr = sync.preprocess_audio(file1_audio, file1_orig_sr, bandpass=False) print "Loading second audio..." file2_audio, file2_orig_sr = utils.get_audio_from_file(f2) print "Preprocessing second audio..." file2_audio, file2_sr = sync.preprocess_audio(file2_audio, file2_orig_sr, bandpass=False) print file1_sr, " - ", file2_sr global_offset = correlate.get_offset(file1_audio, file2_audio, file1_sr) print "Found offset", global_offset
def find_offset(target, source, source_offset):
    """
    Finds piece of source file around source_offset in target file

    target - File in which to match the piece
        (file path or (numpy array, samplerate) tuple of preprocessed samples)
    source - File to extract piece of file from
        (file path or (numpy array, samplerate) tuple of preprocessed samples)
    source_offset - offset of piece of interest in source file (in seconds)

    Returns offset in target file in seconds
    """
    if isinstance(target, basestring):
        logger.debug("Target file passed as filepath, loading...")
        target_audio, target_samplerate = utils.get_audio_from_file(target)
        target_audio, target_samplerate = preprocess_audio(
            target_audio, target_samplerate)
        logger.debug("Loaded.")
    else:
        target_audio, target_samplerate = target

    if isinstance(source, basestring):
        logger.debug("Source file passed as filepath, loading...")
        source_audio, source_samplerate = utils.get_audio_from_file(source)
        source_audio, source_samplerate = preprocess_audio(
            source_audio, source_samplerate)
        logger.debug("Done.")
    else:
        source_audio, source_samplerate = source

    # Correlation is only meaningful at a common sample rate.
    assert target_samplerate == source_samplerate

    # Take a WINDOW_SIZE-second stretch of audio, centered on source_offset,
    # to look for correlation. Indices are cast to int: source_offset is in
    # (possibly fractional) seconds, and slicing requires integer indices.
    half_window = (WINDOW_SIZE * source_samplerate) // 2
    start_index = max(0, int(source_offset * source_samplerate - half_window))
    end_index = min(
        len(source_audio) - 1,
        int(source_offset * source_samplerate + half_window))
    source_slice = source_audio[start_index:end_index]

    offset, correlation = correlate.get_offset(source_slice, target_audio,
                                               source_samplerate)
    return offset