Example #1
 def run(self):
     # Initialize signals
     self.percentDoneSignal.emit(0)
     # Progress is reported in five steps, scaled so the final emitted value is 1000
     percent_scale = 1000.0 / 5
     self.doneSignal.emit(0)
     self.statusSignal.emit("")
     # Load in audio data
     self.statusSignal.emit("Loading {}".format(
         os.path.split(self.mix_file)[1]))
     mix, self.fs = librosa.load(self.mix_file, sr=None)
     self.percentDoneSignal.emit(1 * percent_scale)
     self.statusSignal.emit("Loading {}".format(
         os.path.split(self.source_file)[1]))
     source, self.fs = librosa.load(self.source_file, sr=self.fs)
     self.percentDoneSignal.emit(2 * percent_scale)
     # Fix any gross timing offset
     self.statusSignal.emit("Aligning...")
     mix, source = estimate.align(mix, source, self.fs)
     self.percentDoneSignal.emit(3 * percent_scale)
     self.statusSignal.emit("Subtracting...")
     source = estimate.reverse_channel(mix, source)
     mix, source = estimate.pad(mix, source)
     self.percentDoneSignal.emit(4 * percent_scale)
     self.statusSignal.emit("Enhancing...")
     self.subtracted = estimate.wiener_enhance(mix - source, source,
                                               self.wiener_threshold)
     self.percentDoneSignal.emit(5 * percent_scale)
     self.doneSignal.emit(1)
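The run() method above relies on Qt signals (percentDoneSignal, doneSignal, statusSignal) and attributes (mix_file, source_file, wiener_threshold) defined on its worker class, and on os, librosa, and the estimate module being imported at module level. Below is a minimal sketch of what that surrounding class might look like, assuming a PyQt5-style QThread worker; the class name, signal types, and constructor are illustrative assumptions, not taken from the example.

from PyQt5.QtCore import QThread, pyqtSignal

class SubtractionWorker(QThread):
    # Hypothetical class name; the real class is not shown in the example above.
    # Progress values are emitted on a 0-1000 scale (5 steps * percent_scale).
    percentDoneSignal = pyqtSignal(float)
    doneSignal = pyqtSignal(int)
    statusSignal = pyqtSignal(str)

    def __init__(self, mix_file, source_file, wiener_threshold, parent=None):
        super().__init__(parent)
        self.mix_file = mix_file
        self.source_file = source_file
        self.wiener_threshold = wiener_threshold

A GUI would typically connect these signals to progress-bar and status-label handlers and then call start() on the worker so that run() executes in a background thread.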
Example #2
import os
import librosa
# estimate: project module providing the align() and reverse_channel() routines used below
import estimate

# Path to .wav files ripped from CDs...
data_directory = '../Dataset/Digital/'
# ... with subdirectories 1, 2, 3... each for a different song
subdirectories = [os.path.join(data_directory, d) for d in os.listdir(data_directory)]
for subdirectory in [d for d in subdirectories if os.path.isdir(d)]:
    print('Processing {}'.format(subdirectory))
    # Load in the full mixture
    mix, fs = librosa.load(os.path.join(subdirectory, 'M.wav'), sr=None)
    # Perform timing/channel estimation for both a cappella ('A' files) and instrumental ('I' files)
    for s in ['A', 'I']:
        # Load in the source waveform
        source, fs = librosa.load(os.path.join(subdirectory, '{}.wav'.format(s)), sr=fs)
        # Align the source to the mixture
        # Small resampling rate range because it's all digital
        mix, source_aligned = estimate.align(mix, source, fs, correlation_size=4., max_global_offset=0.,
                                             max_skew=.0001, hop=1., max_local_offset=.01)
        # Estimate the filter
        source_filtered = estimate.reverse_channel(mix, source_aligned)
        # Write out the aligned and filtered versions
        librosa.output.write_wav(os.path.join(subdirectory, '{}-aligned.wav'.format(s)), source_aligned, fs)
        librosa.output.write_wav(os.path.join(subdirectory, '{}-aligned-filtered.wav'.format(s)), source_filtered, fs)
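        # Note: librosa.output.write_wav was removed in librosa 0.8; with a newer
        # librosa install, soundfile.write(path, data, fs) is the usual replacement
        # for the two calls above.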

# <markdowncell>

# ## Step 2: Compute SDR
# For each song, we compute the SDR of $m - c$, $m - c_\mathcal{R}$, and $m - c_\mathcal{F}$ relative to $s$, considering both the case where $c$ is the a cappella and $s$ is the instrumental, and the case where $c$ is the instrumental and $s$ is the a cappella.
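#
# A minimal sketch of such an SDR computation, using a plain energy-ratio definition
# rather than the full BSS-Eval decomposition; simple_sdr is a hypothetical helper
# introduced here for illustration, not a function from this notebook:

# <codecell>

import numpy as np

def simple_sdr(reference, estimated):
    '''Hypothetical helper: SDR in dB as reference energy over residual energy.'''
    # Compare only the overlapping portion of the two signals
    n = min(reference.shape[0], estimated.shape[0])
    residual = estimated[:n] - reference[:n]
    return 10 * np.log10(np.sum(reference[:n]**2) / np.sum(residual**2))

# e.g. simple_sdr(s, m - c) for the raw subtraction, with m, c, and s the waveforms
# for one song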

# <codecell>

def trim(signals, trim_length):
    '''