Example #1
def test_center_freq_correction(kernel_size, stride_factor):
    spec = torch.randn(2, kernel_size + 2, 50)
    stride = None if stride_factor is None else kernel_size // stride_factor
    new_spec = transforms.centerfreq_correction(spec,
                                                kernel_size=kernel_size,
                                                stride=stride)
    assert spec.shape == new_spec.shape
    assert_allclose(transforms.mag(spec), transforms.mag(new_spec))
Example #2
def test_pmsqe_pit(n_src, sample_rate):
    # Define supported STFT
    if sample_rate == 16000:
        stft = Encoder(STFTFB(kernel_size=512, n_filters=512, stride=256))
    else:
        stft = Encoder(STFTFB(kernel_size=256, n_filters=256, stride=128))
    # Usage by itself
    ref, est = torch.randn(2, n_src, 16000), torch.randn(2, n_src, 16000)
    ref_spec = transforms.mag(stft(ref))
    est_spec = transforms.mag(stft(est))
    loss_func = PITLossWrapper(SingleSrcPMSQE(sample_rate=sample_rate), pit_from="pw_pt")
    # Assert forward ok.
    loss_func(est_spec, ref_spec)
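As a side note, `pit_from="pw_pt"` tells `PITLossWrapper` to build the pairwise loss matrix from any callable that scores a single (estimate, reference) source pair and returns one value per batch item. A minimal sketch with an illustrative MSE stand-in (not part of the test above; the import paths are assumed):

import torch
from asteroid.losses import PITLossWrapper

def pointwise_mse(est, ref):
    # One loss value per batch element for a single (est, ref) source pair.
    return (est - ref).pow(2).mean(dim=(-1, -2))

pit_mse = PITLossWrapper(pointwise_mse, pit_from="pw_pt")
est, ref = torch.randn(2, 3, 257, 100), torch.randn(2, 3, 257, 100)
loss = pit_mse(est, ref)  # scalar, minimized over source permutations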
Example #3
 def dc_head_separate(self, x):
     """ Cluster embeddings to produce binary masks, output waveforms """
     kmeans = KMeans(n_clusters=self.masker.n_src)
     if len(x.shape) == 2:
         x = x.unsqueeze(1)
     tf_rep = self.encoder(x)
     mag_spec = mag(tf_rep)
     proj, mask_out = self.masker(mag_spec)
     active_bins = ebased_vad(mag_spec)
     active_proj = proj[active_bins.view(1, -1)]
     # Cluster the embeddings of the active TF bins.
     bin_clusters = kmeans.fit_predict(active_proj.cpu().data.numpy())
     # Create binary masks
     est_mask_list = []
     for i in range(self.masker.n_src):
         # Add ones in all inactive bins in each mask.
         mask = ~active_bins
         mask[active_bins] = torch.from_numpy((bin_clusters == i)).to(mask.device)
         est_mask_list.append(mask.float())  # Need float, not bool
     # Go back to time domain
     est_masks = torch.stack(est_mask_list, dim=1)
     masked = apply_mag_mask(tf_rep, est_masks)
     wavs = pad_x_to_y(self.decoder(masked), x)
     dic_out = dict(tfrep=tf_rep, mask=mask_out, masked_tfrep=masked, proj=proj)
     return wavs, dic_out
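A stripped-down sketch of the clustering step above on dummy embeddings, assuming 2 sources and a flat list of active-bin embeddings (all shapes and values are illustrative):

import torch
from sklearn.cluster import KMeans

n_src, emb_dim = 2, 20
active_proj = torch.randn(1000, emb_dim)  # embeddings of the active TF bins
bin_clusters = KMeans(n_clusters=n_src).fit_predict(active_proj.numpy())
# One boolean mask per source: True where a bin was assigned to that cluster.
masks = [torch.from_numpy(bin_clusters == i) for i in range(n_src)]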
Example #4
 def unpack_data(self, batch, EPS=1e-8):
     mix, sources, noise = batch
     # Take only the first channel
     mix = mix[..., 0]
     sources = sources[..., 0]
     noise = noise[..., 0]
     noise = noise.unsqueeze(1)
     # Compute magnitude spectrograms and IRM
     src_mag_spec = mag(self.model.encoder(sources))
     noise_mag_spec = mag(self.model.encoder(noise))
     noise_mag_spec = noise_mag_spec.unsqueeze(1)
     real_mask = src_mag_spec / (noise_mag_spec +
                                 src_mag_spec.sum(1, keepdim=True) + EPS)
     # Get the src idx having the maximum energy
     binary_mask = real_mask.argmax(1)
     return mix, binary_mask, real_mask
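For reference, a shape-only sketch of the ratio-mask computation above on dummy magnitude spectrograms (the sizes and the broadcast over the source axis are illustrative assumptions):

import torch

EPS = 1e-8
src_mag_spec = torch.rand(4, 2, 257, 100)    # (batch, n_src, freq, time)
noise_mag_spec = torch.rand(4, 1, 257, 100)  # broadcasts over the source axis
real_mask = src_mag_spec / (noise_mag_spec + src_mag_spec.sum(1, keepdim=True) + EPS)
binary_mask = real_mask.argmax(1)            # index of the dominant source per TF bin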
Example #5
def test_pmsqe(sample_rate):
    # Define supported STFT
    if sample_rate == 16000:
        stft = Encoder(STFTFB(kernel_size=512, n_filters=512, stride=256))
    else:
        stft = Encoder(STFTFB(kernel_size=256, n_filters=256, stride=128))
    # Usage by itself
    ref, est = torch.randn(2, 1, 16000), torch.randn(2, 1, 16000)
    ref_spec = transforms.mag(stft(ref))
    est_spec = transforms.mag(stft(est))
    loss_func = SingleSrcPMSQE(sample_rate=sample_rate)
    loss_value = loss_func(est_spec, ref_spec)
    # Assert output has shape (batch,)
    assert loss_value.shape[0] == ref.shape[0]
    # Assert support for transposed inputs.
    tr_loss_value = loss_func(est_spec.transpose(1, 2), ref_spec.transpose(1, 2))
    assert_allclose(loss_value, tr_loss_value)
Example #6
 def unpack_data(self, batch, EPS=1e-8):
     mix, sources = batch
     # Compute magnitude spectrograms and IRM
     src_mag_spec = mag(self.model.encoder(sources))
     real_mask = src_mag_spec / (src_mag_spec.sum(1, keepdim=True) + EPS)
     # Get the src idx having the maximum energy
     binary_mask = real_mask.argmax(1)
     return mix, binary_mask, real_mask
Example #7
def test_griffinlim(fb_config, feed_istft, feed_angle):
    stft = Encoder(STFTFB(**fb_config))
    istft = None if not feed_istft else Decoder(STFTFB(**fb_config))
    wav = torch.randn(2, 1, 8000)
    spec = stft(wav)
    tf_mask = torch.sigmoid(torch.randn_like(spec))
    masked_spec = spec * tf_mask
    mag = transforms.mag(masked_spec, -2)
    angles = None if not feed_angle else transforms.angle(masked_spec, -2)
    griffin_lim(mag, stft, angles=angles, istft_dec=istft, n_iter=3)
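A minimal end-to-end sketch of magnitude-only reconstruction with `griffin_lim`, reusing the `Encoder`, `STFTFB`, `transforms`, and `griffin_lim` names from the test above (the filterbank parameters and iteration count are illustrative):

stft = Encoder(STFTFB(kernel_size=512, n_filters=512, stride=256))
wav = torch.randn(1, 1, 16000)
mag_spec = transforms.mag(stft(wav), -2)
# With no angles given, the phase is re-estimated from the magnitude alone.
rebuilt = griffin_lim(mag_spec, stft, n_iter=10)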
Example #8
 def separate(self, x):
     """ Separate with mask-inference head, output waveforms """
     if len(x.shape) == 2:
         x = x.unsqueeze(1)
     tf_rep = self.encoder(x)
     proj, mask_out = self.masker(mag(tf_rep))
     masked = apply_mag_mask(tf_rep.unsqueeze(1), mask_out)
     wavs = torch_utils.pad_x_to_y(self.decoder(masked), x)
     dic_out = dict(tfrep=tf_rep, mask=mask_out, masked_tfrep=masked, proj=proj)
     return wavs, dic_out
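A shape-level sketch of the magnitude masking above, assuming a 512-filter STFT, 2 estimated sources, and the `asteroid.filterbanks.transforms` import path used by these examples (all sizes are illustrative):

import torch
from asteroid.filterbanks.transforms import apply_mag_mask

tf_rep = torch.randn(2, 512, 100)                      # (batch, 2 * freq, time): stacked [re, im]
mask_out = torch.sigmoid(torch.randn(2, 2, 256, 100))  # (batch, n_src, freq, time)
masked = apply_mag_mask(tf_rep.unsqueeze(1), mask_out)
print(masked.shape)  # torch.Size([2, 2, 512, 100]): mixture phase kept, magnitudes rescaled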
Example #9
def distance(estimate, target, is_complex=True):
    """Compute the average distance in the complex plane. Makes more sense
    when the network computes a complex mask.

    Args:
        estimate (torch.Tensor): Estimate complex spectrogram.
        target (torch.Tensor): Speech target complex spectrogram.
        is_complex (bool): Whether to compute the distance in the complex or
            the magnitude space.

    Returns:
        torch.Tensor: The loss value, in a tensor of size 1.
    """
    if is_complex:
        # Take the difference in the complex plane and compute the squared norm
        # of the remaining vector.
        return mag(estimate - target).pow(2).mean()
    else:
        # Compute the mean difference between magnitudes.
        return (mag(estimate) - mag(target)).pow(2).mean()
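A minimal usage sketch of `distance`, assuming `mag` is `asteroid.filterbanks.transforms.mag` with its default frequency axis at dim -2, so the spectrograms below are packed as stacked [re, im] along that axis:

import torch

estimate = torch.randn(2, 512, 100)  # (batch, 2 * freq, time)
target = torch.randn(2, 512, 100)
complex_loss = distance(estimate, target, is_complex=True)     # mean squared distance in the complex plane
magnitude_loss = distance(estimate, target, is_complex=False)  # mean squared distance between magnitudes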
Example #10
def test_angle_mag_recompostion(dim):
    """ Test complex --> (mag, angle) --> complex conversions"""
    max_tested_ndim = 4
    # Random tensor shape
    tensor_shape = [random.randint(1, 10) for _ in range(max_tested_ndim)]
    # Make sure complex dimension has even shape
    tensor_shape[dim] = 2 * tensor_shape[dim]
    complex_tensor = torch.randn(tensor_shape)
    phase = transforms.angle(complex_tensor, dim=dim)
    mag = transforms.mag(complex_tensor, dim=dim)
    tensor_back = transforms.from_magphase(mag, phase, dim=dim)
    assert_allclose(complex_tensor, tensor_back)
Example #11
 def common_step(self, batch, batch_nb, train=False):
     inputs, targets, masks = self.unpack_data(batch)
     embeddings, est_masks = self(inputs)
     spec = mag(self.model.encoder(inputs.unsqueeze(1)))
     if self.mask_mixture:
         est_masks = est_masks * spec.unsqueeze(1)
         masks = masks * spec.unsqueeze(1)
     loss, loss_dic = self.loss_func(embeddings,
                                     targets,
                                     est_src=est_masks,
                                     target_src=masks,
                                     mix_spec=spec)
     return loss, loss_dic
Example #12
def test_misi(fb_config, feed_istft, feed_angle):
    stft = Encoder(STFTFB(**fb_config))
    istft = None if not feed_istft else Decoder(STFTFB(**fb_config))
    n_src = 3
    # Create mixture
    wav = torch.randn(2, 1, 8000)
    spec = stft(wav).unsqueeze(1)
    # Create n_src masks on mixture spec and apply them
    shape = list(spec.shape)
    shape[1] *= n_src
    tf_mask = torch.sigmoid(torch.randn(*shape))
    masked_specs = spec * tf_mask
    # Separate mag and angle.
    mag = transforms.mag(masked_specs, -2)
    angles = None if not feed_angle else transforms.angle(masked_specs, -2)
    est_wavs = misi(wav, mag, stft, angles=angles, istft_dec=istft, n_iter=2)
    # We actually don't know the last dim because ISTFT(STFT()) cuts the end
    assert est_wavs.shape[:-1] == (2, n_src)
Example #13
 def forward(self, x):
     if len(x.shape) == 2:
         x = x.unsqueeze(1)
     # Compute STFT
     tf_rep = self.encoder(x)
     # Estimate TF mask from STFT features : cat([re, im, mag])
     if self.is_complex:
         to_masker = magreim(tf_rep)
     else:
         to_masker = mag(tf_rep)
     # LSTM masker expects a feature dimension last (not like 1D conv)
     est_masks = self.masker(to_masker.transpose(1, 2)).transpose(1, 2)
     # Apply TF mask
     if self.is_complex:
         masked_tf_rep = apply_real_mask(tf_rep, est_masks)
     else:
         masked_tf_rep = apply_mag_mask(tf_rep, est_masks)
     return masked_tf_rep
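A small sketch of the two feature options fed to the masker above, assuming a 512-filter STFT (so `tf_rep` stacks 256 real and 256 imaginary rows) and the `asteroid.filterbanks.transforms` helpers used throughout these examples:

import torch
from asteroid.filterbanks.transforms import mag, magreim

tf_rep = torch.randn(2, 512, 100)  # (batch, 2 * freq, time)
mag_feats = mag(tf_rep)            # (2, 256, 100)
complex_feats = magreim(tf_rep)    # (2, 768, 100): cat([re, im, mag])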
Example #14
def test_mag(encoder_list):
    for (enc, fb_dim) in encoder_list:
        tf_rep = enc(torch.randn(2, 1, 16000))  # [batch, freq, time]
        batch, freq, time = tf_rep.shape
        mag = transforms.mag(tf_rep, dim=1)
        assert mag.shape == (batch, freq // 2, time)
Example #15
 def forward(self, x):
     if len(x.shape) == 2:
         x = x.unsqueeze(1)
     tf_rep = self.encoder(x)
     final_proj, mask_out = self.masker(mag(tf_rep))
     return final_proj, mask_out