# Shared imports assumed by the runner/tester excerpts in this file;
# project-local modules (STFTModule, UNet, UNet_pp, OpenUnmix, CNNOpenUnmix_p1,
# CNNOpenUnmix_p2, DSD100Dataset, SlakhDataset, VoicebankDemandDataset,
# FastDataLoader, EarlyStopping, MSE, Clip_SDR, show_TF_domein_result,
# mss_evals, sp_enhance_evals) are expected to come from the surrounding package.
import time

import numpy as np
import torch
import torchaudio.functional as taF


class DSDUNetTester():  # class name not given in this excerpt; assumed here
    def __init__(self, cfg):
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.dtype = torch.float32
        self.eps = 1e-4
        self.eval_path = cfg['eval_path']

        self.model = UNet().to(self.device)
        self.model.eval()
        self.model.load_state_dict(
            torch.load(self.eval_path, map_location=self.device))

        self.stft_module = STFTModule(
            cfg['stft_params'],
            self.device,
        )
        self.test_data_num = cfg['test_data_num']
        self.test_batch_size = cfg['test_batch_size']
        self.sample_len = cfg['sample_len']
        self.test_dataset = DSD100Dataset(data_num=self.test_data_num,
                                          sample_len=self.sample_len,
                                          folder_type='test',
                                          shuffle=False)
        self.test_data_loader = FastDataLoader(self.test_dataset,
                                               batch_size=self.test_batch_size,
                                               shuffle=False)

        self.sdr_list = np.array([])
        self.sar_list = np.array([])
        self.sir_list = np.array([])
class FeatExtractorBlstm_p1_Runner():
    def __init__(self, cfg):

        self.device = torch.device(
            'cuda:0' if torch.cuda.is_available() else 'cpu')
        self.dtype = torch.float32
        self.eps = 1e-8

        self.stft_module = STFTModule(cfg['stft_params'], self.device)
        self.stft_module_ex1 = STFTModule(cfg['stft_params_ex1'], self.device)

        self.train_data_num = cfg['train_data_num']
        self.valid_data_num = cfg['valid_data_num']
        self.sample_len = cfg['sample_len']
        self.epoch_num = cfg['epoch_num']
        self.train_batch_size = cfg['train_batch_size']
        self.valid_batch_size = cfg['valid_batch_size']

        self.train_full_data_num = cfg['train_full_data_num']
        self.valid_full_data_num = cfg['valid_full_data_num']
        self.save_path = cfg['save_path']

        self.train_dataset = VoicebankDemandDataset(
            data_num=self.train_data_num,
            full_data_num=self.train_full_data_num,
            sample_len=self.sample_len,
            folder_type='train',
            shuffle=True,
            device=self.device,
            augmentation=True)

        self.valid_dataset = VoicebankDemandDataset(
            data_num=self.valid_data_num,
            full_data_num=self.valid_full_data_num,
            sample_len=self.sample_len,
            folder_type='validation',
            shuffle=True,
            device=self.device,
            augmentation=False)

        self.train_data_loader = FastDataLoader(
            self.train_dataset, batch_size=self.train_batch_size, shuffle=True)

        self.valid_data_loader = FastDataLoader(
            self.valid_dataset, batch_size=self.valid_batch_size, shuffle=True)

        self.model = CNNOpenUnmix_p1(cfg['dnn_cfg']).to(self.device)
        self.criterion = Clip_SDR()
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=1e-3)
        self.early_stopping = EarlyStopping(patience=10)

    def _preprocess(self, noisy, clean):
        with torch.no_grad():
            noisy_spec = self.stft_module.stft(noisy, pad=False)
            noisy_amp_spec = taF.complex_norm(noisy_spec)
            noisy_mag_spec = self.stft_module.to_normalize_mag(noisy_amp_spec)

            clean_spec = self.stft_module.stft(clean, pad=False)
            clean_amp_spec = taF.complex_norm(clean_spec)

            # ex1: additional STFT resolution, fed to the model as its second input
            ex1_noisy_spec = self.stft_module_ex1.stft(noisy, pad=False)
            ex1_noisy_amp_spec = taF.complex_norm(ex1_noisy_spec)
            ex1_noisy_mag_spec = self.stft_module_ex1.to_normalize_mag(
                ex1_noisy_amp_spec)

            return noisy_mag_spec, ex1_noisy_mag_spec, clean_amp_spec, noisy_amp_spec, noisy_spec, clean_spec

    def _run(self, mode=None, data_loader=None):
        running_loss = 0
        for i, (noisy, clean) in enumerate(data_loader):
            noisy = noisy.to(self.dtype).to(self.device)
            clean = clean.to(self.dtype).to(self.device)
            noisy_mag_spec, ex1_noisy_mag_spec, clean_amp_spec, noisy_amp_spec, noisy_spec, clean_spec = self._preprocess(
                noisy, clean)

            self.model.zero_grad()
            est_mask = self.model(noisy_mag_spec, ex1_noisy_mag_spec)
            est_source = noisy_spec * est_mask[..., None]

            if mode == 'train' or mode == 'validation':
                loss = self.criterion(est_source, clean_spec, self.stft_module)
                running_loss += loss.data
                if mode == 'train':
                    loss.backward()
                    self.optimizer.step()

        return (running_loss /
                (i + 1)), est_source, est_mask, noisy_amp_spec, clean_amp_spec

    def train(self):
        train_loss = np.array([])
        valid_loss = np.array([])
        print("start train")
        for epoch in range(self.epoch_num):
            # train
            print('epoch{0}'.format(epoch))
            start = time.time()
            self.model.train()
            tmp_train_loss, _, _, _, _ = self._run(
                mode='train', data_loader=self.train_data_loader)
            train_loss = np.append(train_loss,
                                   tmp_train_loss.cpu().clone().numpy())

            self.model.eval()
            with torch.no_grad():
                tmp_valid_loss, est_source, est_mask, noisy_amp_spec, clean_amp_spec = self._run(
                    mode='validation', data_loader=self.valid_data_loader)
                valid_loss = np.append(valid_loss,
                                       tmp_valid_loss.cpu().clone().numpy())

            if (epoch + 1) % 10 == 0:
                plot_time = time.time()
                est_source = taF.complex_norm(est_source)
                show_TF_domein_result(train_loss, valid_loss,
                                      noisy_amp_spec[0, :, :],
                                      est_mask[0, :, :], est_source[0, :, :],
                                      clean_amp_spec[0, :, :])
                print('plot_time:', time.time() - plot_time)
                torch.save(self.model.state_dict(),
                           self.save_path + 'u_net{0}.ckpt'.format(epoch + 1))

            end = time.time()
            print('----execute time: {0}'.format(end - start))
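
# A minimal usage sketch for the runner above. All config values below are
# illustrative assumptions: the required keys are simply the ones read in
# __init__, and 'stft_params' / 'dnn_cfg' are passed straight to STFTModule /
# CNNOpenUnmix_p1, whose expected contents are not shown in this excerpt.
def _example_train_run():  # hypothetical helper, not part of the original code
    cfg = {
        'stft_params': {},                # placeholder, see STFTModule
        'stft_params_ex1': {},            # placeholder, see STFTModule
        'dnn_cfg': {},                    # placeholder, see CNNOpenUnmix_p1
        'train_data_num': 1000, 'valid_data_num': 200,
        'train_full_data_num': 10000, 'valid_full_data_num': 2000,
        'sample_len': 16384, 'epoch_num': 100,
        'train_batch_size': 16, 'valid_batch_size': 16,
        'save_path': 'results/model/feat_extractor_blstm_p1/',
    }
    runner = FeatExtractorBlstm_p1_Runner(cfg)
    runner.train()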
class DemandCNNOpenUnmix_p2_Tester():
    def __init__(self, cfg):
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.dtype = torch.float32
        self.eps = 1e-4
        self.eval_path = cfg['eval_path']

        self.model = CNNOpenUnmix_p2(cfg['dnn_cfg']).to(self.device)
        self.model.eval()
        self.model.load_state_dict(
            torch.load(self.eval_path, map_location=self.device))

        self.stft_module = STFTModule(cfg['stft_params'], self.device)
        self.stft_module_ex2 = STFTModule(cfg['stft_params_ex2'], self.device)

        self.test_data_num = cfg['test_data_num']
        self.test_batch_size = cfg['test_batch_size']
        self.sample_len = cfg['sample_len']

        self.test_dataset = VoicebankDemandDataset(data_num=self.test_data_num,
                                                   sample_len=self.sample_len,
                                                   folder_type='test',
                                                   shuffle=False)

        self.test_data_loader = FastDataLoader(self.test_dataset,
                                               batch_size=self.test_batch_size,
                                               shuffle=False)

        self.stoi_list = np.array([])
        self.pesq_list = np.array([])
        self.si_sdr_list = np.array([])
        self.si_sdr_improve_list = np.array([])

    def _preprocess(self, noisy):
        with torch.no_grad():
            noisy_spec = self.stft_module.stft(noisy, pad=False)
            noisy_amp_spec = taF.complex_norm(noisy_spec)
            noisy_mag_spec = self.stft_module.to_normalize_mag(noisy_amp_spec)

            # ex2: additional STFT resolution, fed to the model as its second input
            ex2_noisy_spec = self.stft_module_ex2.stft(noisy, pad=False)
            ex2_noisy_amp_spec = taF.complex_norm(ex2_noisy_spec)
            ex2_noisy_mag_spec = self.stft_module_ex2.to_normalize_mag(
                ex2_noisy_amp_spec)

            return noisy_mag_spec, ex2_noisy_mag_spec, noisy_spec

    def test(self, mode='test'):
        with torch.no_grad():
            for i, (noisy, clean) in enumerate(self.test_data_loader):
                start = time.time()
                noisy = noisy.to(self.dtype).to(self.device)
                clean = clean.to(self.dtype).to(self.device)
                siglen = noisy.shape[1]
                noisy_mag_spec, ex2_noisy_mag_spec, noisy_spec = self._preprocess(
                    noisy)
                est_mask = self.model(noisy_mag_spec, ex2_noisy_mag_spec)
                est_source = noisy_spec * est_mask[..., None]
                est_wave = self.stft_module.istft(est_source, siglen)
                print(est_wave.shape)
                est_wave = est_wave.squeeze(0)
                clean = clean.squeeze(0)
                noisy = noisy.squeeze(0)

                pesq_val, stoi_val, si_sdr_val, si_sdr_improve = sp_enhance_evals(
                    est_wave, clean, noisy, fs=16000)
                self.pesq_list = np.append(self.pesq_list, pesq_val)
                self.stoi_list = np.append(self.stoi_list, stoi_val)
                self.si_sdr_list = np.append(self.si_sdr_list, si_sdr_val)
                self.si_sdr_improve_list = np.append(self.si_sdr_improve_list,
                                                     si_sdr_improve)
                print('test time:', time.time() - start)

            print('pesq mean:', np.mean(self.pesq_list))
            print('stoi mean:', np.mean(self.stoi_list))
            print('si-sdr mean:', np.mean(self.si_sdr_list))
            print('si-sdr improve mean:', np.mean(self.si_sdr_improve_list))
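
# Sketch of the masking step shared by these runners/testers (shapes are
# assumed for illustration): the model predicts a real-valued mask over
# (batch, freq, frames), and est_mask[..., None] broadcasts it over the
# trailing real/imag axis of the spectrogram returned by STFTModule.stft.
def _mask_broadcast_demo():  # hypothetical helper, for illustration only
    noisy_spec = torch.randn(1, 513, 100, 2)    # (batch, freq, frames, re/im)
    est_mask = torch.rand(1, 513, 100)          # mask values in [0, 1]
    est_source = noisy_spec * est_mask[..., None]
    assert est_source.shape == noisy_spec.shape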
class UNetRunner():
    def __init__(self, cfg):
        
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.dtype = torch.float32
        self.eps = 1e-4
        
        self.stft_module = STFTModule(cfg['stft_params'])
        self.inst_num = cfg['inst_num']
        self.train_data_num = cfg['train_data_num']
        self.valid_data_num = cfg['valid_data_num']
        self.sample_len = cfg['sample_len']
        self.epoch_num = cfg['epoch_num']
        self.train_batch_size = cfg['train_batch_size']
        self.valid_batch_size = cfg['valid_batch_size']
        
        self.train_dataset = SlakhDataset(inst_num=self.inst_num, data_num=self.train_data_num, sample_len=self.sample_len, folder_type='train')
        self.valid_dataset = SlakhDataset(inst_num=self.inst_num, data_num=self.valid_data_num, sample_len=self.sample_len, folder_type='validation')
        
        self.train_data_loader = torch.utils.data.DataLoader(self.train_dataset, batch_size=self.train_batch_size, shuffle=True)
        self.valid_data_loader = torch.utils.data.DataLoader(self.valid_dataset, batch_size=self.valid_batch_size, shuffle=True)
        self.model = UNet().to(self.device)
        self.criterion = MSE()
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=1e-3)
        self.save_path = 'results/model/inst_num_{0}/'.format(self.inst_num)
        
    def _preprocess(self, mixture, true_sources):
        mix_spec = self.stft_module.stft(mixture, pad=True)
        mix_phase = mix_spec[:,:,1]
        mix_amp_spec = taF.complex_norm(mix_spec)
        mix_amp_spec = mix_amp_spec[:,1:,:]
        mix_mag_spec = torch.log10(mix_amp_spec + self.eps)
        mix_mag_spec = mix_mag_spec[:,1:,:]
        
        true_sources_spec = self.stft_module.stft_3D(true_sources, pad=True)
        true_sources_amp_spec = taF.complex_norm(true_sources_spec)
        true_sources_amp_spec = true_sources_amp_spec[:,:,1:,:]
        
        true_res = mixture.unsqueeze(1).repeat(1, self.inst_num, 1) - true_sources
        true_res_spec = self.stft_module.stft_3D(true_res, pad=True)
        true_res_amp_spec = taF.complex_norm(true_res_spec)
        return mix_mag_spec, true_sources_amp_spec, true_res_amp_spec, mix_phase, mix_amp_spec
        
    def _postprocess(self, est_sources):
        pass
        
    def _run(self, model, criterion, data_loader, batch_size, mode=None):
        running_loss = 0
        for i, (mixture, sources, _) in enumerate(data_loader):
            mixture = mixture.to(self.dtype).to(self.device)
            sources = sources.to(self.dtype).to(self.device)
            mix_mag_spec, true_sources_amp_spec, true_res_amp_spec, _, mix_amp_spec = self._preprocess(mixture, sources)
            
            model.zero_grad()
            est_mask = model(mix_mag_spec.unsqueeze(1))
            est_sources = mix_amp_spec.unsqueeze(1) * est_mask
            
            if mode == 'train' or mode == 'validation':
                loss = 10 * criterion(est_sources, true_sources_amp_spec, self.inst_num)
                running_loss += loss.data
                if mode == 'train':
                    loss.backward()
                    self.optimizer.step()
            
        return (running_loss / (i+1)), est_sources, est_mask, mix_amp_spec
    
    def train(self):
        train_loss = np.array([])
        valid_loss = np.array([])
        print("start train")
        for epoch in range(self.epoch_num):
            # train
            print('epoch{0}'.format(epoch))
            start = time.time()
            self.model.train()
            tmp_train_loss, _, _, _ = self._run(self.model, self.criterion, self.train_data_loader, self.train_batch_size, mode='train')
            train_loss = np.append(train_loss, tmp_train_loss.cpu().clone().numpy())
            # validation
            self.model.eval()
            with torch.no_grad():
                tmp_valid_loss, est_source, est_mask, mix_amp_spec = self._run(self.model, self.criterion, self.valid_data_loader, self.valid_batch_size, mode='validation')
                valid_loss = np.append(valid_loss, tmp_valid_loss.cpu().clone().numpy())
                 
            if (epoch + 1) % 10 == 0:
                torch.save(self.model.state_dict(), self.save_path + 'u_net{0}.ckpt'.format(epoch + 1))
            
            end = time.time()
            print('----execute time: {0}'.format(end - start))
            show_TF_domein_result(valid_loss, mix_amp_spec[0, :, :], est_mask[0, 0, :, :], est_source[0, 0, :, :])
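
# Sketch of the residual targets built in _preprocess above (sizes assumed for
# illustration): each of the inst_num stems gets a residual defined as
# "mixture minus that stem", i.e. everything else playing in the mix.
def _residual_targets_demo():  # hypothetical helper, for illustration only
    batch, inst_num, samples = 2, 4, 16384
    mixture = torch.randn(batch, samples)
    true_sources = torch.randn(batch, inst_num, samples)
    true_res = mixture.unsqueeze(1).repeat(1, inst_num, 1) - true_sources
    assert true_res.shape == (batch, inst_num, samples)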
class DSDOpenUnmixTester():
    def __init__(self, cfg):
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.dtype = torch.float32
        self.eps = 1e-4
        self.eval_path = cfg['eval_path']

        self.model = OpenUnmix(cfg['dnn_cfg']).to(self.device)
        self.model.eval()
        self.model.load_state_dict(torch.load(self.eval_path, map_location=self.device))
        
        self.stft_module = STFTModule(cfg['stft_params'], self.device)
        
        self.test_data_num = cfg['test_data_num']
        self.test_batch_size = cfg['test_batch_size']
        self.sample_len = cfg['sample_len']

        self.test_dataset = DSD100Dataset(data_num=self.test_data_num, 
                                          sample_len=self.sample_len, 
                                          folder_type='Test', 
                                          shuffle=False,
                                          device=self.device,
                                          augmentation=False)
        
        self.test_data_loader = FastDataLoader(self.test_dataset, batch_size=self.test_batch_size, shuffle=False)
        
        self.sdr_list = np.array([])
        self.sir_list = np.array([])
        self.sar_list = np.array([])
        self.si_sdr_list = np.array([])
        self.si_sdr_improve_list = np.array([])
    
    def _preprocess(self, noisy):
        with torch.no_grad():
            noisy_spec = self.stft_module.stft(noisy, pad=None)
            noisy_amp_spec = taF.complex_norm(noisy_spec)
            noisy_mag_spec = self.stft_module.to_normalize_mag(noisy_amp_spec)
            
            return noisy_mag_spec, noisy_spec
    
            
    def test(self, mode='test'):
        with torch.no_grad():
            for i, (noisy, _, _, _, clean) in enumerate(self.test_data_loader):
                start = time.time()
                noisy = noisy.to(self.dtype).to(self.device)
                clean = clean.to(self.dtype).to(self.device)
                siglen = noisy.shape[1]
                noisy_mag_spec, noisy_spec = self._preprocess(noisy)
                est_mask = self.model(noisy_mag_spec)
                est_source = noisy_spec * est_mask[...,None]
                est_wave = self.stft_module.istft(est_source, siglen)
                print(est_wave.shape)
                est_wave = est_wave.squeeze(0)
                clean = clean.squeeze(0)
                noisy = noisy.squeeze(0)
                                
                sdr, sir, sar, si_sdr, si_sdr_improve = mss_evals(est_wave, clean, noisy)
                self.sdr_list = np.append(self.sdr_list, sdr)
                self.sir_list = np.append(self.sir_list, sir)
                self.sar_list = np.append(self.sar_list, sar)
                self.si_sdr_list = np.append(self.si_sdr_list, si_sdr)
                self.si_sdr_improve_list = np.append(self.si_sdr_improve_list, si_sdr_improve)
                print('test time:', time.time() - start)
                
            print('sdr mean:', np.mean(self.sdr_list))
            print('sir mean:', np.mean(self.sir_list))
            print('sar mean:', np.mean(self.sar_list))
            print('si-sdr mean:', np.mean(self.si_sdr_list))
            print('si-sdr improve mean:', np.mean(self.si_sdr_improve_list))
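
# Minimal usage sketch for the tester above (values are illustrative
# assumptions; 'eval_path' has to point at a checkpoint produced by the
# matching training runner, and 'dnn_cfg' / 'stft_params' must match the
# settings used at training time).
def _example_test_run():  # hypothetical helper, not part of the original code
    cfg = {
        'eval_path': 'results/model/dsd_openunmix/open_unmix100.ckpt',  # assumed path
        'dnn_cfg': {},           # placeholder, see OpenUnmix
        'stft_params': {},       # placeholder, see STFTModule
        'test_data_num': 50,
        'test_batch_size': 1,    # test() squeezes the batch dim, so 1 is expected
        'sample_len': None,      # assumed: full-length tracks at test time
    }
    tester = DSDOpenUnmixTester(cfg)
    tester.test()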
class UNetRunner():
    def __init__(self, cfg):

        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.dtype = torch.float32
        self.eps = 1e-4

        self.stft_module = STFTModule(cfg['stft_params'], self.device)
        self.train_data_num = cfg['train_data_num']
        self.valid_data_num = cfg['valid_data_num']
        self.sample_len = cfg['sample_len']
        self.epoch_num = cfg['epoch_num']
        self.train_batch_size = cfg['train_batch_size']
        self.valid_batch_size = cfg['valid_batch_size']

        self.train_dataset = DSD100Dataset(data_num=self.train_data_num,
                                           sample_len=self.sample_len,
                                           folder_type='train')
        self.valid_dataset = DSD100Dataset(data_num=self.valid_data_num,
                                           sample_len=self.sample_len,
                                           folder_type='validation')

        self.train_data_loader = FastDataLoader(
            self.train_dataset, batch_size=self.train_batch_size, shuffle=True)
        self.valid_data_loader = FastDataLoader(
            self.valid_dataset, batch_size=self.valid_batch_size, shuffle=True)
        self.model = UNet().to(self.device)
        self.criterion = MSE()
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=1e-3)
        self.save_path = 'results/model/dsd_unet_config_1/'

    def _preprocess(self, mixture, true):
        with torch.no_grad():
            mix_spec = self.stft_module.stft(mixture, pad=True)

            mix_amp_spec = taF.complex_norm(mix_spec)
            mix_amp_spec = mix_amp_spec[:, 1:, :]
            mix_mag_spec = torch.log10(mix_amp_spec + self.eps)

            true_spec = self.stft_module.stft(true, pad=True)
            true_amp_spec = taF.complex_norm(true_spec)
            true_amp_spec = true_amp_spec[:, 1:, :]

        return mix_mag_spec, true_amp_spec, mix_amp_spec

    def _postprocess(self, est_sources):
        pass

    def _run(self, mode=None, data_loader=None):
        running_loss = 0
        for i, (mixture, _, _, _, vocals) in enumerate(data_loader):
            mixture = mixture.to(self.dtype).to(self.device)
            true = vocals.to(self.dtype).to(self.device)
            mix_mag_spec, true_amp_spec, mix_amp_spec = self._preprocess(
                mixture, true)
            self.model.zero_grad()
            est_mask = self.model(mix_mag_spec.unsqueeze(1))
            est_source = mix_amp_spec.unsqueeze(1) * est_mask

            if mode == 'train' or mode == 'validation':
                loss = 10 * self.criterion(est_source, true_amp_spec)
                running_loss += loss.data
                if mode == 'train':
                    loss.backward()
                    self.optimizer.step()

        return (running_loss /
                (i + 1)), est_source, est_mask, mix_amp_spec, true_amp_spec

    def train(self):
        train_loss = np.array([])
        print("start train")
        for epoch in range(self.epoch_num):
            # train
            print('epoch{0}'.format(epoch))
            start = time.time()
            self.model.train()
            tmp_train_loss, est_source, est_mask, mix_amp_spec, true_amp_spec = self._run(
                mode='train', data_loader=self.train_data_loader)
            train_loss = np.append(train_loss,
                                   tmp_train_loss.cpu().clone().numpy())

            if (epoch + 1) % 10 == 0:
                plot_time = time.time()
                show_TF_domein_result(train_loss, mix_amp_spec[0, :, :],
                                      est_mask[0, 0, :, :],
                                      est_source[0, 0, :, :],
                                      true_amp_spec[0, :, :])
                print('plot_time:', time.time() - plot_time)
                torch.save(self.model.state_dict(),
                           self.save_path + 'u_net{0}.ckpt'.format(epoch + 1))

            end = time.time()
            print('----execute time: {0}'.format(end - start))
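
# Illustration of the frequency-bin trimming in _preprocess above (n_fft is an
# assumed value, since cfg['stft_params'] is not shown): an n_fft of 1024 gives
# 513 frequency bins, and dropping the DC bin with [:, 1:, :] leaves 512 bins,
# a power of two.
def _bin_trim_demo():  # hypothetical helper, for illustration only
    x = torch.randn(1, 16384)                           # dummy mono batch
    spec = torch.stft(x, n_fft=1024, hop_length=256,
                      window=torch.hann_window(1024),
                      return_complex=True)              # (1, 513, frames)
    log_mag = torch.log10(spec.abs()[:, 1:, :] + 1e-4)  # drop DC -> (1, 512, frames)
    assert log_mag.shape[1] == 512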
class UNet_pp_Tester():
    def __init__(self, cfg):
        self.device = torch.device(
            'cuda:0' if torch.cuda.is_available() else 'cpu')
        self.dtype = torch.float32
        self.eps = 1e-4
        self.eval_path = cfg['eval_path']

        self.model = UNet_pp().to(self.device)
        self.model.eval()
        self.model.load_state_dict(
            torch.load(self.eval_path, map_location=self.device))

        self.stft_module = STFTModule(cfg['stft_params'], self.device)
        self.stft_module_ex1 = STFTModule(cfg['stft_params_ex1'], self.device)
        self.stft_module_ex2 = STFTModule(cfg['stft_params_ex2'], self.device)

        self.test_data_num = cfg['test_data_num']
        self.test_batch_size = cfg['test_batch_size']
        self.sample_len = cfg['sample_len']
        self.test_dataset = DSD100Dataset(data_num=self.test_data_num,
                                          sample_len=self.sample_len,
                                          folder_type='test',
                                          device=self.device,
                                          shuffle=False)
        self.test_data_loader = FastDataLoader(self.test_dataset,
                                               batch_size=self.test_batch_size,
                                               shuffle=False)

        self.sdr_list = np.array([])
        self.sar_list = np.array([])
        self.sir_list = np.array([])

    def _preprocess(self, mixture, true):
        with torch.no_grad():
            mix_spec = self.stft_module.stft(mixture, pad=True)
            mix_amp_spec = taF.complex_norm(mix_spec)
            mix_amp_spec = mix_amp_spec[:, 1:, :]
            mix_mag_spec = torch.log10(mix_amp_spec + self.eps)

            # ex1: second STFT resolution, fed to the model as its second input
            ex1_mix_spec = self.stft_module_ex1.stft(mixture, pad=True)
            ex1_mix_amp_spec = taF.complex_norm(ex1_mix_spec)
            ex1_mix_mag_spec = torch.log10(ex1_mix_amp_spec + self.eps)
            ex1_mix_mag_spec = ex1_mix_mag_spec[:, 1:, 1:513]

            # ex2: third STFT resolution, fed to the model as its third input
            ex2_mix_spec = self.stft_module_ex2.stft(mixture, pad=True)
            ex2_mix_amp_spec = taF.complex_norm(ex2_mix_spec)
            ex2_mix_mag_spec = torch.log10(ex2_mix_amp_spec + self.eps)
            ex2_mix_mag_spec = ex2_mix_mag_spec[:, 1:, :]
            batch_size, f_size, t_size = ex2_mix_mag_spec.shape
            pad_ex2_mix_mag_spec = torch.zeros((batch_size, f_size, 128),
                                               dtype=self.dtype,
                                               device=self.device)
            pad_ex2_mix_mag_spec[:, :1024, :127] = ex2_mix_mag_spec[:, :, :]

            return mix_mag_spec, ex1_mix_mag_spec, pad_ex2_mix_mag_spec, mix_spec

    def _postprocess(self, x):
        x = x.squeeze(1)
        batch_size, f_size, t_size = x.shape
        pad_x = torch.zeros((batch_size, f_size + 2, t_size),
                            dtype=self.dtype,
                            device=self.device)
        pad_x[:, 1:-1, :] = x[:, :, :]
        return pad_x

    def test(self, mode='test'):
        with torch.no_grad():
            for i, (mixture, _, _, _,
                    vocals) in enumerate(self.test_data_loader):
                start = time.time()
                mixture = mixture.squeeze(0).to(self.dtype).to(self.device)
                true = vocals.squeeze(0).to(self.dtype).to(self.device)

                mix_mag_spec, ex1_mix_mag_spec, ex2_mix_mag_spec, mix_spec = self._preprocess(
                    mixture, true)
                est_mask = self.model(mix_mag_spec.unsqueeze(1),
                                      ex1_mix_mag_spec.unsqueeze(1),
                                      ex2_mix_mag_spec.unsqueeze(1))
                est_mask = self._postprocess(est_mask)
                est_source = mix_spec * est_mask[..., None]
                est_wave = self.stft_module.istft(est_source)

                est_wave = est_wave.flatten()
                mixture = mixture.flatten()
                true = true.flatten()
                true_accompany = mixture - true
                est_accompany = mixture - est_wave
                sdr, sir, sar = mss_evals(est_wave, est_accompany, true,
                                          true_accompany)
                self.sdr_list = np.append(self.sdr_list, sdr)
                self.sar_list = np.append(self.sar_list, sar)
                self.sir_list = np.append(self.sir_list, sir)
                print('test time:', time.time() - start)

            print('sdr mean:', np.mean(self.sdr_list))
            print('sir mean:', np.mean(self.sir_list))
            print('sar mean:', np.mean(self.sar_list))
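
# A small sketch (output file name assumed) for persisting the per-track
# metrics that the testers above accumulate in numpy arrays, so the reported
# means can be recomputed later without re-running inference.
def _save_metrics(tester, out_path='results/metrics_unet_pp.csv'):  # hypothetical helper
    results = np.stack([tester.sdr_list, tester.sir_list, tester.sar_list], axis=1)
    np.savetxt(out_path, results, delimiter=',', header='sdr,sir,sar', comments='')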