Example no. 1 (score: 0)
 def __init__(self,
              model,
              optimizer,
              loss_func,
              train_loader,
              val_loader=None,
              scheduler=None,
              config=None):
     """Forward all training components to the parent trainer and
     additionally build an STFT encoder from the configuration.

     Args:
         model: Model to train (passed through to the parent class).
         optimizer: Optimizer instance (passed through).
         loss_func: Loss callable (passed through).
         train_loader: Training data loader (passed through).
         val_loader: Optional validation data loader (passed through).
         scheduler: Optional LR scheduler (passed through).
         config: Dict holding at least a 'filterbank' sub-dict used to
             instantiate the STFT filterbank below.

     NOTE(review): although `config` defaults to None, it is subscripted
     with 'filterbank' unconditionally — confirm callers always pass it.
     """
     super().__init__(model,
                      optimizer,
                      loss_func,
                      train_loader,
                      val_loader=val_loader,
                      scheduler=scheduler,
                      config=config)
     # STFT encoder built from the 'filterbank' section of the config.
     self.enc = fb.Encoder(fb.STFTFB(**config['filterbank']))
Example no. 2 (score: 0)
def make_model_and_optimizer(conf):
    """ Function to define the model and optimizer for a config dictionary.
    Args:
        conf: Dictionary containing the output of hierarchical argparse;
            must hold 'filterbank' and 'optim' sub-dictionaries.
    Returns:
        model, optimizer.
    The main goal of this function is to make reloading for resuming
    and evaluation very simple.
    """
    enc = fb.Encoder(fb.STFTFB(**conf['filterbank']))
    # The masker consumes magnitude-like features, i.e. half of the
    # encoder's (real + imaginary) output channels; use integer floor
    # division instead of the float round-trip int(x / 2).
    masker = ChimeraPP(enc.filterbank.n_feats_out // 2, 2,
                       embedding_dim=20, n_layers=2, hidden_size=600,
                       dropout=0, bidirectional=True)
    model = Model(enc, masker)
    optimizer = make_optimizer(model.parameters(), **conf['optim'])
    return model, optimizer
Example no. 3 (score: 0)
def make_encoder_from(fb_class, config):
    """Instantiate an encoder from a filterbank class and its config.

    Args:
        fb_class: Filterbank class to instantiate with ``**config``.
        config: Dict of keyword arguments for ``fb_class``.

    Returns:
        Tuple of (encoder, number of output features of its filterbank).
    """
    encoder = fb.Encoder(fb_class(**config))
    return encoder, encoder.filterbank.n_feats_out
Example no. 4 (score: 0)
import argparse

import numpy as np
import torch
import torch.nn as nn
from torch import optim
from torch.utils.data import DataLoader

import asteroid.filterbanks as fb
from asteroid.data.wsj0_mix import WSJ2mixDataset, BucketingSampler, \
        collate_fn
from asteroid.filterbanks.transforms import take_mag
from asteroid.losses import PITLossWrapper, pairwise_mse
from asteroid.losses import deep_clustering_loss
from asteroid.masknn.blocks import SingleRNN

# Smallest representable float32 increment, used as a numerical-stability
# constant elsewhere in the script.
EPS = torch.finfo(torch.float32).eps

# STFT encoder (256 filters of length 256, hop size 64), placed on the GPU.
enc = fb.Encoder(fb.STFTFB(256, 256, stride=64)).cuda()

# Command-line interface for the training script.
parser = argparse.ArgumentParser()
parser.add_argument('--gpus', type=str, help='list of GPUs', default='-1')
parser.add_argument('--exp_dir',
                    default='exp/tmp',
                    help='Full path to save best validation model')

# Permutation-invariant training wrapper around the pairwise MSE loss.
pit_loss = PITLossWrapper(pairwise_mse, mode='pairwise')

class Model(nn.Module):
    def __init__(self):
        #def __init__(self, in_chan, n_src, rnn_type = 'lstm',
        #        embedding_dim=20, n_layers=2, hidden_size=600,