Example #1
import os

import torch

# Splicing window sizes come from the environment (the LEFT_CONTEXT variable
# is assumed here by symmetry with RIGHT_CONTEXT).
left_context = int(os.environ["LEFT_CONTEXT"])
right_context = int(os.environ["RIGHT_CONTEXT"])
time_dim = (left_context + right_context + 1)
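# For example, LEFT_CONTEXT=5 and RIGHT_CONTEXT=5 give time_dim = 11 spliced
# frames per input (5 left + the center frame + 5 right).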

# Run on the GPU if one is available
on_gpu = torch.cuda.is_available()

# Fix random seed for debugging
torch.manual_seed(1)
if on_gpu:
    torch.cuda.manual_seed_all(1)

# Set up the model and associated checkpointing directory
model = setup_model(e2e=True)
if on_gpu:
    model.cuda()
ckpt_path = best_ckpt_path(e2e=True)

# Load checkpoint
ckpt = torch.load(ckpt_path, map_location=lambda storage, loc: storage)
model.load_state_dict(ckpt["state_dict"])
model.eval()

# Set up data files: source_class picks the <CLASS>_FEATS feature directory,
# data_dir the split within it
scp_dir = os.path.join(os.environ["%s_FEATS" % source_class.upper()], data_dir)
# scp_name = os.path.join(scp_dir, "feats.scp")
scp_file = os.path.join(scp_dir, "feats-norm.scp")

loader_kwargs = {"num_workers": 1, "pin_memory": True} if on_gpu else {}

dataset = KaldiEvalDataset(scp_file,
                           shuffle_utts=False)
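
# A minimal sketch of how the objects above could feed an evaluation loop.
# The batch layout yielded by KaldiEvalDataset is project-specific, so
# treating each batch as a single feature tensor is an assumption.
from torch.utils.data import DataLoader

loader = DataLoader(dataset, batch_size=1, shuffle=False, **loader_kwargs)
with torch.no_grad():
    for feats in loader:
        if on_gpu:
            feats = feats.cuda()
        outputs = model(feats)
        # (what is done with outputs, e.g. scoring or writing features,
        # is script-specific)
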
Example #2
import os

import numpy as np
import torch

on_gpu = torch.cuda.is_available()

# Fix random seed for debugging
torch.manual_seed(1)
if on_gpu:
    torch.cuda.manual_seed_all(1)

# Set up the model and associated checkpointing directory
model = setup_model(denoiser=True)
print(model, flush=True)

# Count number of trainable parameters
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print("Model has %d trainable parameters" % params, flush=True)

if on_gpu:
    model.cuda()
ckpt_path = best_ckpt_path(denoiser=True)

# Set up data files
decoder_classes = []
for res_str in os.environ["DECODER_CLASSES_DELIM"].split("_"):
    if len(res_str) > 0:
        decoder_classes.append(res_str)

training_scps = []
for decoder_class in decoder_classes:
    training_scp_dir = os.path.join(
        os.environ["%s_FEATS" % decoder_class.upper()], "train")
    # training_scp_name = os.path.join(training_scp_dir, "feats.scp")
    training_scp_name = os.path.join(training_scp_dir, "feats-norm.scp")
    training_scps.append(training_scp_name)
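
# Illustrative, equivalent rewrite of the two loops above (same behavior,
# not taken from the original script): drop empty pieces of the
# underscore-delimited class list, then build one normalized feature SCP
# path per decoder class.
decoder_classes = [c for c in os.environ["DECODER_CLASSES_DELIM"].split("_") if c]
training_scps = [
    os.path.join(os.environ["%s_FEATS" % c.upper()], "train", "feats-norm.scp")
    for c in decoder_classes
]
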
Example #3
import os

import numpy as np
import torch

on_gpu = torch.cuda.is_available()

# Fix random seed for debugging
torch.manual_seed(1)
if on_gpu:
    torch.cuda.manual_seed_all(1)

# Set up the model and associated checkpointing directory
model = setup_model(phone=True)
print(model, flush=True)

# Count number of trainable parameters
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print("Model has %d trainable parameters" % params, flush=True)

if on_gpu:
    model.cuda()
ckpt_path = best_ckpt_path(phone=True)

# Set up data files
decoder_classes = []
for res_str in os.environ["DECODER_CLASSES_DELIM"].split("_"):
    if len(res_str) > 0:
        decoder_classes.append(res_str)

train_feat_scps = []
for decoder_class in decoder_classes:
    train_scp_dir = os.path.join(
        os.environ["%s_FEATS" % decoder_class.upper()], "train")
    # train_scp_name = os.path.join(train_scp_dir, "feats.scp")
    train_scp_name = os.path.join(train_scp_dir, "feats-norm.scp")
    train_feat_scps.append(train_scp_name)
Example #4
import os

import numpy as np
import torch

on_gpu = torch.cuda.is_available()

# Fix random seed for debugging
torch.manual_seed(1)
if on_gpu:
    torch.cuda.manual_seed_all(1)

# Set up the model and associated checkpointing directory
model = setup_model()
print(model, flush=True)

# Count number of trainable parameters
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print("Model has %d trainable parameters" % params, flush=True)

if on_gpu:
    model.cuda()
ckpt_path = best_ckpt_path()

# Set up data files
decoder_classes = []
for res_str in os.environ["DECODER_CLASSES_DELIM"].split("_"):
    if len(res_str) > 0:
        decoder_classes.append(res_str)

training_scps = []
for decoder_class in decoder_classes:
    training_scp_dir = os.path.join(
        os.environ["%s_FEATS" % decoder_class.upper()], "train")
    # training_scp_name = os.path.join(training_scp_dir, "feats.scp")
    training_scp_name = os.path.join(training_scp_dir, "feats-norm.scp")
    training_scps.append(training_scp_name)
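
# Sketch (an assumption, not shown in the original script): saving a
# checkpoint to ckpt_path in the layout that Example #1 later loads, i.e. a
# dict carrying the model weights under a "state_dict" key.
torch.save({"state_dict": model.state_dict()}, ckpt_path)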