Example #1
def train(epoch):
    avg_loss = 0.0
    epoch_time = 0
    progbar = Progbar(len(train_loader.dataset) // c.batch_size)
    for num_iter, batch in enumerate(train_loader):
        start_time = time.time()
        wav = batch[0].unsqueeze(1)
        mel = batch[1].transpose(1, 2)
        lens = batch[2]
        target = batch[3]
        if use_cuda:
            wav = wav.cuda()
            mel = mel.cuda()
            target = target.cuda()
        current_step = num_iter + epoch * len(train_loader) + 1
        optimizer.zero_grad()
        out = model(wav, mel)
        loss, fp, tp = criterion(out, target, lens)
        loss.backward()
        grad_norm, skip_flag = check_update(model, 5, 100)
        if skip_flag:
            optimizer.zero_grad()
            print(" | > Iteration skipped!!")
            continue
        optimizer.step()
        step_time = time.time() - start_time
        epoch_time += step_time
        # update the progress bar with this step's metrics
        progbar.update(num_iter+1, values=[('total_loss', loss.item()),
                                           ('grad_norm', grad_norm.item()),
                                           ('fp', fp),
                                           ('tp', tp)
                                          ])
        avg_loss += loss.item()
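
All three examples call check_update(model, grad_clip, grad_top) for gradient clipping but never include its body, and Progbar follows the keras.utils.Progbar API (target count at construction, then update(current, values=[...])). Below is a minimal sketch of what check_update might look like, reconstructed only from the call sites and the (grad_norm, skip_flag) return value; the body is an assumption, not the original implementation.

import math
import torch

def check_update(model, grad_clip, grad_top):
    # Clip gradients in place; clip_grad_norm_ returns the total norm
    # measured before clipping.
    grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
    norm_value = float(grad_norm)
    # Flag the step for skipping when the norm is non-finite or exceeds
    # grad_top; the loops above then zero the gradients and continue.
    skip_flag = (not math.isfinite(norm_value)) or norm_value > grad_top
    return grad_norm, skip_flag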
Example #2
def train(epoch):
    avg_loss = 0.0
    epoch_time = 0
    # progbar = Progbar(len(train_loader.dataset) // c.batch_size)
    num_iter_epoch = len(train_loader.dataset) // c.batch_size
    if c.ema_decay > 0:
        ema = EMA(c.ema_decay)
        for name, param in model.named_parameters():
            if param.requires_grad:
                ema.register(name, param)
    else:
        ema = None
    model.train()

    for num_iter, batch in enumerate(train_loader):
        start_time = time.time()
        wav = batch[0].unsqueeze(1)
        mel = batch[1].transpose(1, 2)
        lens = batch[2]
        target = batch[3]
        if use_cuda:
            wav = wav.cuda()
            mel = mel.cuda()
            target = target.cuda()
        current_step = num_iter + epoch * len(train_loader) + 1
        lr = lr_decay(c.lr, current_step, c.warmup_steps)
        for params_group in optimizer.param_groups:
            params_group['lr'] = lr
        optimizer.zero_grad()
        out = torch.nn.parallel.data_parallel(model, (wav, mel))
        # out = model(wav, mel)
        loss, fp, tp = criterion(out, target, lens)
        loss.backward()
        grad_norm, skip_flag = check_update(model, c.grad_clip, c.grad_top)
        if skip_flag:
            optimizer.zero_grad()
            print(" | > Iteration skipped!!")
            continue
        optimizer.step()
        # update the EMA shadow copy of the trainable weights
        if ema is not None:
            for name, param in model.named_parameters():
                if name in ema.shadow:
                    ema.update(name, param.data)
        step_time = time.time() - start_time
        epoch_time += step_time
        if current_step % c.print_iter == 0:
            print(" | > step:{}/{}\tglobal_step:{}\tloss:{:.4f}\t"
                  "grad_norm:{:.4f}\tfp:{}\ttp:{}\tlr:{:.5f}".format(
                      num_iter, num_iter_epoch, current_step, loss.item(),
                      grad_norm, fp, tp, lr))
        avg_loss += loss.item()
    avg_loss /= num_iter + 1  # num_iter is zero-based
    return ema, avg_loss
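
Example #2 additionally depends on an EMA class (with register, update, and a shadow dict) and an lr_decay schedule, neither of which is shown. The sketches below are hypothetical reconstructions: the EMA keeps an exponential moving average of each registered parameter, and lr_decay is written as a Noam-style warmup with inverse-square-root decay, which fits the (init_lr, global_step, warmup_steps) signature but is only an assumption.

class EMA:
    """Exponential moving average of model parameters (hypothetical sketch)."""

    def __init__(self, decay):
        self.decay = decay
        self.shadow = {}

    def register(self, name, param):
        # Seed the shadow value with the current weights.
        self.shadow[name] = param.data.clone()

    def update(self, name, value):
        # shadow <- decay * shadow + (1 - decay) * current value
        self.shadow[name] = (self.decay * self.shadow[name]
                             + (1.0 - self.decay) * value).clone()


def lr_decay(init_lr, global_step, warmup_steps):
    # Noam-style schedule: linear warmup for warmup_steps, then decay
    # proportional to the inverse square root of the step count.
    step = max(global_step, 1)
    return init_lr * warmup_steps ** 0.5 * min(
        step * warmup_steps ** -1.5, step ** -0.5)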
Example #3
def train(epoch):
    avg_loss = 0.0
    epoch_time = 0
    progbar = Progbar(len(train_loader.dataset) // c.batch_size)
    if c.ema_decay > 0:
        ema = EMA(c.ema_decay)
        for name, param in model.named_parameters():
            if param.requires_grad:
                ema.register(name, param)
    else:
        ema = None
    model.train()
    for num_iter, batch in enumerate(train_loader):
        start_time = time.time()
        wav = batch[0].unsqueeze(1)
        mel = batch[1].transpose(1, 2)
        lens = batch[2]
        target = batch[3]
        if use_cuda:
            wav = wav.cuda()
            mel = mel.cuda()
            target = target.cuda()
        current_step = num_iter + epoch * len(train_loader) + 1
        optimizer.zero_grad()
        # out = torch.nn.parallel.data_parallel(model, (wav, mel))
        out = model(wav, mel)
        loss, fp, tp = criterion(out, target, lens)
        loss.backward()
        grad_norm, skip_flag = check_update(model, 5, 100)
        if skip_flag:
            optimizer.zero_grad()
            print(" | > Iteration skipped!!")
            continue
        optimizer.step()
        # update the EMA shadow copy of the trainable weights
        if ema is not None:
            for name, param in model.named_parameters():
                if name in ema.shadow:
                    ema.update(name, param.data)
        step_time = time.time() - start_time
        epoch_time += step_time
        # update the progress bar with this step's metrics
        progbar.update(num_iter + 1,
                       values=[('total_loss', loss.item()),
                               ('grad_norm', grad_norm.item()), ('fp', fp),
                               ('tp', tp)])
        avg_loss += loss.item()
    avg_loss /= num_iter + 1  # average over iterations, as in Example #2
    return ema, avg_loss
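
For context, a hypothetical driver loop for these train() functions; c.epochs and the checkpoint path are placeholders, and the globals used inside train() (model, optimizer, criterion, train_loader, c, use_cuda) are assumed to be configured beforehand.

best_loss = float("inf")
for epoch in range(c.epochs):  # c.epochs is a placeholder attribute
    ema, train_loss = train(epoch)
    print(" | > Epoch {} done, avg loss: {:.4f}".format(epoch, train_loss))
    if train_loss < best_loss:
        best_loss = train_loss
        # Save the EMA shadow weights when available, else the raw weights.
        torch.save(ema.shadow if ema is not None else model.state_dict(),
                   "checkpoint_best.pth")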