Example #1
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from sklearn import metrics

# myDataset is the project's dataset class, defined elsewhere in the repo.


def test(checkpoint_path, model, config):
    test_dataset = myDataset(
        '/search/odin/wts/my-pytorch-try/text_cnn_rnn/data/cnews/cnews.test.txt',
        config)
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=config.batch_size,
                             shuffle=False,  # no need to shuffle when evaluating
                             num_workers=2)

    model.load_state_dict(torch.load(checkpoint_path + '/cnn_model.pkl'))
    model.eval()
    loss_total = 0
    predict_all = np.array([], dtype=int)
    labels_all = np.array([], dtype=int)
    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs = inputs.to(config.device)
            labels = labels.to(config.device)
            outputs = model(inputs)

            loss = F.cross_entropy(outputs, labels)
            loss_total += loss.item()  # accumulate a float, not a graph-holding tensor
            labels = labels.data.cpu().numpy()
            preds = torch.max(outputs.data, 1)[1].cpu().numpy()
            labels_all = np.append(labels_all, labels)
            predict_all = np.append(predict_all, preds)

    acc = metrics.accuracy_score(labels_all, predict_all)

    report = metrics.classification_report(labels_all,
                                           predict_all,
                                           target_names=config.class_list,
                                           digits=4)
    confusion = metrics.confusion_matrix(labels_all, predict_all)

    print(acc)
    print(report)
    print(confusion)
    return acc, loss_total / len(test_loader), report, confusion  # mean per-batch loss
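
test() reads three attributes off its config argument: batch_size, device, and class_list. A minimal sketch of such a config, with placeholder class names (the real project's config surely carries more fields):

from dataclasses import dataclass, field
import torch

@dataclass
class Config:
    batch_size: int = 64
    device: torch.device = field(
        default_factory=lambda: torch.device('cuda' if torch.cuda.is_available() else 'cpu'))
    # Placeholder class names; replace with the actual cnews label set.
    class_list: list = field(default_factory=lambda: [f'class_{i}' for i in range(10)])
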
Example #2
                # Adversarial objective: reconstruct well while fooling the classifier.
                loss = loss_rec - current_alpha * loss_clf
                reset_grad([self.Encoder, self.Decoder])
                loss.backward()
                grad_clip([self.Encoder, self.Decoder], self.hps.max_grad_norm)
                self.ae_opt.step()
                info = {
                    f'{flag}/loss_rec': loss_rec.item(),
                    f'{flag}/G_loss_clf': loss_clf.item(),
                    f'{flag}/alpha': current_alpha,
                    f'{flag}/G_acc': acc,
                }
                slot_value = (iteration + 1, self.hps.iters) + tuple(info.values())
                log = 'G:[%06d/%06d], loss_rec=%.3f, loss_clf=%.2f, alpha=%.2e, acc=%.2f'
                print(log % slot_value)
                if iteration % 100 == 0:
                    for tag, value in info.items():
                        self.logger.scalar_summary(tag, value, iteration + 1)
                if iteration % 1000 == 0 or iteration + 1 == self.hps.iters:
                    self.save_model(model_path, iteration)
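
The loop above leans on two helpers, reset_grad and grad_clip, that the excerpt doesn't define. A plausible minimal sketch, assuming they zero the modules' gradients and clip by global norm:

import torch

def reset_grad(modules):
    # Zero out gradients on every module before the next backward pass.
    for m in modules:
        m.zero_grad()

def grad_clip(modules, max_grad_norm):
    # Clip the combined gradient norm across all parameters of the modules.
    params = [p for m in modules for p in m.parameters()]
    torch.nn.utils.clip_grad_norm_(params, max_grad_norm)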


if __name__ == '__main__':
    hps = Hps()
    hps.load('./hps/v7.json')
    hps_tuple = hps.get_tuple()
    dataset = myDataset('/storage/raw_feature/voice_conversion/vctk/vctk.h5',
                        '/storage/raw_feature/voice_conversion/vctk/64_513_2000k.json')
    data_loader = DataLoader(dataset)
    solver = Solver(hps_tuple, data_loader)
Example #3
import argparse

from torch.utils.data import DataLoader
# Hps is assumed to live alongside myDataset in utils; adjust to the project layout.
from utils import Hps, myDataset
from solver import Solver

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Note: store_true with default=True means --train can never be switched off.
    parser.add_argument('--train', default=True, action='store_true')
    parser.add_argument('--test', default=False, action='store_true')
    parser.add_argument('--load_model', default=False, action='store_true')
    parser.add_argument('-flag', default='train')
    parser.add_argument('-hps_path', default='./hps/v7.json')
    parser.add_argument('-load_model_path', default='/storage/model/voice_conversion/'
            'pretrain_model.pkl-19999')
    parser.add_argument('-dataset_path', default='/storage/raw_feature/voice_conversion/vctk/vctk.h5')
    parser.add_argument('-index_path', default='/storage/raw_feature/voice_conversion/vctk/128_513_2000k.json')
    parser.add_argument('-output_model_path', default='/storage/model/voice_conversion/model.pkl')
    args = parser.parse_args()
    hps = Hps()
    hps.load(args.hps_path)
    hps_tuple = hps.get_tuple()
    dataset = myDataset(args.dataset_path,
            args.index_path,
            seg_len=hps_tuple.seg_len)
    data_loader = DataLoader(dataset)

    solver = Solver(hps_tuple, data_loader)
    if args.load_model:
        solver.load_model(args.load_model_path)
    if args.train:
        solver.train(args.output_model_path, args.flag)
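
A side note on the flag definitions above: action='store_true' combined with default=True makes --train impossible to disable from the command line. The usual pattern is to default such flags to False and derive the "on by default" behavior explicitly, as in this small self-contained sketch:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--test', default=False, action='store_true')
args = parser.parse_args([])   # empty list stands in for sys.argv[1:]
train = not args.test          # train unless --test is given
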
Example #4
    parser.add_argument('--train', default=True, action='store_true')
    parser.add_argument('--load_model', default=False, action='store_true')
    parser.add_argument('-flag', default='train')
    parser.add_argument('-hps_path', default='./hps/clf.json')
    parser.add_argument('-load_model_path', default='/storage/model/voice_conversion/'
            'pretrain_model.pkl-19999')
    parser.add_argument('-encoder_model_path', default='/storage/model/voice_conversion/ae/model.pkl-59999')
    parser.add_argument('-dataset_path', default='/storage/feature/voice_conversion/vctk/log_vctk.h5')
    parser.add_argument('-train_index_path',
            default='/storage/feature/voice_conversion/vctk/128_513_2000k.json')
    parser.add_argument('-valid_index_path',
            default='/storage/feature/voice_conversion/vctk/128_513_2000k.json')
    parser.add_argument('-output_model_path', default='/storage/model/voice_conversion/model.pkl')
    args = parser.parse_args()
    hps = Hps()
    hps.load(args.hps_path)
    hps_tuple = hps.get_tuple()
    train_dataset = myDataset(args.dataset_path,
            args.train_index_path,
            seg_len=hps_tuple.seg_len)
    valid_dataset = myDataset(args.dataset_path,
            args.valid_index_path,
            dset='test',
            seg_len=hps_tuple.seg_len)
    data_loader = DataLoader(train_dataset)
    valid_data_loader = DataLoader(valid_dataset, batch_size=100)
    classifier = Classifier(hps_tuple, data_loader, valid_data_loader)
    classifier.load_encoder(args.encoder_model_path)
    if args.train:
        classifier.train(args.output_model_path, args.flag)
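
For reference, a script like this can also be exercised without a shell by handing an argument list straight to parse_args(); every path below is a placeholder, not one of the defaults above:

# Hypothetical programmatic invocation of the parser defined above.
args = parser.parse_args([
    '-hps_path', './hps/clf.json',
    '-dataset_path', './data/log_vctk.h5',
    '-train_index_path', './data/train_index.json',
    '-valid_index_path', './data/valid_index.json',
])
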
Example #5
                loss = loss_rec - current_alpha * loss_clf
                reset_grad([self.Encoder, self.Decoder])
                loss.backward()
                grad_clip([self.Encoder, self.Decoder], self.hps.max_grad_norm)
                self.ae_opt.step()
                info = {
                    f'{flag}/loss_rec': loss_rec.item(),
                    f'{flag}/G_loss_clf': loss_clf.item(),
                    f'{flag}/alpha': current_alpha,
                    f'{flag}/G_acc': acc,
                }
                slot_value = (iteration + 1, self.hps.iters) + tuple(info.values())
                log = 'G:[%06d/%06d], loss_rec=%.3f, loss_clf=%.2f, alpha=%.2e, acc=%.2f'
                print(log % slot_value)
                if iteration % 100 == 0:
                    for tag, value in info.items():
                        self.logger.scalar_summary(tag, value, iteration + 1)
                if iteration % 1000 == 0 or iteration + 1 == self.hps.iters:
                    self.save_model(model_path, iteration)


if __name__ == '__main__':
    hps = Hps()
    hps.load('./hps/v7.json')
    hps_tuple = hps.get_tuple()
    dataset = myDataset('/home/daniel/Documents/voice_integrador/vctk.h5',
            '/home/daniel/Documents/programacion/multitarget-voice-conversion-vctk/preprocess/speaker_id_by_gender.json')
    data_loader = DataLoader(dataset)
    solver = Solver(hps_tuple, data_loader)
Example #6
import time

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter  # or tensorboardX, depending on the project
from sklearn import metrics

# myDataset, evaluate, and get_time_dif are defined elsewhere in the project.


def train(checkpoint_path, model, config):
    start_time = time.time()

    model = nn.DataParallel(model, device_ids=[0, 1])

    train_dataset = myDataset(
        '/search/odin/wts/my-pytorch-try/text_cnn_rnn/data/cnews/cnews.train.txt',
        config)
    val_dataset = myDataset(
        '/search/odin/wts/my-pytorch-try/text_cnn_rnn/data/cnews/cnews.val.txt',
        config)
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=config.batch_size,
                              shuffle=True,
                              num_workers=2)
    val_loader = DataLoader(dataset=val_dataset,
                            batch_size=config.batch_size,
                            shuffle=False,  # deterministic order for evaluation
                            num_workers=2)

    model.train()
    optimizer = torch.optim.Adam(model.parameters(), lr=config.learning_rate)

    # Exponential learning-rate decay: after each epoch, lr = gamma * lr
    # scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
    total_batch = 0  # number of batches processed so far
    dev_best_loss = float('inf')
    dev_best_acc = 0.0
    last_improve = 0  # batch count at the last validation-loss improvement
    flag = False  # whether training has stalled for too long
    writer = SummaryWriter(log_dir=checkpoint_path + '/log/' +
                           time.strftime('%m-%d_%H.%M', time.localtime()))

    for epoch in range(config.num_epochs):
        for i, data in enumerate(train_loader, 0):  # iterate over the training set
            # get the inputs
            inputs, labels = data
            inputs = inputs.to(config.device)
            labels = labels.to(config.device)

            outputs = model(inputs)
            model.zero_grad()
            loss = F.cross_entropy(outputs, labels)
            loss.backward()
            optimizer.step()

            if total_batch % 100 == 0:
                golden_tag = labels.data.cpu()
                predict = torch.max(outputs.data, 1)[1].cpu()
                train_acc = metrics.accuracy_score(golden_tag, predict)
                dev_acc, dev_loss = evaluate(config, model, val_loader)
                if dev_acc > dev_best_acc and dev_loss < dev_best_loss:
                    dev_best_acc = dev_acc
                    dev_best_loss = dev_loss
                    torch.save(model.module.state_dict(),
                               checkpoint_path + '/cnn_model.pkl')
                    last_improve = total_batch
                    improve = '*'
                else:
                    improve = ''
                time_dif = get_time_dif(start_time)
                msg = 'Epoch: {0:>6}, Iter: {1:>6},  Train Loss: {2:>5.2},  Train Acc: {3:>6.2%},  Val Loss: {4:>5.2},  Val Acc: {5:>6.2%},  Time: {6} {7}'
                print(
                    msg.format(epoch, total_batch, loss.item(), train_acc,
                               dev_loss, dev_acc, time_dif, improve))
                writer.add_scalar("loss/train", loss.item(), total_batch)
                writer.add_scalar("loss/dev", dev_loss, total_batch)
                writer.add_scalar("acc/train", train_acc, total_batch)
                writer.add_scalar("acc/dev", dev_acc, total_batch)
                model.train()
            total_batch += 1
            if total_batch - last_improve > config.require_improvement:
                # validation loss hasn't dropped for over 1000 batches; stop training
                print("No optimization for a long time, auto-stopping...")
                flag = True
                break
        if flag:
            break
    writer.close()
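
train() calls an evaluate(config, model, val_loader) helper that the excerpt doesn't include. Judging by how its return value is unpacked into (dev_acc, dev_loss), a minimal sketch in the same style as test() from Example #1:

import numpy as np
import torch
import torch.nn.functional as F
from sklearn import metrics

def evaluate(config, model, data_loader):
    # Sketch only: returns (accuracy, mean per-batch cross-entropy), mirroring test().
    model.eval()
    loss_total = 0.0
    predict_all = np.array([], dtype=int)
    labels_all = np.array([], dtype=int)
    with torch.no_grad():
        for inputs, labels in data_loader:
            inputs = inputs.to(config.device)
            labels = labels.to(config.device)
            outputs = model(inputs)
            loss_total += F.cross_entropy(outputs, labels).item()
            labels_all = np.append(labels_all, labels.cpu().numpy())
            predict_all = np.append(predict_all, torch.max(outputs, 1)[1].cpu().numpy())
    acc = metrics.accuracy_score(labels_all, predict_all)
    return acc, loss_total / len(data_loader)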