Example 1
def train(config):
    lera.log_hyperparams({
        "title": "hw1",
        "epoch": config.epochs,
        "lr": config.lr
    })
    dataset = img_dataset("./dataset/train", "train")
    dataloader = torch.utils.data.DataLoader(dataset=dataset,
                                             batch_size=config.bs,
                                             shuffle=True,
                                             drop_last=True)

    net = Classifier(num_classes=13).cuda()
    net.load_state_dict(
        torch.load(
            join(f"{config.weight_path}",
                 f"{config.pre_epochs}_classifier.pth")))
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = optim.Adam(net.parameters(), lr=config.lr)
    for epoch in range(config.epochs):
        for _, data in enumerate(dataloader, 0):
            optimizer.zero_grad()
            net.train()
            inputs, labels = data
            inputs = inputs.cuda()
            labels = labels.cuda()
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            _, predicted = torch.max(outputs.data, 1)
            correct_counts = predicted.eq(labels.data.view_as(predicted))
            train_acc = torch.sum(correct_counts).item() / predicted.size(0)
        lera.log({"loss": loss.item(), "acc": train_acc})
        print("epoch:{}/{}, loss:{}, acc:{:02f}".format(
            epoch + 1 + config.pre_epochs,
            config.epochs + config.pre_epochs,
            loss.item(),
            train_acc,
        ))
        if (epoch + 1 + config.pre_epochs) % 10 == 0:
            torch.save(
                net.state_dict(),
                join(
                    f"{config.weight_path}",
                    f"{epoch + 1 + config.pre_epochs}_classifier.pth",
                ),
            )
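
Stripped of the model-specific details, Example 1 relies on just two lera calls: log_hyperparams once before the loop and log once per epoch. A minimal, self-contained sketch of that pattern (run_one_epoch is a made-up stand-in; only the two lera calls come from the example above):

import random

import lera

def run_one_epoch():
    # hypothetical stand-in for a real training epoch
    return random.random(), random.random()

lera.log_hyperparams({"title": "hw1", "epoch": 10, "lr": 1e-3})
for epoch in range(10):
    loss, acc = run_one_epoch()
    lera.log({"loss": loss, "acc": acc})  # one dict of scalars per epoch
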
Example 2
global_par_dict = {
    'title': str('TDAAv4 Transformer PIT Tasnet.'),
    'updates': updates,
    'batch_size': config.batch_size,
    'WFM': config.WFM,
    'global_emb': config.global_emb,
    'schmidt': config.schmidt,
    'log path': str(log_path),
    'selfTune': config.is_SelfTune,
    'cnn': config.speech_cnn_net,  # whether to use a CNN structure for extraction
    'relitu': config.relitu,
    'ct_recu': config.ct_recu,  # whether ct is built from decaying attention weights
    'loss': str(config.loss),
    'score fnc': str(config.infer_classifier),
}
lera.log_hyperparams(global_par_dict)
for item in list(global_par_dict.keys()):
    writer.add_text(item, str(global_par_dict[item]))


def train(epoch):
    global e, updates, total_loss, start_time, report_total, report_correct, total_loss_sgm, total_loss_ss
    e = epoch
    model.train()
    SDR_SUM = np.array([])
    SDRi_SUM = np.array([])

    if updates <= config.warmup:  # if not in the warmup phase, step the scheduler normally
        pass
    elif config.schedule and scheduler.get_lr()[0] > 5e-7:
        scheduler.step()
if device.type == 'cuda':
    os.environ["CUDA_VISIBLE_DEVICES"] = str(
        np.argmax([
            int(x.split()[2]) for x in subprocess.Popen(
                "nvidia-smi -q -d Memory | grep -A4 GPU | grep Free",
                shell=True,
                stdout=subprocess.PIPE).stdout.readlines()
        ]))
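
The nvidia-smi one-liner above selects the GPU with the most free memory by parsing the "Free" lines of the Memory query. A more readable equivalent might look like this (a sketch; it assumes nvidia-smi is on PATH and keeps the same grep-based parsing):

import os
import subprocess

import numpy as np

def freest_gpu_index():
    # free memory (MiB) per GPU, in the order nvidia-smi reports them
    out = subprocess.check_output(
        "nvidia-smi -q -d Memory | grep -A4 GPU | grep Free", shell=True)
    free_mib = [int(line.split()[2]) for line in out.decode().splitlines()]
    return int(np.argmax(free_mib))

os.environ["CUDA_VISIBLE_DEVICES"] = str(freest_gpu_index())
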

##

lera.log_hyperparams({
    'title': 'Image Evaluator',
    'batch_size': BATCH_SIZE,
    'epochs': epoch_count,
    'optimizer': 'Adam',
    'lr': learning_rate,
})

##

best_acc = 0  # best test accuracy
start_epoch = 0  # start from epoch 0 or last checkpoint epoch

# Data
print('==> Preparing data..')
transform_train = transforms.Compose([
    transforms.RandomCrop(224, padding=4),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
Example 4
def train(args):
    try:
        os.makedirs(args.save_img_path)
    except OSError:
        pass

    try:
        os.makedirs(args.weight_path)
    except OSError:
        pass

    lera.log_hyperparams(
        {
            "title": "hw2",
            "batch_size": args.bs,
            "epochs": args.epochs,
            "g_lr": args.g_lr,
            "d_lr": args.d_lr,
            "z_size": args.z_size,
        }
    )

    # dataset
    dataloader = data_loader(
        args.data_path, args.imgsize, args.bs, shuffle=True
    )

    # model
    generator = Generator(args.bs, args.imgsize, z_dim=args.z_size).cuda()
    discriminator = Discriminator(args.bs, args.imgsize).cuda()
    if args.pre_epochs != 0:
        generator.load_state_dict(
            torch.load(
                join(f"{args.weight_path}", f"generator_{args.pre_epochs}.pth")
            )
        )

        discriminator.load_state_dict(
            torch.load(
                join(
                    f"{args.weight_path}",
                    f"discriminator_{args.pre_epochs}.pth",
                )
            )
        )

    # optimizer
    g_optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, generator.parameters()), lr=args.g_lr
    )
    d_optimizer = torch.optim.SGD(
        filter(lambda p: p.requires_grad, discriminator.parameters()),
        lr=args.d_lr,
    )

    # validate noise
    fixed_noise = torch.randn(9, args.z_size).cuda()

    # train
    for epoch in range(args.pre_epochs, args.epochs):
        for i, data in enumerate(dataloader):
            discriminator.train()
            generator.train()
            # train discriminator
            if i % 5 == 0:
                d_optimizer.zero_grad()
                real_img = data[0].cuda() * 2 - 1  # scale real images to (-1, 1)
                d_real, _, _ = discriminator(real_img)
                z = torch.randn(args.bs, args.z_size).cuda()
                fake_img, _, _ = generator(z)
                d_fake, _, _ = discriminator(fake_img)

                # hinge loss
                d_loss_real = torch.nn.ReLU()(1.0 - d_real).mean()
                d_loss_fake = torch.nn.ReLU()(1.0 + d_fake).mean()
                d_loss = d_loss_real + d_loss_fake
                d_loss.backward()

                d_optimizer.step()
            # train generator
            g_optimizer.zero_grad()
            z = torch.randn(args.bs, args.z_size).cuda()
            fake_img, _, _ = generator(z)
            g_fake, _, _ = discriminator(fake_img)

            # hinge loss
            g_loss = -g_fake.mean()
            g_loss.backward()
            g_optimizer.step()

            if i % 100 == 0:
                lera.log({"d_loss": d_loss.item(), "g_loss": g_loss.item()})
                print(
                    "[epoch:%4d/%4d %3d/%3d] \t d_loss: %0.6f \t g_loss: %0.6f"
                    % (
                        epoch + 1,
                        args.epochs,
                        i,
                        len(dataloader),
                        d_loss.item(),
                        g_loss.item(),
                    )
                )
                if i % 300 == 0:
                    validate(
                        generator, i, epoch, args.save_img_path, fixed_noise
                    )

        torch.save(
            discriminator.state_dict(),
            f"./{args.weight_path}/discriminator_{epoch+1}.pth",
        )
        torch.save(
            generator.state_dict(),
            f"./{args.weight_path}/generator_{epoch+1}.pth",
        )
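
For reference, the hinge losses in Example 4 penalize real scores below +1 and fake scores above -1, while the generator simply pushes fake scores upward. A toy check with made-up discriminator scores (illustrative only, not part of the example):

import torch

d_real = torch.tensor([1.5, 0.2])   # scores on real images
d_fake = torch.tensor([-1.2, 0.8])  # scores on generated images

d_loss_real = torch.relu(1.0 - d_real).mean()  # only the 0.2 score is penalized -> 0.4
d_loss_fake = torch.relu(1.0 + d_fake).mean()  # only the 0.8 score is penalized -> 0.9
d_loss = d_loss_real + d_loss_fake             # 1.3
g_loss = -d_fake.mean()                        # 0.2; decreases as fake scores rise
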
Example 5
scores = collections.OrderedDict(zip(config.metric, scores))
best_SDR = 0.0

with open(opt.label_dict_file, 'r') as f:
    label_dict = json.load(f)

# train
lera.log_hyperparams({
    'title': str('SS CN v0.2a'),
    'updates': updates,
    'batch_size': config.batch_size,
    'WFM': config.WFM,
    'MLMSE': config.MLMSE,
    'top1': config.top1,  # whether the top-1 method is used to produce the second channel
    'global_emb': config.global_emb,
    # 'spk_emb_size': config.SPK_EMB_SIZE,  # note: must match spk_emb above when global_emb is used; otherwise it corresponds to the decoder's hidden_size
    # 'hidden_mix': config.hidden_mix,  # when global emb is not used, this mode concatenates the hidden state with the next-step embedding as the input to ss
    'schmidt': config.schmidt,
    'log path': str(log_path),
    'selfTune': config.is_SelfTune,
    'is_dis': config.is_dis,
    'cnn': config.speech_cnn_net,  # whether to use a CNN structure for extraction
    'relitu': config.relitu,
    'ct_recu': config.ct_recu,  # whether ct is built from decaying attention weights
})


def train(epoch):
    e = epoch
    model.train()
    SDR_SUM = np.array([])
def train(config):
    print('Random seed: %d' % int(config.seed))
    torch.manual_seed(config.seed)
    
    torch.backends.cudnn.benchmark = True

    dset = config.dataset
    if dset == 'modelnet10' or dset == 'modelnet40':
        dataset = ClsDataset(root=config.root, npoints=config.npoints, train=True)
        test_dataset = ClsDataset(root=config.root, npoints=config.npoints, train=False)
    else:
        raise NotImplementedError('Dataset not supported.')
    
    print('Selected %s' % dset)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=config.batchsize, shuffle=True, 
                num_workers=config.workers)
    test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=config.batchsize, shuffle=True, 
        num_workers=config.workers)

    num_classes = dataset.num_classes
    print('number of classes: %d' % num_classes)
    print('train set size: %d | test set size: %d' % (len(dataset), len(test_dataset)))
    try:
        os.makedirs(config.outf)
    except:
        pass

    blue = lambda x: '\033[94m' + x + '\033[0m'
    yellow = lambda x: '\033[93m' + x + '\033[0m'
    red = lambda x: '\033[91m' + x + '\033[0m'

    classifier = PointNetCls(k=num_classes)

    if config.model != '':
        classifier.load_state_dict(torch.load(config.model))

    optimizer = optim.SGD(classifier.parameters(), lr=config.lr, momentum=config.momentum)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    classifier.to(device)
    if config.mgpu:
        classifier = torch.nn.DataParallel(classifier, device_ids=config.gpuids)

    num_batch = len(dataset) / config.batchsize

    lera.log_hyperparams({
        'title': dset, 
        'batchsize': config.batchsize, 
        'epochs': config.nepochs, 
        'npoints': config.npoints, 
        'optimizer': 'SGD', 
        'lr': config.lr, 
        })

    for epoch in range(config.nepochs):
        train_acc_epoch, test_acc_epoch = [], []
        for i, data in enumerate(dataloader):
            points, labels = data
            points = points.transpose(2, 1)
            labels = labels[:, 0]
            points, labels = points.to(device), labels.to(device)
            optimizer.zero_grad()
            classifier = classifier.train()
            pred, _ = classifier(points)
            pred = pred.view(-1, num_classes)
            # print(pred.size(), labels.size())
            loss = F.nll_loss(pred, labels)
            loss.backward()
            optimizer.step()
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(labels.data).cpu().sum()
            train_acc = correct.item() / float(config.batchsize)
            print('epoch %d: %d/%d | train loss: %f | train acc: %f' % (epoch+1, i+1, num_batch+1, loss.item(), train_acc))
            train_acc_epoch.append(train_acc)
            lera.log({
                'train loss': loss.item(), 
                'train acc': train_acc
                })

            if (i+1) % 10 == 0:
                j, data = next(enumerate(test_dataloader, 0))
                points, labels = data
                points = points.transpose(2, 1)
                labels = labels[:, 0]
                points, labels = points.to(device), labels.to(device)
                classifier = classifier.eval()
                with torch.no_grad():
                    pred, _ = classifier(points)
                pred = pred.view(-1, num_classes)
                loss = F.nll_loss(pred, labels)
                pred_choice = pred.data.max(1)[1]
                correct = pred_choice.eq(labels.data).cpu().sum()
                test_acc = correct.item() / float(config.batchsize)
                print(blue('epoch %d: %d/%d | test loss: %f | test acc: %f') % (epoch+1, i+1, num_batch+1, loss.item(), test_acc))
                test_acc_epoch.append(test_acc)
                lera.log({
                    'test loss': loss.item(), 
                    'test acc': test_acc
                    })
        print(yellow('epoch %d | mean train acc: %f') % (epoch+1, np.mean(train_acc_epoch)))
        print(red('epoch %d | mean test acc: %f') % (epoch+1, np.mean(test_acc_epoch)))
        lera.log({
            'train acc epoch': np.mean(train_acc_epoch), 
            'test acc epoch': np.mean(test_acc_epoch)})
        torch.save(classifier.state_dict(), '%s/%s_model_%d.pth' % (config.outf, config.dataset, epoch))
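
The accuracy bookkeeping in the classification loop above (pred.data.max(1)[1] followed by eq(...).sum()) is the usual argmax-and-compare pattern; a toy check with made-up scores (illustrative only):

import torch

pred = torch.tensor([[0.1, 0.7, 0.2],
                     [0.6, 0.3, 0.1]])   # (batch=2, classes=3) scores
labels = torch.tensor([1, 2])

pred_choice = pred.max(1)[1]             # argmax per row -> tensor([1, 0])
correct = pred_choice.eq(labels).sum()   # 1 of the 2 predictions is right
acc = correct.item() / labels.size(0)    # 0.5
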
Example 7
                        .permute(0, 3, 1, 4, 2).contiguous() # [4, w, 4, w, 3]
                        .view(w * 4, w * 4, 3))              # [w * 4, w * 4, 3]
            lera.log_image('reconstruction', result.numpy(), clip=(0, 1))

    # continue training
    if step < total_steps:
        train(epoch, step)

## Train

lera.enabled(args.lera)
lera.log_hyperparams({
    'title' : "VQ-VAE",
    'dataset': dataset,
    'batch_size': batch_size,
    'K': K,
    'lr': lr,
    'D': D,
    'beta': beta,
    'total_steps' : total_steps
    })
lera.log_file(__file__)

enc = encoder(D) 
dec = decoder(D)
embeddings = torch.randn(K, D).div(D)
sensitivity = torch.zeros(K)

# calculate number of embedding vectors used
emb_count = torch.zeros(K)

if use_cuda: