# ----- RMSprop + gradient-accumulation variant -----
import torch
from torch.utils.data import DataLoader

# Project-local modules; show_example is a project helper assumed to be
# defined elsewhere, and dice_loss is sketched below.
from Unet import UNet
from carvana_dataset import CarvanaDataset


def train():
    batch_size = 2
    grad_accu_times = 8  # accumulate gradients over 8 forward passes per optimizer step
    init_lr = 0.01
    img_csv_file = 'train_masks.csv'
    train_img_dir = 'train'
    train_mask_dir = 'train_masks_png'

    dataset = CarvanaDataset(img_csv_file, train_img_dir, train_mask_dir)
    model = UNet().cuda()
    # size_average=True is deprecated; reduction='mean' is the equivalent.
    loss_fn = torch.nn.BCEWithLogitsLoss(reduction='mean')
    opt = torch.optim.RMSprop(model.parameters(), lr=init_lr)

    opt.zero_grad()
    forward_times = 0
    for epoch in range(30):
        data_loader = DataLoader(dataset, batch_size, shuffle=True, num_workers=2)
        # Step decay: divide the learning rate by 10 every 10 epochs.
        lr = init_lr * (0.1 ** (epoch // 10))
        for param_group in opt.param_groups:
            param_group['lr'] = lr

        for idx, batch_data in enumerate(data_loader):
            batch_input = batch_data['img'].cuda()
            batch_gt_mask = batch_data['mask'].cuda()

            pred_mask = model(batch_input)
            forward_times += 1

            if (idx + 1) % 10 == 0:
                show_example(batch_input[0], batch_gt_mask[0], torch.sigmoid(pred_mask[0]))

            # Combined objective: BCE on the logits plus soft Dice on the probabilities.
            loss = loss_fn(pred_mask, batch_gt_mask)
            loss += dice_loss(torch.sigmoid(pred_mask), batch_gt_mask)
            loss.backward()
            print('Epoch {:>3} | Batch {:>5} | lr {:>1.5f} | Loss {:>1.5f}'
                  .format(epoch + 1, idx + 1, lr, loss.item()))

            # Apply the accumulated gradients once every grad_accu_times batches.
            if forward_times == grad_accu_times:
                opt.step()
                opt.zero_grad()
                forward_times = 0
                print('\nUpdate weights ... \n')

        if (epoch + 1) % 5 == 0:
            checkpoint = {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'optimizer': opt.state_dict(),
            }
            torch.save(checkpoint, 'unet1024-{}'.format(epoch + 1))
        del data_loader
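
# dice_loss above is a project helper that is not shown in this snippet.
# A minimal sketch of a soft Dice loss, assuming `pred` holds probabilities
# in [0, 1] and `target` is a binary mask of the same shape:
def dice_loss(pred, target, eps=1.0):
    # Flatten everything but the batch dimension.
    pred = pred.contiguous().view(pred.size(0), -1)
    target = target.contiguous().view(target.size(0), -1)
    intersection = (pred * target).sum(dim=1)
    # 1 - Dice coefficient, smoothed with eps to avoid division by zero.
    dice = (2.0 * intersection + eps) / (pred.sum(dim=1) + target.sum(dim=1) + eps)
    return (1.0 - dice).mean()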
self.dir_mask = r"E:\pic\carvana\just_for_test\train_masks" self.save_path = r"checkpoint" self.cuda = False if torch.cuda.is_available(): self.cuda = True torch.backends.cudnn.benchmark = True self.pretrained = False self.net_path = r"checkpoint\unet-epoch26.pkl" if __name__ == '__main__': __spec__ = None opt = Option() dataset = CarvanaDataset(opt.dir_img, opt.dir_mask, scale=opt.scale) dataloader = DataLoader(dataset=dataset, batch_size=opt.batchsize, shuffle=True, num_workers=opt.workers) unet = UNet(in_dim=opt.in_dim) loss_func = nn.BCEWithLogitsLoss() if opt.cuda: unet = unet.cuda() loss_func = loss_func.cuda() optimizer = torch.optim.Adam(unet.parameters(), lr=opt.lr, weight_decay=0.0005) # 加载预训练的参数 if opt.pretrained:
# ----- inline training-script variant (truncated) -----
if __name__ == '__main__':
    # The model we implemented in the model file.
    m = UNet()
    # The criterion we are going to optimize.
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(m.parameters(), lr=0.001)
    if useCuda:
        m = m.cuda()
        criterion = criterion.cuda()

    ds = CarvanaDataset(train, train_masks)
    ds_test = CarvanaDataset(test, test_masks)
    dl = dt.DataLoader(ds, shuffle=True, num_workers=4, batch_size=5)
    dl_test = dt.DataLoader(ds_test, shuffle=False, num_workers=4, batch_size=5)

    global_iter = 0
    for epoch in range(0, n_epoch):
        print("Current epoch: ", epoch)
        epoch_loss = 0
        m.train(True)
        for it, (i, t) in enumerate(tqdm(dl)):
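            # The snippet is cut off here. A minimal continuation, modeled on
            # the train_net variant below (BCEWithLogitsLoss on float masks);
            # the exact original loop body is an assumption.
            if useCuda:
                i, t = i.cuda(), t.cuda()
            optimizer.zero_grad()
            o = m(i)
            loss = criterion(o, t.float())
            loss.backward()
            optimizer.step()
            global_iter += 1
            epoch_loss += loss.item()
        print("Epoch loss", epoch_loss / float(len(ds)))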
# ----- train/test loop with TensorBoard logging -----
def main():
    args = parse_arguments()
    model = SegmenterModel(3, 1).to(DEVICE)
    criterion = torch.nn.BCEWithLogitsLoss(reduction='sum').to(DEVICE)
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)

    ds = CarvanaDataset(TRAIN_DIR, TRAIN_MASKS_DIR)
    ds_test = CarvanaDataset(TEST_DIR, TEST_MASKS_DIR, is_train=False)
    dl = dt.DataLoader(ds, shuffle=True, num_workers=8,
                       batch_size=args.batch_size, pin_memory=True)
    dl_test = dt.DataLoader(ds_test, shuffle=False, num_workers=8,
                            batch_size=args.batch_size, pin_memory=True)

    global_iter = 0
    for epoch in range(0, args.n_epochs):
        print("Current epoch: ", epoch)
        epoch_loss = 0
        model.train(True)
        for i, (input_batch, target_batch) in enumerate(tqdm(dl)):
            input_batch = input_batch.to(DEVICE)
            target_batch = target_batch.to(DEVICE)
            optimizer.zero_grad()
            output_batch = model(input_batch)
            loss = criterion(output_batch, target_batch)
            loss.backward()
            optimizer.step()
            global_iter += 1
            epoch_loss += loss.item()

        # reduction='sum' sums over every element, so normalize by dataset size.
        epoch_loss = epoch_loss / float(len(ds))
        print("Epoch loss", epoch_loss)
        tb_writer.add_scalar('Loss/Train', epoch_loss, epoch)

        print("Make test")
        test_loss = 0
        model.train(False)
        # Pick three random test batches whose images get logged to TensorBoard.
        tb_out = np.random.choice(range(0, len(dl_test)), 3)
        for i, (input_batch, target_batch) in enumerate(tqdm(dl_test)):
            input_batch = input_batch.to(DEVICE)
            target_batch = target_batch.to(DEVICE)
            with torch.no_grad():
                output_batch = model(input_batch)
                loss = criterion(output_batch, target_batch)
            test_loss += loss.item()
            for img_id, checkpoint in enumerate(tb_out):
                if checkpoint == i:
                    tb_writer.add_image(f'Image/Test_input_{img_id}', input_batch[0].cpu(), epoch)
                    tb_writer.add_image(f'Image/Test_target_{img_id}', target_batch[0].cpu(), epoch)
                    # Threshold the logits at 0 (p = 0.5) and cast to float for add_image.
                    tb_writer.add_image(f'Image/Test_output_{img_id}',
                                        (output_batch[0].cpu() > 0).float(), epoch)

        test_loss = test_loss / float(len(ds_test))
        print("Test loss", test_loss)
        tb_writer.add_scalar('Loss/Test', test_loss, epoch)
        torch.save(model.state_dict(), "/data/rvgorb/hw11/unet_dump_recent")
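
# Assumed glue, not part of the original snippet: this script also needs the
# argument parser, device, TensorBoard writer, and data paths referenced above.
# All names and defaults here are illustrative placeholders.
import argparse

import torch
from torch.utils.tensorboard import SummaryWriter

DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
tb_writer = SummaryWriter()

TRAIN_DIR = 'train'              # placeholder paths; the originals are not shown
TRAIN_MASKS_DIR = 'train_masks'
TEST_DIR = 'test'
TEST_MASKS_DIR = 'test_masks'

def parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=4)
    parser.add_argument('--n_epochs', type=int, default=30)
    parser.add_argument('--learning_rate', type=float, default=1e-4)
    return parser.parse_args()

if __name__ == '__main__':
    main()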
# ----- CrossEntropyLoss variant -----
def train_net(useCuda=True, n_epoch=100):
    # The model we implemented in model.py.
    m = SegmenterModel()
    # The criterion we are going to optimize.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(m.parameters(), lr=0.001)
    if useCuda:
        m = m.cuda()
        criterion = criterion.cuda()

    ds = CarvanaDataset(train, train_masks)
    ds_test = CarvanaDataset(test, test_masks)
    dl = dt.DataLoader(ds, shuffle=True, num_workers=4, batch_size=5)
    dl_test = dt.DataLoader(ds_test, shuffle=False, num_workers=4, batch_size=5)

    global_iter = 0
    for epoch in range(0, n_epoch):
        print("Current epoch: ", epoch)
        epoch_loss = 0
        m.train(True)
        for it, (i, t) in enumerate(tqdm(dl)):
            t = t.long()
            if useCuda:
                i = i.cuda()
                t = t.cuda()
            optimizer.zero_grad()  # was missing: without it, gradients accumulate across batches
            o = m(i)
            # CrossEntropyLoss expects class-index targets of shape (N, H, W).
            t = t.view((t.shape[0], t.shape[2], t.shape[3]))
            loss = criterion(o, t)
            loss.backward()
            optimizer.step()
            global_iter += 1
            epoch_loss += loss.item()

        epoch_loss = epoch_loss / float(len(ds))
        print("Epoch loss", epoch_loss)
        tb_writer.add_scalar('Loss/Train', epoch_loss, epoch)

        print("Make test")
        test_loss = 0
        m.train(False)
        tb_out = np.random.choice(range(0, len(dl_test)), 3)
        for it, (i, t) in enumerate(tqdm(dl_test)):
            t = t.long()
            if useCuda:
                i = i.cuda()
                t = t.cuda()
            with torch.no_grad():
                o = m(i)
                t = t.view((t.shape[0], t.shape[2], t.shape[3]))
                loss = criterion(o, t)
            # Predicted class per pixel.
            o = torch.argmax(o, dim=1)
            test_loss += loss.item()
            for k, c in enumerate(tb_out):
                if c == it:
                    tb_writer.add_image('Image/Test_input_%d' % k, i[0].cpu(), epoch)
                    tb_writer.add_image('Image/Test_target_%d' % k, t[0].cpu(), epoch, dataformats='HW')
                    tb_writer.add_image('Image/Test_output_%d' % k, o[0].cpu(), epoch, dataformats='HW')

        test_loss = test_loss / float(len(ds_test))
        print("Test loss", test_loss)
        tb_writer.add_scalar('Loss/Test', test_loss, epoch)
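
# Assumed entry point for this variant (not in the original snippet); it
# presumes tb_writer and the module-level dataset paths train / train_masks /
# test / test_masks are defined elsewhere in the file.
from torch.utils.tensorboard import SummaryWriter
tb_writer = SummaryWriter()

if __name__ == '__main__':
    train_net(useCuda=torch.cuda.is_available(), n_epoch=100)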
# ----- MultiLabelSoftMarginLoss + SGD variant -----
import torch
from torch.utils.data import DataLoader
from Unet import UNet
from carvana_dataset import CarvanaDataset
import numpy as np
import matplotlib.pyplot as plt

img_csv_file = './data/train_masks.csv'
train_img_dir = './data/train'
train_mask_dir = './data/train_masks_png'

dataset = CarvanaDataset(img_csv_file, train_img_dir, train_mask_dir)
trainLoader = DataLoader(dataset, shuffle=True, batch_size=4)

net = UNet().cuda()
# MultiLabelSoftMarginLoss applies a sigmoid internally, so the network output
# must be passed in as raw logits (the original applied an extra sigmoid
# first, which double-squashes the scores).
loss_fn = torch.nn.MultiLabelSoftMarginLoss()
opt = torch.optim.SGD(net.parameters(), lr=0.000001, momentum=0.5)

lossValue = []
for epoch in range(7):
    runningLoss = 0.0
    for i, (img, label) in enumerate(trainLoader):
        inputImg, lbl = img.cuda(), label.cuda()
        opt.zero_grad()  # zero per step; the original zeroed only once, before the loop
        imgOut = net(inputImg)
        imgOut = imgOut.squeeze(1)  # drop the channel dim, assuming (N, 1, H, W) logits
        loss = loss_fn(imgOut, lbl)
        loss.backward()
        opt.step()
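        # The snippet is cut off here; the bookkeeping below is an assumed
        # completion that puts the otherwise-unused runningLoss and lossValue
        # to work.
        runningLoss += loss.item()
    lossValue.append(runningLoss / len(trainLoader))
    print('epoch {} | loss {:.5f}'.format(epoch, lossValue[-1]))

# Plot the per-epoch loss curve with the matplotlib import above.
plt.plot(lossValue)
plt.xlabel('epoch')
plt.ylabel('loss')
plt.savefig('loss_curve.png')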
# ----- BCEWithLogitsLoss variant with TensorBoard -----
def main():
    args = parse_arguments()
    model = SegmenterModel().to(DEVICE)                  # the model
    criterion = torch.nn.BCEWithLogitsLoss().to(DEVICE)  # the loss
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)  # optimization algorithm

    ds = CarvanaDataset(TRAIN_DIR, TRAIN_MASKS_DIR)      # training set
    ds_test = CarvanaDataset(TEST_DIR, TEST_MASKS_DIR)   # test set
    # Loaders that feed batches of data tensors.
    dl = dt.DataLoader(ds, shuffle=True, num_workers=8, batch_size=args.batch_size)
    dl_test = dt.DataLoader(ds_test, shuffle=False, num_workers=8, batch_size=args.batch_size)

    global_iter = 0
    for epoch in range(0, args.n_epochs):
        print("Current epoch: ", epoch)
        epoch_loss = 0
        model.train(True)
        for i, (input_batch, target_batch) in enumerate(tqdm(dl)):
            optimizer.zero_grad()
            input_batch = input_batch.to(DEVICE)
            target_batch = target_batch.to(DEVICE)
            output_batch = model(input_batch)
            loss = criterion(output_batch, target_batch)
            loss.backward()
            optimizer.step()
            global_iter += 1
            epoch_loss += loss.item()

        epoch_loss = epoch_loss / float(len(ds))
        print("Epoch loss", epoch_loss)
        tb_writer.add_scalar('Loss/Train', epoch_loss, epoch)

        print("Make test")
        test_loss = 0
        model.train(False)
        tb_out = np.random.choice(range(0, len(dl_test)), 3)
        for i, (input_batch, target_batch) in enumerate(tqdm(dl_test)):
            input_batch = input_batch.to(DEVICE)
            target_batch = target_batch.to(DEVICE)
            with torch.no_grad():
                output_batch = model(input_batch)
                loss = criterion(output_batch, target_batch)
            test_loss += loss.item()
            for img_id, checkpoint in enumerate(tb_out):
                if checkpoint == i:
                    tb_writer.add_image(f'Image/Test_input_{img_id}', input_batch[0].cpu(), epoch)
                    tb_writer.add_image(f'Image/Test_target_{img_id}', target_batch[0].cpu(), epoch)
                    # Threshold the logits at 0 (p = 0.5) and cast to float for add_image.
                    tb_writer.add_image(f'Image/Test_output_{img_id}',
                                        (output_batch[0].cpu() > 0).float(), epoch)

        test_loss = test_loss / float(len(ds_test))
        print("Test loss", test_loss)
        tb_writer.add_scalar('Loss/Test', test_loss, epoch)
        torch.save(model.state_dict(), "unet_dump_recent")
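
# Assumed entry point (not in the original snippet); parse_arguments, DEVICE,
# and tb_writer would be defined as in the previous variant.
if __name__ == '__main__':
    main()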