Example #1

# Imports implied by the excerpt. Printer, DenoisingDataset and Discriminator
# are project-specific helpers, and args, train_data, test_reserve, use_D and
# device are assumed to be set up by the omitted surrounding code.
import datetime
import os
import sys

import torch
from torch.utils.data import DataLoader

torch.cuda.manual_seed(123)

expname = (datetime.datetime.now().isoformat()[:-10] + '_' +
           '_'.join(sys.argv).replace('/', '-'))[0:255]
model_dir = os.path.join('models', expname)
txt_path = os.path.join('results', 'train', expname)
os.makedirs(model_dir, exist_ok=True)
os.makedirs(os.path.dirname(txt_path), exist_ok=True)  # assuming the Printer below writes to txt_path, its directory must exist

frozen_generator = args.freeze_generator

p = Printer(file_path=txt_path)

p.print(args)
p.print("cmd: python3 " + " ".join(sys.argv))

DDataset = DenoisingDataset(train_data, test_reserve=test_reserve)
data_loader = DataLoader(dataset=DDataset,
                         num_workers=args.threads,
                         drop_last=True,
                         batch_size=args.batch_size,
                         shuffle=True)
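# Note on the loader above: drop_last=True discards the final incomplete batch
# so every step sees a full batch, and shuffle=True reshuffles the training
# pairs at the start of each epoch.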

if use_D:
    discriminator = Discriminator(network=args.d_network,
                                  model_path=args.d_model_path,
                                  device=device,
                                  loss_function=args.d_loss_function,
                                  activation=args.d_activation,
                                  funit=args.d_funit,
                                  beta1=args.beta1,
                                  lr=args.d_lr)  # excerpt truncated here; further keyword arguments omitted
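
# Side note on the seeding at the top of this example: torch.cuda.manual_seed(123)
# only seeds the current CUDA device. A minimal sketch of a fuller reproducibility
# setup follows; the seed_everything helper is hypothetical, not part of the example.
import random

import numpy as np
import torch

def seed_everything(seed: int = 123) -> None:
    random.seed(seed)                 # Python's built-in RNG
    np.random.seed(seed)              # NumPy's global RNG
    torch.manual_seed(seed)           # PyTorch CPU RNG
    torch.cuda.manual_seed_all(seed)  # all visible CUDA devices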

Example #2

# The excerpt starts inside an if/elif chain that selects the loss function
# from args.lossf. The surrounding setup (imports, argument parsing, the cuda
# flag, model and batch_size) is omitted from the excerpt.
elif args.lossf == 'MSE':
    criterion = torch.nn.MSELoss()
else:
    sys.exit('Error: requested loss function ' + args.lossf +
             ' has not been implemented.')
if cuda:
    model = model.cuda()
    criterion = criterion.cuda()
else:
    print("Warning: running on the CPU is not recommended")
# Dataset
# TODO: replace the hard-coded num_workers below
DDataset = DenoisingDataset(train_data,
                            compressionmin=args.compressionmin,
                            compressionmax=args.compressionmax,
                            sigmamin=args.sigmamin,
                            sigmamax=args.sigmamax,
                            test_reserve=args.test_reserve,
                            yval=args.yval,
                            do_sizecheck=args.do_sizecheck)
DLoader = DataLoader(dataset=DDataset,
                     num_workers=4,
                     drop_last=True,
                     batch_size=batch_size,
                     shuffle=True)
# Crop the patch borders before computing the loss: for UNet, the usable
# output is the centered ucs x ucs window of the cs x cs input; other models
# keep a larger window, trimming (cs - ucs) / 4 pixels on each side.
if args.model == 'UNet':
    loss_crop_lb = int((DDataset.cs - DDataset.ucs) / 2)
    loss_crop_up = loss_crop_lb + DDataset.ucs
else:
    loss_crop_lb = int((DDataset.cs - DDataset.ucs) / 4)
    loss_crop_up = DDataset.cs - loss_crop_lb
print('Using %s as bounds' % str((loss_crop_lb, loss_crop_up)))
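# Worked example with assumed sizes (not from the original code): if
# DDataset.cs == 128 and DDataset.ucs == 96, the UNet branch gives
# loss_crop_lb == 16 and loss_crop_up == 112 (a centered 96-pixel window),
# while the other branch gives loss_crop_lb == 8 and loss_crop_up == 120.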

Example #3

# Imports implied by the excerpt. DnCNN, findLastCheckpoint, sum_squared_error
# and DenoisingDataset are project-specific, and save_dir, cuda, args,
# batch_size and n_epoch are assumed to be defined earlier.
import os
import time

import torch
import torch.optim as optim
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.data import DataLoader

model = DnCNN()

initial_epoch = findLastCheckpoint(save_dir=save_dir)  # load the last model in matconvnet style
if initial_epoch > 0:
    print('resuming by loading epoch %03d' % initial_epoch)
    # model.load_state_dict(torch.load(os.path.join(save_dir, 'model_%03d.pth' % initial_epoch)))
    # note: this loads the whole pickled model; loading a state_dict (above) is the safer pattern
    model = torch.load(os.path.join(save_dir, 'model_%03d.pth' % initial_epoch))
model.train()
# criterion = nn.MSELoss(reduction = 'sum')  # PyTorch 0.4.1
criterion = sum_squared_error()
if cuda:
    model = model.cuda()
    # device_ids = [0]
    # model = nn.DataParallel(model, device_ids=device_ids).cuda()
    # criterion = criterion.cuda()
DDataset = DenoisingDataset(args.train_data)
DLoader = DataLoader(dataset=DDataset, num_workers=8, drop_last=True, batch_size=batch_size, shuffle=True)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
scheduler = MultiStepLR(optimizer, milestones=[30, 60, 90], gamma=0.2)  # learning rate decay schedule
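# For example (assuming args.lr == 1e-3, a value not given in the excerpt):
# the learning rate becomes 2e-4 after epoch 30, 4e-5 after 60 and 8e-6 after 90.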
for epoch in range(initial_epoch, n_epoch):
    scheduler.step(epoch)  # step to the learning rate of this epoch (passing an
                           # explicit epoch is deprecated in recent PyTorch)
    epoch_loss = 0
    start_time = time.time()

    for n_count, batch_yx in enumerate(DLoader):
        optimizer.zero_grad()
        # batch_yx apparently holds (noisy, clean) pairs, hence the swapped unpacking
        if cuda:
            batch_x, batch_y = batch_yx[1].cuda(), batch_yx[0].cuda()
        else:
            batch_x, batch_y = batch_yx[1], batch_yx[0]  # CPU fallback
        loss = criterion(model(batch_y), batch_x)
        epoch_loss += loss.item()
        loss.backward()
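        # The parameter update, optimizer.step(), would typically follow here;
        # the excerpt ends at loss.backward().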