Example #1
    def __init__(self, method_name="runTest"):
        super().__init__(method_name)
        self._current_base_path = Path(__file__).parent.parent.resolve()
        init_config(str(self._current_base_path / "set_locations.ini"))

        ds = TrainDataset(ds_path=config.get('DataPaths', 'run_data'))
        logger.info(f"Found unique users: {len(ds.get_users())}")
        self.train_dataset = ds.create_dataset()

    def test_train_dataset_creation(self):
        ds = TrainDataset(ds_path=config.get('DataPaths', 'run_data'))
        logger.info(f"Found unique users: {len(ds.get_users())}")
        gaze_data = ds.create_dataset()

        self.assertTrue(len(gaze_data) > 0)
        logger.info(f"Length of dataset is: {gaze_data.shape} with columns: {gaze_data.columns}")
        logger.info(f"Size of dataset is {sys.getsizeof(gaze_data) / 1048576} MB.")
        logger.info(f"Unique stimulus types: {gaze_data['stimulus_type'].unique()}")
Example #3
File: train.py Project: HCShi/IONet
def main():
    args = get_args()
    torch.backends.cudnn.enabled = False
    torch.backends.cudnn.benchmark = False
    torch.multiprocessing.set_sharing_strategy('file_system')

    train_loader = torch.utils.data.DataLoader(
        TrainDataset(
            args=args,
            transform=transforms.Compose([
                transforms.CenterCrop((224, 224)),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor()
            ])),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=0)

    video_val_loader = torch.utils.data.DataLoader(
        VideoDataset(
            args=args,
            transform=transforms.Compose([
                transforms.CenterCrop((224, 224)),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor()
            ]),
            test_mode=True),
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=0)

    print("start training")
    for epoch in range(args.epochs):
        train(train_loader, video_val_loader, args)
Example #4
    def __init__(self, method_name="runTest"):
        logger.info("Eye movements module testing started")
        super().__init__(method_name)
        init_config("../set_locations.ini")

        self.train_dataset = TrainDataset(
            config.get("DataPaths", "run_data")).create_dataset()
        logger.info(f"Shape of loaded data: {self.train_dataset.shape}")
        logger.info(f"Unique users: {self.train_dataset['user_id'].nunique()}")
        logger.info(
            f"Unique sessions: {self.train_dataset['session_id'].nunique()}")
Example #5
    def start_training(self, config):
        if not self.training:
            self.train_config = config
            self.epochs_without_progress = 0
            self.msg_dir = self.train_config['message_dir']
            model_dir = self.train_config['model_dir']
            self.train_set = TrainDataset(self.train_config['train_annot_dir'],
                                          self.train_config['dataset_dir'],
                                          self.in_w, self.out_w)
            model_paths = model_utils.get_latest_model_paths(model_dir, 1)
            if model_paths:
                self.model = model_utils.load_model(model_paths[0])
            else:
                self.model = create_first_model_with_random_weights(model_dir)
            self.optimizer = torch.optim.SGD(self.model.parameters(),
                                             lr=0.01,
                                             momentum=0.99,
                                             nesterov=True)
            self.model.train()
            self.training = True
Example #6
def __main__():
    args = get_args_parser()
    dist.init_process_group(backend='nccl')
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    set_random_seed(args.random_seed + dist.get_rank())
    torch.cuda.set_device(torch.device('cuda:{}'.format(dist.get_rank())))
    dist_logger = DistributedLogger(args.name, args.output_base_path,
                                    args.master_rank, args.use_tensorboard)

    train_dataset = TrainDataset(args.dataset_root, args.dataset_year,
                                 (args.input_size_h, args.input_size_w),
                                 args.pooler_size)
    train_sampler = data.distributed.DistributedSampler(train_dataset)
    train_dataloader = data.DataLoader(train_dataset,
                                       batch_size=args.batch_size,
                                       num_workers=args.num_workers,
                                       sampler=train_sampler,
                                       pin_memory=True,
                                       drop_last=True)

    val_dataset = ValDataset(args.dataset_root, args.dataset_year,
                             (args.input_size_h, args.input_size_w))
    val_sampler = data.distributed.DistributedSampler(val_dataset)
    val_dataloader = data.DataLoader(val_dataset,
                                     batch_size=args.batch_size,
                                     num_workers=args.num_workers,
                                     pin_memory=True,
                                     sampler=val_sampler)

    model = BlendMask(len(COCO_CLASSES), args.fpn_channels,
                      args.bases_module_channels, args.num_bases,
                      args.atten_size, args.pooler_size).cuda()
    # model.load_state_dict(torch.load(f'./output/{args.name}/model/param.pth'))
    model = parallel.DistributedDataParallel(model,
                                             device_ids=[dist.get_rank()],
                                             find_unused_parameters=True)
    criterion = Criterion(args.focal_alpha, args.focal_gamma)

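    # Two parameter groups: bias parameters get their own scaled learning rate
    # and weight decay; all other parameters use the optimizer defaults.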
    optim_parameters = [{
        'params': [
            p for n, p in model.module.named_parameters()
            if not n.endswith('bias') and p.requires_grad
        ]
    }, {
        'params': [
            p for n, p in model.module.named_parameters()
            if n.endswith('bias') and p.requires_grad
        ],
        'lr': args.lr * args.bias_lr_mul,
        'weight_decay': args.weight_decay * args.bias_weight_decay_mul
    }]
    optimizer = optim.SGD(optim_parameters,
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    lr_lambda = utils.lr_lambda.get_warm_up_multi_step_lr_lambda(
        len(train_dataloader), args.warm_up_epoch, args.warm_up_ratio,
        args.milestones, args.step_gamma)
    lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)

    nms_cfg = {
        'nms_pre': args.nms_pre,
        'cls_score_thr': args.nms_cls_score_thr,
        'iou_thr': args.nms_iou_thr
    }

    for epoch_idx in range(args.epochs):
        train_sampler.set_epoch(epoch_idx)
        val_sampler.set_epoch(epoch_idx)

        engine.train_one_epoch(model, criterion, optimizer, lr_scheduler,
                               train_dataloader, epoch_idx, dist_logger)
Example #7
def train(args):

    # Device, save and log configuration

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    save_dir = Path(os.path.join(args.save_dir, args.name))
    save_dir.mkdir(exist_ok=True, parents=True)
    log_dir = Path(os.path.join(args.log_dir, args.name))
    log_dir.mkdir(exist_ok=True, parents=True)
    writer = SummaryWriter(log_dir=str(log_dir))

    # Prepare datasets

    content_dataset = TrainDataset(args.content_dir, args.img_size)
    texture_dataset = TrainDataset(args.texture_dir,
                                   args.img_size,
                                   gray_only=True)
    color_dataset = TrainDataset(args.color_dir, args.img_size)

    content_iter = iter(
        data.DataLoader(content_dataset,
                        batch_size=args.batch_size,
                        sampler=InfiniteSamplerWrapper(content_dataset),
                        num_workers=args.n_threads))
    texture_iter = iter(
        data.DataLoader(texture_dataset,
                        batch_size=args.batch_size,
                        sampler=InfiniteSamplerWrapper(texture_dataset),
                        num_workers=args.n_threads))
    color_iter = iter(
        data.DataLoader(color_dataset,
                        batch_size=args.batch_size,
                        sampler=InfiniteSamplerWrapper(color_dataset),
                        num_workers=args.n_threads))

    # Prepare network

    network = Net(args)
    network.train()
    network.to(device)

    # Training options

    opt_L = torch.optim.Adam(network.L_path.parameters(), lr=args.lr)
    opt_AB = torch.optim.Adam(network.AB_path.parameters(), lr=args.lr)

    opts = [opt_L, opt_AB]

    # Start Training

    for i in tqdm(range(args.max_iter)):
        # S1: Adjust lr and prepare data

        adjust_learning_rate(opts, iteration_count=i, args=args)

        content_l, content_ab = [x.to(device) for x in next(content_iter)]
        texture_l = next(texture_iter).to(device)
        color_l, color_ab = [x.to(device) for x in next(color_iter)]

        # S2: Forward

        l_pred, ab_pred = network(content_l, content_ab, texture_l, color_ab)

        # S3: Calculate loss

        loss_ct, loss_t = network.ct_t_loss(l_pred, content_l, texture_l)
        loss_cr = network.cr_loss(ab_pred, color_ab)

        loss_ctw = args.content_weight * loss_ct
        loss_tw = args.texture_weight * loss_t
        loss_crw = args.color_weight * loss_cr

        loss = loss_ctw + loss_tw + loss_crw

        # S4: Backward

        for opt in opts:
            opt.zero_grad()
        loss.backward()
        for opt in opts:
            opt.step()

        # S5: Summary loss and save subnets

        writer.add_scalar('loss_content', loss_ct.item(), i + 1)
        writer.add_scalar('loss_texture', loss_t.item(), i + 1)
        writer.add_scalar('loss_color', loss_cr.item(), i + 1)

        if (i + 1) % args.save_model_interval == 0 or (i + 1) == args.max_iter:
            state_dict = network.state_dict()
            for key in state_dict.keys():
                state_dict[key] = state_dict[key].to(torch.device('cpu'))
            torch.save(state_dict,
                       save_dir / 'network_iter_{:d}.pth.tar'.format(i + 1))
    writer.close()
Example #8

    ## Config
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    crop_size = 88
    upscale_factor = 4
    batch_size = 64
    nblocks = 3
    lr = 0.001
    betas = (0.99, 0.999)
    TRAIN_PATH = './compress_data/voc_train.pkl'
    VALID_PATH = './compress_data/voc_valid.pkl'
    
    
    ## Set up
    train_dataset = TrainDataset(TRAIN_PATH, crop_size=crop_size, upscale_factor=upscale_factor)
    valid_dataset = ValidDataset(VALID_PATH, crop_size=crop_size, upscale_factor=upscale_factor)

    trainloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=2)
    trainloader_v2 = DataLoader(train_dataset, batch_size=1, shuffle=True, num_workers=2)  # batch_size=1 loader used to compute score metrics
    validloader = DataLoader(valid_dataset, batch_size=1, shuffle=False, num_workers=2)



    generator = Generator(in_channels=3, n_residual_blocks=nblocks, up_scale=upscale_factor)
    discriminator = Discriminator()
    vggExtractor = VGGExtractor()
    optimizer_G = torch.optim.Adam(params=generator.parameters(), lr=lr, betas=betas)
    optimizer_D = torch.optim.Adam(params=discriminator.parameters(), lr=lr, betas=betas)
    
Example #9
def main():

  if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)

  np.random.seed(args.seed)
  torch.cuda.set_device(args.gpu)
  cudnn.benchmark = True
  torch.manual_seed(args.seed)
  cudnn.enabled=True
  torch.cuda.manual_seed(args.seed)
  logging.info('gpu device = %d' % args.gpu)
  logging.info("args = %s", args)

  criterion = nn.CrossEntropyLoss()
  criterion = criterion.cuda()
  model = Network(args.init_channels, CIFAR_CLASSES, args.layers, criterion, darts = args.darts, pc = args.pc)
  model = model.cuda()
  logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

  optimizer = torch.optim.SGD(
      model.parameters(),
      args.learning_rate,
      momentum=args.momentum,
      weight_decay=args.weight_decay)

  train_transform, valid_transform = utils._data_transforms_cifar10(args)
  if args.sample:
    train_data = TrainDataset(root = "./new_cifar.pth", transform = train_transform, sample = True)
  else:
    train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)

  num_train = len(train_data)
  indices = list(range(num_train))
  split = int(np.floor(args.train_portion * num_train))
  train_grow = int(np.floor(args.grow_portion * split))
  valid_grow = int(np.floor(args.grow_portion * (num_train - split)))

  train_indices = indices[:split]
  valid_indices = indices[split:]

  train_queue = torch.utils.data.DataLoader(
      train_data, batch_size=args.batch_size,
      sampler=torch.utils.data.sampler.SubsetRandomSampler(train_indices),
      pin_memory=True, num_workers=2)

  valid_queue = torch.utils.data.DataLoader(
      train_data, batch_size=args.batch_size,
      sampler=torch.utils.data.sampler.SubsetRandomSampler(valid_indices),
      pin_memory=True, num_workers=2)


  scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs), eta_min=args.learning_rate_min)



  architect = Architect(model, args)

  for epoch in range(args.epochs):
    lr = scheduler.get_last_lr()[0]
    logging.info('epoch %d lr %e', epoch, lr)

    genotype = model.genotype()
    logging.info('genotype = %s', genotype)

    if not args.darts:
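      # Mask pruned (zero-valued) alphas with -inf so softmax assigns them zero probability.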
      alphas_reduce = torch.where(model.alphas_reduce == 0,
                                  torch.FloatTensor([float("-inf")]).cuda(),
                                  model.alphas_reduce)
      alphas_normal = torch.where(model.alphas_normal == 0,
                                  torch.FloatTensor([float("-inf")]).cuda(),
                                  model.alphas_normal)
      logging.info(F.softmax(alphas_normal, dim=-1))
      logging.info(F.softmax(alphas_reduce, dim=-1))
    else:
      logging.info(F.softmax(model.alphas_normal, dim=-1))
      logging.info(F.softmax(model.alphas_reduce, dim=-1))


   # print("post grow")
   # alphas_reduce = torch.where(model.alphas_reduce==0,torch.FloatTensor([float("-inf")]).cuda(),model.alphas_reduce)
   # alphas_normal = torch.where(model.alphas_normal==0,torch.FloatTensor([float("-inf")]).cuda(),model.alphas_normal)
   # print(F.softmax(alphas_normal, dim=-1))
   # print(F.softmax(alphas_reduce, dim=-1))


    # training
    train_s = time.time()
    train_acc, train_obj = train(train_queue, valid_queue, model, architect, criterion, optimizer, lr, epoch)
    logging.info('train_acc %f', train_acc)
    train_e = time.time()
    t_record["train"] += train_e - train_s
    if not args.darts and epoch > args.init:
        architect.print_arch_grad()

    #scheduler update
    scheduler.step()
    #if architect.scheduler is not None:
    #  architect.scheduler.step()



    # validation
    if epoch > 47:
      valid_acc, valid_obj = infer(valid_queue, model, criterion)
      logging.info('valid_acc %f', valid_acc)



    if not args.darts and epoch % args.grow_freq == 0 and epoch < args.epochs-args.final and epoch >= args.init:
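      # Every grow_freq epochs (after warm-up, before the final phase), grow the
      # architecture using random subsets of the train and valid indices.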
      train_indices_grow = np.random.choice(train_indices, train_grow, replace = False)
      valid_indices_grow = np.random.choice(valid_indices, valid_grow, replace = False)

      train_grow_queue = torch.utils.data.DataLoader(
          train_data, batch_size=args.grow_batch_size,
          sampler=torch.utils.data.sampler.SubsetRandomSampler(train_indices_grow),
          pin_memory=True, num_workers=2)

      valid_grow_queue = torch.utils.data.DataLoader(
          train_data, batch_size=args.grow_batch_size,
          sampler=torch.utils.data.sampler.SubsetRandomSampler(valid_indices_grow),
          pin_memory=True, num_workers=2)

      grow_s = time.time()
      grow(train_grow_queue, valid_grow_queue, model, architect, criterion, optimizer, lr, args.num_grow)
      grow_e = time.time()
      t_record["grow"] += grow_e - grow_s
      for param_group in optimizer.param_groups:
        param_group["lr"] = args.learning_rate_middle
        param_group["initial_lr"] = args.learning_rate_middle
      optimizer.defaults["lr"] = args.learning_rate_middle
      scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, args.grow_freq, eta_min=args.learning_rate_stable)

    if not args.darts and epoch == args.epochs-args.final:
      for param_group in optimizer.param_groups:
        param_group["lr"] = args.learning_rate_stable
        param_group["initial_lr"] = args.learning_rate_stable
      optimizer.defaults["lr"]=args.learning_rate_stable
      #scheduler = None
      #scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
      #    optimizer, 10.0, eta_min=args.learning_rate_min)
      scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
                 optimizer, args.final, eta_min=args.learning_rate_min)


      #scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
      #    optimizer, 10.0, eta_min=args.learning_rate_min)

      #for param_group in architect.optimizer.param_groups:
      #  param_group["lr"] = architect.lr
      #architect.scheduler = torch.optim.lr_scheduler.StepLR(architect.optimizer, step_size = 1, gamma=0.9)

    torch.save(t_record, "time_record.pt")

    utils.save(model, os.path.join(args.save, 'weights.pt'))
  logging.info("total train: %f", t_record["train"])
  logging.info("total grow: %f", t_record["grow"])
  logging.info("total grow search: %f", t_record["grow_search"])
Example #10
from pathlib import Path

import h5py
import torch
from torch import nn
from torch.utils.data import DataLoader
from tqdm import tqdm

from datasets import TrainDataset, TestDataset
from model import FSRCNN
from config import model_settings, batch_size, learning_rate, epochs

device = "cuda" if torch.cuda.is_available() else "cpu"

with h5py.File("datasets/General-100.h5", "r") as f:

    outdir = Path("out")
    outdir.mkdir(exist_ok=True)

    # Create data loaders.
    train_dataloader = DataLoader(
        TrainDataset(f["train"]), batch_size=batch_size, shuffle=True)
    test_dataloader = DataLoader(TestDataset(f["test"]), batch_size=1)

    # Create the model
    model = FSRCNN(**model_settings).to(device)

    loss_fn = nn.MSELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

    def train(dataloader, model, loss_fn, optimizer):
        size = len(dataloader.dataset)
        for batch, (X, y) in enumerate(tqdm(dataloader, total=size // batch_size)):
            X, y = X.to(device), y.to(device)

            # Compute prediction error
            pred = model(X)
Example #11

    # Assumed: opt.weights_file, analogous to args.weights_file in Example #17.
    if opt.weights_file is not None:
        state_dict = model.state_dict()
        for n, p in torch.load(opt.weights_file,
                               map_location=lambda storage, loc: storage).items():
            if n in state_dict.keys():
                state_dict[n].copy_(p)
            else:
                raise KeyError(n)

    optimizer = optim.Adam(model.parameters(), lr=opt.lr)
    # my_lr_scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=4500/opt.batch_size*opt.num_epochs,eta_min=0.000001)

    my_lr_scheduler = lr_scheduler.StepLR(
        optimizer, step_size=int(4000 / opt.batch_size * 200), gamma=0.5)

    train_dataset = TrainDataset(opt.train_file,
                                 patch_size=opt.patch_size,
                                 scale=opt.scale)
    train_dataloader = DataLoader(dataset=train_dataset,
                                  batch_size=opt.batch_size,
                                  shuffle=True,
                                  num_workers=opt.num_workers,
                                  pin_memory=True)
    eval_dataset = EvalDataset(opt.eval_file)
    eval_dataloader = DataLoader(dataset=eval_dataset, batch_size=1)

    best_weights = copy.deepcopy(model.state_dict())
    best_epoch = 0
    best_psnr = 0.0

    for epoch in range(opt.num_epochs):
        for param_group in optimizer.param_groups:
Example #12
def main():
    # Load dataset
    print('Loading dataset ...\n')
    #dataset_train = Dataset(train=True)
    #dataset_val = Dataset(train=False)
    dataset_train = TrainDataset('train_DIV_new.h5', opt.patch_size,
                                 int(opt.upscale_factor[0]))
    dataset_val = EvalDataset('test_DIV.h5')
    loader_train = DataLoader(dataset=dataset_train,
                              num_workers=1,
                              batch_size=opt.batchSize,
                              shuffle=True)
    loader_val = DataLoader(dataset=dataset_val, batch_size=1)
    print("# of training samples: %d\n" % int(len(dataset_train)))
    # Build model
    netG = Generator_RDN(opt)
    print('# generator parameters:',
          sum(param.numel() for param in netG.parameters()))
    netD = Discriminator()
    print('# discriminator parameters:',
          sum(param.numel() for param in netD.parameters()))
    #    net.apply(weights_init_kaiming)
    #    content_criterion = nn.MSELoss()
    #    feature_extractor = FeatureExtractor(torchvision.models.vgg19(pretrained=True))
    content_criterion = nn.L1Loss()
    adversarial_criterion = nn.BCELoss()

    ones_const = Variable(torch.ones(opt.batchSize, 1))
    # Move to GPU
    if torch.cuda.is_available():
        netG.cuda()
        netD.cuda()
        content_criterion.cuda()
        adversarial_criterion.cuda()
        ones_const = ones_const.cuda()


#        feature_extractor.cuda()

    optim_rdn = optim.Adam(netG.parameters(), lr=1e-4)
    # Optimizer
    # training
    writer = SummaryWriter(opt.outf)
    step = 0
    #    noiseL_B=[0,55] # ingnored when opt.mode=='S'

    # Generator Pretraining(Using MSE Loss)
    for epoch in range(10):
        mean_generator_content_loss = 0.0
        for i, (lrimg, hrimg) in enumerate(loader_train):
            # adding noise
            #print(lrimg[-1].shape)
            #print(hrimg[-1].shape)
            #cv2.imshow('win1',lrimg[-1].detach().numpy().transpose((1,2,0)))
            #cv2.imshow('win2',hrimg[-1].detach().numpy().transpose((1,2,0)))
            #cv2.waitKey(0)
            for j in range(opt.batchSize):
                noise = torch.FloatTensor(lrimg[j].size()).normal_(
                    mean=0.0, std=opt.noiseL / 255.)
                #lrimg[j] = lrimg[j] + noise
                lrimg[j] = lrimg[j]
            # Generate real and fake inputs
            if opt.cuda:
                high_res_real = Variable(hrimg.cuda())
                high_res_fake = netG(Variable(lrimg).cuda())
            else:
                high_res_real = Variable(hrimg)
                high_res_fake = netG(Variable(lrimg))

            ######### Train generator #########
            netG.zero_grad()

            generator_content_loss = content_criterion(high_res_fake,
                                                       high_res_real)
            mean_generator_content_loss += generator_content_loss.data

            generator_content_loss.backward()
            optim_rdn.step()

            ######### Status and display #########
            #  sys.stdout.write('\r[%d/%d][%d/%d] Generator_MSE_Loss: %.4f' % (epoch, 2, i, len(loader_train), generator_content_loss.data))
            #  visualizer.show(low_res, high_res_real.cpu().data, high_res_fake.cpu().data)
            out_train = torch.clamp(high_res_fake, 0., 1.)
            psnr_train, ssim_train = batch_PSNR(out_train,
                                                high_res_real,
                                                scale=3.0,
                                                data_range=1.)
            if step % 10 == 0:
                # Log the scalar values
                writer.add_scalar('generator_content_loss',
                                  generator_content_loss.item(), step)
                writer.add_scalar('PSNR on training data', psnr_train, step)
            step += 1
        #  sys.stdout.write('\r[%d/%d][%d/%d] PSNR: %.4f, SSIM:%.4f' % (epoch, 2, i, len(loader_train), psnr_train, ssim_train))

        sys.stdout.write('\r[%d/%d][%d/%d] Generator_MSE_Loss: %.4f\n' %
                         (epoch, 10, i, len(loader_train),
                          mean_generator_content_loss / len(loader_train)))
        #log_value('generator_mse_loss', mean_generator_content_loss/len(dataloader), epoch)

    # Do checkpointing
    torch.save(netG.state_dict(), '%s/model/generator_pretrain.pth' % opt.outf)

    #SRGAN-RDN training
    optim_generator = optim.Adam(netG.parameters(), lr=opt.generatorLR)
    optim_discriminator = optim.Adam(netD.parameters(), lr=opt.discriminatorLR)
    scheduler_dis = torch.optim.lr_scheduler.StepLR(optim_discriminator, 50,
                                                    0.1)
    scheduler_gen = torch.optim.lr_scheduler.StepLR(optim_generator, 50, 0.1)
    print('SRGAN training')
    step_new = 0
    for epoch in range(opt.epochs):
        mean_generator_content_loss = 0.0
        mean_generator_adversarial_loss = 0.0
        mean_generator_total_loss = 0.0
        mean_discriminator_loss = 0.0

        netG.train()
        scheduler_gen.step()
        scheduler_dis.step()
        for i, (lrimg, hrimg) in enumerate(loader_train):

            #print(lrimg[-1].shape)
            #print(hrimg[-1].shape)
            #cv2.imshow('win1',lrimg[-1].detach().numpy().transpose((1,2,0)))
            #cv2.imshow('win2',hrimg[-1].detach().numpy().transpose((1,2,0)))
            #cv2.waitKey(0)

            for j in range(opt.batchSize):
                noise = torch.FloatTensor(lrimg[j].size()).normal_(
                    mean=0, std=opt.noiseL / 255.)
                #lrimg[j] = lrimg[j] + noise
                lrimg[j] = lrimg[j]

            # Generate real and fake inputs
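            # Noisy labels: real targets drawn from [0.7, 1.2), fake targets from [0, 0.3).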
            if opt.cuda:
                high_res_real = Variable(hrimg.cuda())
                high_res_fake = netG(Variable(lrimg).cuda())
                target_real = Variable(
                    torch.rand(opt.batchSize, 1) * 0.5 + 0.7).cuda()
                target_fake = Variable(torch.rand(opt.batchSize, 1) *
                                       0.3).cuda()
            else:
                high_res_real = Variable(hrimg)
                high_res_fake = netG(Variable(lrimg))
                target_real = Variable(
                    torch.rand(opt.batchSize, 1) * 0.5 + 0.7)
                target_fake = Variable(torch.rand(opt.batchSize, 1) * 0.3)

            ######### Train discriminator #########
            netD.zero_grad()

            discriminator_loss = adversarial_criterion(netD(high_res_real), target_real) + \
                                 adversarial_criterion(netD(Variable(high_res_fake.data)), target_fake)
            mean_discriminator_loss += discriminator_loss.data

            discriminator_loss.backward()
            optim_discriminator.step()

            ######### Train generator #########
            netG.zero_grad()

            #real_features = Variable(feature_extractor(high_res_real).data)
            #fake_features = feature_extractor(high_res_fake)

            generator_content_loss = content_criterion(
                high_res_fake, high_res_real
            )  #+ 0.006*content_criterion(fake_features, real_features)
            mean_generator_content_loss += generator_content_loss.data
            generator_adversarial_loss = adversarial_criterion(
                netD(high_res_fake), ones_const)
            mean_generator_adversarial_loss += generator_adversarial_loss.data

            generator_total_loss = generator_content_loss + 1e-3 * generator_adversarial_loss
            mean_generator_total_loss += generator_total_loss.data

            generator_total_loss.backward()
            optim_generator.step()

            ######### Status and display #########
            sys.stdout.write(
                '\r[%d/%d][%d/%d] Discriminator_Loss: %.4f Generator_Loss (Content/Advers/Total): %.4f/%.4f/%.4f'
                % (epoch, opt.epochs, i, len(loader_train),
                   discriminator_loss.data, generator_content_loss.data,
                   generator_adversarial_loss.data, generator_total_loss.data))
            #            visualizer.show(low_res, high_res_real.cpu().data, high_res_fake.cpu().data)
            out_train = torch.clamp(high_res_fake, 0., 1.)
            psnr_train, ssim_train = batch_PSNR(out_train,
                                                high_res_real,
                                                scale=3.0,
                                                data_range=1.)
            if step_new % 10 == 0:
                # Log the scalar values
                writer.add_scalar('generator_content_loss',
                                  generator_content_loss.item(), step)
                writer.add_scalar('PSNR on training data', psnr_train, step)
                writer.add_scalar('discriminator_loss',
                                  discriminator_loss.data, step_new)
                writer.add_scalar('generator_adversarial_loss',
                                  generator_adversarial_loss.item(), step_new)
                writer.add_scalar('generator_total_loss', generator_total_loss,
                                  step_new)

            step += 1
            step_new += 1
        sys.stdout.write(
            '\r[%d/%d][%d/%d] Discriminator_Loss: %.4f Generator_Loss (Content/Advers/Total): %.4f/%.4f/%.4f\n'
            %
            (epoch, opt.epochs, i, len(loader_train), mean_discriminator_loss /
             len(loader_train), mean_generator_content_loss /
             len(loader_train), mean_generator_adversarial_loss /
             len(loader_train), mean_generator_total_loss / len(loader_train)))

        # Do checkpointing
        torch.save(netG.state_dict(),
                   '%s/model/generator_final_%d.pth' % (opt.outf, epoch))
        torch.save(netD.state_dict(),
                   '%s/model/discriminator_final%d.pth' % (opt.outf, epoch))

        ## the end of each epoch
        netG.eval()
        # validate
        psnr_val = 0
        ssim_val = 0.0
        val_images = []
        num = 0
        numofex = opt.noiseL

        for index, (lrimg_val, hrimg_val) in enumerate(loader_val):
            #lrimg_val, hrimg_val = dataset_val[k]
            noise = torch.FloatTensor(lrimg_val.size()).normal_(
                mean=0, std=opt.val_noiseL / 255.)
            #lrimgn_val = lrimg_val + noise
            lrimgn_val = lrimg_val
            #lrimgn_val = torch.Tensor(np.expand_dims(lrimgn_val, axis=0))
            #hrimg_val = torch.Tensor(np.expand_dims(hrimg_val, axis=0))
            #lrimg_val = lrimg_val + noise
            hrimg_val, lrimgn_val = Variable(
                hrimg_val.cuda(), volatile=True), Variable(lrimgn_val.cuda(),
                                                           volatile=True)
            #print(lrimgn_val[-1].shape)
            #print(hrimg_val[-1].shape)
            #cv2.imshow('win1', lrimgn_val[-1].detach().cpu().numpy().transpose((1,2,0)))
            #cv2.imshow('win2', hrimg_val[-1].detach().cpu().numpy().transpose((1,2,0)))
            #cv2.waitKey(0)

            out_val = netG(lrimgn_val)
            psnr_val_e, ssim_val_e = batch_PSNR(out_val,
                                                hrimg_val,
                                                scale=3.0,
                                                data_range=1.)
            psnr_val += psnr_val_e
            ssim_val += ssim_val_e
            hrimg_val = np.transpose(hrimg_val[0].detach().cpu().numpy(),
                                     (1, 2, 0))
            out_val = np.transpose(out_val[0].detach().cpu().numpy(),
                                   (1, 2, 0))

            if num < 5:
                num += 1
                #                hrimg_val = hrimg_val[int(hrimg_val.shape[0] / 2) - 160:int(hrimg_val.shape[0] / 2) + 160,
                #                    int(hrimg_val.shape[1] / 2) - 160:int(hrimg_val.shape[1] / 2) + 160]
                #                out_val = out_val[int(out_val.shape[0] / 2) - 160:int(out_val.shape[0] / 2) + 160,
                #                    int(out_val.shape[1] / 2) - 160:int(out_val.shape[1] / 2) + 160]
                val_images.extend([hrimg_val, out_val])

        output_image = make_grid(val_images, nrow=2, nline=1)
        if not os.path.exists('%s/training_results/%d/' % (opt.outf, numofex)):
            os.makedirs('%s/training_results/%d/' % (opt.outf, numofex))
        save_result(output_image,
                    path='%s/training_results/%d/epoch%d.png' %
                    (opt.outf, numofex, epoch))

        psnr_val /= len(dataset_val)
        ssim_val /= len(dataset_val)
        print("\n[epoch %d] PSNR_val: %.4f" % (epoch + 1, psnr_val))
        print("\n[epoch %d] SSIM_val: %.4f" % (epoch + 1, ssim_val))
        writer.add_scalar('PSNR on validation data', psnr_val, epoch)
        writer.add_scalar('SSIM on validation data', ssim_val, epoch)
Example #13
    },
    {
        'params': model.conv2.parameters(),
        'lr': 1e-4
    },
    {
        'params': model.conv3.parameters(),
        'lr': 1e-5
    },
],
                      lr=1e-4,
                      momentum=0.9)  #momentum not specified

########## Dataset ###########

train_dataset = TrainDataset(train_dir)
train_dataloader = DataLoader(dataset=train_dataset,
                              batch_size=batch,
                              shuffle=True,
                              num_workers=10,
                              pin_memory=True)
val_dataset = TrainDataset(val_dir)
val_dataloader = DataLoader(
    dataset=val_dataset,
    batch_size=batch_eval,
    shuffle=False,
    num_workers=10,
)

dataloaders = {'train': train_dataloader, 'validation': val_dataloader}
Example #14
import h5py
import numpy as np
from torch.utils.data import Dataset
from torch.utils.data.dataloader import DataLoader
from datasets import TrainDataset, EvalDataset
import imageio
from random import randrange

train_dataset = TrainDataset("output/train_holopix50k_small.h5")
train_dataloader = DataLoader(dataset=train_dataset,
                              batch_size=40,
                              shuffle=True,
                              num_workers=10,
                              pin_memory=True)

t = (next(iter(train_dataloader)))
crop_list = []
for k in range(0, 40):
    inputt, label = t

    crop_list.append(label[k, :, :, :])

for i in crop_list:
    jj = i.detach().cpu().numpy()
    jj = jj.squeeze(0)
    jj = jj.transpose()
    imageio.imwrite("img_crops/random_crop_{}.png".format(randrange(1000)), jj)
Example #15
def main():
    # Load dataset
    print('Loading dataset ...\n')
    #dataset_train = TrainDataset(1000, opt.batchSize, train=True)
    #dataset_val = ValDataset(train=False)
    dataset_train = TrainDataset('train_BSD500.h5', opt.patch_size, int(opt.upscale_factor[0]))
    print(len(dataset_train))
    dataset_val = EvalDataset('test_BSD500.h5')
    loader_train = DataLoader(dataset=dataset_train, num_workers=1, batch_size=opt.batchSize, shuffle=True)
    loader_val = DataLoader(dataset=dataset_val, batch_size=1)
    print("# of training samples: %d\n" % int(len(dataset_train)))
    # Build model
    netG = make_model(opt)
    print('# generator parameters:', sum(param.numel() for param in netG.parameters()))
    #netG.apply(weights_init_kaiming)
#    content_criterion = nn.MSELoss()
    #feature_extractor = FeatureExtractor(torchvision.models.vgg19(pretrained=True))
    content_criterion = nn.L1Loss()
    
    # Move to GPU
    if torch.cuda.is_available():
        netG.cuda()
        content_criterion.cuda()
    
    optim_rdn = optim.Adam(netG.parameters(), lr=opt.generatorLR)
    # Optimizer
    # training
    writer = SummaryWriter(opt.outf)
    step = 0
#    noiseL_B=[0,55] # ingnored when opt.mode=='S'
    
    # Generator Pretraining(Using MSE Loss)
    for epoch in range(opt.epochs):
        mean_generator_content_loss = 0.0
        mean_generator_PSNRs = 0.0
        mean_generator_SSIMs = 0.0
        for param_group in optim_rdn.param_groups:
            param_group['lr'] = opt.generatorLR * (0.1 ** (epoch // int(opt.epochs * 0.8)))
        for i, (lrimg, hrimg) in enumerate(loader_train):
            # adding noise
            for j in range(opt.batchSize):
                #noise = torch.FloatTensor(lrimg[j].size()).normal_(mean=0.0, std=opt.noiseL/255.)
                #lrimg[j] = lrimg[j] + noise
                lrimg[j] = lrimg[j] 
            # Generate real and fake inputs
            if opt.cuda:
                high_res_real = Variable(hrimg.cuda())
                high_res_fake = netG(Variable(lrimg).cuda())
            else:
                high_res_real = Variable(hrimg)
                high_res_fake = netG(Variable(lrimg))
    
            ######### Train generator #########
            netG.zero_grad()
    
            generator_content_loss = content_criterion(high_res_fake, high_res_real)
            mean_generator_content_loss += generator_content_loss.data
    
            generator_content_loss.backward()
            optim_rdn.step()

            ######### Status and display #########
            sys.stdout.write('\r[%d/%d][%d/%d] Generator_MSE_Loss: %.4f' %
                             (epoch, opt.epochs, i, len(loader_train),
                              generator_content_loss.data))
          #  visualizer.show(low_res, high_res_real.cpu().data, high_res_fake.cpu().data)
            out_train = torch.clamp(high_res_fake, 0., 1.)
            psnr_train, ssim_train = batch_PSNR(out_train, high_res_real, scale=3, data_range=255.0)
            mean_generator_PSNRs += psnr_train
            mean_generator_SSIMs += ssim_train

            if step % 10 == 0:
                # Log the scalar values
                writer.add_scalar('generator_content_loss', generator_content_loss.item(), step)
                #writer.add_scalar('PSNR on training data', psnr_train, step)
                #writer.add_scalar('SSIM on training data', ssim_train, step)
            step += 1   
          #  sys.stdout.write('\r[%d/%d][%d/%d] PSNR: %.4f, SSIM:%.4f' % (epoch, 2, i, len(loader_train), psnr_train, ssim_train))
    
        psnr_avg_train = mean_generator_PSNRs / len(loader_train)
        ssim_avg_train = mean_generator_SSIMs / len(loader_train)
        sys.stdout.write('\r[%d/%d][%d/%d] Generator_MSE_Loss: %.4f\n' %
                         (epoch, opt.epochs, i, len(loader_train),
                          mean_generator_content_loss / len(loader_train)))
        print("\n[epoch %d] PSNR_train: %.4f" % (epoch + 1, psnr_avg_train))
        print("\n[epoch %d] SSIM_train: %.4f" % (epoch + 1, ssim_avg_train))
        writer.add_scalar('PSNR on training data', psnr_avg_train, epoch)
        writer.add_scalar('SSIM on training data', ssim_avg_train, epoch)
        # log_value('generator_mse_loss', mean_generator_content_loss / len(dataloader), epoch)
        torch.save(netG.state_dict(),
                   '%s/model/rdn_final_%d.pth' % (opt.outf, epoch))

        ## the end of each epoch
        # netG.eval()
        # validate
        psnr_val = 0
        ssim_val = 0.0
        val_images = []
        num = 0
        numofex = opt.noiseL

        for index, (lrimg_val, hrimg_val) in enumerate(loader_val):
            #lrimg_val, hrimg_val = dataset_val[k]
            #noise = torch.FloatTensor(lrimg_val.size()).normal_(mean=0, std=opt.val_noiseL/255.)
            #lrimgn_val = lrimg_val + noise
            lrimgn_val = lrimg_val 
            #lrimgn_val = torch.Tensor(np.expand_dims(lrimgn_val, axis=0))
            #hrimg_val = torch.Tensor(np.expand_dims(hrimg_val, axis=0))
            hrimg_val, lrimg_val = Variable(hrimg_val.cuda(), volatile=True), Variable(lrimgn_val.cuda(), volatile=True)
            out_val = netG(lrimg_val)
            psnr_val_e, ssim_val_e = batch_PSNR(out_val, hrimg_val, scale=3, data_range=255.0)
            psnr_val += psnr_val_e
            ssim_val += ssim_val_e
            hrimg_val = np.transpose(hrimg_val[0].detach().cpu().numpy(), (1,2,0))
            out_val = np.transpose(out_val[0].detach().cpu().numpy(),(1,2,0))
            
            if num < 5:
                num += 1
                # hrimg_val = hrimg_val[int(hrimg_val.shape[0] / 2) - 160:int(hrimg_val.shape[0] / 2) + 160,
                #     int(hrimg_val.shape[1] / 2) - 160:int(hrimg_val.shape[1] / 2) + 160]
                # out_val = out_val[int(out_val.shape[0] / 2) - 160:int(out_val.shape[0] / 2) + 160,
                #     int(out_val.shape[1] / 2) - 160:int(out_val.shape[1] / 2) + 160]
                val_images.extend([hrimg_val, out_val])

        output_image = make_grid(val_images, nrow=2, nline=1)
        if not os.path.exists('%s/training_results/%d/' % (opt.outf, numofex)):
            os.makedirs('%s/training_results/%d/' % (opt.outf, numofex))
        save_result(output_image,
                    path='%s/training_results/%d/epoch%d.png' %
                    (opt.outf, numofex, epoch))

        psnr_val /= len(dataset_val)
        ssim_val /= len(dataset_val)
        print("\n[epoch %d] PSNR_val: %.4f" % (epoch+1, psnr_val))
        print("\n[epoch %d] SSIM_val: %.4f" % (epoch+1, ssim_val))
        writer.add_scalar('PSNR on validation data', psnr_val, epoch)
        writer.add_scalar('SSIM on validation data', ssim_val, epoch)
Example #16
    cudnn.benchmark = True
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    torch.manual_seed(args.seed)

    model = ESPCN(scale_factor=args.scale).to(device)
    criterion = nn.MSELoss()
    optimizer = optim.Adam([{
        'params': model.first_part.parameters()
    }, {
        'params': model.last_part.parameters(),
        'lr': args.lr * 0.1
    }],
                           lr=args.lr)

    train_dataset = TrainDataset(args.train_file)
    train_dataloader = DataLoader(dataset=train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.num_workers,
                                  pin_memory=True)
    eval_dataset = EvalDataset(args.eval_file)
    eval_dataloader = DataLoader(dataset=eval_dataset, batch_size=1)

    best_weights = copy.deepcopy(model.state_dict())
    best_epoch = 0
    best_psnr = 0.0

    for epoch in range(args.num_epochs):
        for param_group in optimizer.param_groups:
            param_group['lr'] = args.lr * (0.1**(epoch //
Example #17
                growth_rate=args.growth_rate,
                num_blocks=args.num_blocks,
                num_layers=args.num_layers).to(device)

    if args.weights_file is not None:
        state_dict = model.state_dict()
        for n, p in torch.load(args.weights_file, map_location=lambda storage, loc: storage).items():
            if n in state_dict.keys():
                state_dict[n].copy_(p)
            else:
                raise KeyError(n)

    criterion = nn.L1Loss()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    train_dataset = TrainDataset(args.train_file, patch_size=args.patch_size, scale=args.scale)
    train_dataloader = DataLoader(dataset=train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.num_workers,
                                  pin_memory=True)
    eval_dataset = EvalDataset(args.eval_file)
    eval_dataloader = DataLoader(dataset=eval_dataset, batch_size=1)

    best_weights = copy.deepcopy(model.state_dict())
    best_epoch = 0
    best_psnr = 0.0

    for epoch in range(args.num_epochs):
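        # Step decay: scale the base learning rate by 0.1 once the epoch passes 80% of num_epochs.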
        for param_group in optimizer.param_groups:
            param_group['lr'] = args.lr * (0.1 ** (epoch // int(args.num_epochs * 0.8)))