Example #1
                    default=123,
                    help='random seed to use. Default=123')
opt = parser.parse_args()

print(opt)

cuda = opt.cuda
if cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run without --cuda")

torch.manual_seed(opt.seed)
if cuda:
    torch.cuda.manual_seed(opt.seed)

print('===> Loading datasets')
train_set = get_training_set(opt.upscale_factor)
test_set = get_test_set(opt.upscale_factor)
training_data_loader = DataLoader(dataset=train_set,
                                  num_workers=opt.threads,
                                  batch_size=opt.batchSize,
                                  shuffle=True)
testing_data_loader = DataLoader(dataset=test_set,
                                 num_workers=opt.threads,
                                 batch_size=opt.testBatchSize,
                                 shuffle=False)

print('===> Building model')
model = Net(upscale_factor=opt.upscale_factor)
criterion = nn.MSELoss()

if cuda:
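    # (snippet cut off here; a plausible continuation, matching the upstream
    #  pytorch/examples super-resolution script this appears to follow)
    model = model.cuda()
    criterion = criterion.cuda()

optimizer = optim.Adam(model.parameters(), lr=opt.lr)  # opt.lr assumed to come from the parser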
Example #2
                    help='root of DataSet')
opt = parser.parse_args()

print(opt)

cuda = opt.cuda
if cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run without --cuda")

torch.manual_seed(opt.seed)
if cuda:
    torch.cuda.manual_seed(opt.seed)
device = torch.device("cuda" if opt.cuda else "cpu")

print('===> Loading datasets')
train_set = get_training_set(opt.dataset)
val_set = get_val_set(opt.dataset)
training_data_loader = DataLoader(dataset=train_set,
                                  batch_size=opt.batchSize,
                                  shuffle=True)
val_data_loader = DataLoader(dataset=val_set,
                             batch_size=opt.valBatchSize,
                             shuffle=False)

print('===> Building model')
model = LapSRN().to(device)
Loss = Loss()  # note: this rebinds the class name Loss to an instance
criterion = nn.MSELoss()
if cuda:
    Loss = Loss.cuda()
    criterion = criterion.cuda()
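An equivalent, device-agnostic form of the cuda branching above, using the device object already created (a sketch; it assumes Loss is an nn.Module subclass):

loss_fn = Loss().to(device)          # also avoids rebinding the class name
criterion = nn.MSELoss().to(device)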
Example #3
image_size = 256  # size of the whole input image

print(opt)

if opt.cuda and not torch.cuda.is_available():
  raise Exception("No GPU found, please run without --cuda")

cudnn.benchmark = True

torch.manual_seed(opt.seed)
if opt.cuda:
  torch.cuda.manual_seed(opt.seed)

print('===> Loading datasets')
root_path            = "dataset/"
train_set            = get_training_set(root_path + opt.dataset)
test_set             = get_test_set(root_path + opt.dataset)
training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=False)
testing_data_loader  = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=opt.testBatchSize, shuffle=False)

max_dataset_num = 1500  # number of dataset samples to use

train_set.image_filenames = train_set.image_filenames[:max_dataset_num]
test_set.image_filenames = test_set.image_filenames[:max_dataset_num]

print('===> Building model')

# first, load the pretrained Generator
netG = torch.load(opt.G_model)

disc_input_nc = 4
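torch.load(opt.G_model) unpickles the whole Generator and will fail on a CPU-only machine if the checkpoint was written on a GPU. A map_location sketch (the filename is a placeholder, not the snippet's):

import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
netG = torch.load("generator.pth", map_location=device)  # placeholder for opt.G_model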
Example #4
    def __init__(self, args):
        # parameters
        self.epoch = args.epoch
        self.sample_num = 64
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.log_dir = args.log_dir
        self.gpu_mode = args.gpu_mode
        self.model_name = args.gan_type

        # EBGAN parameters
        self.pt_loss_weight = 0.1
        self.margin = max(1, self.batch_size / 64.)  # margin for loss function
        # usually margin of 1 is enough, but for large batch size it must be larger than 1

        # networks init
        self.G = generator(self.dataset)
        self.D = discriminator(self.dataset)
        self.G_optimizer = optim.Adam(self.G.parameters(),
                                      lr=args.lrG,
                                      betas=(args.beta1, args.beta2))
        self.D_optimizer = optim.Adam(self.D.parameters(),
                                      lr=args.lrD,
                                      betas=(args.beta1, args.beta2))

        if self.gpu_mode:
            self.G.cuda()
            self.D.cuda()
            self.MSE_loss = nn.MSELoss().cuda()
        else:
            self.MSE_loss = nn.MSELoss()

        print('---------- Networks architecture -------------')
        utils.print_network(self.G)
        utils.print_network(self.D)
        print('-----------------------------------------------')

        # load dataset
        if self.dataset == 'mnist':
            self.data_loader = DataLoader(datasets.MNIST(
                'data/mnist',
                train=True,
                download=True,
                transform=transforms.Compose([transforms.ToTensor()])),
                                          batch_size=self.batch_size,
                                          shuffle=True)
        elif self.dataset == 'fashion-mnist':
            self.data_loader = DataLoader(datasets.FashionMNIST(
                'data/fashion-mnist',
                train=True,
                download=True,
                transform=transforms.Compose([transforms.ToTensor()])),
                                          batch_size=self.batch_size,
                                          shuffle=True)
        elif self.dataset == 'celebA':

            train_set = get_training_set(
                '/home/tmp_data_dir/zhaoyu/CelebA/img_align_celeba/',
                '/home/tmp_data_dir/zhaoyu/CelebA/img_align_celeba/')
            # train_set = get_training_set('/home/xujinchang/pytorch-CycleGAN-and-pix2pix/datasets/celeA_part/train/', '/home/xujinchang/pytorch-CycleGAN-and-pix2pix/datasets/celeA_part/train/')
            self.data_loader = DataLoader(dataset=train_set,
                                          batch_size=self.batch_size,
                                          shuffle=True)
            # self.data_loader = utils.load_celebA('data/celebA', transform=transforms.Compose(
            #     [transforms.CenterCrop(160), transforms.Scale(64), transforms.ToTensor()]), batch_size=self.batch_size,
            #                                      shuffle=True)
        self.z_dim = 62

        # fixed noise (volatile=True is the pre-0.4 way to disable autograd; torch.no_grad() replaces it)
        if self.gpu_mode:
            self.sample_z_ = Variable(torch.rand(
                (self.batch_size, self.z_dim)).cuda(),
                                      volatile=True)
        else:
            self.sample_z_ = Variable(torch.rand(
                (self.batch_size, self.z_dim)),
                                      volatile=True)
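The pt_loss_weight and margin set in __init__ feed EBGAN's energy objective, but the repulsive "pulling-away" term itself is not part of this excerpt. A minimal sketch of the standard formulation (the function name and its placement here are assumptions, not the snippet's own code):

import torch

def pullaway_loss(emb: torch.Tensor) -> torch.Tensor:
    # mean squared cosine similarity over distinct pairs in the batch;
    # the diagonal (self-similarity) sums to n and is subtracted out
    n = emb.size(0)
    normed = emb / emb.norm(dim=1, keepdim=True)
    sim = normed @ normed.t()
    return (sim.pow(2).sum() - n) / (n * (n - 1))

print(pullaway_loss(torch.rand(8, 16)).item())  # quick smoke test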
Example #5
def main():

	print(f"epoch: {opt.niter+opt.niter_decay}")
	print(f"cuda: {opt.cuda}")
	print(f"dataset: {opt.dataset}")
	print(f"output: {opt.output_path}")

	if opt.cuda and not torch.cuda.is_available():
		raise Exception("No GPU found, please run without --cuda")

	cudnn.benchmark = True

	torch.manual_seed(opt.seed)
	if opt.cuda:
		torch.cuda.manual_seed(opt.seed)

	print('Loading datasets')
	train_set = get_training_set(root_path + opt.dataset, opt.direction)
	test_set = get_test_set(root_path + opt.dataset, opt.direction)

	training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batch_size, shuffle=True)
	testing_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=opt.test_batch_size, shuffle=False)

	device = torch.device("cuda:0" if opt.cuda else "cpu")

	print('Building models')
	net_g = define_G(opt.input_nc, opt.output_nc, opt.g_ch, len(class_name_array), 'batch', False, 'normal', 0.02, gpu_id=device)
	net_d = define_D(opt.input_nc + opt.output_nc, opt.d_ch, len(class_name_array), 'basic', gpu_id=device)

	criterionGAN = GANLoss().to(device)
	criterionL1 = nn.L1Loss().to(device)
	criterionMSE = nn.MSELoss().to(device)

	# setup optimizer
	optimizer_g = optim.Adam(net_g.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
	optimizer_d = optim.Adam(net_d.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
	net_g_scheduler = get_scheduler(optimizer_g, opt)
	net_d_scheduler = get_scheduler(optimizer_d, opt)

	start_time = time.time()

	#save loss
	G_loss_array = []
	D_loss_array = []
	epoch_array = []

	for epoch in tqdm(range(opt.epoch_count, opt.niter + opt.niter_decay + 1), desc="Epoch"):
		# train
		loss_g_sum = 0
		loss_d_sum = 0
		for iteration, batch in enumerate(tqdm(training_data_loader, desc="Batch"), 1):
			# forward
			real_a, real_b, class_label, _ = batch[0].to(device), batch[1].to(device), batch[2].to(device), batch[3][0]
			fake_b = net_g(real_a, class_label)

			######################
			# (1) Update D network
			######################

			optimizer_d.zero_grad()
			
			# train with fake
			if opt.padding:
				real_a_for_d = padding(real_a)
				real_b_for_d = padding(real_b)
				fake_b_for_d = padding(fake_b)
			else:
				real_a_for_d = real_a
				real_b_for_d = real_b
				fake_b_for_d = fake_b
			
			fake_ab = torch.cat((real_a_for_d, fake_b_for_d), 1)
			pred_fake = net_d.forward(fake_ab.detach(), class_label)
			loss_d_fake = criterionGAN(pred_fake, False)

			# train with real
			real_ab = torch.cat((real_a_for_d, real_b_for_d), 1)
			pred_real = net_d.forward(real_ab, class_label)
			loss_d_real = criterionGAN(pred_real, True)
			
			# Combined D loss
			loss_d = (loss_d_fake + loss_d_real) * 0.5

			loss_d.backward()
		   
			optimizer_d.step()

			######################
			# (2) Update G network
			######################

			optimizer_g.zero_grad()

			# First, G(A) should fake the discriminator
			fake_ab = torch.cat((real_a_for_d, fake_b_for_d), 1)
			pred_fake = net_d.forward(fake_ab, class_label)
			loss_g_gan = criterionGAN(pred_fake, True)

			# Second, G(A) = B
			loss_g_l1 = criterionL1(fake_b, real_b) * opt.lamb
			
			loss_g = loss_g_gan + loss_g_l1
			
			loss_g.backward()

			optimizer_g.step()
			loss_d_sum += loss_d.item()
			loss_g_sum += loss_g.item()

		update_learning_rate(net_g_scheduler, optimizer_g)
		update_learning_rate(net_d_scheduler, optimizer_d)
		
		# test
		avg_psnr = 0
		dst = Image.new('RGB', (512*4, 256*4))  # 4x4 grid canvas for up to 16 test previews
		n = 0
		for batch in tqdm(testing_data_loader, desc="Batch"):
			input, target, class_label, _ = batch[0].to(device), batch[1].to(device), batch[2].to(device), batch[3][0]

			prediction = net_g(input, class_label)
			mse = criterionMSE(prediction, target)
			psnr = 10 * log10(1 / mse.item())
			avg_psnr += psnr
			
			n += 1
			if n <= 16:
				#make test preview
				out_img = prediction.detach().squeeze(0).cpu()
				image_numpy = out_img.float().numpy()
				image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
				image_numpy = image_numpy.clip(0, 255)
				image_numpy = image_numpy.astype(np.uint8)
				image_pil = Image.fromarray(image_numpy)
				dst.paste(image_pil, ((n-1)%4*512, (n-1)//4*256))
				
		if not os.path.exists("results"):
			os.mkdir("results")
		if not os.path.exists(os.path.join("results", opt.output_path)):
			os.mkdir(os.path.join("results", opt.output_path))
		dst.save(f"results/{opt.output_path}/epoch{epoch}_test_preview.jpg")
		
		epoch_array += [epoch]
		G_loss_array += [loss_g_sum/len(training_data_loader)]
		D_loss_array += [loss_d_sum/len(training_data_loader)]
		
		if opt.graph_save_while_training and len(epoch_array) > 1:
			output_graph(epoch_array, G_loss_array, D_loss_array, False)
		
		#checkpoint
		if epoch % opt.save_interval == 0:
			if not os.path.exists("checkpoint"):
				os.mkdir("checkpoint")
			if not os.path.exists(os.path.join("checkpoint", opt.output_path)):
				os.mkdir(os.path.join("checkpoint", opt.output_path))
			net_g_model_out_path = "checkpoint/{}/netG_model_epoch_{}.pth".format(opt.output_path, epoch)
			net_d_model_out_path = "checkpoint/{}/netD_model_epoch_{}.pth".format(opt.output_path, epoch)
			torch.save(net_g, net_g_model_out_path)
			torch.save(net_d, net_d_model_out_path)

	#save the latest net
	if not os.path.exists("checkpoint"):
		os.mkdir("checkpoint")
	if not os.path.exists(os.path.join("checkpoint", opt.output_path)):
		os.mkdir(os.path.join("checkpoint", opt.output_path))
	net_g_model_out_path = "checkpoint/{}/netG_model_epoch_{}.pth".format(opt.output_path, opt.niter + opt.niter_decay)
	net_d_model_out_path = "checkpoint/{}/netD_model_epoch_{}.pth".format(opt.output_path, opt.niter + opt.niter_decay)
	torch.save(net_g, net_g_model_out_path)
	torch.save(net_d, net_d_model_out_path)
	print("\nCheckpoint saved to {}".format("checkpoint/" + opt.output_path))

	# output loss graph
	output_graph(epoch_array, G_loss_array, D_loss_array)

	# finish training
	now_time = time.time()
	t = now_time - start_time
	print(f"Training time: {t/60:.1f}m")
Example #6
                    help='random seed to use. Default=123')
opt = parser.parse_args()

print(opt)

if opt.cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run without --cuda")

torch.manual_seed(opt.seed)

device = torch.device("cuda" if opt.cuda else "cpu")

data_dir = 'dataset/BSD500/images'

print('===> Loading datasets')
train_set = get_training_set(data_dir, opt.upscale_factor)
test_set = get_test_set(data_dir, opt.upscale_factor)
training_data_loader = DataLoader(dataset=train_set,
                                  num_workers=opt.threads,
                                  batch_size=opt.batchSize,
                                  shuffle=True)
testing_data_loader = DataLoader(dataset=test_set,
                                 num_workers=opt.threads,
                                 batch_size=opt.testBatchSize,
                                 shuffle=False)

print('===> Building model')
model = Net(upscale_factor=opt.upscale_factor).to(device)
criterion_mse = nn.MSELoss()
criterion = nn.L1Loss()
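With both an L1 and an MSE criterion built, the usual split is L1 for optimization and MSE as the basis of a PSNR metric; a toy illustration on dummy tensors (the split itself is an assumption about the rest of the script):

import torch
import torch.nn as nn
from math import log10

pred, target = torch.rand(2, 3, 16, 16), torch.rand(2, 3, 16, 16)
train_loss = nn.L1Loss()(pred, target)                       # drives backprop
psnr = 10 * log10(1.0 / nn.MSELoss()(pred, target).item())   # reporting metric
print(train_loss.item(), psnr)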
Example #7
    model_out_path = opt.save_folder + opt.model_type + "_epoch_{}.pth".format(
        epoch)
    torch.save(model.state_dict(), model_out_path)
    print("Checkpoint saved to {}".format(model_out_path))


cuda = opt.gpu_mode
if cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run without --cuda")

torch.manual_seed(opt.seed)
if cuda:
    torch.cuda.manual_seed(opt.seed)

print('===> Loading datasets')
train_set = get_training_set(opt.data_dir, opt.upscale_factor, opt.patch_size,
                             opt.data_augmentation)
training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)

print('===> Building model ', opt.model_type)

model = VAE_denoise(input_dim=3, dim=32, feat_size=8, z_dim=512, prior='standard', number_component=512)

HR_feat_extractor = VGGFeatureExtractor(feature_layer=36, use_bn=False, use_input_norm=True, device='cuda')

model = torch.nn.DataParallel(model)
HR_feat_extractor = torch.nn.DataParallel(HR_feat_extractor)

L1_criterion = nn.L1Loss()  # the VAE reconstruction term typically uses a summed reduction
L2_criterion = nn.MSELoss()
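nn.DataParallel above presumes at least one visible GPU; a device-robust variant of the same wrapping, with a stand-in module (a sketch, not the snippet's code):

import torch
import torch.nn as nn

model = nn.Linear(4, 4)  # stand-in for VAE_denoise(...)
if torch.cuda.device_count() > 1:
    model = nn.DataParallel(model)  # replicate across all visible GPUs
model = model.to("cuda" if torch.cuda.is_available() else "cpu")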

Example #8
                    default="0",
                    help='GPU to use. Default=0')
# parser.add_argument('--logDir', type=str, default="Log", help='Log directory.')
opt = parser.parse_args()

print(opt)

device = torch.device("cuda")

os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu
device = torch.device("cuda")

torch.manual_seed(opt.seed)

print('===> Loading datasets')
train_set = get_training_set()
val_set = get_val_set()
training_data_loader = DataLoader(dataset=train_set,
                                  num_workers=opt.threads,
                                  batch_size=opt.batchSize,
                                  shuffle=True)
validating_data_loader = DataLoader(dataset=val_set,
                                    num_workers=opt.threads,
                                    batch_size=opt.valBatchSize,
                                    shuffle=False)

print('===> Building model')

# model = models.vgg16(pretrained=True)
# model.classifier = Net().classifier
model = models.resnet50(pretrained=True)
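The commented-out VGG16 lines and the resnet50 load point to transfer learning; the customary next step is replacing the classifier head. A sketch (the class count of 10 is an assumption):

import torch.nn as nn
from torchvision import models

model = models.resnet50(pretrained=True)
model.fc = nn.Linear(model.fc.in_features, 10)  # new head; 10 classes assumed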
Example #9
def main():
    global opt
    opt = parser.parse_args()
    opt.gpuids = list(map(int, opt.gpuids))

    print(opt)

    if opt.cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    cudnn.benchmark = True

    if not opt.test:
        train_set = get_training_set(opt.dataset, opt.crop_size,
                                     opt.upscale_factor, opt.add_noise,
                                     opt.noise_std)
        validation_set = get_validation_set(opt.dataset, opt.crop_size,
                                            opt.upscale_factor)

    test_set = get_test_set(opt.dataset, opt.crop_size, opt.upscale_factor)

    if not opt.test:
        training_data_loader = DataLoader(dataset=train_set,
                                          num_workers=opt.threads,
                                          batch_size=opt.batch_size,
                                          shuffle=True)
        validating_data_loader = DataLoader(dataset=validation_set,
                                            num_workers=opt.threads,
                                            batch_size=opt.test_batch_size,
                                            shuffle=False)

    testing_data_loader = DataLoader(dataset=test_set,
                                     num_workers=opt.threads,
                                     batch_size=opt.test_batch_size,
                                     shuffle=False)

    model = VDSR()
    criterion = nn.MSELoss()

    if opt.cuda:
        torch.cuda.set_device(opt.gpuids[0])
        with torch.cuda.device(opt.gpuids[0]):
            model = model.cuda()
            criterion = criterion.cuda()
        model = nn.DataParallel(model,
                                device_ids=opt.gpuids,
                                output_device=opt.gpuids[0])

    optimizer = optim.Adam(model.parameters(),
                           lr=opt.lr,
                           weight_decay=opt.weight_decay)

    if opt.test:
        model_name = join("model", opt.model)
        model = torch.load(model_name)
        start_time = time.time()
        test(model, criterion, testing_data_loader)
        elapsed_time = time.time() - start_time
        print("===> average {:.2f} image/sec for test".format(100.0 /
                                                              elapsed_time))
        return

    train_time = 0.0
    validate_time = 0.0
    for epoch in range(1, opt.epochs + 1):
        start_time = time.time()
        train(model, criterion, epoch, optimizer, training_data_loader)
        elapsed_time = time.time() - start_time
        train_time += elapsed_time
        print("===> {:.2f} seconds to train this epoch".format(elapsed_time))
        start_time = time.time()
        validate(model, criterion, validating_data_loader)
        elapsed_time = time.time() - start_time
        validate_time += elapsed_time
        print(
            "===> {:.2f} seconds to validate this epoch".format(elapsed_time))
        if epoch % 10 == 0:
            checkpoint(model, epoch)

    print("===> average training time per epoch: {:.2f} seconds".format(
        train_time / opt.epochs))
    print("===> average validation time per epoch: {:.2f} seconds".format(
        validate_time / opt.epochs))
    print("===> training time: {:.2f} seconds".format(train_time))
    print("===> validation time: {:.2f} seconds".format(validate_time))
    print("===> total training time: {:.2f} seconds".format(train_time +
                                                            validate_time))
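checkpoint() is called every 10 epochs but not shown; since the --test branch reloads with torch.load(model_name), a consistent sketch pickles the whole model (saving a state_dict, as Example #7 does, is the more portable choice):

import os
import torch

def checkpoint(model, epoch):
    # plausible counterpart to the torch.load() in the test branch above
    os.makedirs("model", exist_ok=True)
    path = os.path.join("model", "model_epoch_{}.pth".format(epoch))
    torch.save(model, path)
    print("Checkpoint saved to {}".format(path))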
Example #10
def main():
    global opt
    opt = parser.parse_args()
    opt.gpuids = list(map(int, opt.gpuids))

    print(opt)

    if opt.cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    cudnn.benchmark = True

    if not opt.test:
        train_set = get_training_set(opt.dataset, opt.upscale_factor, opt.crop)
        validation_set = get_validation_set(opt.dataset, opt.upscale_factor)

    test_set = get_test_set(opt.dataset, opt.upscale_factor)

    if not opt.test:
        training_data_loader = DataLoader(dataset=train_set,
                                          num_workers=opt.threads,
                                          batch_size=opt.batch_size,
                                          shuffle=True)
        validating_data_loader = DataLoader(dataset=validation_set,
                                            num_workers=opt.threads,
                                            batch_size=opt.test_batch_size,
                                            shuffle=False)

    testing_data_loader = DataLoader(dataset=test_set,
                                     num_workers=opt.threads,
                                     batch_size=opt.test_batch_size,
                                     shuffle=False)

    model = SVLRM()
    criterion1 = CharnonnierLoss()  # Charbonnier penalty (identifier spelled as in the source)
    criterion2 = nn.MSELoss()
    Loss = []
    PSNR = []
    RMSE = []

    if opt.cuda:
        torch.cuda.set_device(opt.gpuids[0])
        with torch.cuda.device(opt.gpuids[0]):
            model = model.cuda()
            criterion1 = criterion1.cuda()
            criterion2 = criterion2.cuda()
        model = nn.DataParallel(model,
                                device_ids=opt.gpuids,
                                output_device=opt.gpuids[0])

    optimizer = optim.Adam(model.parameters(),
                           eps=opt.eps,
                           weight_decay=opt.weight_decay)

    if opt.test:
        model_name = join("model", opt.model)
        model = torch.load(model_name)
        model.eval()
        start_time = time.time()
        test(model, criterion2, testing_data_loader)
        elapsed_time = time.time() - start_time
        print("===> average {:.2f} image/sec for test".format(100.0 /
                                                              elapsed_time))
        return

    train_time = 0.0
    validate_time = 0.0
    for epoch in range(1, opt.epochs + 1):
        start_time = time.time()
        train(model, criterion1, epoch, optimizer, training_data_loader, Loss)
        elapsed_time = time.time() - start_time
        train_time += elapsed_time
        print("===> {:.2f} seconds to train this epoch".format(elapsed_time))
        if epoch % 50 == 0:
            start_time = time.time()
            validate(model, criterion2, validating_data_loader, PSNR, RMSE)
            elapsed_time = time.time() - start_time
            validate_time += elapsed_time
            print("===> {:.2f} seconds to validate this epoch".format(
                elapsed_time))
            checkpoint(model, epoch)

    print("===> average training time per epoch: {:.2f} seconds".format(
        train_time / opt.epochs))
    print("===> average validation time per epoch: {:.2f} seconds".format(
        validate_time / opt.epochs))
    print("===> training time: {:.2f} seconds".format(train_time))
    print("===> validation time: {:.2f} seconds".format(validate_time))
    print("===> total training time: {:.2f} seconds".format(train_time +
                                                            validate_time))
    plt.figure(figsize=(15, 5))
    plt.subplot(131)  # 1 row, 3 columns: first plot
    plt.plot(Loss)
    plt.ylabel('Loss')
    plt.xlabel('epochs')
    plt.subplot(132)  # 1 row, 3 columns: second plot
    plt.plot(PSNR)
    plt.ylabel('PSNR')
    plt.xlabel('epochsX50')
    plt.subplot(133)  # 1 row, 3 columns: third plot
    plt.plot(RMSE)
    plt.ylabel('RMSE')
    plt.xlabel('epochsX50')
    plt.savefig("Loss_PSNR_RMSE.jpg")
    '''
    file1 = open('Loss.txt','w')
    for item in Loss:
        file1.write(str(item)+"\n")
    file1.close()
    '''
    file2 = open('PSNR.txt', 'w')
    for item in PSNR:
        file2.write(str(item) + "\n")
    file2.close()
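The metric dump at the end closes the file by hand; an equivalent with-block leaves no handle open if a write fails (same output):

with open('PSNR.txt', 'w') as f:
    f.writelines(str(v) + "\n" for v in PSNR)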
Example #11
        epoch)
    torch.save(model.state_dict(), model_out_path)
    print("Checkpoint saved to {}".format(model_out_path))


cuda = opt.gpu_mode
if cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run without --cuda")

torch.manual_seed(opt.seed)
if cuda:
    torch.cuda.manual_seed(opt.seed)

print('==> Loading datasets')
train_set = get_training_set(opt.data_dir, opt.nFrames, opt.upscale_factor,
                             opt.data_augmentation, opt.file_list,
                             opt.other_dataset, opt.patch_size,
                             opt.future_frame)
#test_set = get_eval_set(opt.test_dir, opt.nFrames, opt.upscale_factor, opt.data_augmentation)
training_data_loader = DataLoader(dataset=train_set,
                                  num_workers=opt.threads,
                                  batch_size=opt.batchSize,
                                  shuffle=True)
#testing_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=False)

print('==> Building model ', opt.model_type)
if opt.model_type == 'RBPN':
    model = RBPN(num_channels=3,
                 base_filter=256,
                 feat=64,
                 num_stages=3,
                 n_resblock=5,
Example #12
    # Create folder to log.
    log_dir = os.path.join(
        'runs',
        os.path.basename(args.config)[:-5] + '_' + str(seed))
    writer = SummaryWriter(log_dir=log_dir)

    # Create folder to store checkpoints.
    os.makedirs(os.path.join(config['training']['checkpoint_folder'],
                             os.path.basename(args.config)[:-5]),
                exist_ok=True)

    print('===> Loading datasets')
    sys.stdout.flush()
    train_set = get_training_set(
        img_dir=config['data']['train_root'],
        upscale_factor=config['model']['upscale_factor'],
        img_channels=config['model']['img_channels'],
        crop_size=config['data']['lr_crop_size'] *
        config['model']['upscale_factor'])
    train_dataloader = DataLoader(dataset=train_set,
                                  batch_size=config['training']['batch_size'],
                                  shuffle=True)

    val_set = get_val_set(img_dir=config['data']['test_root'],
                          upscale_factor=config['model']['upscale_factor'],
                          img_channels=config['model']['img_channels'])
    val_dataloader = DataLoader(dataset=val_set, batch_size=1, shuffle=False)

    print('===> Building model')
    sys.stdout.flush()
    model = FSRCNN(img_channels=config['model']['img_channels'],
                   upscale_factor=config['model']['upscale_factor']).to(device)
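The config dict used throughout this snippet is read from args.config, and the [:-5] slicing implies filenames ending in '.yaml'. A plausible loader, not shown in the excerpt (an assumption):

import yaml

def load_config(path: str) -> dict:
    # nested dict: config['data'], config['model'], config['training'], ...
    with open(path) as f:
        return yaml.safe_load(f)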
Example #13
#     def __init__(self, indices):
#         self.indices = indices
#
#     def __iter__(self):
#         return (self.indices[i] for i in range(len(self.indices)))
#
#     def __len__(self):
#         return len(self.indices)

# newlist = [i for i in range(70000)]
# # print(newlist)
# train_sampler = MYSampler(indices=newlist)

print('===> Loading datasets')
root_path = "/data/JXR/VSRC/dataset/data/"
train_set = get_training_set(root_path)
# test_set = get_test_set(root_path)
training_data_loader = DataLoader(dataset=train_set,
                                  num_workers=opt.threads,
                                  batch_size=opt.batchSize,
                                  shuffle=False,
                                  pin_memory=True)
# testing_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=opt.testBatchSize, shuffle=False)

print('===> Building model')
# netG = define_G(opt.input_nc, opt.output_nc, opt.ngf, 'batch', False, [0])
netG = EDVR_arch.EDVR(nf=64,
                      nframes=7,
                      groups=8,
                      front_RBs=5,
                      back_RBs=10,
Example #14
def main():

    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = get_training_set(opt.train_dir)
    #train_set = DatasetFromHdf5("data/lap_pry_x4_small.h5")
    training_data_loader = DataLoader(dataset=train_set,
                                      num_workers=opt.threads,
                                      batch_size=opt.batchSize,
                                      shuffle=True)

    print("===> Building model")
    model = Net(opt.scale_factor)
    criterion = L1_Charbonnier_loss()

    print("===> Setting GPU")
    if cuda:
        model = model.cuda()
        criterion = criterion.cuda()
    else:
        model = model.cpu()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['model'].state_dict())
        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("===> Setting Optimizer")
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch)
        save_checkpoint(model, epoch)
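L1_Charbonnier_loss comes from the snippet's own code; the standard LapSRN formulation it presumably follows is a smooth, everywhere-differentiable L1 surrogate. A sketch (averaged here; some implementations sum instead):

import torch
import torch.nn as nn

class L1CharbonnierLoss(nn.Module):
    def __init__(self, eps: float = 1e-6):
        super().__init__()
        self.eps = eps

    def forward(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        diff = pred - target
        return torch.mean(torch.sqrt(diff * diff + self.eps))  # sqrt(d^2 + eps)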
Example #15
opt = parser.parse_args()

print(opt)

if opt.cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run without --cuda")

cudnn.benchmark = True

torch.manual_seed(opt.seed)
if opt.cuda:
    torch.cuda.manual_seed(opt.seed)

print('===> Loading datasets')
root_path = "dataset/"
train_set = get_training_set(root_path + opt.dataset)
test_set = get_test_set(root_path + opt.dataset)
training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)
testing_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=opt.testBatchSize, shuffle=False)

print('===> Building model')
netG = G(opt.input_nc, opt.output_nc, opt.ngf)
pytorch_total_params = sum(p.numel() for p in netG.parameters() if p.requires_grad)
print ("\nTrainable parameters", pytorch_total_params)
#netG.apply(weights_init)
netD = D(opt.input_nc, opt.output_nc, opt.ndf)
#netD.apply(weights_init)

criterion = nn.BCELoss()
criterion_l1 = nn.L1Loss()
criterion_mse = nn.MSELoss()
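Both apply(weights_init) calls are commented out and weights_init itself is not shown; the conventional pix2pix/DCGAN initializer they would expect looks like this (a standard sketch, not the snippet's own definition):

import torch.nn as nn

def weights_init(m: nn.Module) -> None:
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)   # N(0, 0.02) conv weights
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0.0)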
Example #16
opt = parser.parse_args()

print(opt)

if opt.cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run without --cuda")

cudnn.benchmark = True

torch.manual_seed(opt.seed)
if opt.cuda:
    torch.cuda.manual_seed(opt.seed)

print('===> Loading datasets')
root_path = opt.dataset_root
train_set = get_training_set(root_path + opt.dataset, opt.direction)
test_set = get_test_set(root_path + opt.dataset, opt.direction)
training_data_loader = DataLoader(dataset=train_set,
                                  num_workers=opt.threads,
                                  batch_size=opt.batch_size,
                                  shuffle=True)
testing_data_loader = DataLoader(dataset=test_set,
                                 num_workers=opt.threads,
                                 batch_size=opt.test_batch_size,
                                 shuffle=False)

device = torch.device("cuda:0" if opt.cuda else "cpu")

print('===> Building models')
net_g = define_G(opt.input_nc,
                 opt.output_nc,
Example #17
parser.add_argument('--cuda', action='store_true', help='use cuda?')
parser.add_argument('--threads', type=int, default=4, help='number of threads for data loader to use')
parser.add_argument('--seed', type=int, default=123, help='random seed to use. Default=123')
opt = parser.parse_args()

print(opt)

if opt.cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run without --cuda")

torch.manual_seed(opt.seed)

device = torch.device("cuda" if opt.cuda else "cpu")

print('===> Loading datasets')
train_set = get_training_set(opt.upscale_factor)
test_set = get_test_set(opt.upscale_factor)
training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)
testing_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=opt.testBatchSize, shuffle=False)

print('===> Building model')
model = Net(upscale_factor=opt.upscale_factor).to(device)
criterion = nn.MSELoss()

optimizer = optim.Adam(model.parameters(), lr=opt.lr)


def train(epoch):
    epoch_loss = 0
    for iteration, batch in enumerate(training_data_loader, 1):
        input, target = batch[0].to(device), batch[1].to(device)
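        # (snippet cut off here; a plausible completion, matching the upstream
        #  pytorch/examples super-resolution script this appears to follow)
        optimizer.zero_grad()
        loss = criterion(model(input), target)
        epoch_loss += loss.item()
        loss.backward()
        optimizer.step()

    print("===> Epoch {} Complete: Avg. Loss: {:.4f}".format(epoch, epoch_loss / len(training_data_loader)))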
Example #18
                    type=int,
                    default=64,
                    help='generator filters in first conv layer')
parser.add_argument('--ndf',
                    type=int,
                    default=64,
                    help='discriminator filters in first conv layer')
args = parser.parse_args()

print('GPU: {}'.format(args.gpu))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))

print('===> Loading datasets')
root_path = "dataset/"
train_set = get_training_set(root_path + args.dataset)
test_set = get_test_set(root_path + args.dataset)

# for iteration, batch in enumerate(train_set, 1):
#     print("iteration", iteration)
#     print(batch[0].shape)
#     print(batch[1].shape)
#     break

print('===> Building model')
encoderdecoder_model = EncoderDecoder(args.input_nc, args.output_nc, args.ngf)
discriminator_model = Discriminator(args.input_nc, args.output_nc, args.ngf)

if args.gpu >= 0:
    print("use gpu")
    chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
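    # (snippet cut off here; the usual next step in Chainer is to move the
    #  models to the selected GPU, so a plausible continuation is:)
    encoderdecoder_model.to_gpu()
    discriminator_model.to_gpu()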