Example 1
def compute_mean(args):
    dsetTrain = SRDataset(args.highresdir, args.lowresdir).train_data
    dsetTrain = dsetTrain.astype(np.float32) / 255

    print(dsetTrain.shape)
    mean = []
    std = []

    for i in range(1):  # single (grayscale) channel
        pixels = dsetTrain[:, :, i].ravel()
        mean.append(np.mean(pixels))
        std.append(np.std(pixels))
    print("means: {}".format(mean))
    print("stdevs: {}".format(std))
Example 2
def prepare_data(sr_dir, lr_dir, patch_size, batch_size, mode='train', shuffle=True):
    if mode == 'val':
        transform = co_transforms.Compose(
            [co_transforms.Grayscale(),
             # co_transforms.RandomCrop(patch_size, patch_size),
             co_transforms.ToTensor(), ])
    #             co_transforms.Normalize(mean=[0.3787], std=[0.2464])])

    elif mode == 'train':
        transform = co_transforms.Compose(
            [co_transforms.Grayscale(),
             # co_transforms.RandomCrop(patch_size, patch_size),
             co_transforms.RandomHorizontalFlip(),
             co_transforms.RandomVerticalFlip(),
             co_transforms.ToTensor(), ])
        #            co_transforms.Normalize(mean=[0.3787], std=[0.2464])])
    else:
        transform = co_transforms.Compose(
            [co_transforms.Grayscale(),
             co_transforms.ToTensor(), ])
    #            co_transforms.Normalize(mean=[0.3787], std=[0.2464])])

    dset = SRDataset(highres_root=sr_dir, lowres_root=lr_dir, transform=transform)
    dloader = data.DataLoader(dset, batch_size=batch_size, shuffle=shuffle)
    return dloader
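A minimal usage sketch for prepare_data, with placeholder paths and sizes, and assuming the loader yields (low-res, high-res) pairs:

train_loader = prepare_data('data/HR_train', 'data/LR_train',
                            patch_size=96, batch_size=16, mode='train')
val_loader = prepare_data('data/HR_val', 'data/LR_val',
                          patch_size=96, batch_size=1, mode='val', shuffle=False)

for lr_batch, hr_batch in train_loader:  # assumed (low-res, high-res) ordering
    pass  # forward pass, loss, and optimizer step would go here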
Example 3
 def train_dataloader(self):
     data = SRDataset(
         os.path.join(self.root_dir, self.cfg.datasets.train_path))
     train_len = int(len(data) * 0.8)
     val_len = int(len(data) - train_len)
     self.sr_train, self.sr_val = random_split(data, [train_len, val_len])
     return DataLoader(self.sr_train,
                       batch_size=self.cfg.batch_size,
                       shuffle=True)
Example 4
def get_test_loader(args):
    """Create dataset and return dataset loader of test Dataset.

	Args:
		args.data_dir: The directory of the dataset image.
		args.objects_dir: The directory of the ROI object extracted from image by Faster RCNN.
		args.test_list: The file path of annotation list with the unit of content: image_id, box1, box2, label.
		args.scale_size: Scale size of transform.
		args.crop_size: Crop size of trnasform.
		args.workers: The workers number.
		args.batch_size: The batch size to load sample.

	Returns:
		test_laoder: [torch.utils.data.Loader] loader data in batch size.

	"""

    data_dir = args.data
    test_list = args.testlist
    objects_dir = args.objects
    scale_size = args.scale_size
    crop_size = args.crop_size
    workers = args.workers
    batch_size = args.batch_size

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    test_data_transform = transforms.Compose([
        transforms.Resize((crop_size, crop_size)),
        transforms.ToTensor(), normalize
    ])  # what about horizontal flip

    test_full_transform = transforms.Compose(
        [transforms.Resize((448, 448)),
         transforms.ToTensor(), normalize])  # what about horizontal flip

    test_set = SRDataset(data_dir, objects_dir, test_list, test_data_transform,
                         test_full_transform)
    test_loader = DataLoader(dataset=test_set,
                             num_workers=workers,
                             batch_size=batch_size,
                             shuffle=False)
    return test_loader
Example 5
File: SR.py Project: sldz5/SR
def valid(args, net=None):  ##siamese_net=None,
    net.eval()
    print("valid time is {} ".format(
        time.strftime('%H:%M:%S', time.localtime(time.time()))))
    ##model_file_s = "./models/b{}.e{}.pkl".format(args.batch_size,args.epoch)  # save the siamese model
    #model_file = "./models/b{}.e{}.pkl".format(args.batch_size,args.epoch)  # save the net model
    ds_val = SRDataset(dataset_path, set='valid')
    valid_loader = torch.utils.data.DataLoader(dataset=ds_val,
                                               batch_size=1,
                                               num_workers=args.nb_worker,
                                               shuffle=False)  #
    ##if not siamese_net:
    ##    siamese_net = torch.load(model_file_s)
    if not net:
        net = torch.load(model_file)

    print("Loaded {} valid data.".format(len(ds_val)))
    if args.cuda:
        #siamese_net = siamese_net.cuda(0)
        net = net.cuda(0)

    total_loss = []
    total_psnr = []
    total_ssim = []
    #num_epochs = args.epoch
    for i, (images, gts, base_name) in enumerate(valid_loader):  ##,labels
        if args.cuda:
            images = Variable(images.cuda(0))
            gts = Variable(gts.cuda(0))
        outputs = net(images)

        loss_f = criterion(outputs, gts)
        total_loss.append(loss_f.item())  ##data[0]
        outputs = tensor_to_PIL(outputs)
        gts = tensor_to_PIL(gts)
        path = "./predict/" + base_name[0] + ".png"
        outputs.save(path)

        squared_error = np.square(_open_img(gts) - _open_img(outputs))
        mse = np.mean(squared_error)
        psnr = 10 * np.log10(1.0 / mse)
        #print("{}psnr is {}".format(base_name[0],psnr))
        total_psnr.append(psnr)

        channels = []
        hr = _open_img_ssim(gts)
        sr = _open_img_ssim(outputs)
        for c in range(args.n_colors):  # avoid shadowing the outer loop variable i
            channels.append(
                ssim(hr[:, :, c],
                     sr[:, :, c],
                     gaussian_weights=True,
                     use_sample_covariance=False))
        ssim_value = np.mean(channels)
        total_ssim.append(ssim_value)

    aver_loss = np.mean(total_loss)
    print("V_aver_loss:{}".format(aver_loss))
    vis.plot_train_val(loss_val=aver_loss)

    aver_psnr = np.mean(total_psnr)
    aver_ssim = np.mean(total_ssim)
    print('PSNR:{}'.format(aver_psnr))
    print('SSIM:{}'.format(aver_ssim))
Example 6
File: SR.py Project: sldz5/SR
def train(args, epoch):
    # Train the Model
    net.train()

    exp_lr_scheduler(optimizer, epoch)
    #exp_lr_scheduler(optimizer_s,epoch)
    ds_train = SRDataset(dataset_path, set='train')
    train_loader = torch.utils.data.DataLoader(dataset=ds_train,
                                               batch_size=args.batch_size,
                                               num_workers=args.nb_worker,
                                               shuffle=True)  # shuffle the dataset
    temp_dataset = SRDataset(dataset_path, set='temp')
    temp_loader = torch.utils.data.DataLoader(dataset=temp_dataset,
                                              batch_size=1,
                                              num_workers=args.nb_worker,
                                              shuffle=False)
    print("Loaded {} train data.".format(len(train_loader)))
    total_loss = []
    #total_loss_s = []
    for i, (images, gts, base_name) in enumerate(train_loader):  #,labels
        s1 = 0
        s2 = 0
        base_name1, base_name2 = base_name
        with open('./temp.txt', 'r') as f:
            for line in f.readlines():  # read every name stored in temp.txt
                name = line.strip('\n')
                if base_name1 == name:
                    s1 = 1
                elif base_name2 == name:
                    s2 = 1
                else:
                    pass
        print("base_name:{}".format(base_name))
        if s1 == 1 and s2 == 0:  #and string2 not in data:
            print("------1->0---------")
            images[0] = torch.Tensor(np.zeros([3, 96, 96]))
            gts[0] = torch.Tensor(np.zeros([3, 96, 96]))
        elif s2 == 1 and s1 == 0:
            print("------2->0---------")
            images[1] = torch.Tensor(np.zeros([3, 96, 96]))
            gts[1] = torch.Tensor(np.zeros([3, 96, 96]))
        elif s1 == 1 and s2 == 1:
            print("------in---------")
            continue
        if args.cuda:
            images = Variable(images.cuda(0))
            gts = Variable(gts.cuda(0))
        ##images = transform(images)
        ##gts = transform(gts)
        """
        #-----------train siamese----------
        optimizer_s.zero_grad()
        #-----------false-----------
        #optimizer_D.zero_grad()
        outputs = net(images)
        pred_fake = siamese_net(outputs,gts)
        ture_lable = siamese_net(images,gts)
        if i % 100 == 0 :
            print("times:{}".format(i))
        fake_loss = criterionGAN(pred_fake,False)   
        real_loss = criterionGAN(ture_lable,True)
        loss_s = (fake_loss + real_loss)/2
        loss_s.backward()
        optimizer_s.step()
	    """
        #---------train net---------
        optimizer.zero_grad()
        outputs = net(images)

        #print("base_name:{}".format(base_name[:]))
        for j in range(0, len(outputs)):  #### if the PSNR gain exceeds a threshold, add the sample to the training set
            psnr_sd, _, _ = psnr_value(images[j], gts[j])
            psnr_pdt, gt, sr = psnr_value(outputs[j], gts[j])
            temp = psnr_pdt - psnr_sd
            global temp_st
            global count
            if temp > 0.5:
                if count == 3:
                    count = 0
                    temp_st = temp_st + 1
                count = count + 1
                #print("psnr_sd:{}".format(psnr_sd))
                #print("psnr_pdt:{}".format(psnr_pdt))
                print("temp:{}".format(temp))
                path_GT = "./temp_GT/" + base_name[j] + ".png"
                path_LR = "./temp_LR/" + base_name[j] + ".png"
                #output = output.resize([48,48],resample=Image.BICUBIC)
                gt.save(path_GT)
                sr.save(path_LR)
                k = 0
                #print("base_name[j]:{}".format(base_name[j]))
                with open('./temp.txt', 'r') as f:
                    for line in f.readlines():  # read every name stored in temp.txt
                        name = line.strip('\n')
                        #name = n.split("\\n")[0]
                        if base_name[j] == name:
                            k = 1
                if k == 1:
                    print("---------pass-------")
                    pass
                else:
                    with open('./temp.txt', 'a') as f:
                        #### not sure why, but newly appended names seemed to overwrite the old
                        #### ones; since the PSNR is higher anyway this is left unchanged, even
                        #### though it differs from the original idea
                        f.write(base_name[j] + '\n')
                    #print("~~~~string in data~~~~~")

        loss = criterion(outputs, gts)  ##loss_g
        #ture_lable = siamese_net(outputs,gts)
        #sia_loss = criterionGAN(ture_lable,True)
        ##turet_loss = criterionGAN(true_sia_t,True)
        #loss =(loss_g + sia_loss ) / 2
        #loss_t = loss_f+true_loss
        loss.backward()
        optimizer.step()

        total_loss.append(loss.item())  ##data[0]
        #total_loss_s.append(loss_s.item())

    print("Loaded {} temp data.".format(len(temp_dataset)))
    for i, (images, gts, base_name) in enumerate(temp_loader):  #,labels
        if args.cuda:
            images = Variable(images.cuda(0))
            gts = Variable(gts.cuda(0))
        optimizer.zero_grad()
        outputs = net(images)
        loss = criterion(outputs, gts)
        loss.backward()
        optimizer.step()
        total_loss.append(loss.item())  ##data[0]
    #aver_loss_s = np.mean(total_loss_s)
    aver_loss = np.mean(total_loss)

    print("T_aver_loss:{}".format(aver_loss))
    vis.plot_train_val(loss_train=aver_loss)
    #print('Epoch [%d/%d],s_Loss: %.8f f_Loss: %.8f' % (epoch+1, args.epoch, aver_loss_s,aver_loss)) ##
    print('Epoch [%d/%d],Loss: %.8f' % (epoch + 1, args.epoch, aver_loss))

    if (epoch + 1) % 20 == 0:
        #model_file_s = "./models/b{}.{}-e{}.pkl".format(args.batch_size,epoch,args.epoch)  # save the siamese model
        model_file = "./models/b{}.{}-e{}.pkl".format(args.batch_size, epoch,
                                                      args.epoch)  #保存模型
        torch.save(net.state_dict(), model_file)  ##每迭代一次保存一下模型参数
        #torch.save(siamese_net.state_dict(), model_file_s)                         ####每迭代一次保存一下Siamese模型参数
    return net  ##siamese_net,
Example 7
 def test_dataloader(self):
     data = SRDataset(
         os.path.join(self.root_dir, self.cfg.datasets.test_path))
     return DataLoader(data, batch_size=self.cfg.batch_size, shuffle=False)
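Examples 3 and 7 read like methods of a PyTorch Lightning DataModule. A minimal sketch of the class they would plausibly sit in, with a val_dataloader added for the split created from the training data; the class name, cfg fields, and SRDataset constructor are assumptions, not confirmed by the source:

import os

import pytorch_lightning as pl
from torch.utils.data import DataLoader, random_split


class SRDataModule(pl.LightningDataModule):  # hypothetical class name
    def __init__(self, root_dir, cfg):
        super().__init__()
        self.root_dir = root_dir
        self.cfg = cfg

    def setup(self, stage=None):
        # split once here rather than inside train_dataloader (assumed refactor);
        # SRDataset is the dataset class used throughout the examples above
        data = SRDataset(os.path.join(self.root_dir, self.cfg.datasets.train_path))
        train_len = int(len(data) * 0.8)
        self.sr_train, self.sr_val = random_split(data, [train_len, len(data) - train_len])

    def train_dataloader(self):
        return DataLoader(self.sr_train, batch_size=self.cfg.batch_size, shuffle=True)

    def val_dataloader(self):
        return DataLoader(self.sr_val, batch_size=self.cfg.batch_size, shuffle=False)

    def test_dataloader(self):
        data = SRDataset(os.path.join(self.root_dir, self.cfg.datasets.test_path))
        return DataLoader(data, batch_size=self.cfg.batch_size, shuffle=False)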