Example #1
import glob
import os
import time

import cv2
import numpy as np
import torch
from torch.autograd import Variable

# Repository-local pieces (Generator_prelstm, print_network, normalize and
# the parsed options `opt`) are assumed to be imported from the project's
# own modules.

def main():
    if not os.path.isdir(opt.save_path):
        os.makedirs(opt.save_path)
    if not os.path.isdir(opt.save_path_r):
        os.makedirs(opt.save_path_r)
    # Build model
    print('Loading model ...\n')

    model = Generator_prelstm(opt.inter_iter, opt.use_GPU)
    print_network(model)
    if opt.use_GPU:
        model = model.cuda()

    # if the model was trained with nn.DataParallel (multi-GPU), strip the 'module.' prefix:

    # state_dict = torch.load(os.path.join(opt.logdir, 'net_latest.pth'))
    # from collections import OrderedDict
    # new_state_dict = OrderedDict()
    # for k, v in state_dict.items():
    #     name = k[7:]  # remove `module.`
    #     new_state_dict[name] = v
    # model.load_state_dict(new_state_dict)

    # if the model was trained on a single GPU, load the checkpoint directly
    model.load_state_dict(torch.load(os.path.join(opt.logdir, 'net_latest.pth')))
    model.eval()
    # load data info
    print('Loading data info ...\n')
    files_source = glob.glob(os.path.join(opt.data_path, 'rainy/*.png'))

    files_source.sort()
    # process data
    time_test = 0
    i = 0  # processed-image counter; starting at 0 keeps the final average correct
    for f in files_source:
        img_name = os.path.basename(f)

        # image
        Img = cv2.imread(f)
        h, w, c = Img.shape

        b, g, r = cv2.split(Img)
        Img = cv2.merge([r, g, b])  # OpenCV loads BGR; convert to RGB for the model
        #Img = cv2.resize(Img, (int(500), int(500)), interpolation=cv2.INTER_CUBIC)
        '''
        if h > 1024:
            ratio = 1024.0/h
            Img = cv2.resize(Img,(int(w * ratio), int(h * ratio)), interpolation=cv2.INTER_CUBIC)

        if w > 1024:
            ratio = 1024.0/w
            Img = cv2.resize(Img,(int(w * ratio), int(h * ratio)), interpolation=cv2.INTER_CUBIC)  # bicubic interpolation over a 4x4 pixel neighborhood
        '''
        Img = normalize(np.float32(Img))
        Img = np.expand_dims(Img.transpose(2, 0, 1), 0)  # HWC -> CHW, plus a batch dimension
        #Img = np.expand_dims(Img, 1)
        ISource = torch.Tensor(Img)
        # no synthetic noise is added for deraining; the input is the rainy image itself
        #noise = torch.FloatTensor(ISource.size()).normal_(mean=0, std=opt.test_noiseL/255.)
        INoisy = ISource  #+ noise

        if opt.use_GPU:
            ISource, INoisy = Variable(ISource.cuda()), Variable(INoisy.cuda())
        else:
            ISource, INoisy = Variable(ISource), Variable(INoisy)

        with torch.no_grad():  # disables autograd bookkeeping, which saves memory
            if opt.use_GPU:
                torch.cuda.synchronize()  # finish pending GPU work so the timing is accurate
            start_time = time.time()
            out, _, out_r, _ = model(INoisy)
            out = torch.clamp(out, 0., 1.)
            out_r = torch.clamp(out_r, 0., 1.)

            if opt.use_GPU:
                torch.cuda.synchronize()
            end_time = time.time()
            dur_time = end_time - start_time
            print(img_name)
            print(dur_time)
            time_test += dur_time
        ## on older PyTorch versions torch.no_grad() may not be supported; use volatile Variables instead:
        # ISource, INoisy = Variable(ISource.cuda(),volatile=True), Variable(INoisy.cuda(),volatile=True)
        # Out = torch.clamp(INoisy-model(INoisy), 0., 1.)
        #psnr = batch_PSNR(Out, ISource, 1.)
        #psnr_test += psnr
        #print("%s PSNR %f" % (f, psnr))
        if opt.use_GPU:
            save_out = np.uint8(255 * out.data.cpu().numpy().squeeze())   # move the result back to the CPU before converting to numpy
            save_out_r = np.uint8(255 * out_r.data.cpu().numpy().squeeze())

        else:
            save_out = np.uint8(255 * out.data.numpy().squeeze())
            save_out_r = np.uint8(255 * out_r.data.numpy().squeeze())

        save_out = save_out.transpose(1, 2, 0)
        b, g, r = cv2.split(save_out)
        save_out = cv2.merge([r, g, b])  # back to BGR for cv2.imwrite
        # cv2.imshow('a',save_out)

        save_out_r = save_out_r.transpose(1, 2, 0)
        b, g, r = cv2.split(save_out_r)
        save_out_r = cv2.merge([r, g, b])


        save_path = opt.save_path
        save_path_r = opt.save_path_r

        cv2.imwrite(os.path.join(save_path, img_name), save_out)
        cv2.imwrite(os.path.join(save_path_r, img_name), save_out_r)

        i = i + 1

    print('Avg. time:', time_test / i)
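
The script relies on a repository helper named normalize whose definition is not part of this listing. A minimal sketch consistent with how it is used here (float32 pixels scaled from [0, 255] down to [0, 1], matching the later torch.clamp(out, 0., 1.) and 255 * out save step):

import numpy as np

def normalize(data):
    # assumed behaviour: map pixel values from [0, 255] to [0, 1]
    return np.float32(data / 255.)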
Example #2
import os

import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter  # or torch.utils.tensorboard on newer PyTorch
from torchvision import utils

# Repository-local pieces (Dataset3, Generator_prelstminter11, print_network,
# pytorch_ssim, batch_PSNR, findLastCheckpoint and the parsed options `opt`)
# are assumed to be imported from the project's own modules.

def main():
    if not os.path.isdir(opt.outf):
        os.makedirs(opt.outf)
    # Load dataset
    print('Loading dataset ...\n')
    dataset_train = Dataset3(train=True, data_path=opt.data_path)

    loader_train = DataLoader(dataset=dataset_train,
                              num_workers=4,
                              batch_size=opt.batchSize,
                              shuffle=True)
    print("# of training samples: %d\n" % int(len(dataset_train)))
    # Build model

    net = Generator_prelstminter11(recurrent_iter=opt.inter_iter,
                                   use_GPU=opt.use_GPU)
    #net = nn.DataParallel(net)
    print_network(net)

    #criterion = nn.MSELoss(size_average=False)
    criterion = pytorch_ssim.SSIM()  # SSIM similarity in [0, 1]; negated below to act as a loss

    # Move to GPU

    model = net.cuda()
    criterion.cuda()
    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)
    scheduler = MultiStepLR(optimizer, milestones=[30, 50, 80],
                            gamma=0.2)  # learning rates

    # training
    writer = SummaryWriter(opt.outf)
    step = 0
    noiseL_B = [0, 55]  # ignored when opt.mode=='S'

    initial_epoch = findLastCheckpoint(
        save_dir=opt.outf)  # load the last model in matconvnet style
    if initial_epoch > 0:
        print('resuming by loading epoch %03d' % initial_epoch)
        model.load_state_dict(
            torch.load(
                os.path.join(opt.outf, 'net_epoch%d.pth' % initial_epoch)))

    for epoch in range(initial_epoch, opt.epochs):
        # the MultiStepLR scheduler sets the learning rate; the manual
        # milestone-based current_lr is computed but deliberately not
        # applied (the assignment below is commented out)
        if epoch < opt.milestone:
            current_lr = opt.lr
        else:
            current_lr = opt.lr / 10.
        scheduler.step(epoch)
        # print the learning rate actually in effect
        for param_group in optimizer.param_groups:
            #param_group["lr"] = current_lr
            print('learning rate %f' % param_group["lr"])
        # train
        for i, (input, target, rain) in enumerate(loader_train, 0):
            # training step
            model.train()
            model.zero_grad()
            optimizer.zero_grad()

            #rain = input - target
            input_train, target_train, rain_train = Variable(
                input.cuda()), Variable(target.cuda()), Variable(rain.cuda())

            out_train, outs, out_r_train, outs_r = model(input_train)

            #            tv_loss = tv(out_train)
            pixel_loss = criterion(target_train, out_train)
            # pixel_loss_r = criterion(rain_train, out_r_train)
            #
            # loss1 = criterion(target_train, outs[0])
            # loss2 = criterion(target_train, outs[1])
            # loss3 = criterion(target_train, outs[2])
            # loss4 = criterion(target_train, outs[3])
            #
            # #
            # loss1_r = criterion(rain_train, outs_r[0])
            # loss2_r = criterion(rain_train, outs_r[1])
            # loss3_r = criterion(rain_train, outs_r[2])
            # loss4_r = criterion(rain_train, outs_r[3])
            '''
            y = utils_vgg.normalize_batch(out_train)
            x = utils_vgg.normalize_batch(target_train)
            x, y = target_train, out_train

            features_y = vgg(y)
            features_x = vgg(x)

            perceptual_loss = 0e-5 * criterion(features_y.relu2_2, features_x.relu2_2)
            '''

            # loss = -(pixel_loss + 0.5 * (loss1 + loss2 + loss3 + loss4)) #/(target_train.size()[0] * 2) + loss5 + loss6 + loss7 + loss8
            # loss_r = -(pixel_loss_r + 0.5 * (loss1_r + loss2_r + loss3_r + loss4_r)) #+ loss5_r + loss6_r + loss7_r + loss8_r
            # lossm = 0.55 * loss + 0.45 * loss_r
            loss = -pixel_loss  # negate SSIM: maximizing similarity == minimizing the loss
            loss.backward()
            # loss.backward(retain_graph=True)
            # loss_r.backward()
            optimizer.step()
            # compute metrics on the current batch for logging
            model.eval()
            out_train, _, out_r_train, _ = model(input_train)
            out_train = torch.clamp(out_train, 0., 1.)
            out_r_train = torch.clamp(out_r_train, 0., 1.)
            psnr_train = batch_PSNR(out_train, target_train, 1.)
            psnr_train_r = batch_PSNR(out_r_train, rain_train, 1.)
            print(
                "[epoch %d][%d/%d] loss: %.4f, PSNR_train: %.4f" %
                (epoch + 1, i + 1, len(loader_train), loss.item(), psnr_train))
            # on older PyTorch versions, replace loss.item() with loss.data[0]

            if step % 10 == 0:
                # Log the scalar values
                writer.add_scalar('loss', loss.item(), step)
                writer.add_scalar('PSNR on training data', psnr_train, step)
                #writer.add_scalar('loss_r', loss_r.item(), step)
                writer.add_scalar('PSNR_r on training data', psnr_train_r,
                                  step)
            step += 1
        ## the end of each epoch

        model.eval()
        '''
        # validate
        psnr_val = 0
        for k in range(len(dataset_val)):
            img_val = torch.unsqueeze(dataset_val[k], 0)
            noise = torch.FloatTensor(img_val.size()).normal_(mean=0, std=opt.val_noiseL/255.)
            imgn_val = img_val + noise
            img_val, imgn_val = Variable(img_val.cuda()), Variable(imgn_val.cuda())
            out_val = torch.clamp(imgn_val-model(imgn_val), 0., 1.)
            psnr_val += batch_PSNR(out_val, img_val, 1.)
        psnr_val /= len(dataset_val)
        print("\n[epoch %d] PSNR_val: %.4f" % (epoch+1, psnr_val))
        writer.add_scalar('PSNR on validation data', psnr_val, epoch)
        '''
        # log the images
        out_train, _, out_r_train, _ = model(input_train)
        out_train = torch.clamp(out_train, 0., 1.)
        out_r_train = torch.clamp(out_r_train, 0., 1.)
        Img = utils.make_grid(target_train.data,
                              nrow=8,
                              normalize=True,
                              scale_each=True)
        Imgn = utils.make_grid(input_train.data,
                               nrow=8,
                               normalize=True,
                               scale_each=True)
        Irecon = utils.make_grid(out_train.data,
                                 nrow=8,
                                 normalize=True,
                                 scale_each=True)
        rainstreak = utils.make_grid(out_r_train.data,
                                     nrow=8,
                                     normalize=True,
                                     scale_each=True)
        writer.add_image('clean image', Img, epoch)
        writer.add_image('noisy image', Imgn, epoch)
        writer.add_image('reconstructed image', Irecon, epoch)
        writer.add_image('estimated rain image', rainstreak, epoch)
        # save model
        torch.save(model.state_dict(), os.path.join(opt.outf,
                                                    'net_latest.pth'))

        if epoch % opt.save_freq == 0:
            torch.save(model.state_dict(),
                       os.path.join(opt.outf, 'net_epoch%d.pth' % (epoch + 1)))
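
batch_PSNR is another repository helper used above for the on-the-fly training metrics. A plain-NumPy sketch, assuming it returns the mean PSNR over the batch for images with values in [0, data_range]:

import numpy as np

def batch_PSNR(img, imclean, data_range):
    # assumed behaviour: average the per-image PSNR across the batch;
    # both inputs are 4-D tensors with values in [0, data_range]
    img = img.data.cpu().numpy().astype(np.float32)
    imclean = imclean.data.cpu().numpy().astype(np.float32)
    psnr = 0.0
    for i in range(img.shape[0]):
        mse = np.mean((img[i] - imclean[i]) ** 2)
        psnr += 10.0 * np.log10((data_range ** 2) / mse)  # undefined if mse == 0
    return psnr / img.shape[0]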
Example #3
import os
import time
from collections import OrderedDict

import cv2
import numpy as np
import torch
from torch.autograd import Variable

# Repository-local pieces (BRN, print_network, normalize, is_image,
# pixelshuffle, reverse_pixelshuffle and the parsed options `opt`) are
# assumed to be imported from the project's own modules.

def main():
    if not os.path.isdir(opt.save_path):
        os.makedirs(opt.save_path)
    if not os.path.isdir(opt.save_path_r):
        os.makedirs(opt.save_path_r)
    # Build model
    print('Loading model ...\n')

    model = BRN(opt.inter_iter, opt.use_GPU)
    print_network(model)
    if opt.use_GPU:
        model = model.cuda()
    # the checkpoint was saved from a multi-GPU (nn.DataParallel) run, so
    # strip the 'module.' prefix from every parameter key before loading
    state_dict = torch.load(os.path.join(opt.logdir, 'net_latest.pth'))
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[7:]  # remove `module.`
        new_state_dict[name] = v
    model.load_state_dict(new_state_dict)

    # for a checkpoint saved from a single-GPU run, load it directly instead:
    #model.load_state_dict(torch.load(os.path.join(opt.logdir, 'net_latest.pth')))
    model.eval()
    # load data info
    print('Loading data info ...\n')

    # process data
    time_test = 0
    count = 0
    for img_name in os.listdir(opt.data_path):
        if is_image(img_name):
            img_path = os.path.join(opt.data_path, img_name)

            # image
            Img = cv2.imread(img_path)
            h, w, c = Img.shape

            b, g, r = cv2.split(Img)
            Img = cv2.merge([r, g, b])
            Img = pixelshuffle(Img, 2)  # rearrange into sub-sampled tiles; reversed after inference

            Img = normalize(np.float32(Img))
            Img = np.expand_dims(Img.transpose(2, 0, 1), 0)

            ISource = torch.Tensor(Img)

            INoisy = ISource

            if opt.use_GPU:
                ISource, INoisy = Variable(ISource.cuda()), Variable(
                    INoisy.cuda())
            else:
                ISource, INoisy = Variable(ISource), Variable(INoisy)

            with torch.no_grad():  # disables autograd bookkeeping, which saves memory
                if opt.use_GPU:
                    torch.cuda.synchronize()  # finish pending GPU work for accurate timing
                start_time = time.time()
                out, _, _, _ = model(INoisy)

                out = torch.clamp(out, 0., 1.)

                if opt.use_GPU:
                    torch.cuda.synchronize()
                end_time = time.time()
                dur_time = end_time - start_time
                print(img_name)
                print(dur_time)
                time_test += dur_time

            if opt.use_GPU:
                save_out = np.uint8(255 * out.data.cpu().numpy().squeeze())

            else:
                save_out = np.uint8(255 * out.data.numpy().squeeze())

            save_out = save_out.transpose(1, 2, 0)
            b, g, r = cv2.split(save_out)
            save_out = cv2.merge([r, g, b])

            save_path = opt.save_path

            save_out = reverse_pixelshuffle(save_out, 2)  # reassemble the tiles into the full-resolution image

            cv2.imwrite(os.path.join(save_path, img_name), save_out)

            count = count + 1

    print('Avg. time:', time_test / count)
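
pixelshuffle and reverse_pixelshuffle are repository helpers not shown in this listing. A minimal NumPy sketch of the assumed behaviour (splitting an H x W image into scale x scale sub-sampled tiles laid out side by side, then reassembling them), assuming H and W are divisible by scale:

import numpy as np

def pixelshuffle(img, scale):
    # rearrange an (H, W, C) image into a grid of scale*scale sub-sampled tiles
    h, w, c = img.shape
    sh, sw = h // scale, w // scale
    out = np.zeros_like(img)
    for i in range(scale):
        for j in range(scale):
            out[i*sh:(i+1)*sh, j*sw:(j+1)*sw, :] = img[i::scale, j::scale, :]
    return out

def reverse_pixelshuffle(img, scale):
    # exact inverse: interleave the tiles back into the original layout
    h, w, c = img.shape
    sh, sw = h // scale, w // scale
    out = np.zeros_like(img)
    for i in range(scale):
        for j in range(scale):
            out[i::scale, j::scale, :] = img[i*sh:(i+1)*sh, j*sw:(j+1)*sw, :]
    return out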