def __init__(self, opt):  # root: list ; transform: torch transform
    self.baseroot_A = opt.baseroot_A
    self.baseroot_B = opt.baseroot_B
    self.imglist_A = utils.get_jpgs(opt.baseroot_A)
    self.imglist_B = utils.get_jpgs(opt.baseroot_B)
    self.transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))  # map [0, 1] to [-1, 1]
    ])
Example #2
def L1L2_Acuuracy(opt):
    # Lists that store the per-image results, plus running sums for the averages
    l1losslist = []
    l2losslist = []
    l1lossratio = 0
    l2lossratio = 0
    imglist = utils.get_jpgs(opt.basepath)

    # Compute the accuracy
    for i in range(len(imglist)):
        # Full imgpath
        imgpath = os.path.join(opt.basepath, imglist[i])
        refimgpath = os.path.join(opt.refpath, imglist[i].split('.')[0] + '.png')
        # Compute the L1 and L2 losses
        l1loss = L1Loss(refimgpath, imgpath)
        l2loss = L2Loss(refimgpath, imgpath)
        l1losslist.append(l1loss)
        l2losslist.append(l2loss)
        l1lossratio = l1lossratio + l1loss
        l2lossratio = l2lossratio + l2loss
        print('The %dth image: l1loss: %f, l2loss: %f' % (i, l1loss, l2loss))
    l1lossratio = l1lossratio / len(imglist)
    l2lossratio = l2lossratio / len(imglist)
    print('The overall results: l1lossratio: %f, l2lossratio: %f' % (l1lossratio, l2lossratio))

    return l1losslist, l2losslist, l1lossratio, l2lossratio
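A minimal driver for this function, assuming L1Loss/L2Loss and utils come from the surrounding repo; the flag names below simply mirror the attributes the function reads and the defaults are illustrative:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--basepath', type=str, default='./results', help='folder of generated images')
parser.add_argument('--refpath', type=str, default='./gt', help='folder of reference images')
opt = parser.parse_args()
l1list, l2list, l1avg, l2avg = L1L2_Acuuracy(opt)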
Example #3
def CCI_Acuuracy(opt):
    # Lists that store the per-image results, plus running sums for the averages
    CNIlist = []
    CCIlist = []
    CCI_determinelist = []
    CNIratio = 0
    CCIratio = 0
    CCI_determineratio = 0
    imglist = utils.get_jpgs(opt.basepath)

    # Compute the accuracy
    for i in range(len(imglist)):
        # Full imgpath
        imgpath = os.path.join(opt.basepath, imglist[i])
        # Compute the CNI and CCI indexes
        CNI = SingleImageCNI(imgpath)
        CCI, CCI_determine = SingleImageCCI(imgpath)
        CNIlist.append(CNI)
        CCIlist.append(CCI)
        CCI_determinelist.append(CCI_determine)
        CNIratio = CNIratio + CNI
        CCIratio = CCIratio + CCI
        CCI_determineratio = CCI_determineratio + CCI_determine
        print('The %dth image: CNI: %f, CCI: %f, CCI_determine: %d' %
              (i, CNI, CCI, CCI_determine))
    CNIratio = CNIratio / len(imglist)
    CCIratio = CCIratio / len(imglist)
    CCI_determineratio = CCI_determineratio / len(imglist)
    print(
        'The overall results: CNI: %f, CCI: %f, CCI_determine in [16, 20]: %f'
        % (CNIratio, CCIratio, CCI_determineratio))

    return CNIlist, CCIlist, CCI_determinelist, CNIratio, CCIratio, CCI_determineratio
Example #4
def Traditional_Acuuracy(opt):
    # Lists that store the per-image results, plus running sums for the averages
    nrmselist = []
    psnrlist = []
    ssimlist = []
    nrmseratio = 0
    psnrratio = 0
    ssimratio = 0
    imglist = utils.get_jpgs(opt.basepath)

    # Compute the accuracy
    for i in range(len(imglist)):
        # Full imgpath
        imgpath = os.path.join(opt.basepath, imglist[i])
        refimgpath = os.path.join(opt.refpath, imglist[i])
        # Compute the traditional indexes
        nrmse = NRMSE(refimgpath, imgpath)
        psnr = PSNR(refimgpath, imgpath)
        ssim = SSIM(refimgpath, imgpath)
        nrmselist.append(nrmse)
        psnrlist.append(psnr)
        ssimlist.append(ssim)
        nrmseratio = nrmseratio + nrmse
        psnrratio = psnrratio + psnr
        ssimratio = ssimratio + ssim
        print('The %dth image: nrmse: %f, psnr: %f, ssim: %f' %
              (i, nrmse, psnr, ssim))
    nrmseratio = nrmseratio / len(imglist)
    psnrratio = psnrratio / len(imglist)
    ssimratio = ssimratio / len(imglist)
    print('The overall results: NRMSE: %f, PSNR: %f, SSIM: %f' %
          (nrmseratio, psnrratio, ssimratio))

    return nrmselist, psnrlist, ssimlist, nrmseratio, psnrratio, ssimratio
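The NRMSE / PSNR / SSIM helpers are not shown on this page; a plausible sketch built on OpenCV and scikit-image (an assumption, not necessarily the repo's implementation):

import cv2
from skimage.metrics import normalized_root_mse, peak_signal_noise_ratio, structural_similarity

def NRMSE(refimgpath, imgpath):
    return normalized_root_mse(cv2.imread(refimgpath), cv2.imread(imgpath))

def PSNR(refimgpath, imgpath):
    return peak_signal_noise_ratio(cv2.imread(refimgpath), cv2.imread(imgpath), data_range=255)

def SSIM(refimgpath, imgpath):
    # channel_axis requires scikit-image >= 0.19
    return structural_similarity(cv2.imread(refimgpath), cv2.imread(imgpath), channel_axis=2)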
Example #5
def PrecisionRecall_Acuuracy(opt):
    # Lists that store the per-image results, plus running sums for the averages
    pscorelist = []
    rscorelist = []
    f1scorelist = []
    pscoreratio = 0
    rscoreratio = 0
    f1scoreratio = 0
    imglist = utils.get_jpgs(opt.basepath)

    # Compute the accuracy
    for i in range(len(imglist)):
        # Full imgpath
        imgpath = os.path.join(opt.basepath, imglist[i])
        refimgpath = os.path.join(opt.refpath, imglist[i])
        # Compute precision, recall, and F1
        pscore = PrecisionScore(refimgpath, imgpath)
        rscore = RecallScore(refimgpath, imgpath)
        f1score = F1Score(refimgpath, imgpath)
        pscorelist.append(pscore)
        rscorelist.append(rscore)
        f1scorelist.append(f1score)
        pscoreratio = pscoreratio + pscore
        rscoreratio = rscoreratio + rscore
        f1scoreratio = f1scoreratio + f1score
        print('The %dth image: pscore: %f, rscore: %f, f1score: %f' % (i, pscore, rscore, f1score))
    pscoreratio = pscoreratio / len(imglist)
    rscoreratio = rscoreratio / len(imglist)
    f1scoreratio = f1scoreratio / len(imglist)
    print('The overall results: pscore: %f, rscore: %f, f1score: %f' % (pscoreratio, rscoreratio, f1scoreratio))

    return pscorelist, rscorelist, f1scorelist, pscoreratio, rscoreratio, f1scoreratio
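PrecisionScore / RecallScore / F1Score are likewise not shown; a hedged sketch that binarizes both images and scores them with scikit-learn (an assumption about their behavior, and _binarize is a hypothetical helper):

import cv2
from sklearn.metrics import precision_score, recall_score, f1_score

def _binarize(path):
    # hypothetical helper: read as grayscale, threshold to a flat {0, 1} array
    gray = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    return (gray > 127).astype('uint8').ravel()

def PrecisionScore(refimgpath, imgpath):
    return precision_score(_binarize(refimgpath), _binarize(imgpath))

def RecallScore(refimgpath, imgpath):
    return recall_score(_binarize(refimgpath), _binarize(imgpath))

def F1Score(refimgpath, imgpath):
    return f1_score(_binarize(refimgpath), _binarize(imgpath))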
Example #6
def __init__(self, opt):
    # Note that:
    # 1. opt: all the options
    # 2. imglist: all the image names under "baseroot"
    self.opt = opt
    imglist = utils.get_jpgs(opt.baseroot_sal)
    self.imglist = imglist
Example #7
def __init__(self, opt):
    # Note that:
    # 1. opt: all the options
    # 2. imglist: all the image names under "baseroot"
    self.opt = opt
    imglist = utils.get_jpgs(opt.baseroot_sal)
    if opt.smaller_coeff > 1:
        imglist = self.create_sub_trainset(imglist, opt.smaller_coeff)
    self.imglist = imglist
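create_sub_trainset is not shown on this page; a minimal sketch of what it plausibly does (an assumption, not the repo's actual code):

def create_sub_trainset(self, imglist, smaller_coeff):
    # Hypothetical: keep every smaller_coeff-th image to shrink the training set
    return imglist[::smaller_coeff]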
Example #8
def convertToVideo(dirpath, segment, type):
    imgs = get_jpgs(dirpath)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    vw = cv2.VideoWriter("output_" + type + str(segment) + ".mov", fourcc, 60, (imgs[0].shape[1], imgs[0].shape[0]))
    print "VideoWriter is opened:", vw.isOpened()
    print("Writing video ...")
    i = 1
    for img in imgs:
        print "Writing image", i
        i+=1
        vw.write(img)
    
    vw.release()
Example #9
def convertToVideo(dirpath):
    imgs = get_jpgs(dirpath)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    vw = cv2.VideoWriter(
        "output.mov", fourcc, 30,
        (imgs[0].shape[1],
         imgs[0].shape[0]))  #(imgs[0].shape[1]+400, imgs[0].shape[0]+400))
    print "VideoWriter is opened:", vw.isOpened()
    print("Writing video ...")
    i = 1
    for img in imgs:
        print "Writing image", i
        i += 1
        vw.write(img)

    vw.release()
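A single call is enough once the frames are on disk (the directory name below is illustrative, not from the repo):

convertToVideo("frames/")  # hypothetical path; writes output.mov at 30 fps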
Example #10
def stitch(number):
    ## Put extracted images into DATA_DIR/<folder> before running this
    imgs = get_jpgs(config.DATA_DIR + "beachVolleyball" + str(number) + "/")
    cv2.ocl.setUseOpenCL(False) # A workaround for ORB feature detector error
    stitcher = TranslationStitcher(imgs)
    panorama_list = stitcher.generate_panorama(get_netmask)
    
    # Create the folder
    d = os.path.dirname(config.DATA_DIR + "processedImages" + str(number) + "/")
    if not os.path.exists(d):
        os.makedirs(d)
    else:
        filelist = os.listdir(d)
        for file in filelist:
            os.remove(d + "/" + file)
    
    write_jpgs(config.DATA_DIR + "processedImages" + str(number) + "/", jpgs=panorama_list)
    convertToVideo(config.DATA_DIR + "processedImages" + str(number) + "/", number, "akaze")
    cv2.destroyAllWindows()
Example #11
def __init__(self, opt):  # root: list ; transform: torch transform
    self.opt = opt
    # the root of both domains
    self.baseroot = os.path.join(opt.baseroot)
    # build image list
    self.imglist = utils.get_jpgs(self.baseroot)
Example #12
def color_tracking(frames, color):
    cv2.namedWindow('original', cv2.WINDOW_NORMAL)
    cv2.namedWindow('mask', cv2.WINDOW_NORMAL)
    for f in frames:
        mask = cv2.inRange(cv2.cvtColor(f, cv2.COLOR_BGR2HSV), color[0],
                           color[1])
        cnts = cv2.findContours(mask.copy(), cv2.RETR_TREE,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]  # contours slot in both OpenCV 3 and 4
        players = []
        if len(cnts) >= 1:
            cnts = sorted(cnts, key=cv2.contourArea, reverse=True)  # sorted() also works when findContours returns a tuple
            for cnt in cnts:
                if len(players) < 2:
                    rect = cv2.minAreaRect(cnt)
                    cv2.drawContours(f, [np.int0(cv2.boxPoints(rect))], -1,
                                     (255, 0, 0), 2)
                    players.append(cnt)
                else:
                    break
        cv2.imshow('original', f)
        cv2.imshow('mask', cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR))
        k = cv2.waitKey(100)
        if k == 27:
            break
    cv2.destroyAllWindows()


if __name__ == '__main__':
    frames = get_jpgs(config.INDVIDUAL_VIDEOS['7'])
    median_bg_sub(frames)
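color_tracking takes a (lower, upper) pair of HSV bounds as its color argument; a hedged example with illustrative thresholds for a red jersey:

import numpy as np

red = (np.array([0, 120, 70]), np.array([10, 255, 255]))  # hypothetical HSV range; tune per video
color_tracking(frames, red)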
Example #13
            help = 'load the pre-trained model with certain epoch, None for pre-training')
    parser.add_argument('--val_path',
                        type=str,
                        default='./ensemble',
                        help='saving path that is a folder')
    parser.add_argument('--task_name',
                        type=str,
                        default='track2',
                        help='task name for loading networks, saving, and log')
    # NTIRE2020_Validation_Clean    NTIRE2020_Validation_RealWorld
    opt = parser.parse_args()

    sample_folder = os.path.join(opt.val_path, opt.task_name)
    utils.check_path(sample_folder)

    imglist = utils.get_jpgs(opt.path1)
    for imgname in imglist:
        # Read
        data_path1 = os.path.join(opt.path1, imgname)
        data1 = hdf5.loadmat(data_path1)['cube']
        data_path2 = os.path.join(opt.path2, imgname)
        data2 = hdf5.loadmat(data_path2)['cube']
        data_path3 = os.path.join(opt.path3, imgname)
        data3 = hdf5.loadmat(data_path3)['cube']
        data_path4 = os.path.join(opt.path4, imgname)
        data4 = hdf5.loadmat(data_path4)['cube']
        data_path5 = os.path.join(opt.path5, imgname)
        data5 = hdf5.loadmat(data_path5)['cube']
        data_path6 = os.path.join(opt.path6, imgname)
        data6 = hdf5.loadmat(data_path6)['cube']
        data_path7 = os.path.join(opt.path7, imgname)
Example #14
def __init__(self, opt):
    self.opt = opt
    self.imglist = utils.get_jpgs(opt.baseroot)
    self.stringlist = utils.text_readlines(opt.stringlist)
    self.scalarlist = utils.text_readlines(opt.scalarlist)
Example #15
def __len__(self):
    return len(utils.get_jpgs(self.in_root))
Example #16
    parser.add_argument('--color_bias_level', type = float, default = 0.05, help = 'color_bias_level')
    parser.add_argument('--noise_aug', type = bool, default = True, help = 'noise_aug')
    parser.add_argument('--iso', type = int, default = 6400, help = 'noise_level, according to ISO value')
    parser.add_argument('--random_crop', type = bool, default = True, help = 'random_crop')
    parser.add_argument('--crop_size', type = int, default = 320, help = 'single patch size')
    parser.add_argument('--extra_process_train_data', type = bool, default = True, help = 'recover short exposure data')
    parser.add_argument('--cover_long_exposure', type = bool, default = False, help = 'set long exposure to 0')
    parser.add_argument('--short_expo_per_pattern', type = int, default = 2, help = 'the number of exposure pixel of 2*2 square')
    opt = parser.parse_args()

    # ----------------------------------------
    #                   Test
    # ----------------------------------------
    # Initialize
    generator = utils.create_generator(opt).cuda()
    namelist = utils.get_jpgs(opt.in_path_val)
    test_dataset = dataset.Qbayer2RGB_dataset(opt, 'val', namelist)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size = opt.test_batch_size, shuffle = True, num_workers = opt.num_workers, pin_memory = True)
    sample_folder = os.path.join(opt.val_path, opt.task_name)
    utils.check_path(sample_folder)

    # forward
    val_PSNR = 0
    val_SSIM = 0
    for i, (in_img, RGBout_img, path) in enumerate(test_loader):
        # To device
        # A is for input image, B is for target image
        in_img = in_img.cuda()
        RGBout_img = RGBout_img.cuda()
        #print(path)
Example #17
    bayer[0::2, 1::2, 1] = img[0::2, 1::2, 1]
    bayer[1::2, 0::2, 1] = img[1::2, 0::2, 1]
    bayer[1::2, 1::2, 2] = img[1::2, 1::2, 0]
    return bayer

if __name__ == "__main__":
    # ----------------------------------------
    #        Initialize the parameters
    # ----------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--src_path', type = str, default = '', help = 'input baseroot')
    parser.add_argument('--dst_path', type = str, default = '', help = 'target baseroot')
    opt = parser.parse_args()
    print(opt)

    src_imglist = utils.get_jpgs(opt.src_path)

    utils.check_path(opt.dst_path)

    for i in range(len(src_imglist)):
        img_name = src_imglist[i]
        src_name = os.path.join(opt.src_path, img_name)
        dst_name = os.path.join(opt.dst_path, img_name)
        img = cv2.imread(src_name)
        bayer = rgb2bayer(img)
        cv2.imwrite(dst_name, bayer)
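The top of rgb2bayer is cut off above. A self-contained sketch consistent with the visible indexing, assuming an RGGB layout for the missing red assignment:

import numpy as np

def rgb2bayer(img):
    # img comes from cv2.imread, so channels are BGR; the mosaic is stored RGB-ordered
    bayer = np.zeros_like(img)
    bayer[0::2, 0::2, 0] = img[0::2, 0::2, 2]  # R at even rows, even cols (assumed)
    bayer[0::2, 1::2, 1] = img[0::2, 1::2, 1]  # G at even rows, odd cols
    bayer[1::2, 0::2, 1] = img[1::2, 0::2, 1]  # G at odd rows, even cols
    bayer[1::2, 1::2, 2] = img[1::2, 1::2, 0]  # B at odd rows, odd cols
    return bayer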
Example #18
def __init__(self, opt):
    self.opt = opt
    self.imglist = utils.get_jpgs(opt.baseroot_test_blur)
Example #19
def __init__(self, opt):
    assert opt.mask_type in ALLMASKTYPES
    self.opt = opt
    self.imglist = utils.get_jpgs(opt.baseroot)
Example #20
def __init__(self, opt):
    self.opt = opt
    self.namelist = utils.get_jpgs(opt.baseroot)
Example #21
def Trainer_GAN(opt):
    # ----------------------------------------
    #              Initialization
    # ----------------------------------------

    # cudnn benchmark
    cudnn.benchmark = opt.cudnn_benchmark

    # configurations
    save_folder = os.path.join(opt.save_path, opt.task_name)
    sample_folder = os.path.join(opt.sample_path, opt.task_name)
    if not os.path.exists(save_folder):
        os.makedirs(save_folder)
    if not os.path.exists(sample_folder):
        os.makedirs(sample_folder)

    # Initialize networks
    generator = utils.create_generator(opt)
    discriminator = utils.create_discriminator(opt)
    perceptualnet = utils.create_perceptualnet()

    # To device
    if opt.multi_gpu:
        generator = nn.DataParallel(generator)
        generator = generator.cuda()
        discriminator = nn.DataParallel(discriminator)
        discriminator = discriminator.cuda()
        perceptualnet = nn.DataParallel(perceptualnet)
        perceptualnet = perceptualnet.cuda()
    else:
        generator = generator.cuda()
        discriminator = discriminator.cuda()
        perceptualnet = perceptualnet.cuda()

    # ----------------------------------------
    #             Network dataset
    # ----------------------------------------

    # Handle multiple GPUs
    gpu_num = torch.cuda.device_count()
    print("There are %d GPUs used" % gpu_num)
    opt.train_batch_size *= gpu_num
    #opt.val_batch_size *= gpu_num
    opt.num_workers *= gpu_num

    # Define the dataset
    train_imglist = utils.get_jpgs(os.path.join(opt.in_path_train))
    val_imglist = utils.get_jpgs(os.path.join(opt.in_path_val))
    train_dataset = dataset.Qbayer2RGB_dataset(opt, 'train', train_imglist)
    val_dataset = dataset.Qbayer2RGB_dataset(opt, 'val', val_imglist)
    print('The overall number of training images:', len(train_imglist))
    print('The overall number of validation images:', len(val_imglist))

    # Define the dataloader
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=opt.train_batch_size,
                                               shuffle=True,
                                               num_workers=opt.num_workers,
                                               pin_memory=True)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=opt.val_batch_size,
                                             shuffle=False,
                                             num_workers=opt.num_workers,
                                             pin_memory=True)

    # ----------------------------------------
    #       Network training parameters
    # ----------------------------------------

    # Loss functions
    criterion_L1 = torch.nn.L1Loss().cuda()

    class ColorLoss(nn.Module):
        def __init__(self):
            super(ColorLoss, self).__init__()
            self.L1loss = nn.L1Loss()

        def RGB2YUV(self, RGB):
            YUV = RGB.clone()
            YUV[:, 0, :, :] = 0.299 * RGB[:, 0, :, :] + 0.587 * RGB[:, 1, :, :] + 0.114 * RGB[:, 2, :, :]
            YUV[:, 1, :, :] = -0.14713 * RGB[:, 0, :, :] - 0.28886 * RGB[:, 1, :, :] + 0.436 * RGB[:, 2, :, :]
            YUV[:, 2, :, :] = 0.615 * RGB[:, 0, :, :] - 0.51499 * RGB[:, 1, :, :] - 0.10001 * RGB[:, 2, :, :]
            return YUV

        def forward(self, x, y):
            yuv_x = self.RGB2YUV(x)
            yuv_y = self.RGB2YUV(y)
            return self.L1loss(yuv_x, yuv_y)

    yuv_loss = ColorLoss()
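    # Note: the coefficients in RGB2YUV are the standard BT.601 RGB-to-YUV
    # matrix, so ColorLoss penalizes luma/chroma error rather than raw RGB error.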

    # Optimizers
    optimizer_G = torch.optim.Adam(generator.parameters(),
                                   lr=opt.lr_g,
                                   betas=(opt.b1, opt.b2),
                                   weight_decay=opt.weight_decay)
    optimizer_D = torch.optim.Adam(discriminator.parameters(),
                                   lr=opt.lr_d,
                                   betas=(opt.b1, opt.b2),
                                   weight_decay=opt.weight_decay)

    # Learning rate decrease
    def adjust_learning_rate(opt, epoch, iteration, optimizer, lr_gd):
        # Decay the initial LR by "lr_decrease_factor" every "lr_decrease_epoch"
        # epochs, or every "lr_decrease_iter" iterations, depending on the mode
        if opt.lr_decrease_mode == 'epoch':
            lr = lr_gd * (opt.lr_decrease_factor ** (epoch // opt.lr_decrease_epoch))
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
        if opt.lr_decrease_mode == 'iter':
            lr = lr_gd * (opt.lr_decrease_factor ** (iteration // opt.lr_decrease_iter))
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr

    # Save the generator at the configured epoch/iteration checkpoints
    def save_model(opt, epoch, iteration, len_dataset, generator):
        # Define the name of the trained model
        if opt.save_mode == 'epoch':
            model_name = '%s_gan_noise%.3f_epoch%d_bs%d.pth' % (
                opt.net_mode, opt.noise_level, epoch, opt.train_batch_size)
        if opt.save_mode == 'iter':
            model_name = '%s_gan_noise%.3f_iter%d_bs%d.pth' % (
                opt.net_mode, opt.noise_level, iteration, opt.train_batch_size)
        save_model_path = os.path.join(opt.save_path, opt.task_name, model_name)
        # Unwrap DataParallel so the saved state_dict has clean key names
        net_to_save = generator.module if opt.multi_gpu else generator
        # Save the model
        if opt.save_mode == 'epoch':
            if (epoch % opt.save_by_epoch == 0) and (iteration % len_dataset == 0):
                torch.save(net_to_save.state_dict(), save_model_path)
                print('The trained model is successfully saved at epoch %d' % epoch)
        if opt.save_mode == 'iter':
            if iteration % opt.save_by_iter == 0:
                torch.save(net_to_save.state_dict(), save_model_path)
                print('The trained model is successfully saved at iteration %d' % iteration)

    # ----------------------------------------
    #                 Training
    # ----------------------------------------

    # Count start time
    prev_time = time.time()

    # Tensorboard
    writer = SummaryWriter()

    # For loop training
    for epoch in range(opt.epochs):

        # Record learning rate
        for param_group in optimizer_G.param_groups:
            writer.add_scalar('data/lr', param_group['lr'], epoch)
            print('learning rate = ', param_group['lr'])

        if epoch == 0:
            iters_done = 0

        ### Training
        for i, (in_img, RGBout_img) in enumerate(train_loader):

            # To device
            # A is for input image, B is for target image
            in_img = in_img.cuda()
            RGBout_img = RGBout_img.cuda()

            ## Train Discriminator
            # Forward propagation
            out = generator(in_img)

            optimizer_D.zero_grad()
            # Fake samples
            fake_scalar_d = discriminator(in_img, out.detach())
            true_scalar_d = discriminator(in_img, RGBout_img)
            # Overall Loss and optimize
            loss_D = -torch.mean(true_scalar_d) + torch.mean(fake_scalar_d)
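            # WGAN-style critic objective: minimize E[D(fake)] - E[D(real)],
            # i.e. push real scores up and fake scores down.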
            loss_D.backward()
            #torch.nn.utils.clip_grad_norm(discriminator.parameters(), opt.grad_clip_norm)
            optimizer_D.step()

            ## Train Generator
            # Forward propagation
            out = generator(in_img)

            # GAN loss
            fake_scalar = discriminator(in_img, out)
            L_gan = -torch.mean(fake_scalar) * opt.lambda_gan

            # Perceptual loss features
            fake_B_fea = perceptualnet(utils.normalize_ImageNet_stats(out))
            true_B_fea = perceptualnet(
                utils.normalize_ImageNet_stats(RGBout_img))
            L_percep = opt.lambda_percep * criterion_L1(fake_B_fea, true_B_fea)

            # Pixel loss
            L_pixel = opt.lambda_pixel * criterion_L1(out, RGBout_img)

            # Color loss
            L_color = opt.lambda_color * yuv_loss(out, RGBout_img)

            # Sum up to total loss
            loss = L_pixel + L_percep + L_gan + L_color

            # Record losses
            writer.add_scalar('data/L_pixel', L_pixel.item(), iters_done)
            writer.add_scalar('data/L_percep', L_percep.item(), iters_done)
            writer.add_scalar('data/L_color', L_color.item(), iters_done)
            writer.add_scalar('data/L_gan', L_gan.item(), iters_done)
            writer.add_scalar('data/L_total', loss.item(), iters_done)
            writer.add_scalar('data/loss_D', loss_D.item(), iters_done)

            # Backpropagate gradients
            optimizer_G.zero_grad()
            loss.backward()
            #torch.nn.utils.clip_grad_norm(generator.parameters(), opt.grad_clip_norm)
            optimizer_G.step()

            # Determine approximate time left
            iters_done = epoch * len(train_loader) + i + 1
            iters_left = opt.epochs * len(train_loader) - iters_done
            time_left = datetime.timedelta(seconds=iters_left *
                                           (time.time() - prev_time))
            prev_time = time.time()

            # Print log
            print(
                "\r[Epoch %d/%d] [Batch %d/%d] [Total Loss: %.4f] [L_pixel: %.4f]"
                % ((epoch + 1), opt.epochs, i, len(train_loader), loss.item(),
                   L_pixel.item()))
            print(
                "\r[L_percep: %.4f] [L_color: %.4f] [L_gan: %.4f] [loss_D: %.4f] Time_left: %s"
                % (L_percep.item(), L_color.item(), L_gan.item(),
                   loss_D.item(), time_left))

            # Save model at certain epochs or iterations
            save_model(opt, (epoch + 1), iters_done, len(train_loader),
                       generator)

            # Learning rate decrease at certain epochs
            adjust_learning_rate(opt, (epoch + 1), iters_done, optimizer_G,
                                 opt.lr_g)
            adjust_learning_rate(opt, (epoch + 1), iters_done, optimizer_D,
                                 opt.lr_d)

        ### Sample data every epoch
        if (epoch + 1) % 1 == 0:
            img_list = [out, RGBout_img]
            name_list = ['pred', 'gt']
            utils.save_sample_png(sample_folder=sample_folder,
                                  sample_name='train_epoch%d' % (epoch + 1),
                                  img_list=img_list,
                                  name_list=name_list,
                                  pixel_max_cnt=255)

        ### Validation
        val_PSNR = 0
        num_of_val_image = 0

        for j, (in_img, RGBout_img) in enumerate(val_loader):

            # To device
            # A is for input image, B is for target image
            in_img = in_img.cuda()
            RGBout_img = RGBout_img.cuda()

            # Forward propagation
            with torch.no_grad():
                out = generator(in_img)

            # Accumulate num of image and val_PSNR
            num_of_val_image += in_img.shape[0]
            val_PSNR += utils.psnr(out, RGBout_img, 1) * in_img.shape[0]
        val_PSNR = val_PSNR / num_of_val_image

        ### Sample data every epoch
        if (epoch + 1) % 1 == 0:
            img_list = [out, RGBout_img]
            name_list = ['pred', 'gt']
            utils.save_sample_png(sample_folder=sample_folder,
                                  sample_name='val_epoch%d' % (epoch + 1),
                                  img_list=img_list,
                                  name_list=name_list,
                                  pixel_max_cnt=255)

        # Record average PSNR
        writer.add_scalar('data/val_PSNR', val_PSNR, epoch)
        print('PSNR at epoch %d: %.4f' % ((epoch + 1), val_PSNR))

    writer.close()
Example #22
import cv2
import utils

imglist = utils.get_files('./samples/dcgan')
namelist = utils.get_jpgs('./samples/dcgan')
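# Note: cv2.imwrite expects BGR, so converting each image to RGB before writing
# effectively swaps the red and blue channels on disk (presumably a fix for
# samples that were saved with the wrong channel order).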

for i in range(0, len(imglist)):
    img = cv2.imread(imglist[i])
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    cv2.imwrite(namelist[i], img)
Example #23
        self.transition = transition_model
        self.resampling_handler = resampling_handler
        self.predict = prediction_model

    def process(self, img):
        # Displacement
        self.transition(self.particles, self.img_boundary)
        # Prediction
        # self.predict(self.particles, img, np.mean(cv2.cvtColor(self.ref_img, cv2.COLOR_BGR2HSV)[..., 0]))
        self.predict(self.particles, img, hsv_histogram(self.ref_img))
        for p in self.particles:
            # draw_str(img, (p.x, p.y), "{0:.2f}".format(p.w))
            p.draw(img)

        # Resampling
        sum_w = sum([p.w for p in self.particles])
        ps = self.resampling_handler(self.particles, [p.w/sum_w for p in self.particles], self.img_boundary)
        self.particles = [PlayerParticle(*p) for p in ps]


if __name__ == '__main__':
    imgs = get_jpgs(config.INDVIDUAL_VIDEOS['1'])  # immediately overridden by the next line
    imgs = get_jpgs(config.INPUT_IMGS)
    pf = ParticleFilter(PlayerParticle.generate, img_boundary=(imgs[0].shape[1], imgs[0].shape[0]))
    for img in imgs:
        pf.process(img)
        cv2.imshow('particle filter', img)
        k = cv2.waitKey(1)
        if k == 27:
            break
Example #24
         imgs[0].shape[0]))  #(imgs[0].shape[1]+400, imgs[0].shape[0]+400))
    print "VideoWriter is opened:", vw.isOpened()
    print("Writing video ...")
    i = 1
    for img in imgs:
        print "Writing image", i
        i += 1
        vw.write(img)

    vw.release()


if __name__ == '__main__':
    number = 3  # Change this number to perform the stitch on different segments
    ## Put extracted images into DATA_DIR/<folder> before running this
    imgs = get_jpgs(config.DATA_DIR + "beachVolleyball" + str(number) + "/")
    cv2.ocl.setUseOpenCL(False)  # A workaround for ORB feature detector error
    stitcher = TranslationStitcher(imgs[::5])
    panorama_list = stitcher.generate_panorama(get_netmask)

    # Create the folder
    d = os.path.dirname(config.DATA_DIR + "processedImages" + str(number) +
                        "/")
    if not os.path.exists(d):
        os.makedirs(d)
    else:
        filelist = os.listdir(d)
        for file in filelist:
            os.remove(d + "/" + file)

    write_jpgs(config.DATA_DIR + "processedImages" + str(number) + "/",