Example #1
def predict():
    visualize = request.args.get('visualize', 'none')
    resize = request.args.get('resize', 'original')
    score_threshold = float(
        request.args.get('score_threshold', str(config.score_threshold)))
    nms_iou_threshold = float(
        request.args.get('nms_iou_threshold', str(config.nms_iou_threshold)))
    img = utils.extract_as_jpeg(request)

    x = utils.img2tensor(img, resize=True)

    x_original = utils.img2tensor(img, resize=False)
    scale = (x_original[0].size()[1] / x[0].size()[1],
             x_original[0].size()[2] / x[0].size()[2])

    y = model(x)

    ret = dict()
    if len(y[0]['boxes']) > 0:
        ret = utils.post_process(y[0]['boxes'], y[0]['scores'],
                                 score_threshold, nms_iou_threshold)
        if resize == 'original':
            ret['boxes'] = utils.rescale_box(ret['boxes'], scale)
    else:
        ret['boxes'] = [[]]
        ret['scores'] = []

    if visualize == 'none':
        return jsonify(ret)
    img_to_show = x_original[0] if resize == 'original' else x[0]
    if visualize == 'bbox':
        fig = utils.show_detection(img_to_show,
                                   ret['boxes'],
                                   pred_score=ret['scores'])
        output = io.BytesIO()
        FigureCanvas(fig).print_png(output)
        return Response(output.getvalue(), mimetype='image/png')
    elif visualize == 'blur':
        return blur({'boxes': ret['boxes'], 'image': img})
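
Note: every snippet on this page leans on a project-specific `img2tensor` helper that the excerpts themselves never show. A minimal sketch of what such a helper could look like for this endpoint, assuming JPEG bytes as input and a hypothetical fixed target size (the real helper's signature and defaults may differ):

import io

import torchvision.transforms.functional as TF
from PIL import Image


def img2tensor(img_bytes, resize=False, size=(512, 512)):
    # Decode JPEG bytes into a list holding one C x H x W float tensor
    # in [0, 1]; `size` is an assumed default, not the original value.
    img = Image.open(io.BytesIO(img_bytes)).convert("RGB")
    if resize:
        img = img.resize(size)  # PIL expects (width, height)
    return [TF.to_tensor(img)]

The list wrapper matches how the example indexes `x[0]` and passes `x` straight into a torchvision-style detection model.
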
Example #2
    def __getitem__(self, idx):
        image_path = self.images[idx]
        image = np.load(image_path)
        # note: comparing image.shape[0] to the resize tuple is always
        # true; check the depth axis, which is what the zoom factors
        # below actually resample
        if self.resize and (image.shape[2] != self.resize[2]):
            image = zoom(image, (self.resize[0], self.resize[1],
                                 self.resize[2] / image.shape[2]))
        image = img2tensor(image)

        side = os.path.splitext(os.path.basename(image_path))[0].split('_')[1]
        target = 1 if side == 'r' else 0
        target = torch.tensor(target).long()
        sample = {'image': image.unsqueeze(0), 'target': target}
        return sample
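
The label here is derived purely from the file name: the token after the first underscore encodes the side, with 'r' mapped to class 1 and anything else to 0. A stand-alone illustration of that convention (the path below is made up):

import os

path = "/data/volumes/case012_r.npy"  # hypothetical file layout
side = os.path.splitext(os.path.basename(path))[0].split("_")[1]
print(side, 1 if side == "r" else 0)  # -> r 1
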
            print("Evaluate Warping Error on %s-%s: video %d / %d, %s" %
                  (opts.dataset, opts.phase, v + 1, len(video_list), filename))

            ### load flow
            filename = os.path.join(flow_dir, "%05d.flo" % (t - 1))
            flow = utils.read_flo(filename)

            ### load occlusion mask
            filename = os.path.join(occ_dir, "%05d.png" % (t - 1))
            occ_mask = utils.read_img(filename)
            noc_mask = 1 - occ_mask

            with torch.no_grad():

                ## convert to tensor
                img2 = utils.img2tensor(img2).to(device)
                flow = utils.img2tensor(flow).to(device)

                ## warp img2
                warp_img2 = flow_warping(img2, flow)

                ## convert to numpy array
                warp_img2 = utils.tensor2img(warp_img2)

            ## compute warping error
            diff = np.multiply(warp_img2 - img1, noc_mask)

            N = np.sum(noc_mask)
            if N == 0:
                N = diff.shape[0] * diff.shape[1] * diff.shape[2]
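
`flow_warping` backward-warps the second frame by the optical flow so it can be compared against the first frame inside the non-occluded mask. A common grid_sample-based sketch of such a function, assuming the flow is given in pixels with shape B x 2 x H x W (the project's own implementation may differ in padding and normalization details):

import torch
import torch.nn.functional as F


def flow_warping(img, flow):
    # img: B x C x H x W, flow: B x 2 x H x W (dx, dy) in pixels
    B, _, H, W = img.shape
    ys, xs = torch.meshgrid(torch.arange(H), torch.arange(W), indexing="ij")
    grid = torch.stack((xs, ys), dim=0).float().to(img.device)  # 2 x H x W
    new_grid = grid.unsqueeze(0) + flow
    # normalize sampling locations to [-1, 1] as grid_sample expects
    new_grid[:, 0] = 2.0 * new_grid[:, 0] / max(W - 1, 1) - 1.0
    new_grid[:, 1] = 2.0 * new_grid[:, 1] / max(H - 1, 1) - 1.0
    return F.grid_sample(img, new_grid.permute(0, 2, 3, 1),
                         align_corners=True)
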
Example #4
        dist = 0
        for t in range(1, len(frame_list)):

            ### load processed images
            filename = os.path.join(process_dir, "%05d.jpg" % (t))
            P = utils.read_img(filename)

            ### load output images
            filename = os.path.join(output_dir, "%05d.jpg" % (t))
            O = utils.read_img(filename)

            print("Evaluate LPIPS on %s-%s: video %d / %d, %s" %
                  (opts.dataset, opts.phase, v + 1, len(video_list), filename))

            ### convert to tensor
            P = utils.img2tensor(P)
            O = utils.img2tensor(O)

            ### scale to [-1, 1]
            P = P * 2.0 - 1
            O = O * 2.0 - 1

            dist += model.forward(P, O)[0]

        dist_all[v] = dist / (len(frame_list) - 1)

    print("\nAverage perceptual distance = %f\n" % (dist_all.mean()))

    dist_all = np.append(dist_all, dist_all.mean())
    print("Save %s" % metric_filename)
    np.savetxt(metric_filename, dist_all, fmt="%f")
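
Scaling P and O to [-1, 1] matches the input convention of LPIPS, so `model.forward(P, O)` is a perceptual distance. Roughly the same number can be reproduced with the pip-installable lpips package (the script above likely builds its model from the original PerceptualSimilarity code, so treat this as an approximation):

import lpips
import torch

loss_fn = lpips.LPIPS(net="alex")       # AlexNet-backed LPIPS
P = torch.rand(1, 3, 256, 256) * 2 - 1  # inputs expected in [-1, 1]
O = torch.rand(1, 3, 256, 256) * 2 - 1
print(loss_fn(P, O).item())
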
Example #5
            H_sc = int(
                math.ceil(float(H_orig) / opts.size_multiplier) *
                opts.size_multiplier)
            W_sc = int(
                math.ceil(float(W_orig) / opts.size_multiplier) *
                opts.size_multiplier)

            frame_i1 = cv2.resize(frame_i1, (W_sc, H_sc))
            frame_i2 = cv2.resize(frame_i2, (W_sc, H_sc))
            frame_o1 = cv2.resize(frame_o1, (W_sc, H_sc))
            frame_p2 = cv2.resize(frame_p2, (W_sc, H_sc))

            with torch.no_grad():

                ### convert to tensor
                frame_i1 = utils.img2tensor(frame_i1).to(device)
                frame_i2 = utils.img2tensor(frame_i2).to(device)
                frame_o1 = utils.img2tensor(frame_o1).to(device)
                frame_p2 = utils.img2tensor(frame_p2).to(device)

                ### model input
                inputs = torch.cat((frame_p2, frame_o1, frame_i2, frame_i1),
                                   dim=1)

                ### forward
                ts = time.time()

                output, lstm_state = model(inputs, lstm_state)
                frame_o2 = frame_p2 + output

                te = time.time()
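
The H_sc/W_sc computation rounds each dimension up to the nearest multiple of opts.size_multiplier, since networks with strided downsampling typically require input sizes divisible by a power of two. The same arithmetic as a small helper:

import math


def ceil_to_multiple(x, m):
    # round x up to the nearest multiple of m, as in H_sc/W_sc above
    return int(math.ceil(float(x) / m) * m)


# e.g. ceil_to_multiple(480, 64) == 512; ceil_to_multiple(512, 64) == 512
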
Example #6
def main(args):
    cfg = cfg_dict[args.cfg_name]
    writer = SummaryWriter(os.path.join("runs", args.cfg_name))
    train_loader = get_data_loader(cfg, cfg["train_dir"])

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = EDSR(cfg).to(device)
    criterion = torch.nn.L1Loss()
    optimizer = torch.optim.Adam(model.parameters(), lr=cfg["init_lr"],
                                 betas=(0.9, 0.999), eps=1e-8)

    global_batches = 0
    if args.train:
        for epoch in range(cfg["n_epoch"]):
            model.train()
            running_loss = 0.0
            for i, batch in enumerate(train_loader):
                lr, hr = batch[0].to(device), batch[1].to(device)
                optimizer.zero_grad()
                sr = model(lr)
                loss = model.loss(sr, hr)
                # loss = criterion(model(lr), hr)
                running_loss += loss.item()
                loss.backward()
                optimizer.step()
                global_batches += 1
                if global_batches % cfg["lr_decay_every"] == 0:
                    for param_group in optimizer.param_groups:
                        print(f"decay lr to {param_group['lr'] / 10}")
                        param_group["lr"] /= 10

            if epoch % args.log_every == 0:
                model.eval()
                with torch.no_grad():
                    batch_samples = {"lr": batch[0], "hr": batch[1], 
                                     "sr": sr.cpu()}
                    writer.add_scalar("training-loss", 
                                      running_loss / len(train_loader),
                                      global_step=global_batches)
                    writer.add_scalar("PSNR", compute_psnr(batch_samples), 
                                      global_step=global_batches)
                    samples = {k: v[:3] for k, v in batch_samples.items()}
                    fig = visualize_samples(samples, f"epoch-{epoch}")
                    writer.add_figure("sample-visualization", fig, 
                                      global_step=global_batches)

            if epoch % args.save_every == 0:
                state = {"net": model.state_dict(), 
                         "optim": optimizer.state_dict()}
                checkpoint_dir = args.checkpoint_dir
                if not os.path.exists(checkpoint_dir):
                    os.makedirs(checkpoint_dir)
                path = os.path.join(checkpoint_dir, args.cfg_name)
                torch.save(state, path)
    
    # eval
    if args.eval:
        assert args.model_path and args.lr_img_path
        print(f"evaluating {args.lr_img_path}")
        state = torch.load(args.model_path, map_location=device)
        model.load_state_dict(state["net"])
        optimizer.load_state_dict(state["optim"])

        with torch.no_grad():
            lr = img2tensor(args.lr_img_path)
            sr = model(lr.clone().to(device)).cpu()
            samples = {"lr": lr, "sr": sr}
            if args.hr_img_path:
                samples["hr"] = img2tensor(args.hr_img_path)
                print(f"PSNR: {compute_psnr(samples)}")
            directory = os.path.dirname(args.lr_img_path)
            name = f"eval-{args.cfg_name}-{args.lr_img_path.split('/')[-1]}"
            visualize_samples(samples, name, save=True, 
                              directory=directory, size=6)
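
compute_psnr is not shown in this excerpt. A minimal sketch consistent with how it is called above, assuming float images in [0, 1] (the original may clip, convert to uint8, or average per image):

import torch


def compute_psnr(samples, max_val=1.0):
    # PSNR in dB between the super-resolved and ground-truth images
    mse = torch.mean((samples["sr"] - samples["hr"]) ** 2)
    return 10.0 * torch.log10(max_val ** 2 / mse)
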
Example #7
            frame_i0 = utils.read_img(
                os.path.join(input_dir, "%05d.jpg" % (t - 3)))
            frame_i1 = utils.read_img(
                os.path.join(input_dir, "%05d.jpg" % (t - 2)))
            frame_i2 = utils.read_img(
                os.path.join(input_dir, "%05d.jpg" % (t - 1)))
            frame_i3 = utils.read_img(os.path.join(input_dir,
                                                   "%05d.jpg" % (t)))
            frame_i4 = utils.read_img(
                os.path.join(input_dir, "%05d.jpg" % (t + 1)))
            frame_i5 = utils.read_img(
                os.path.join(input_dir, "%05d.jpg" % (t + 2)))
            frame_i6 = utils.read_img(
                os.path.join(input_dir, "%05d.jpg" % (t + 3)))

            with torch.no_grad():
                frame_i0 = utils.img2tensor(frame_i0).cuda()
                frame_i1 = utils.img2tensor(frame_i1).cuda()
                frame_i2 = utils.img2tensor(frame_i2).cuda()
                frame_i3 = utils.img2tensor(frame_i3).cuda()
                frame_i4 = utils.img2tensor(frame_i4).cuda()
                frame_i5 = utils.img2tensor(frame_i5).cuda()
                frame_i6 = utils.img2tensor(frame_i6).cuda()

                frame_i0, f_h_pad, f_w_pad = align_to_64(frame_i0, 64)
                frame_i1, f_h_pad, f_w_pad = align_to_64(frame_i1, 64)
                frame_i2, f_h_pad, f_w_pad = align_to_64(frame_i2, 64)
                frame_i3, f_h_pad, f_w_pad = align_to_64(frame_i3, 64)
                frame_i4, f_h_pad, f_w_pad = align_to_64(frame_i4, 64)
                frame_i5, f_h_pad, f_w_pad = align_to_64(frame_i5, 64)
                frame_i6, f_h_pad, f_w_pad = align_to_64(frame_i6, 64)
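
align_to_64 pads each frame so its spatial size is divisible by 64 and returns the pad amounts for cropping the result back later. A plausible sketch matching the call sites above (the actual padding mode is an assumption):

import torch.nn.functional as F


def align_to_64(frame, divide):
    # frame: B x C x H x W; zero-pad right/bottom to multiples of `divide`
    _, _, h, w = frame.shape
    h_pad = (divide - h % divide) % divide
    w_pad = (divide - w % divide) % divide
    frame = F.pad(frame, (0, w_pad, 0, h_pad))
    return frame, h_pad, w_pad
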
Example #8
            frame_i4 = utils.read_img(
                os.path.join(input_dir, "%05d.jpg" % (t + 3)))
            frame_i5 = utils.read_img(
                os.path.join(input_dir, "%05d.jpg" % (t + 4)))

            H_orig = frame_i1.shape[0]
            W_orig = frame_i1.shape[1]

            H_sc = int(
                math.ceil(float(H_orig) / opts.size_multiplier) *
                opts.size_multiplier)
            W_sc = int(
                math.ceil(float(W_orig) / opts.size_multiplier) *
                opts.size_multiplier)

            with torch.no_grad():
                frame_i1 = utils.img2tensor(frame_i1).to(device)
                frame_i2 = utils.img2tensor(frame_i2).to(device)
                frame_i3 = utils.img2tensor(frame_i3).to(device)
                frame_i4 = utils.img2tensor(frame_i4).to(device)
                frame_i5 = utils.img2tensor(frame_i5).to(device)

                [b, c, h, w] = frame_i1.shape

                frame_i1_new = torch.zeros(b, c, H_sc, W_sc).cuda()
                frame_i2_new = torch.zeros(b, c, H_sc, W_sc).cuda()
                frame_i3_new = torch.zeros(b, c, H_sc, W_sc).cuda()
                frame_i4_new = torch.zeros(b, c, H_sc, W_sc).cuda()
                frame_i5_new = torch.zeros(b, c, H_sc, W_sc).cuda()

                frame_i1_new[:, :, :h, :w] = frame_i1
                frame_i2_new[:, :, :h, :w] = frame_i2
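
This variant zero-pads the frames into H_sc x W_sc buffers instead of resizing them; after the forward pass the output presumably has to be cropped back to the original h x w, along these lines:

import torch

h, w = 270, 480                       # example original size
padded = torch.zeros(1, 3, 320, 512)  # H_sc x W_sc buffer as above
restored = padded[:, :, :h, :w]       # crop back after inference
print(restored.shape)                 # torch.Size([1, 3, 270, 480])
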
Example #9
def generate_mod_LR_bic():
    # set parameters
    up_scale = 4
    mod_scale = 4
    # set data dir
    sourcedir = "/data/Set5/source/"
    savedir = "/data/Set5/"

    # load PCA matrix of enough kernel
    print("load PCA matrix")
    pca_matrix = torch.load(
        "../../pca_matrix.pth", map_location=lambda storage, loc: storage
    )
    print("PCA matrix shape: {}".format(pca_matrix.shape))

    degradation_setting = {
        "random_kernel": False,
        "code_length": 10,
        "ksize": 21,
        "pca_matrix": pca_matrix,
        "scale": up_scale,
        "cuda": True,
        "rate_iso", 1.0
    }

    # set random seed
    util.set_random_seed(0)

    saveHRpath = os.path.join(savedir, "HR", "x" + str(mod_scale))
    saveLRpath = os.path.join(savedir, "LR", "x" + str(up_scale))
    saveBicpath = os.path.join(savedir, "Bic", "x" + str(up_scale))
    saveLRblurpath = os.path.join(savedir, "LRblur", "x" + str(up_scale))

    if not os.path.isdir(sourcedir):
        print("Error: No source data found")
        exit(0)
    if not os.path.isdir(savedir):
        os.mkdir(savedir)

    if not os.path.isdir(os.path.join(savedir, "HR")):
        os.mkdir(os.path.join(savedir, "HR"))
    if not os.path.isdir(os.path.join(savedir, "LR")):
        os.mkdir(os.path.join(savedir, "LR"))
    if not os.path.isdir(os.path.join(savedir, "Bic")):
        os.mkdir(os.path.join(savedir, "Bic"))
    if not os.path.isdir(os.path.join(savedir, "LRblur")):
        os.mkdir(os.path.join(savedir, "LRblur"))

    if not os.path.isdir(saveHRpath):
        os.mkdir(saveHRpath)
    else:
        print("It will cover " + str(saveHRpath))

    if not os.path.isdir(saveLRpath):
        os.mkdir(saveLRpath)
    else:
        print("It will cover " + str(saveLRpath))

    if not os.path.isdir(saveBicpath):
        os.mkdir(saveBicpath)
    else:
        print("It will cover " + str(saveBicpath))

    if not os.path.isdir(saveLRblurpath):
        os.mkdir(saveLRblurpath)
    else:
        print("It will cover " + str(saveLRblurpath))

    filepaths = sorted([f for f in os.listdir(sourcedir) if f.endswith(".png")])
    print(filepaths)
    num_files = len(filepaths)

    # kernel_map_tensor = torch.zeros((num_files, 1, 10)) # each kernel map: 1*10

    # prepare data with augmentation
    
    for i in range(num_files):
        filename = filepaths[i]
        print("No.{} -- Processing {}".format(i, filename))
        # read image
        image = cv2.imread(os.path.join(sourcedir, filename))

        width = int(np.floor(image.shape[1] / mod_scale))
        height = int(np.floor(image.shape[0] / mod_scale))
        # modcrop
        if len(image.shape) == 3:
            image_HR = image[0 : mod_scale * height, 0 : mod_scale * width, :]
        else:
            image_HR = image[0 : mod_scale * height, 0 : mod_scale * width]
        # LR_blur, by random gaussian kernel
        img_HR = util.img2tensor(image_HR)
        C, H, W = img_HR.size()
        
        for sig in np.linspace(1.8, 3.2, 8):

            prepro = util.SRMDPreprocessing(sig=sig, **degradation_setting)

            LR_img, ker_map = prepro(img_HR.view(1, C, H, W))
            image_LR_blur = util.tensor2img(LR_img)
            cv2.imwrite(os.path.join(saveLRblurpath,
                                     "sig{}_{}".format(sig, filename)),
                        image_LR_blur)
            cv2.imwrite(os.path.join(saveHRpath,
                                     "sig{}_{}".format(sig, filename)),
                        image_HR)
        # LR
        image_LR = imresize(image_HR, 1 / up_scale, True)
        # bic
        image_Bic = imresize(image_LR, up_scale, True)

        # cv2.imwrite(os.path.join(saveHRpath, filename), image_HR)
        cv2.imwrite(os.path.join(saveLRpath, filename), image_LR)
        cv2.imwrite(os.path.join(saveBicpath, filename), image_Bic)

        # kernel_map_tensor[i] = ker_map
    # save dataset corresponding kernel maps
    # torch.save(kernel_map_tensor, './Set5_sig2.6_kermap.pth')
    print("Image Blurring & Down smaple Done: X" + str(up_scale))
Example #10
        H_sc = int(
            math.ceil(float(H_orig) / opts.size_multiplier) *
            opts.size_multiplier)
        W_sc = int(
            math.ceil(float(W_orig) / opts.size_multiplier) *
            opts.size_multiplier)

        frame_origin_1 = cv2.resize(frame_origin_1, (W_sc, H_sc))
        frame_origin_2 = cv2.resize(frame_origin_2, (W_sc, H_sc))
        frame_output_1 = cv2.resize(frame_output_1, (W_sc, H_sc))
        frame_input_2 = cv2.resize(frame_input_2, (W_sc, H_sc))

        with torch.no_grad():
            ### convert to tensor
            frame_origin_1 = utils.img2tensor(frame_origin_1).to(device)
            frame_origin_2 = utils.img2tensor(frame_origin_2).to(device)
            frame_output_1 = utils.img2tensor(frame_output_1).to(device)
            frame_input_2 = utils.img2tensor(frame_input_2).to(device)

            ### model input
            inputs = torch.cat((frame_input_2, frame_output_1, frame_origin_2,
                                frame_origin_1),
                               dim=1)

            ### forward
            ts = time.time()

            output, lstm_state = model(inputs, lstm_state)
            frame_o2 = frame_input_2 + output
Example #11
        adv.grad.data.zero_()
    return adv.detach()


DATA_DIR = 'aaac/lfwaaac'
ADV_DIR = 'aaac/lfwadv'
THRESHOLD = 0.2

if not os.path.exists(ADV_DIR):
    os.mkdir(ADV_DIR)

for filename in tqdm(os.listdir(DATA_DIR)):
    if filename[-4:] == '.jpg':
        gallery_img = Image.open(os.path.join(DATA_DIR, filename))
        probe_img = Image.open(os.path.join(DATA_DIR, filename))
        gallery_tensor = img2tensor(gallery_img, device)
        probe_tensor = img2tensor(probe_img, device)
        adv = attack(model,
                     probe_tensor,
                     gallery_tensor,
                     True,
                     eps=8.0 / 255,
                     attack_type='pgd',
                     iters=10)
        adv_arr = adv.cpu().numpy().squeeze().transpose(1, 2, 0) * 255
        adv_arr = adv_arr.astype(np.uint8)
        adv_img = Image.fromarray(adv_arr)
        adv_img.save(os.path.join(ADV_DIR, filename))

    # for j in tqdm(range(num_pairs)):
    #     line = lines[line_num].strip()
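
The attack helper itself begins above the visible part of this snippet. For orientation, a minimal untargeted PGD sketch against an embedding model under an L-inf budget (the real function also supports a targeted mode and other attack types; alpha is an assumed step size):

import torch


def pgd_attack(model, probe, gallery, eps=8.0 / 255, alpha=2.0 / 255,
               iters=10):
    # push the probe's embedding away from the gallery's while staying
    # within an L-inf ball of radius eps around the original probe
    with torch.no_grad():
        target = model(gallery)
    adv = probe.clone().detach()
    for _ in range(iters):
        adv.requires_grad_(True)
        loss = 1 - torch.cosine_similarity(model(adv), target).mean()
        loss.backward()
        with torch.no_grad():
            adv = adv + alpha * adv.grad.sign()
            adv = probe + torch.clamp(adv - probe, -eps, eps)
            adv = torch.clamp(adv, 0, 1)
        adv = adv.detach()
    return adv
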