def save_picture_unlabel(pic, new_id, mask=True, generate=True,
                         save_path=r'E:\Radiomics\huaxi_jiang_yinjie\segmentation\AP_out'):
    """Save an image (ground-truth mask, predicted mask, or raw input) as PNG.

    :param pic: 0/1 mask array when ``mask`` is True, otherwise a tensor
        convertible by ``ToPILImage`` — assumed single-channel; TODO confirm
    :param new_id: identifier used as the file-name prefix
    :param mask: True when ``pic`` is a segmentation mask
    :param generate: with ``mask``: True for the network prediction,
        False for the ground truth
    :param save_path: output directory; defaults to the original hard-coded
        AP path so existing callers are unaffected
    """
    if mask and not generate:  # ground-truth mask: scale 0/1 -> 0/255 grayscale
        im = Image.fromarray(np.uint8(pic * 255), mode='L')
        im.save(os.path.join(save_path, str(new_id) + 'ground_truth.png'))
    elif mask and generate:  # segmentation-network prediction
        im = Image.fromarray(pic * 255, mode='L')
        im.save(os.path.join(save_path, str(new_id) + 'generater.png'))
    else:  # raw input image: tensor -> PIL
        im = ToPILImage()(pic)
        im.save(os.path.join(save_path, str(new_id) + 'img.png'))
Beispiel #2
0
def test(model, data_loader):
    """Run the deblurring model over ``data_loader``.

    In ``args.mode == "test"`` each prediction is clamped to [0, 1] and
    saved under data/sharp_res/test/our_sharp/.  The PSNR accumulation is
    intentionally left disabled, matching the original code.

    Note: the original called ``requires_grad_()`` on every input tensor
    even though everything runs under ``torch.no_grad()``; gradients are
    never used here, so those calls were removed.
    """
    print("Testing...")
    avg_psnr = 0  # kept for parity with the disabled PSNR accumulation below
    with torch.no_grad():
        for i, batch in enumerate(data_loader):
            # three input/target pairs per batch, all moved to the device
            input, target, input2, target2, input3, target3 = (
                t.to(device) for t in batch[:6])

            prediction, _, _ = model(input, input2, input3)

            if args.mode == "test":
                print("\n\n Resolve deblurred image...")
                prediction = prediction.clamp(0, 1)
                hr_image = ToPILImage()(prediction[0].data.cpu())
                hr_image.save(
                    "data/sharp_res/test/our_sharp/test_{}.png".format(i))

            # psnr = compute_psnr(prediction, target)
            # avg_psnr += psnr
Beispiel #3
0
def save_result(info, pred, label, mask):
    """Colorize prediction and label maps and save them as PNGs.

    Pixels outside ``mask`` are painted as class 7; files are written under
    cfg.VAL.visualized_pred / cfg.VAL.visualized_label using the names in
    ``info`` with the .jpg suffix swapped for .png.
    """
    IGNORE_CLASS = 7  # class id used for out-of-mask pixels

    pred_ids = pred.argmax(dim=1, keepdim=True)
    label_ids = label.argmax(dim=1, keepdim=True)

    pred_ids[~mask.unsqueeze(1).expand_as(pred_ids).bool()] = IGNORE_CLASS
    label_ids[~mask.unsqueeze(1).expand_as(label_ids).bool()] = IGNORE_CLASS

    pred_ids = pred_ids.cpu()
    label_ids = label_ids.cpu()

    for idx in range(pred_ids.shape[0]):
        png_name = info[idx].replace('.jpg', '.png')

        pred_png = ToPILImage()(colorEncode(pred_ids[idx]).float() / 255.)
        label_png = ToPILImage()(colorEncode(label_ids[idx]).float() / 255.)

        pred_png.save(os.path.join(cfg.VAL.visualized_pred, png_name))
        label_png.save(os.path.join(cfg.VAL.visualized_label, png_name))
Beispiel #4
0
    def denoiseList(self):
        """Denoise one distorted Waterloo image with the level-1..22 models.

        Saves the center-cropped source and distorted images, then each
        model's clamped output, under outPic/.  When ``self.load`` is 1 or 2
        the same checkpoint is used for every level, so it is now loaded
        once instead of 22 times; the per-level checkpoints (load == 0) are
        still loaded inside the loop.
        """
        img = Image.open(
            'Database/waterloo/pristine_images/00001.bmp').convert('RGB')
        img = CenterCrop((self.size, self.size))(img)
        img.save('outPic/src.bmp', 'bmp', quality=100)
        img = Image.open('Database/waterloo/distorted_images/%s/00001_%d.bmp' %
                         (self.typeDir, 2)).convert('RGB')
        img = CenterCrop((self.size, self.size))(img)
        img.save('outPic/sorted.bmp', 'bmp', quality=100)

        # The input tensor never changes across iterations: build it once.
        # ('inp' instead of the original 'input', which shadowed the builtin.)
        inp = Variable(ToTensor()(img)).view(1, -1, img.size[1], img.size[0])
        inp = inp.cuda()

        shared_model = None
        if self.load == 2:
            shared_model = torch.load('%s/%sF_ALL.pth' % (self.dir, self.typeDir))
        elif self.load == 1:
            shared_model = torch.load('%s/%sF.pth' % (self.dir, self.typeDir))

        for i in xrange(1, 23):
            if shared_model is not None:
                model = shared_model
            else:
                model = torch.load('%s/%s_%d.pth' %
                                   (self.dir, self.typeDir, i))
            model = model.cuda()

            out_img = model(inp)
            out_img = out_img.cpu()
            out_img = out_img.data[0]

            out_img.clamp_(0.0, 1.0)
            out_img = ToPILImage()(out_img)
            out_img.save('outPic/%d.bmp' % i, 'bmp', quality=100)
Beispiel #5
0
def main(cfg):
    """Super-resolve a video's frames with SOFVSR and save the RGB outputs.

    :param cfg: namespace with ``video_name``, ``upscale_factor`` and
        ``gpu_mode`` attributes
    """
    video_name = cfg.video_name
    upscale_factor = cfg.upscale_factor
    use_gpu = cfg.gpu_mode

    test_set = TestsetLoader('data/' + video_name, upscale_factor)
    test_loader = DataLoader(test_set, num_workers=1, batch_size=1, shuffle=False)
    net = SOFVSR(upscale_factor=upscale_factor)
    ckpt = torch.load('./log/SOFVSR_x' + str(upscale_factor) + '.pth')
    net.load_state_dict(ckpt)
    if use_gpu:
        net.cuda()

    # Create the output directory once, up front.  makedirs also creates the
    # 'results' parent, which the original per-iteration os.mkdir would have
    # crashed on when it was missing.
    out_dir = 'results/' + video_name
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    for idx_iter, (LR_y_cube, SR_cb, SR_cr) in enumerate(test_loader):
        LR_y_cube = Variable(LR_y_cube)
        if use_gpu:
            LR_y_cube = LR_y_cube.cuda()
        SR_y = net(LR_y_cube)

        # add a channel axis so the SR luma stacks with the upsampled chroma
        SR_y = np.array(SR_y.data)
        SR_y = SR_y[np.newaxis, :, :]

        SR_ycbcr = np.concatenate((SR_y, SR_cb, SR_cr), axis=0).transpose(1, 2, 0)
        SR_rgb = ycbcr2rgb(SR_ycbcr) * 255.0
        SR_rgb = np.clip(SR_rgb, 0, 255)
        SR_rgb = ToPILImage()(SR_rgb.astype(np.uint8))

        # frames are numbered from 02 to match the original naming scheme
        SR_rgb.save(out_dir + '/sr_' + str(idx_iter + 2).rjust(2, '0') + '.png')
Beispiel #6
0
def tensor_to_image_to_byte_array(tensor):
    """Encode a tensor image as PNG bytes wrapped in an ``np.void`` scalar."""
    pil_img = ToPILImage(mode=None)(tensor.detach().cpu())
    buffer = io.BytesIO()
    pil_img.save(buffer, format="PNG")
    return np.void(buffer.getvalue())
Beispiel #7
0
def defend_jpeg(input_tensor, image_mode, quality):
    """Round-trip a tensor image through in-memory JPEG compression.

    Lossy re-encoding at the given quality (the level specified in the
    paper) acts as an input-transformation defense; returns a tensor.
    """
    buffer = BytesIO()
    pil_image = ToPILImage(mode=image_mode)(input_tensor)
    pil_image.save(buffer, format='jpeg', quality=quality)
    return ToTensor()(PIL.Image.open(buffer))
Beispiel #8
0
def Hess_all_BigGAN_optim(param):
    """Optimize a BigGAN latent (in the joint Hessian eigenbasis) toward
    ``target_tsr`` with Adam, using BO-supplied hyperparameters.

    :param param: 1x4 array — log10 lr, log10 weight decay,
        log10(1 - beta1), log10(1 - beta2)
    :return: final image dissimilarity, or 1E6 when the loss went NaN
    """
    lr = 10 ** param[0, 0]
    wd = 10 ** param[0, 1]
    beta1 = 1 - 10 ** param[0, 2]  # param[2] = log10(1 - beta1)
    beta2 = 1 - 10 ** param[0, 3]  # param[3] = log10(1 - beta2)
    noise_init = torch.from_numpy(truncated_noise_sample(1, 128)).cuda()
    class_init = 0.06 * torch.randn(1, 128).cuda()
    latent_coef = (torch.cat((noise_init, class_init), dim=1) @ evc_all).detach().clone().requires_grad_(True)
    optim = Adam([latent_coef], lr=lr, weight_decay=wd, betas=(beta1, beta2))
    scores_all = []
    for step in range(300):
        optim.zero_grad()
        latent_code = latent_coef @ evc_all.T
        noise_vec = latent_code[:, :128]
        class_vec = latent_code[:, 128:]
        fitimg = BGAN.generator(latent_code, 0.7)
        fitimg = torch.clamp((1.0 + fitimg) / 2.0, 0, 1)
        dsim = alpha * ImDist(fitimg, target_tsr) + L1loss(fitimg, target_tsr)
        dsim.backward()
        optim.step()
        scores_all.append(dsim.item())
        if (step + 1) % 10 == 0:
            print("step%d loss %.2f norm: cls: %.2f nois: %.1f" % (step, dsim.item(), class_vec.norm(), noise_vec.norm()))

    # Draw ONE random run id so the comparison montage and its trajectory
    # plot share a suffix; the original drew two independent ids, making
    # the saved pair impossible to match up afterwards.
    RND = np.random.randint(1000000)
    imcmp = ToPILImage()(make_grid(torch.cat((fitimg, target_tsr)).cpu()))
    # imcmp.show()
    imcmp.save(join(savedir, "Hall%06d_%.3f.jpg" % (RND, dsim.item())))

    plt.figure()
    plt.plot(scores_all)
    plt.title("lr %.E wd %.E beta1 %.3f beta2 %.3f" % (lr, wd, beta1, beta2))
    plt.savefig(join(savedir, "traj_Hall%06d_%.3f.jpg" % (RND, dsim.item())))
    plt.close()  # don't leak a figure per BO evaluation
    return dsim.item() if not torch.isnan(dsim) else 1E6
Beispiel #9
0
def BigGAN_evol_exp(scorer, optimizer, G, steps=100, RND=None, label="", init_code=None, batchsize=20):
    """Evolve BigGAN latent codes with ``optimizer`` to maximize the scorer,
    then save the final montage, scores npz, and trajectory figure.

    :param init_code: optional (1, 256) starting latent.  When None it
        defaults to ``fixnoise`` + a zero class vector — the original
        unconditionally overwrote this argument, silently ignoring any
        caller-supplied value; passing None preserves that behavior.
    """
    if init_code is None:
        init_code = np.concatenate((fixnoise, np.zeros((1, 128))), axis=1)
    # population of 25 perturbed codes around the starting point
    new_codes = init_code + np.random.randn(25, 256) * 0.06
    scores_all = []
    generations = []
    for i in tqdm.trange(steps, desc="CMA steps"):
        imgs = G.visualize_batch_np(new_codes, B=batchsize)
        latent_code = torch.from_numpy(np.array(new_codes)).float()
        scores = scorer.score_tsr(imgs)
        print("step %d dsim %.3f (%.3f) (norm %.2f noise norm %.2f)" % (
            i, scores.mean(), scores.std(), latent_code[:, 128:].norm(dim=1).mean(),
            latent_code[:, :128].norm(dim=1).mean()))
        new_codes = optimizer.step_simple(scores, new_codes, )
        scores_all.extend(list(scores))
        generations.extend([i] * len(scores))

    scores_all = np.array(scores_all)
    generations = np.array(generations)
    mtg = ToPILImage()(make_grid(imgs, nrow=7))
    mtg.save(join(savedir, "lastgen%s_%05d_score%.1f.jpg" % (methodlab, RND, scores.mean())))
    np.savez(join(savedir, "scores%s_%05d.npz" % (methodlab, RND)), generations=generations, scores_all=scores_all,
             codes_fin=latent_code.cpu().numpy())
    visualize_trajectory(scores_all, generations, title_str=methodlab).savefig(
        join(savedir, "traj%s_%05d_score%.1f.jpg" % (methodlab, RND, scores.mean())))
Beispiel #10
0
def save_image(tensor, num, dir):
    """Save a 1xCxHxW tensor as ``<dir>/<num>.png``, creating ``dir`` first."""
    img = tensor.cpu().clone()   # clone so the caller's tensor is untouched
    img = img.squeeze(0)         # drop the fake batch dimension
    pil_img = ToPILImage()(img)
    if not osp.exists(dir):
        os.makedirs(dir)
    pil_img.save('{}/{}.png'.format(dir, num))
Beispiel #11
0
def test(args, model):
    """Segment ``args.image``, colorize the prediction, save it to ``args.label``.

    When ``args.resized_image`` is set, the 256x256-resized input is saved too.
    """
    model.eval()

    source = Image.open(args.image).resize((256, 256))

    # ImageNet-style normalization before inference
    preprocess = Compose([
        ToTensor(),
        Normalize([.485, .456, .406], [.229, .224, .225]),
    ])

    batch = preprocess(source).unsqueeze(0)
    if args.cuda:
        batch = batch.cuda()

    output = model(batch)
    colored = Colorize()(output[0].data.max(0)[1])
    colored = ToPILImage()(colored)

    if args.resized_image:
        source.save(args.resized_image)
    colored.save(args.label)
Beispiel #12
0
def Hess_sep_BigGAN_optim(param):
    """Optimize BigGAN noise and class latents with two separate Adam
    optimizers (each in its own Hessian eigenbasis) toward ``target_tsr``.

    :param param: 1x4 array — log10 lr / log10 wd for the noise optimizer,
        then log10 lr / log10 wd for the class optimizer
    :return: final image dissimilarity, or 1E6 when the loss went NaN
    """
    lr1 = 10 ** param[0, 0]
    wd1 = 10 ** param[0, 1]
    lr2 = 10 ** param[0, 2]
    wd2 = 10 ** param[0, 3]
    noise_init = torch.from_numpy(truncated_noise_sample(1, 128)).cuda()
    class_init = 0.06 * torch.randn(1, 128).cuda()
    noise_coef = (noise_init @ evc_nois).detach().clone().requires_grad_(True)
    class_coef = (class_init @ evc_clas).detach().clone().requires_grad_(True)
    optim1 = Adam([noise_coef], lr=lr1, weight_decay=wd1, betas=(0.9, 0.999))
    optim2 = Adam([class_coef], lr=lr2, weight_decay=wd2, betas=(0.9, 0.999))
    for step in range(300):
        optim1.zero_grad()
        optim2.zero_grad()
        class_vec = class_coef @ evc_clas.T
        noise_vec = noise_coef @ evc_nois.T
        fitimg = BGAN.generator(torch.cat((noise_vec, class_vec), dim=1), 0.7)
        fitimg = torch.clamp((1.0 + fitimg) / 2.0, 0, 1)
        dsim = alpha * ImDist(fitimg, target_tsr) + L1loss(fitimg, target_tsr)
        dsim.backward()
        optim1.step()
        optim2.step()
        if (step + 1) % 10 == 0:
            print("step%d loss %.2f norm: cls: %.2f nois: %.1f" % (step, dsim.item(), class_vec.norm(), noise_vec.norm()))

    imcmp = ToPILImage()(make_grid(torch.cat((fitimg, target_tsr)).cpu()))
    # imcmp.show()  # disabled: pops a GUI viewer during automated runs and
    # fails on headless machines; the sibling Hess_all_BigGAN_optim keeps it
    # commented out too
    imcmp.save(join(savedir, "Hsep%06d_%.3f.jpg" % (np.random.randint(1000000), dsim.item())))
    return dsim.item() if not torch.isnan(dsim) else 1E6
def Apply_SRGAN(segmented_obj_names_ls, HOME_PATH_str):
    """Apply SRGAN super-resolution to each segmented image.

    :param segmented_obj_names_ls: image file names (under the pipeline's
        Images/ directory) to upscale, one at a time
    :param HOME_PATH_str: path prefix containing the
        Object-orientedDeblurringPipeline checkout
    """
    for segmented_image_name in segmented_obj_names_ls:
        print("This pipeline will apply SRGAN on single image.\n")
        # NOTE(review): a fresh ArgumentParser is built and sys.argv parsed on
        # every iteration; unrecognized CLI args of the host program would make
        # parse_args() exit — confirm callers run with a clean argv.
        parser = argparse.ArgumentParser(description='Test Single Image')
        parser.add_argument('--upscale_factor',
                            default=4,
                            type=int,
                            help='super resolution upscale factor')
        parser.add_argument('--test_mode',
                            default='CPU',
                            type=str,
                            choices=['GPU', 'CPU'],
                            help='using GPU or CPU')
        parser.add_argument('--image_name',
                            default=segmented_image_name,
                            type=str,
                            help='test low resolution image name')
        parser.add_argument('--model_name',
                            default='netG_epoch_4_100.pth',
                            type=str,
                            help='generator model epoch name')
        parser.add_argument('--dataset',
                            default='VOC2012',
                            type=str,
                            help='dataset name')
        opt = parser.parse_args()
        print('**Default arguments are: {0}**\n\n'.format(opt))

        UPSCALE_FACTOR = opt.upscale_factor
        TEST_MODE = True if opt.test_mode == 'GPU' else False
        IMAGE_NAME = opt.image_name
        MODEL_NAME = opt.model_name
        dataset = opt.dataset
        SRGAN_PATH = HOME_PATH_str + 'Object-orientedDeblurringPipeline/srgan_master/'
        Pipeline_PATH = HOME_PATH_str + 'Object-orientedDeblurringPipeline/'

        # load the generator on GPU or CPU depending on the test mode
        model = Generator(UPSCALE_FACTOR).eval()
        if TEST_MODE:
            model.cuda()
            model.load_state_dict(
                torch.load(SRGAN_PATH + 'epochs/' + MODEL_NAME))
        else:
            model.load_state_dict(
                torch.load(SRGAN_PATH + 'epochs/' + MODEL_NAME,
                           map_location=lambda storage, loc: storage))

        # Apply SRGAN (volatile=True is the old pre-0.4 no-grad idiom)
        image = Image.open(Pipeline_PATH + 'Images/' + IMAGE_NAME)
        image = Variable(ToTensor()(image), volatile=True).unsqueeze(0)
        if TEST_MODE:
            image = image.cuda()

        # NOTE(review): time.clock() was removed in Python 3.8 — this code
        # presumably targets an older interpreter; verify before upgrading.
        start = time.clock()
        out = model(image)
        elapsed = (time.clock() - start)
        print('cost' + str(elapsed) + 's')
        out_img = ToPILImage()(out[0].data.cpu())
        out_img.save(Pipeline_PATH + 'Images/' + '_out_srf_' +
                     str(UPSCALE_FACTOR) + '_' + IMAGE_NAME)
        print(IMAGE_NAME + 'DONE. \n **')
Beispiel #14
0
    def denoiseList(self):
        """Measure the model's restoration quality across 10 distortion levels.

        For each level: crop and save the distorted image, run the model,
        record src/dis, src/res and dis/res criterion losses (plus their sum)
        into ``stock_data``, log them to the experiment tracker, and save the
        clamped restored image.  Finally hands the collected series to
        ``self.draw``.
        """
        src = Image.open(
            'Database/waterloo/pristine_images/00001.bmp').convert('RGB')
        src = CenterCrop((self.size, self.size))(src)
        src.save('level_distortion/%s/%s/src.bmp' % (self.dir, self.typeDir),
                 'bmp',
                 quality=100)
        src = Variable(ToTensor()(src)).view(1, -1, src.size[1], src.size[0])
        # four series: src-dis loss, src-res loss, dis-res loss, and the
        # sum of the latter two
        stock_data = [[], [], [], []]
        for i in xrange(1, 11):
            print('    checking %dth...' % i)
            dis = Image.open('level_distortion/%s/%s/00001_%d.bmp' %
                             (self.dir, self.typeDir, i)).convert('RGB')
            dis = CenterCrop((self.size, self.size))(dis)
            dis.save('level_distortion/%s/%s/dis_%d.bmp' %
                     (self.dir, self.typeDir, i),
                     'bmp',
                     quality=100)
            dis = Variable(ToTensor()(dis)).view(1, -1, dis.size[1],
                                                 dis.size[0])
            dis = dis.cpu()
            # print (input)
            res = self.model(dis)

            # NOTE(review): loss.data[0] is the pre-PyTorch-0.4 scalar
            # accessor — this code presumably targets torch <= 0.3.
            tmp = {}
            loss = self.criterion(src, dis)
            tmp[self.src_dis] = loss.data[0]
            stock_data[0].append(loss.data[0])
            # self.experiment.add_scalar_value(self.src_dis, loss.data[0], i)

            loss = self.criterion(res, src)
            tmp[self.src_res] = loss.data[0]
            stock_data[1].append(loss.data[0])
            # self.experiment.add_scalar_value(self.src_res, loss.data[0], i)

            loss = self.criterion(res, dis)
            tmp[self.dis_res] = loss.data[0]
            stock_data[2].append(loss.data[0])
            # self.experiment.add_scalar_value(self.dis_res, loss.data[0], i)

            # combined distance of the restored image to both endpoints
            stock_data[3].append(stock_data[1][i - 1] + stock_data[2][i - 1])

            self.experiment.add_scalar_dict(tmp)

            # clamp to valid pixel range and save the restored image
            res = res.data[0]
            res.clamp_(0.0, 1.0)
            res = ToPILImage()(res)
            res.save('level_distortion/%s/%s/res_%d.bmp' %
                     (self.dir, self.typeDir, i),
                     'bmp',
                     quality=100)
        self.draw(stock_data)

        # tmp = self.dis_res_.get_scalar_names()
        # print(tmp)
        # tmp = self.dis_res_.get_scalar_values(self.dis_res)
        # print(tmp)
        # print (self.dis_res_.get_histogram_names())
        # print (self.dis_res_.get_histogram_values(self.dis_res))
        print('Done this work!\n\n')
Beispiel #15
0
    def prepare_depth_estimates(self):
        """Predict a disparity map for every batch in ``self.data_loader``
        and save each as a min-max-normalized grayscale image, skipping
        files that already exist on disk.
        """
        self.model.eval()
        with torch.no_grad():
            for inputs_val in tqdm(self.data_loader,
                                   total=len(self.data_loader)):
                # skip the whole batch when every output file already exists
                batch_exists = all(
                    os.path.isfile(self.build_filename(f))
                    for f in inputs_val["filename"])
                if batch_exists:
                    continue

                # only the ("color", 0, 0) tensor is needed on the device
                for k, v in inputs_val.items():
                    if torch.is_tensor(v) and k == ("color", 0, 0):
                        inputs_val[k] = v.to(self.device)

                mono_outputs = self.model.predict_test_disp(inputs_val)
                self.monodepth_loss_calculator.generate_depth_test_pred(
                    mono_outputs)
                # depths = mono_outputs[("depth", 0, 0)].cpu()
                depths = mono_outputs[("disp", 0)].cpu()

                for subname, depth in zip(inputs_val["filename"], depths):
                    filename = self.build_filename(subname)
                    os.makedirs(os.path.dirname(filename), exist_ok=True)
                    # min-max normalize to [0, 1]; the original also
                    # clamped to [dmin, dmax], which is a no-op, and would
                    # divide by zero on a constant disparity map
                    dmin = torch.min(depth)
                    dmax = torch.max(depth)
                    span = dmax - dmin
                    if span > 0:
                        depth = (depth - dmin) / span
                    else:
                        depth = torch.zeros_like(depth)
                    img = ToPILImage()(depth.squeeze_(0))
                    if not os.path.isfile(filename):
                        img.save(filename)
Beispiel #16
0
def rgb2seg(rgb_names, number_of_workers):
    """Segment the given RGB images with a pretrained ERFNet.

    Colorized label images are written under ./save_color/ (mirroring the
    path after "CoILTrain/"); returns the per-image label maps stacked
    into one tensor.
    """
    model = torch.nn.DataParallel(ERFNet(NUM_CLASSES)).cuda()

    # Set ERFnet for segmentation
    print("LOAD ERFNet")

    def load_my_state_dict(model, state_dict):
        # custom loader: copy only parameters whose names the model knows,
        # tolerating checkpoints with extra entries
        own_state = model.state_dict()
        for name, param in state_dict.items():
            if name in own_state:
                own_state[name].copy_(param)
        return model

    model = load_my_state_dict(
        model,
        torch.load(os.path.join('trained_models/erfnet_pretrained.pth')))
    model.eval()
    print("ERFNet and weights LOADED successfully")

    loader = DataLoader(CoIL(rgb_names, input_transform_cityscapes,
                             target_transform_cityscapes),
                        num_workers=number_of_workers,
                        batch_size=1,
                        shuffle=False)

    collected = []
    for step, (images, labels, filename, filenameGt) in enumerate(loader):
        inputs = Variable(images)
        print("inputs ", inputs.shape)

        with torch.no_grad():
            outputs = model(inputs)

        # per-pixel argmax over classes -> byte label map
        label = outputs[0].max(0)[1].byte().cpu().data
        print("label ", label.shape)
        label_color = Colorize()(label.unsqueeze(0))

        filenameSave = "./save_color/" + filename[0].split("CoILTrain/")[1]
        os.makedirs(os.path.dirname(filenameSave), exist_ok=True)
        ToPILImage()(label_color).save(filenameSave)

        print(step, filenameSave)

        collected.append(label)

    stacked = torch.stack(collected)
    print(stacked.shape)
    return stacked
Beispiel #17
0
def JPeg(image, quality):
    """JPEG round-trip a tensor image and return it as a tensor.

    The original wrote to the fixed path /tmp/def_jpeg.jpeg, which races
    when several processes run concurrently; an in-memory buffer (as the
    sibling _jpeg_compression already uses) avoids the race and disk I/O.

    :param image: image tensor, clamped to [0, 1] before encoding
    :param quality: JPEG quality level
    """
    from io import BytesIO  # local import keeps the block self-contained
    img = torch.clamp(image, 0, 1)
    pil_img = ToPILImage()(img.cpu())
    buf = BytesIO()
    pil_img.save(buf, 'JPEG', quality=quality)
    buf.seek(0)
    return ToTensor()(Image.open(buf))
Beispiel #18
0
def _jpeg_compression(im):
    """Round-trip a tensor image through in-memory JPEG at quality 75."""
    assert torch.is_tensor(im)
    buffer = BytesIO()
    ToPILImage()(im).save(buffer, 'JPEG', quality=75)
    decoded = Image.open(buffer)
    return ToTensor()(decoded)
Beispiel #19
0
def for_loop(net, data_loader, train_optimizer):
    """Run one epoch over ``data_loader``.

    Trains when ``train_optimizer`` is given, otherwise evaluates and saves
    prediction images for the val/test splits.  Relies on module-level
    globals: semantic_criterion, edge_criterion, task_criterion,
    trainId2label, save_path, backbone_type, crop_h, crop_w, get_palette,
    epoch, epochs, compute_metrics.

    :return: (avg loss, PA%, mPA%, class mIoU%, category mIoU%)
    """
    is_train = train_optimizer is not None
    net.train() if is_train else net.eval()

    total_loss, total_time, total_num, preds, targets = 0.0, 0.0, 0, [], []
    data_bar = tqdm(data_loader, dynamic_ncols=True)
    with (torch.enable_grad() if is_train else torch.no_grad()):
        for data, target, grad, boundary, name in data_bar:
            data, target, grad, boundary = data.cuda(), target.cuda(), grad.cuda(), boundary.cuda()
            # synchronize so the timed span covers only the forward pass
            torch.cuda.synchronize()
            start_time = time.time()
            seg, edge = net(data, grad)
            prediction = torch.argmax(seg.detach(), dim=1)
            torch.cuda.synchronize()
            end_time = time.time()
            # combined objective: semantic + heavily-weighted edge + dual-task
            semantic_loss = semantic_criterion(seg, target)
            edge_loss = edge_criterion(edge, target, boundary)
            task_loss = task_criterion(seg, edge, target)
            loss = semantic_loss + 20 * edge_loss + task_loss

            if is_train:
                train_optimizer.zero_grad()
                loss.backward()
                train_optimizer.step()

            total_num += data.size(0)
            total_time += end_time - start_time
            total_loss += loss.item() * data.size(0)
            preds.append(prediction.cpu())
            targets.append(target.cpu())

            if not is_train:
                if data_loader.dataset.split == 'test':
                    # revert train id to regular id
                    for key in sorted(trainId2label.keys(), reverse=True):
                        prediction[prediction == key] = trainId2label[key].id
                # save pred images
                save_root = '{}/{}_{}_{}/{}'.format(save_path, backbone_type, crop_h, crop_w, data_loader.dataset.split)
                if not os.path.exists(save_root):
                    os.makedirs(save_root)
                for pred_tensor, pred_name in zip(prediction, name):
                    pred_img = ToPILImage()(pred_tensor.unsqueeze(dim=0).byte().cpu())
                    if data_loader.dataset.split == 'val':
                        # palette turns the raw id image into a viewable one
                        pred_img.putpalette(get_palette())
                    pred_name = pred_name.replace('leftImg8bit', 'color')
                    path = '{}/{}'.format(save_root, pred_name)
                    pred_img.save(path)
            data_bar.set_description('{} Epoch: [{}/{}] Loss: {:.4f} FPS: {:.0f}'
                                     .format(data_loader.dataset.split.capitalize(), epoch, epochs,
                                             total_loss / total_num, total_num / total_time))
        # compute metrics
        preds = torch.cat(preds, dim=0)
        targets = torch.cat(targets, dim=0)
        pa, mpa, class_iou, category_iou = compute_metrics(preds, targets)
        print('{} Epoch: [{}/{}] PA: {:.2f}% mPA: {:.2f}% Class_mIOU: {:.2f}% Category_mIOU: {:.2f}%'
              .format(data_loader.dataset.split.capitalize(), epoch, epochs,
                      pa * 100, mpa * 100, class_iou * 100, category_iou * 100))
    return total_loss / total_num, pa * 100, mpa * 100, class_iou * 100, category_iou * 100
Beispiel #20
0
    def validate(self):
        """Run the model over the validation loader, time the forward passes,
        colorize each prediction and save it (optionally merged with the
        input image) under the configured label-save paths.
        """
        self.model.eval()
        self.evaluator.reset()
        tbar = tqdm(self.val_loader, desc='\r')
        for i, (sample, image_name) in enumerate(tbar):

            # samples carry an optional depth channel alongside image/label
            if self.args.depth:
                image, depth, target = sample['image'], sample[
                    'depth'], sample['label']
            else:
                image, target = sample['image'], sample['label']
            if self.args.cuda:
                image = image.cuda()
                if self.args.depth:
                    depth = depth.cuda()
            start_time = time.time()
            with torch.no_grad():
                if self.args.depth:
                    output = self.model(image, depth)
                else:
                    output = self.model(image)
            if self.args.cuda:
                torch.cuda.synchronize()
            # skip the first batch when averaging: it includes warm-up cost
            if i != 0:
                fwt = time.time() - start_time
                self.time_train.append(fwt)
                print(
                    "Forward time per img (bath size=%d): %.3f (Mean: %.3f)" %
                    (self.args.val_batch_size, fwt / self.args.val_batch_size,
                     sum(self.time_train) / len(self.time_train) /
                     self.args.val_batch_size))
            time.sleep(0.1)  # to avoid overheating the GPU too much

            # pred colorize
            pre_colors = Colorize()(torch.max(output,
                                              1)[1].detach().cpu().byte())
            # save
            # NOTE(review): the inner loop reuses 'i', shadowing the batch
            # index (harmless here since enumerate rebinds it, but fragile);
            # the 'val\\' split also assumes Windows-style paths — confirm.
            for i in range(pre_colors.shape[0]):

                label_name = os.path.join(
                    self.args.label_save_path +
                    self.args.weight_path.split('run/')[1],
                    image_name[i].split('val\\')[1])
                merge_label_name = os.path.join(
                    self.args.merge_label_save_path +
                    self.args.weight_path.split('run/')[1],
                    image_name[i].split('val\\')[1])
                os.makedirs(os.path.dirname(label_name), exist_ok=True)
                os.makedirs(os.path.dirname(merge_label_name), exist_ok=True)

                pre_color_image = ToPILImage()(
                    pre_colors[i])  # pre_colors.dtype = float64
                pre_color_image.save(label_name)

                if (self.args.merge):
                    image_merge(image_name[i], pre_color_image,
                                merge_label_name)
                    print('save image: {}'.format(merge_label_name))
 def save_image(self, epoch, doublets):
     """Save one comparison strip per doublet — original | reconstruction |
     10x amplified difference — tiled side by side as <self.path>/<epoch>.jpg.
     """
     strips = []
     for orig, rec in doublets:
         orig, rec = orig.cpu().detach(), rec.cpu().detach()
         amplified_diff = 10 * abs(orig - rec)
         strips.append(torch.cat((orig, rec, amplified_diff), dim=1))
     montage = ToPILImage()(torch.cat(strips, dim=2))
     montage.save(os.path.join(self.path, f'{epoch}.jpg'))
def img2label(img, label, count):
    """Black out image pixels whose label is 0 and save the result as
    ./results/imglabel_<count+1>.jpg.

    :param img: RGB image (PIL image or HxWx3 array-like)
    :param label: HxW label map; 0 marks background
    :param count: running counter; the saved file uses count + 1
    """
    count += 1
    img = np.array(img)
    label = np.array(label)
    # vectorized replacement for the original per-pixel Python double loop:
    # boolean mask broadcasts over the channel axis
    img[label == 0] = 0
    image = ToPILImage()(img)
    image.save('./results/imglabel_' + str(count) + '.jpg')
Beispiel #23
0
 def generate_image(self):
     """Sample one image from the GAN, upscale it to 128x128 (nearest
     neighbour) and return it encoded as JPEG bytes."""
     latent = self.get_noise()
     batch = self.model(latent)
     batch = (batch.cpu() + 1) / 2  # map generator output [-1, 1] -> [0, 1]
     pil_img = ToPILImage()(batch[0])
     pil_img = pil_img.resize((128, 128), resample=0)
     with io.BytesIO() as buf:
         pil_img.save(buf, format='JPEG')
         payload = buf.getvalue()
     return payload
Beispiel #24
0
def save_result(info, pred):
    """Save each image's argmax class map (scaled by 1/255 into [0, 1]) as a
    PNG under cfg.TEST.result, named after the matching entry of ``info``."""
    class_maps = pred.argmax(dim=1, keepdim=True).cpu()

    for idx in range(class_maps.shape[0]):
        out_img = ToPILImage()(class_maps[idx].float() / 255.)
        out_name = info[idx].replace('.jpg', '.png')
        out_img.save(os.path.join(cfg.TEST.result, out_name))
Beispiel #25
0
 def api_model_layer_output_image(input_id, vis_id, layer_id, time, output_id):
     """HTTP endpoint: return the stored layer output ``output_id`` as JPEG.

     Responds with status 500 when the id is not present in self.outputs.
     """
     output_id = int(output_id)
     try:
         output = self.outputs[output_id]
         output = output.detach().cpu()
         pil_img = ToPILImage()(output)
         img_io = io.BytesIO()
         pil_img.save(img_io, 'JPEG', quality=70)
         img_io.seek(0)
         return send_file(img_io, mimetype='image/jpeg')
     except (KeyError, IndexError):
         # a list raises IndexError, a dict raises KeyError — the original
         # caught only KeyError, so a bad index crashed instead of returning
         # the intended "Index not found" response
         return Response(status=500, response='Index not found.')
Beispiel #26
0
 def jpeg_sub(imgs):
     """Input image of shape (n, 3, 224, 224)"""
     # round-trip every image through in-memory JPEG at the global quality
     batch = imgs.clone().cpu()
     for idx in range(imgs.shape[0]):
         buf = BytesIO()
         ToPILImage()(batch[idx]).save(buf, 'JPEG', quality=quality)
         batch[idx] = ToTensor()(Image.open(buf))
     return batch.cuda()
def generateSR(image_path, save_path, model):
    r"""Super-resolve one image with ``model`` and save the result.

    :param image_path: path of the input (low-resolution) image
    :param save_path: path the super-resolved image is written to
    :param model: the (CUDA) super-resolution network
    """
    lr_image = Image.open(image_path)
    # volatile=True is the legacy pre-0.4 no-grad idiom, kept as-is
    batch = Variable(ToTensor()(lr_image), volatile=True).unsqueeze(0).cuda()
    sr_out = model(batch)
    sr_image = ToPILImage()(sr_out[0].data.cpu())
    sr_image.save(save_path)
Beispiel #28
0
 def jpeg_sub(imgs):
     """Input image of shape (n, 3, 224, 224)"""
     # numpy variant: JPEG round-trip each image, return a new ndarray
     out = imgs.copy()
     for idx in range(imgs.shape[0]):
         pil_img = ToPILImage()(torch.Tensor(imgs[idx]))
         buf = BytesIO()
         pil_img.save(buf, 'JPEG', quality=quality)
         out[idx] = ToTensor()(Image.open(buf)).numpy()
     return out
Beispiel #29
0
    def evaluate_model(config, model_folder, image_folder, output_path, save_json=True):
        """
        Evaluates a model by comparing input images with output images
        :param config: the model configuration
        :param model_folder: folder of the saved model
        :param image_folder: path images used to evaluate the model
        :param output_path: path where anonymized images should be stored
        :param save_json: when True, additionally dump the scores to
            <output_path>/scores.json
        :return: dict mapping image file name -> {'score', 'sim', 'emo'}
        """

        image_folder = Path(image_folder)
        output_path = Path(output_path)
        model = config.model(**config.model_params)
        model.load_model(Path(model_folder))
        # extractor crops faces (with margin) from the input images
        extractor = FaceExtractor(margin=0.05, mask_factor=10)

        print("The authors of the package recommend 0.6 as max distance for the same person.")
        scores = {}
        for image_file in image_folder.iterdir():
            if image_file.is_dir():  # skip sub-directories
                continue

            print('#' * 10)
            print('Processing image:', image_file.name)

            input_image = Image.open(image_file)
            extracted_face, extracted_info = extractor(input_image)
            if extracted_face is None:  # no detectable face -> skip image
                print('Face could not be extracted')
                continue

            # anonymize the face crop and scale it back to the crop's size
            face_out = model.anonymize(extracted_face, extracted_info).squeeze(0)
            face_out = ToPILImage()(face_out.cpu().detach())
            face_out = face_out.resize(extracted_face.size, resample=BICUBIC)

            # best-effort: a failed save/scoring skips this image rather
            # than aborting the whole evaluation run
            try:
                face_out.save(output_path / ('anonymized_' + image_file.name.__str__()))
                score, sim, emo = Evaluator.evaluate_image_pair(extracted_face, face_out)
                scores[image_file.name] = {'score': score, 'sim': sim, 'emo': emo}
            except Exception as ex:
                print(ex)
                continue

            print('Current image score:', scores[image_file.name])

        if save_json:
            with open(output_path / 'scores.json', 'w') as f:
                json.dump(scores, f)

        return scores
Beispiel #30
0
def main(args):
    """Test a segmentation model: either on a single image (``args.eval``)
    or over the whole VOC12 test split, saving decoded segmentation maps.
    """
    # ========= Setup device and seed ============
    np.random.seed(42)
    torch.manual_seed(42)
    if args.cuda:
        torch.cuda.manual_seed_all(42)
    device = 'cuda' if args.cuda else 'cpu'
    logger = Logger(pjoin(args.save_dir, args.model, 'test.log'))
    logger.write(f'\nTesting configs: {args}')

    # ================= Load processed data ===================
    val_dataset = VOC12(args.data_dir, img_size=args.img_size, split='test')
    val_loader = DataLoader(val_dataset, num_workers=8, batch_size=1)
    n_classes = val_dataset.n_classes

    # ================= Init model ====================
    model = models.get_model(name=args.model, n_classes=n_classes)
    model = model.to(device)
    state = convert_state_dict(torch.load(args.model_path)["model_state"])
    model.load_state_dict(state)
    model.eval()

    # ====================== Only one image ==========================
    if args.eval:
        with torch.no_grad():
            img = Image.open(args.img_path)
            origin = img.size  # remember input size to restore it at the end
            if args.img_size:
                img = img.resize(
                    (val_dataset.img_size[0], val_dataset.img_size[1]))
            img = val_dataset.input_transform(img).unsqueeze(0).to(device)
            out = model(img)
            # argmax over classes, dropped batch axis -> HxW prediction
            pred = np.squeeze(out.data.max(1)[1].cpu().numpy(), axis=0)
            decoded = val_dataset.decode_segmap(pred)
            img_out = ToPILImage()(decoded).resize(origin)
            img_out.save(
                pjoin(args.save_dir, args.model, f'eval_{args.img_size}.png'))
        return

    # ====================== Testing Many images ==============================
    with torch.no_grad():
        for idx, (name, img) in enumerate(val_loader):
            img = img.to(device)
            out = model(img)
            pred = out.data.max(1)[1].squeeze_(1).squeeze_(0).cpu().numpy()
            decoded = val_dataset.decode_segmap(pred)
            ToPILImage()(decoded).save(
                pjoin(args.save_dir, args.model,
                      f'{name[0]}_{args.img_size}.png'))