Ejemplo n.º 1
0
def test(model):
    """Run validation over the module-level ``val_loader`` and record metrics.

    Switches *model* to eval mode, writes per-image predictions via
    ``model.test``, then computes max F-measure and MAE for the main
    results directory and, when configured on ``opt``, the optional
    ``cap``/``cls`` result directories.  All scores are stored on
    ``model.performance`` and returned.

    NOTE(review): relies on module-level ``val_loader``, ``opt``,
    ``tqdm`` and ``fm_and_mae`` — confirm they exist in this module.
    """
    print("============================= TEST ============================")
    model.switch_to_eval()
    # Iterate the loader directly; the enumerate index was never used.
    for img, name, WW, HH in tqdm(val_loader, desc='testing'):
        model.test(img, name, WW, HH)
    model.switch_to_train()
    maxfm, mae, _, _ = fm_and_mae(opt.results_dir, opt.val_gt_dir)
    model.performance = {'maxfm': maxfm, 'mae': mae}
    # The optional result directories are scored against the same GT dir;
    # a loop replaces the two duplicated if-blocks of the original.
    for prefix in ('cap', 'cls'):
        attr = prefix + '_results_dir'
        if hasattr(opt, attr):
            maxfm, mae, _, _ = fm_and_mae(getattr(opt, attr), opt.val_gt_dir)
            model.performance.update(
                {prefix + '_maxfm': maxfm, prefix + '_mae': mae})
    return model.performance
Ejemplo n.º 2
0
 def val_sal(num):
     """Validate the saliency ensemble of ``netc`` and ``netp``.

     Averages the sigmoid outputs of both networks, writes each predicted
     mask (resized back to its original WxH) as a PNG under ``pathvrst``,
     then reports max F-measure and MAE against ``args.pathann_val_sal``.
     Both nets are restored to train mode before returning.

     :param num: iteration number, used only for the log line.
     :returns: ``(maxfm, mae)`` tuple.

     NOTE(review): depends on module-level ``netc``, ``netp``,
     ``sal_val_loader``, ``mean``, ``std``, ``pathvrst`` and ``args``.
     """
     netc.eval()
     netc.rcap = False
     netp.eval()
     netp.rcap = False
     # Iterate the loader directly; the enumerate index was never used.
     for img, names, WWs, HHs in tqdm(sal_val_loader, desc="val"):
         with torch.no_grad():
             img = (img.cuda() - mean) / std
             # torch.sigmoid replaces the deprecated F.sigmoid
             # (same computation, current API).
             msk_big1 = torch.sigmoid(netc(img))
             msk_big2 = torch.sigmoid(netp(img))
             # Simple ensemble: average of the two probability maps.
             msk_big = (msk_big1 + msk_big2) / 2
     msk_big = msk_big.squeeze(1)
     msk_big = msk_big.cpu().numpy() * 255
     for b, _msk in enumerate(msk_big):
         name = names[b]
         WW = WWs[b]
         HH = HHs[b]
         _msk = Image.fromarray(_msk.astype(np.uint8))
         # Resize back to the image's original size before saving.
         _msk = _msk.resize((WW, HH))
         _msk.save(f"{pathvrst}/{name}.png")
     maxfm, mae, _, _ = fm_and_mae(pathvrst, args.pathann_val_sal)
     print(f"val iteration {num} | FM {maxfm} | MAE {mae}")
     netp.train()
     netc.train()
     return maxfm, mae
Ejemplo n.º 3
0
def test(model, val_loader, opt):
    """Evaluate *model* on *val_loader* and return its performance dict.

    Predictions are written by ``model.test``; max F-measure and MAE are
    then computed from ``opt.results_dir`` against ``opt.val_gt_dir`` and
    stored on ``model.performance``.
    """
    print("============================= TEST ============================")
    model.switch_to_eval()
    batches = tqdm(enumerate(val_loader), desc='testing')
    for _, (img, name, WW, HH) in batches:
        model.test(img, name, WW, HH)
    model.switch_to_train()
    scores = fm_and_mae(opt.results_dir, opt.val_gt_dir)
    maxfm, mae = scores[0], scores[1]
    model.performance = dict(maxfm=maxfm, mae=mae)
    return model.performance
Ejemplo n.º 4
0
def test_sal(model):
    """Run saliency validation for *model* and persist its metrics.

    Predictions go to ``<results_dir>_sal``; max F-measure and MAE are
    computed against the module-level ``sal_val_gt_dir``, stored on
    ``model.performance`` and dumped to ``val_sal.json``.

    NOTE(review): relies on module-level ``sal_val_loader``,
    ``sal_val_gt_dir``, ``tqdm`` and ``fm_and_mae``.
    """
    print("============================= TEST ============================")
    model.switch_to_eval()
    model.opt.sal_results_dir = model.opt.results_dir + '_sal'
    # makedirs(exist_ok=True) replaces the racy exists()+mkdir pair.
    os.makedirs(model.opt.sal_results_dir, exist_ok=True)
    # Iterate the loader directly; the enumerate index was never used.
    for img, name, WW, HH in tqdm(sal_val_loader, desc='testing'):
        model.test_sal(img, name, WW, HH)
    model.switch_to_train()
    # BUG FIX: the original read the global ``opt.sal_results_dir`` here,
    # but the directory was created on ``model.opt`` above.
    maxfm, mae, _, _ = fm_and_mae(model.opt.sal_results_dir, sal_val_gt_dir)
    model.performance = {'maxfm': maxfm, 'mae': mae}
    print(maxfm)
    print(mae)
    with open('val_sal.json', 'w') as f:
        json.dump(model.performance, f)
    return model.performance
Ejemplo n.º 5
0
def val_sal():
    """Validate the segmentation net as a saliency predictor.

    Foreground probability is the ``v_sal``-weighted sum of the
    non-background softmax channels.  Per-image masks (resized to their
    original size) are saved under ``path_save_valid_sal`` and scored
    against ``sal_val_gt_dir``.  Returns ``(fm, mae)``.

    NOTE(review): depends on module-level ``net``, ``sal_val_loader``,
    ``mean``, ``std``, ``path_save_valid_sal`` and ``sal_val_gt_dir``.
    """
    net.eval()
    with torch.no_grad():
        # desc fixed from 'train' to 'val' — this is the validation loop.
        for it, (img, gt, batch_name, WW, HH) in tqdm(enumerate(sal_val_loader), desc='val'):
            img = (img.cuda() - mean) / std
            pred_seg, v_sal, _ = net(img)
            pred_seg = torch.softmax(pred_seg, 1)
            # Weighted foreground probability.  The background channel
            # (pred_seg[:, :1]) was computed but never used — removed.
            fg = (pred_seg[:, 1:] * v_sal[:, 1:]).sum(1, keepdim=True)
            fg = fg.squeeze(1) * 255
            for n, name in enumerate(batch_name):
                msk = fg[n].detach().cpu().numpy()
                w, h = WW[n], HH[n]
                msk = Image.fromarray(msk.astype(np.uint8))
                # Resize back to the image's original size before saving.
                msk = msk.resize((w, h))
                msk.save('{}/{}.png'.format(path_save_valid_sal, name), 'PNG')
        fm, mae, _, _ = fm_and_mae(path_save_valid_sal, sal_val_gt_dir)
        net.train()
        return fm, mae
Ejemplo n.º 6
0
    # Find out the most probable class for each pixel.
    BMAP = np.argmax(Q, axis=0).reshape((H, W))
    BMAP = (BMAP*255).astype(np.uint8)
    BMAP = Image.fromarray(BMAP)
    # NOTE(review): this save is immediately overwritten by msk.save below
    # (same path); only the '_bin' copy of BMAP survives — confirm intent.
    BMAP.save(os.path.join(output_root, img_name), 'png')

    # Soft map: probability of class 1 per pixel, scaled to 0-255.
    # Assumes Q's first axis indexes classes — TODO confirm upstream.
    MAP = np.array(Q)[1].reshape((H, W))
    MAP = (MAP*255).astype(np.uint8)
    msk = Image.fromarray(MAP)
    msk.save(os.path.join(output_root, img_name), 'png')
    BMAP.save(os.path.join(output_root+'_bin', img_name), 'png')


if __name__ == '__main__':
    print('start crf')
    # Run CRF post-processing over all files in parallel.  The context
    # manager tears the pool down even if a worker raises; pool.map
    # blocks until every task has finished.
    with multiprocessing.Pool(processes=8) as pool:
        pool.map(myfunc, files)
    print('done')
    # Score the refined maps against the dataset's ground-truth masks.
    fm, mae, _, _ = fm_and_mae(
        output_root, '../data/datasets/saliency_Dataset/%s/masks' % sal_set)
    print(fm)
    print(mae)
Ejemplo n.º 7
0
    # Capture intermediate feature maps from three DenseNet stages via
    # forward hooks (presumably ``hook`` appends its output to
    # ``net.feats`` — confirm against the hook's definition).
    net.features.transition2[-2].register_forward_hook(hook)
    net.features.transition1[-2].register_forward_hook(hook)
    net.features.block0[-2].register_forward_hook(hook)
    # Neutralize the last transition's pooling (1x1 kernel, stride 1) so
    # the backbone keeps a finer output resolution.
    net.features.transition3[-1].kernel_size = 1
    net.features.transition3[-1].stride = 1

    # Standard ImageNet mean/std, indexed to shape (1, 3, 1, 1) so they
    # broadcast over a NCHW batch.
    v_mean = torch.Tensor([0.485, 0.456, 0.406])[None, ..., None, None]
    v_std = torch.Tensor([0.229, 0.224, 0.225])[None, ..., None, None]

    for name in tqdm(names, desc='testing'):
        img = Image.open(os.path.join(img_root, name + '.jpg')).convert('RGB')
        ww, hh = img.size
        img = np.array(img, dtype=np.uint8)
        img = img.transpose((2, 0, 1))  # HWC -> CHW
        img = (torch.from_numpy(img[None, ...]).float() - v_mean) / v_std

        # Reset the hook buffer, run the forward pass (hooks fire here),
        # and append the final output as the last feature.
        net.feats = []
        x = net(img.cuda())
        net.feats += [x]
        # Concatenate two hooked feature maps, both upsampled to the
        # original image size.  NOTE(review): F.upsample is deprecated in
        # favour of F.interpolate — behavior unchanged here.
        feat = torch.cat((F.upsample(net.feats[1], size=(hh, ww)),
                          F.upsample(net.feats[2], size=(hh, ww))),
                         1).detach().cpu().numpy()
        feat = feat.transpose((0, 2, 3, 1))  # NCHW -> NHWC
        proc_feat(name, feat, img_root, prob_root, output_root)

    print('done')
    fm, mae, _, _ = fm_and_mae(
        output_root, '/home/zeng/data/datasets/saliency_Dataset/ECSSD/masks')
    print(fm)
    print(mae)