Example #1
0
File: test.py  Project: hvcl/ColorRL
def deploy_evaluate(lbl, gt_lbl):
    """Score a predicted label map against ground truth.

    The prediction is first relabeled via ``clean_reindex``; every metric is
    then computed on the cleaned prediction.

    Returns:
        Tuple of (bestDice, FgBgDice, diffFG, MWCov, MUCov, AvgFP, AvgFN,
        rand_i) — CVPPP dice scores, foreground-count difference, KITTI
        coverage/error metrics, and the adjusted Rand index.
    """
    prediction = clean_reindex(lbl)

    best_dice, fg_bg_dice = GetDices(prediction, gt_lbl)
    fg_count_diff = DiffFGLabels(prediction, gt_lbl)
    mw_cov, mu_cov, avg_fp, avg_fn = kitti_metric(prediction, gt_lbl)
    rand_index = adjusted_rand_index(gt_lbl, prediction)

    return (best_dice, fg_bg_dice, fg_count_diff, mw_cov, mu_cov, avg_fp,
            avg_fn, rand_index)
Example #2
0
File: test.py  Project: hvcl/ColorRL
def evaluate(args, env):
    """Score the environment's current prediction against its ground truth.

    Relabels ``env.lbl`` with ``clean_reindex`` (dropping components smaller
    than ``args.minsize``) and computes the same metric set as
    ``deploy_evaluate``.

    Returns:
        Tuple of (bestDice, FgBgDice, diffFG, MWCov, MUCov, AvgFP, AvgFN,
        rand_i).
    """
    prediction = clean_reindex(env.lbl, args.minsize)
    ground_truth = env.gt_lbl

    best_dice, fg_bg_dice = GetDices(prediction, ground_truth)
    fg_count_diff = DiffFGLabels(prediction, ground_truth)
    mw_cov, mu_cov, avg_fp, avg_fn = kitti_metric(prediction, ground_truth)
    rand_index = adjusted_rand_index(ground_truth, prediction)

    return (best_dice, fg_bg_dice, fg_count_diff, mw_cov, mu_cov, avg_fp,
            avg_fn, rand_index)
Example #3
0
File: test.py  Project: hvcl/ColorRL
def inference(args,
              logger,
              model,
              tests,
              test_env,
              gpu_id,
              rng,
              iter,
              upsize_lbl=None):
    """Run the trained model over a test set, then log (training mode) or
    export (deploy mode) the resulting label maps.

    Args:
        args: config namespace; fields read here: data, deploy, minsize,
            lstm_feats, log_dir, env.
        logger: logger exposing ``image_summary(tag, imgs, step)``.
        model: trained policy network; called as ``model(obs)`` or, with
            ``args.lstm_feats``, ``model((obs, (hx, cx)))``.
        tests: sequence of test samples (only its length is used here;
            samples are fetched through ``test_env.set_sample``).
        test_env: environment driving per-sample inference.
        gpu_id: CUDA device index used for all tensor work.
        rng: numpy random generator used to pick evaluation indices.
        iter: training iteration, used as the logging step / filename tag.
        upsize_lbl: unused here (referenced only by disabled evaluation code).

    Returns:
        None; results are written to the logger or to disk.
    """
    log_img = []
    render_imgs = []  # only populated by currently-disabled rendering code

    # Original dataset resolution; referenced only by disabled upsampling
    # code, kept for when that path is re-enabled.
    if args.data in ['cvppp_eval']:
        original_size = (512, 512)
    elif args.data in ['kitti']:
        original_size = (1224, 370)

    # ---------- Choose which test-set indices to run ----------
    if args.deploy:
        # Deploy mode: run every sample.
        idxs = np.arange(len(tests)).tolist()
    else:
        idxs = []
        if len(tests) <= 40:
            idxs.append(0)
        else:
            # BUGFIX: the original passed `range(tests)` (a sequence, not an
            # int) to range(), which raises TypeError. It must be
            # range(len(tests)) to sample valid indices.
            sample_count = 5 if "3D" in args.data else 33
            idxs.append(
                rng.choice(range(len(tests)),
                           min(len(tests), sample_count),
                           replace=False))

        # Walk forward from the last chosen index, wrapping around.
        for _ in range(min(len(tests), 33)):
            idxs.append((idxs[-1] + 1) % len(tests))

    if "3D" in args.data:
        ret = []
        gt_ret = []

    if args.deploy:
        ret = []
        gt_ret = []
        # ---------- Accumulators for the (disabled) metric report ----------
        # CVPPP
        bestDices = []
        FgBgDices = []
        diffFGs = []
        # KITTI
        MUCovs = []
        MWCovs = []
        AvgFPs = []
        AvgFNs = []
        rand_is = []

    eval_times = []

    for idx in idxs:
        obs = test_env.set_sample(idx)
        done = False
        if args.lstm_feats:
            with torch.cuda.device(gpu_id):
                cx, hx = model.lstm.init_hidden(batch_size=1, use_cuda=True)
        currTime = time.time()

        # Roll the policy forward until the environment signals completion.
        while not done:
            with torch.no_grad():
                with torch.cuda.device(gpu_id):
                    t_obs = torch.tensor(obs[None],
                                         dtype=torch.float32,
                                         device="cuda")
                    if args.lstm_feats:
                        value, logit, (hx, cx) = model((t_obs, (hx, cx)))
                    else:
                        value, logit = model(t_obs)
                    # Greedy action: argmax over the softmax policy.
                    prob = F.softmax(logit, dim=1)
                    action = prob.max(1)[1].data.cpu().numpy()

            obs, _, done, _ = test_env.step_inference(action[0])

        if "3D" not in args.data:
            # NOTE(review): return value is discarded — if clean_reindex does
            # not mutate test_env.lbl in place, this call has no effect;
            # confirm against clean_reindex's implementation.
            clean_reindex(test_env.lbl, args.minsize)

        deltaTime = time.time() - currTime
        eval_times.append(deltaTime)

        if not args.deploy:
            # Training-time logging: keep the top half of the rendered frame.
            img = test_env.render()
            log_img.append(img[:len(img) // 2])
        elif args.data in ['cvppp_eval', 'cvppp', "448_cremi", "256_cremi"]:
            # Deploy mode for 2D datasets: collect the final label map.
            # (The original had a following `elif args.data == 'cvppp'`
            # branch; it was unreachable — 'cvppp' is matched here — and
            # has been removed.)
            ret.append(test_env.lbl)

        if "3D" in args.data:
            ret.append(test_env.lbl)
            gt_ret.append(test_env.gt_lbl)

    print(eval_times)
    print("Mean eval times: ", np.mean(eval_times))

    if not args.deploy:
        log_img = np.concatenate(log_img, 0)
        log_info = {
            "test_samples": log_img,
        }
        for tag, img in log_info.items():
            # image_summary expects a leading batch dimension.
            img = img[None]
            logger.image_summary(tag, img, iter)

        if "3D" in args.data:
            # Persist each 3D volume as a per-iteration TIFF.
            for i, vol in enumerate(ret):
                io.imsave(
                    args.log_dir + "tifs_test/" + str(iter) + "_" + str(i) +
                    "_test.tif", vol.astype(np.uint8))
    else:
        print("\n------------------------------------------------")
        print((str(datetime.now()) + "\n"))
        if "3D" not in args.data:
            # Export the stacked 2D predictions as one multi-page TIFF.
            ret = np.array(ret, dtype=np.int32)
            io.imsave("deploy/" + "deploy_" + args.env + ".tif", ret)

        print("\nDone !")
        print("\n------------------------------------------------")