Example 1
def evaluate():
    opt = get_args()

    # Load the pretrained model
    models = {}
    models["encoder"] = monolayout.Encoder(18, opt.height, opt.width, True)
    if opt.type == "both":
        models["static_decoder"] = monolayout.Decoder(
            models["encoder"].resnet_encoder.num_ch_enc)
        models["dynamic_decoder"] = monolayout.Decoder(
            models["encoder"].resnet_encoder.num_ch_enc)
    else:
        models["decoder"] = monolayout.Decoder(
            models["encoder"].resnet_encoder.num_ch_enc)

    for model in models.values():
        model.to("cuda")

    models = load_model(models, opt.pretrained_path)

    # Load the validation / test dataset
    dataset_dict = {"3Dobject": monolayout.KITTIObject,
                    "odometry": monolayout.KITTIOdometry,
                    "argo": monolayout.Argoverse,
                    "raw": monolayout.KITTIRAW}

    dataset = dataset_dict[opt.split]
    fpath = os.path.join(
        os.path.dirname(__file__),
        "splits",
        opt.split,
        "{}_files.txt")
    test_filenames = readlines(fpath.format("val"))
    test_dataset = dataset(opt, test_filenames, is_train=False)
    test_loader = DataLoader(
        test_dataset,
        batch_size=1,
        shuffle=True,
        num_workers=opt.num_workers,
        pin_memory=True,
        drop_last=True)

    iou, mAP = np.array([0., 0.]), np.array([0., 0.])
    for batch_idx, inputs in tqdm.tqdm(enumerate(test_loader)):
        with torch.no_grad():
            outputs = process_batch(opt, models, inputs)
        pred = np.squeeze(
            torch.argmax(
                outputs["topview"].detach(),
                1).cpu().numpy())
        true = np.squeeze(inputs[opt.type + "_gt"].detach().cpu().numpy())
        iou += mean_IU(pred, true)
        mAP += mean_precision(pred, true)
    iou /= len(test_loader)
    mAP /= len(test_loader)
    print("Evaluation Results: mIOU: %.4f mAP: %.4f" % (iou[1], mAP[1]))
Example 2
def validation(self):
    iou, mAP = np.array([0., 0.]), np.array([0., 0.])
    for batch_idx, inputs in tqdm.tqdm(enumerate(self.val_loader)):
        with torch.no_grad():
            outputs = self.process_batch(inputs, True)
        pred = np.squeeze(
            torch.argmax(outputs["topview"].detach(), 1).cpu().numpy())
        true = np.squeeze(
            inputs[self.opt.type + "_gt"].detach().cpu().numpy())
        iou += mean_IU(pred, true)
        mAP += mean_precision(pred, true)
    iou /= len(self.val_loader)
    mAP /= len(self.val_loader)
    print("Epoch: %d | Validation: mIOU: %.4f mAP: %.4f" %
          (self.epoch, iou[1], mAP[1]))
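
Examples 1, 2, and 4 share the same pred/true extraction: argmax over the channel axis turns an (N, C, H, W) logit tensor into integer labels, and np.squeeze drops the batch dimension. A self-contained demonstration with made-up shapes:

import numpy as np
import torch

outputs = {"topview": torch.randn(1, 2, 64, 64)}  # fake batch: N=1, 2 classes
pred = np.squeeze(torch.argmax(outputs["topview"].detach(), 1).cpu().numpy())
print(pred.shape, pred.dtype)  # (64, 64) int64 -- one class label per pixel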
Example 3
                feed_dict = {img: x_batch, label: y_batch}
                loss, pred_logits = sess.run([log_loss, pred],
                                             feed_dict=feed_dict)
                pred_map_batch = np.argmax(pred_logits, axis=3)
                for pred_map, y in zip(pred_map_batch, y_batch):
                    # Compare each prediction with its own label, not y_batch[0].
                    mean_iou, pixel_acc, dice = vis.add_sample(pred_map, y)
            vis.compute_scores(suffix=it)

        x_batch, y_batch = next(train_generator)
        # w_map = define_map(y_batch[0])
        # exclude whole-background images
        feed_dict = {
            img: x_batch,
            label: y_batch
            # weight_map: w_map
        }
        _, loss, summary, lr, pred_logits = sess.run(
            [train_step, log_loss, summary_merged, learning_rate, pred],
            feed_dict=feed_dict)
        global_step.assign(it).eval()
        train_writer.add_summary(summary, it)

        pred_map = np.argmax(pred_logits[0], axis=2)
        score, _ = mean_IU(pred_map, y_batch[0])

        if it % 20 == 0:
            print('[iter %d, epoch %.3f]: lr=%f loss=%f, mean_IU=%f' %
                  (it, float(it) / opt.iter_epoch, lr, loss, score))
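
Unlike the PyTorch examples, this TensorFlow 1.x fragment stores predictions channels-last (NHWC), so the argmax runs over axis 3 rather than axis 1. The surrounding graph (img, label, pred, log_loss, train_step, summary_merged) is built earlier in the script and not shown here. A self-contained check of the channels-last idiom, with made-up shapes:

import numpy as np

pred_logits = np.random.rand(4, 64, 64, 2)  # fake NHWC batch: N=4, 2 classes
pred_map_batch = np.argmax(pred_logits, axis=3)
print(pred_map_batch.shape)  # (4, 64, 64) -- one label map per image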
Example 4
def evaluate():
    opt = get_args()

    # Load the pretrained model
    models = {}
    models["encoder"] = racklay.Encoder(18, opt.height, opt.width, True)
    if opt.type == "both":
        models["top_decoder"] = racklay.Decoder(
            models["encoder"].resnet_encoder.num_ch_enc, 3 * opt.num_racks,
            opt.occ_map_size)
        models["front_decoder"] = racklay.Decoder(
            models["encoder"].resnet_encoder.num_ch_enc, 3 * opt.num_racks,
            opt.occ_map_size)
    elif opt.type == "topview":
        models["top_decoder"] = racklay.Decoder(
            models["encoder"].resnet_encoder.num_ch_enc)
    elif opt.type == "frontview":
        models["front_decoder"] = racklay.Decoder(
            models["encoder"].resnet_encoder.num_ch_enc)

    for model in models.values():
        model.to("cuda")

    models = load_model(models, opt.pretrained_path)

    # Load the validation / test dataset
    dataset_dict = {
        "warehouse": Loader,
        "3Dobject": racklay.KITTIObject,
        "odometry": racklay.KITTIOdometry,
        "argo": racklay.Argoverse,
        "raw": racklay.KITTIRAW
    }

    dataset = dataset_dict[opt.split]
    fpath = os.path.join(os.path.dirname(__file__), "splits", opt.split,
                         "{}_files.txt")
    test_filenames = readlines(fpath.format("val"))
    test_dataset = dataset(opt, test_filenames, is_train=False)
    test_loader = DataLoader(test_dataset,
                             batch_size=1,
                             shuffle=True,
                             num_workers=opt.num_workers,
                             pin_memory=True,
                             drop_last=True)

    iou_box_top, mAP_box_top = np.array([0., 0.]), np.array([0., 0.])
    iou_rack_top, mAP_rack_top = np.array([0., 0.]), np.array([0., 0.])
    iou_box_front, mAP_box_front = np.array([0., 0.]), np.array([0., 0.])
    iou_rack_front, mAP_rack_front = np.array([0., 0.]), np.array([0., 0.])
    for batch_idx, inputs in tqdm.tqdm(enumerate(test_loader)):
        with torch.no_grad():
            outputs = process_batch(opt, models, inputs)
        # Top view
        if (opt.type == "both" or opt.type == "topview"):
            for i in range(opt.num_racks):  # For the Rack Case
                input_temp = inputs["topview"][:, i, :, :].detach().cpu().numpy()
                input_onlyrack = np.zeros_like(input_temp)
                input_onlyrack[input_temp == 1] = 1

                input_temp = np.squeeze(input_onlyrack)
                input_temp = cv2.resize(input_temp,
                                        dsize=(opt.occ_map_size,
                                               opt.occ_map_size),
                                        interpolation=cv2.INTER_NEAREST)

                pred = np.squeeze(
                    torch.argmax(
                        outputs["top"][:, 3 * i:3 * i + 3, :, :].detach(),
                        1).cpu().numpy())
                pred_temp = np.zeros_like(pred)
                pred_temp[pred == 1] = 1

                true = np.squeeze(input_temp)
                iou_rack_top += mean_IU(pred_temp, true)
                mAP_rack_top += mean_precision(pred_temp, true)

            for i in range(opt.num_racks):
                input_temp = inputs["topview"][:,
                                               i, :, :].detach().cpu().numpy()
                input_onlybox = np.zeros_like(input_temp)
                input_onlybox[input_temp == 2] = 1
                input_temp = np.squeeze(input_onlybox)
                input_temp = cv2.resize(input_temp,
                                        dsize=(opt.occ_map_size,
                                               opt.occ_map_size),
                                        interpolation=cv2.INTER_NEAREST)

                pred = np.squeeze(
                    torch.argmax(
                        outputs["top"][:, 3 * i:3 * i + 3, :, :].detach(),
                        1).cpu().numpy())
                pred_temp = np.zeros_like(pred)
                pred_temp[pred == 2] = 1

                true = np.squeeze(input_temp)
                iou_box_top += mean_IU(pred_temp, true)
                mAP_box_top += mean_precision(pred_temp, true)

        # Front view
        if (opt.type == "both" or opt.type == "frontview"):
            for i in range(opt.num_racks):  # For the Rack Case
                input_temp = inputs["frontview"][:, i, :, :].detach().cpu().numpy()
                input_onlyrack = np.zeros_like(input_temp)
                input_onlyrack[input_temp == 1] = 1

                input_temp = np.squeeze(input_onlyrack)
                input_temp = cv2.resize(input_temp,
                                        dsize=(opt.occ_map_size,
                                               opt.occ_map_size),
                                        interpolation=cv2.INTER_NEAREST)

                pred = np.squeeze(
                    torch.argmax(
                        outputs["front"][:, 3 * i:3 * i + 3, :, :].detach(),
                        1).cpu().numpy())
                pred_temp = np.zeros_like(pred)
                pred_temp[pred == 1] = 1

                true = np.squeeze(input_temp)
                iou_rack_front += mean_IU(pred_temp, true)
                mAP_rack_front += mean_precision(pred_temp, true)

            for i in range(opt.num_racks):
                input_temp = inputs["frontview"][:,
                                                 i, :, :].detach().cpu().numpy(
                                                 )
                input_onlybox = np.zeros_like(input_temp)
                input_onlybox[input_temp == 2] = 1
                input_temp = np.squeeze(input_onlybox)
                input_temp = cv2.resize(input_temp,
                                        dsize=(opt.occ_map_size,
                                               opt.occ_map_size),
                                        interpolation=cv2.INTER_NEAREST)

                pred = np.squeeze(
                    torch.argmax(
                        outputs["front"][:, 3 * i:3 * i + 3, :, :].detach(),
                        1).cpu().numpy())
                pred_temp = np.zeros_like(pred)
                pred_temp[pred == 2] = 1

                true = np.squeeze(input_temp)
                iou_box_front += mean_IU(pred_temp, true)
                mAP_box_front += mean_precision(pred_temp, true)

    if (opt.type == "both" or opt.type == "topview"):
        iou_rack_top /= (len(test_loader) * opt.num_racks)
        mAP_rack_top /= (len(test_loader) * opt.num_racks)
        iou_box_top /= (len(test_loader) * opt.num_racks)
        mAP_box_top /= (len(test_loader) * opt.num_racks)
        print("Evaluation Results for Rack Top: mIOU: %.4f mAP: %.4f" %
              (iou_rack_top[1], mAP_rack_top[1]))
        print("Evaluation Results for Box Top: mIOU: %.4f mAP: %.4f" %
              (iou_box_top[1], mAP_box_top[1]))

    if (opt.type == "both" or opt.type == "frontview"):
        iou_rack_front /= (len(test_loader) * opt.num_racks)
        mAP_rack_front /= (len(test_loader) * opt.num_racks)
        iou_box_front /= (len(test_loader) * opt.num_racks)
        mAP_box_front /= (len(test_loader) * opt.num_racks)
        print("Evaluation Results for Rack Front: mIOU: %.4f mAP: %.4f" %
              (iou_rack_front[1], mAP_rack_front[1]))
        print("Evaluation Results for Box Front: mIOU: %.4f mAP: %.4f" %
              (iou_box_front[1], mAP_box_front[1]))
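
Both evaluate() scripts are written to run standalone, with get_args() parsing the command line into opt. A typical entry point, plus an illustrative invocation; the script name and flag names below are inferred from the opt.* attributes used above, so treat them as assumptions rather than the repositories' documented CLI:

if __name__ == "__main__":
    evaluate()

$ python3 eval.py --type both --split warehouse --num_workers 4 \
      --pretrained_path ./weights --occ_map_size 128 --num_racks 2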