Example #1
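These snippets appear to come from the Pointnet2.ScanNet semantic-segmentation scripts and rely on module-level imports that the listing omits. A minimal sketch of those imports is given below, assuming standard PyTorch/NumPy usage; the commented project-specific module paths are guesses, not verbatim from the repository:

import os
import importlib

import numpy as np
import torch
from torch.utils.data import DataLoader

# Project-specific names used in the snippets; the module paths are assumed for illustration.
# from lib.dataset import ScannetDataset, ScannetDatasetWholeScene, \
#     collate_random, collate_wholescene
# from lib.config import CONF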
def get_dataloader(args, scene_list, phase):
    if args.use_wholescene:
        dataset = ScannetDatasetWholeScene(scene_list,
                                           is_weighting=not args.no_weighting,
                                           use_color=args.use_color,
                                           use_normal=args.use_normal,
                                           use_multiview=args.use_multiview)
        dataloader = DataLoader(dataset,
                                batch_size=1,
                                collate_fn=collate_wholescene,
                                num_workers=args.num_workers,
                                pin_memory=True)
    else:
        dataset = ScannetDataset(phase,
                                 scene_list,
                                 is_weighting=not args.no_weighting,
                                 use_color=args.use_color,
                                 use_normal=args.use_normal,
                                 use_multiview=args.use_multiview)
        dataloader = DataLoader(dataset,
                                batch_size=args.batch_size,
                                collate_fn=collate_random,
                                num_workers=args.num_workers,
                                pin_memory=True)

    return dataset, dataloader


def evaluate(args):
    # prepare data
    print("preparing data...")
    scene_list = get_scene_list(args)
    dataset = ScannetDatasetWholeScene(scene_list,
                                       use_color=args.use_color,
                                       use_normal=args.use_normal,
                                       use_multiview=args.use_multiview)
    dataloader = DataLoader(dataset,
                            batch_size=1,
                            collate_fn=collate_wholescene)

    # load model
    print("loading model...")
    model_path = os.path.join(CONF.OUTPUT_ROOT, args.folder, "model.pth")
    Pointnet = importlib.import_module("pointnet2_semseg")
    input_channels = int(args.use_color) * 3 + int(args.use_normal) * 3 + int(
        args.use_multiview) * 128
    model = Pointnet.get_model(num_classes=CONF.NUM_CLASSES,
                               is_msg=args.use_msg,
                               input_channels=input_channels,
                               use_xyz=not args.no_xyz,
                               bn=not args.no_bn).cuda()
    model.load_state_dict(torch.load(model_path))
    model.eval()

    # predict
    print("predicting...")
    preds = predict_label(args, model, dataloader)

    # visualize
    print("visualizing...")
    visualize(args, preds)
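For reference, `input_channels` above only counts the optional per-point features concatenated to the input: 3 channels for RGB color, 3 for normals, and 128 for multiview features; the xyz coordinates are controlled separately through `use_xyz`. A quick sanity check with assumed flag values:

# Assumed flag values, for illustration only.
use_color, use_normal, use_multiview = True, True, False
input_channels = int(use_color) * 3 + int(use_normal) * 3 + int(use_multiview) * 128
assert input_channels == 6  # RGB + normals, no multiview features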
Example #3
def get_dataloader(args, scene_list, is_train=True, is_wholescene=False):
    if is_wholescene:
        dataset = ScannetDatasetWholeScene(scene_list, is_train=is_train)
        dataloader = DataLoader(dataset, batch_size=1, collate_fn=collate_wholescene)
    else:
        dataset = ScannetDataset(scene_list, is_train=is_train)
        dataloader = DataLoader(dataset, batch_size=args.batch_size, collate_fn=collate_random)

    return dataset, dataloader
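A minimal usage sketch for this helper, assuming an argparse-style `args` object and a list of ScanNet scene ids (the values shown are placeholders, not from the original script):

from argparse import Namespace

# Placeholder arguments; the real script builds `args` with argparse.
args = Namespace(batch_size=32)
train_scenes = ["scene0000_00", "scene0000_01"]

dataset, dataloader = get_dataloader(args, train_scenes,
                                     is_train=True, is_wholescene=False)
for batch in dataloader:
    # each batch is whatever collate_random packs together (points, labels, ...)
    break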
Example #4
def evaluate(args):
    # prepare data
    print("preparing data...")
    scene_list = get_scene_list(CONF.SCANNETV2_VAL)
    dataset = ScannetDatasetWholeScene(scene_list, is_weighting=False)
    dataloader = DataLoader(dataset, batch_size=1, collate_fn=collate_wholescene)

    # load model
    print("loading model...")
    model_path = os.path.join(CONF.OUTPUT_ROOT, args.folder, "model.pth")
    Pointnet = importlib.import_module("pointnet2_msg_semseg")
    model = Pointnet.get_model(num_classes=CONF.NUM_CLASSES).cuda()
    model.load_state_dict(torch.load(model_path))
    model.eval()

    # eval
    print("evaluating...")
    pointacc_list, pointacc_per_class_array, voxacc_list, voxacc_per_class_array, voxcaliacc_list, pointmiou_per_class_array, voxmiou_per_class_array, masks = eval_wholescene(
        args, model, dataloader)
    
    avg_pointacc = np.mean(pointacc_list)
    avg_pointacc_per_class = np.sum(pointacc_per_class_array * masks, axis=0)/np.sum(masks, axis=0)

    avg_voxacc = np.mean(voxacc_list)
    avg_voxacc_per_class = np.sum(voxacc_per_class_array * masks, axis=0)/np.sum(masks, axis=0)

    avg_voxcaliacc = np.mean(voxcaliacc_list)
    
    avg_pointmiou_per_class = np.sum(pointmiou_per_class_array * masks, axis=0)/np.sum(masks, axis=0)
    avg_pointmiou = np.mean(avg_pointmiou_per_class)

    avg_voxmiou_per_class = np.sum(voxmiou_per_class_array * masks, axis=0)/np.sum(masks, axis=0)
    avg_voxmiou = np.mean(avg_voxmiou_per_class)

    # report
    print()
    print("Point accuracy: {}".format(avg_pointacc))
    print("Point accuracy per class: {}".format(np.mean(avg_pointacc_per_class)))
    print("Voxel accuracy: {}".format(avg_voxacc))
    print("Voxel accuracy per class: {}".format(np.mean(avg_voxacc_per_class)))
    print("Calibrated voxel accuracy: {}".format(avg_voxcaliacc))
    print("Point miou: {}".format(avg_pointmiou))
    print("Voxel miou: {}".format(avg_voxmiou))
    print()

    print("Point acc/voxel acc/point miou/voxel miou per class:")
    for l in range(CONF.NUM_CLASSES):
        print("Class {}: {}/{}/{}/{}".format(CONF.NYUCLASSES[l], avg_pointacc_per_class[l], avg_voxacc_per_class[l], avg_pointmiou_per_class[l], avg_voxmiou_per_class[l]))
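The per-class averages above are weighted by `masks` because a class that never occurs in a scene contributes nothing to that scene's per-class entry; dividing by the mask sum averages each class only over the scenes that actually contain it. A toy NumPy illustration (the numbers are made up):

import numpy as np

# Rows are scenes, columns are classes; masks[i, c] == 1 iff class c occurs in scene i.
per_class = np.array([[0.9, 0.0],
                      [0.7, 0.5]])
masks = np.array([[1, 0],
                  [1, 1]])

avg_per_class = np.sum(per_class * masks, axis=0) / np.sum(masks, axis=0)
print(avg_per_class)  # [0.8 0.5] -- class 1 is averaged only over the scene that contains it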
Example #5
def evaluate(args):
    # prepare data
    print("preparing data...")
    #scene_list = get_scene_list("python/Mesh2Loc/data/scannetv2_val.txt")
    scene_list = ["scene0000_00", "scene0000_02", "scene0000_03",
                  "scene0000_04", "scene0000_01"]
    print(
        "/home/lorenzlamm/Dokumente/DavesPointnetClone/Pointnet2.ScanNet/preprocessing/scannet_scenes"
    )
    dataset = ScannetDatasetWholeScene(scene_list, is_train=False)
    dataloader = DataLoader(dataset,
                            batch_size=1,
                            collate_fn=collate_wholescene)

    # load model
    print("loading model...")
    model_path = os.path.join(CONF.OUTPUT_ROOT, args.folder, "model.pth")
    Pointnet = importlib.import_module("pointnet2_msg_semseg")
    model = Pointnet.get_model(num_classes=21).cuda()
    model.load_state_dict(torch.load(model_path))
    model.eval()

    # eval
    print("evaluating...")
    pointacc_list, voxacc_list, voxcaliacc_list = eval_wholescene(
        args, model, dataloader)
    avg_pointacc = np.mean(pointacc_list)
    avg_voxacc = np.mean(voxacc_list)
    avg_voxcaliacc = np.mean(voxcaliacc_list)

    # report
    print()
    print("Point accuracy: {}".format(avg_pointacc))
    print("Voxel-based point accuracy: {}".format(avg_voxacc))
    print("Calibrated point accuracy: {}".format(avg_voxcaliacc))


def evaluate(args):
    # prepare data
    print("preparing data...")
    scene_list = get_scene_list(args)
    dataset = ScannetDatasetWholeScene(scene_list, is_weighting=True)
    dataloader = DataLoader(dataset,
                            batch_size=1,
                            collate_fn=collate_wholescene)

    # load model
    print("loading model...")
    model_path = os.path.join(CONF.OUTPUT_ROOT, args.folder, "model.pth")
    Pointnet = importlib.import_module("pointnet2_msg_semseg")
    model = Pointnet.get_model(num_classes=CONF.NUM_CLASSES).cuda()
    model.load_state_dict(torch.load(model_path))
    model.eval()

    # predict
    print("predicting...")
    preds = predict_label(args, model, dataloader)

    # visualize
    print("visualizing...")
    visualize(args, preds)


def evaluate(args):
    # prepare data
    print("preparing data...")
    scene_list = get_scene_list(CONF.SCANNETV2_VAL)
    dataset = ScannetDatasetWholeScene(scene_list, is_weighting=True)
    dataloader = DataLoader(dataset,
                            batch_size=1,
                            collate_fn=collate_wholescene)

    # load model
    print("loading model...")
    model_path = os.path.join(CONF.OUTPUT_ROOT, args.folder, "model.pth")
    Pointnet = importlib.import_module("pointnet2_msg_semseg")
    model = Pointnet.get_model(num_classes=21).cuda()
    model.load_state_dict(torch.load(model_path))
    model.eval()

    # eval
    print("evaluating...")
    pointacc_list, pointacc_per_class_array, voxacc_list, voxacc_per_class_array, voxcaliacc_list, pointmiou_per_class_array, voxmiou_per_class_array, masks = eval_wholescene(
        args, model, dataloader)

    avg_pointacc = np.mean(pointacc_list)
    # Average with the masks; np.mean() cannot be used here because classes that do
    # not occur in a scene must be excluded. The result is a per-class vector
    # accumulated over all batches. Division by zero is possible when a class never
    # appears, which leaves NaN entries; the guarded variant below handles that case.
    avg_pointacc_per_class = np.sum(pointacc_per_class_array * masks,
                                    axis=0) / np.sum(masks, axis=0)

    tmp_num = np.sum(pointacc_per_class_array * masks, axis=0)
    tmp_den = np.sum(masks, axis=0)
    avg_pointacc_per_class2 = np.array(
        [a / b if b != 0 else 0 for a, b in zip(tmp_num, tmp_den)])
    mean_pointacc_per_class = np.sum(avg_pointacc_per_class2) / np.sum(
        avg_pointacc_per_class2 > 0)

    avg_voxacc = np.mean(voxacc_list)
    avg_voxacc_per_class = np.sum(voxacc_per_class_array * masks,
                                  axis=0) / np.sum(masks, axis=0)

    tmp_num = np.sum(voxacc_per_class_array * masks, axis=0)
    avg_voxacc_per_class2 = np.array(
        [a / b if b != 0 else 0 for a, b in zip(tmp_num, tmp_den)])
    mean_voxacc_per_class = np.sum(avg_voxacc_per_class2) / np.sum(
        avg_voxacc_per_class2 > 0)

    avg_voxcaliacc = np.mean(voxcaliacc_list)

    avg_pointmiou_per_class = np.sum(pointmiou_per_class_array * masks,
                                     axis=0) / np.sum(masks, axis=0)
    avg_pointmiou = np.mean(avg_pointmiou_per_class)

    tmp_num = np.sum(pointmiou_per_class_array * masks, axis=0)
    avg_pointmiou_per_class2 = np.array(
        [a / b if b != 0 else 0 for a, b in zip(tmp_num, tmp_den)])
    mean_pointmiou_per_class = np.sum(avg_pointmiou_per_class2) / np.sum(
        avg_pointmiou_per_class2 > 0)

    avg_voxmiou_per_class = np.sum(voxmiou_per_class_array * masks,
                                   axis=0) / np.sum(masks, axis=0)
    avg_voxmiou = np.mean(avg_voxmiou_per_class)

    tmp_num = np.sum(voxmiou_per_class_array * masks, axis=0)
    avg_voxmiou_per_class2 = np.array(
        [a / b if b != 0 else 0 for a, b in zip(tmp_num, tmp_den)])
    mean_voxmiou_per_class = np.sum(avg_voxmiou_per_class2) / np.sum(
        avg_voxmiou_per_class2 > 0)

    # report
    print()
    print("Point accuracy: {}".format(avg_pointacc))
    # Note: restricting the mean to seen classes should not be necessary here, since no
    # averaged per-class value should be exactly zero. Two alternative formulations are
    # kept below for reference.
    print("Point accuracy per class: {}".format(mean_pointacc_per_class))
    # print("Point accuracy per class: {}".format(np.sum(avg_pointacc_per_class * (np.sum(masks, axis=0) > 0)) / np.sum(np.sum(masks, axis=0) > 0)))
    # print("Point accuracy per class: {}".format(np.sum(avg_pointacc_per_class) / np.sum(avg_pointacc_per_class > 0)))
    print("Voxel accuracy: {}".format(avg_voxacc))
    print("Voxel accuracy per class: {}".format(mean_voxacc_per_class))
    # print("Voxel accuracy per class: {}".format(np.sum(avg_voxacc_per_class * (np.sum(masks, axis=0) > 0)) / np.sum(np.sum(masks, axis=0) > 0)))
    print("Calibrated voxel accuracy: {}".format(avg_voxcaliacc))
    print("Point miou: {}".format(mean_pointmiou_per_class))
    print("Voxel miou: {}".format(mean_voxmiou_per_class))
    print()

    print("Point acc/voxel acc/point miou/voxel miou per class:")
    for l in range(NUM_CLASSES):
        if l == 0: continue
        print("Class {}: {}/{}/{}/{}".format(NYUCLASSES[l],
                                             avg_pointacc_per_class[l - 1],
                                             avg_voxacc_per_class[l - 1],
                                             avg_pointmiou_per_class[l - 1],
                                             avg_voxmiou_per_class[l - 1]))
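The repeated `tmp_num` / `tmp_den` list comprehensions above guard against classes that never appear (denominator 0). A more compact sketch of the same idea using `np.where` is shown below; note that it averages over classes seen at least once (`den > 0`), which differs slightly from the snippet's `> 0` check on the averaged values themselves:

import numpy as np

def masked_per_class_mean(per_class_array, masks):
    # Sum metric values over scenes, counting each class only where it occurs.
    num = np.sum(per_class_array * masks, axis=0)
    den = np.sum(masks, axis=0)
    # Never-seen classes get 0 instead of NaN; np.maximum avoids the divide-by-zero.
    per_class = np.where(den > 0, num / np.maximum(den, 1), 0.0)
    seen = int(np.count_nonzero(den))
    mean = per_class.sum() / seen if seen else 0.0
    return per_class, mean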
Example #8
def evaluate(args):
    # prepare data
    print("preparing data...")
    scene_list = get_scene_list("data/scannetv2_val.txt")
    dataset = ScannetDatasetWholeScene(scene_list,
                                       use_color=args.use_color,
                                       use_normal=args.use_normal,
                                       use_multiview=args.use_multiview)
    dataloader = DataLoader(dataset,
                            batch_size=1,
                            collate_fn=collate_wholescene)

    # load model
    print("loading model...")
    model_path = os.path.join(CONF.OUTPUT_ROOT, args.folder, "model.pth")
    Pointnet = importlib.import_module("pointnet2_semseg")
    input_channels = int(args.use_color) * 3 + int(args.use_normal) * 3 + int(
        args.use_multiview) * 128
    model = Pointnet.get_model(num_classes=CONF.NUM_CLASSES,
                               is_msg=args.use_msg,
                               input_channels=input_channels,
                               use_xyz=not args.no_xyz,
                               bn=not args.no_bn).cuda()
    model.load_state_dict(torch.load(model_path))
    model.eval()

    # eval
    print("evaluating...")
    pointacc_list, pointacc_per_class_array, voxacc_list, voxacc_per_class_array, voxcaliacc_list, pointmiou_per_class_array, voxmiou_per_class_array, masks = eval_wholescene(
        args, model, dataloader)

    avg_pointacc = np.mean(pointacc_list)
    avg_pointacc_per_class = np.sum(pointacc_per_class_array * masks,
                                    axis=0) / np.sum(masks, axis=0)

    avg_voxacc = np.mean(voxacc_list)
    avg_voxacc_per_class = np.sum(voxacc_per_class_array * masks,
                                  axis=0) / np.sum(masks, axis=0)

    avg_voxcaliacc = np.mean(voxcaliacc_list)

    avg_pointmiou_per_class = np.sum(pointmiou_per_class_array * masks,
                                     axis=0) / np.sum(masks, axis=0)
    avg_pointmiou = np.mean(avg_pointmiou_per_class)

    avg_voxmiou_per_class = np.sum(voxmiou_per_class_array * masks,
                                   axis=0) / np.sum(masks, axis=0)
    avg_voxmiou = np.mean(avg_voxmiou_per_class)

    # report
    print()
    print("Point accuracy: {}".format(avg_pointacc))
    print("Point accuracy per class: {}".format(
        np.mean(avg_pointacc_per_class)))
    print("Voxel accuracy: {}".format(avg_voxacc))
    print("Voxel accuracy per class: {}".format(np.mean(avg_voxacc_per_class)))
    print("Calibrated voxel accuracy: {}".format(avg_voxcaliacc))
    print("Point miou: {}".format(avg_pointmiou))
    print("Voxel miou: {}".format(avg_voxmiou))
    print()

    print("Point acc/voxel acc/point miou/voxel miou per class:")
    for l in range(CONF.NUM_CLASSES):
        print("Class {}: {}/{}/{}/{}".format(CONF.NYUCLASSES[l],
                                             avg_pointacc_per_class[l],
                                             avg_voxacc_per_class[l],
                                             avg_pointmiou_per_class[l],
                                             avg_voxmiou_per_class[l]))