Code Example #1
def evaluate_one_epoch():
    stat_dict = {}

    ap_calculator = APCalculator(ap_iou_thresh=FLAGS.ap_iou_thresh,
                                 class2type_map=DATASET_CONFIG.class2type)
    ap_calculator_l = APCalculator(ap_iou_thresh=FLAGS.ap_iou_thresh * 2,
                                   class2type_map=DATASET_CONFIG.class2type)

    net.eval()  # set model to eval mode (for bn and dp)
    for batch_idx, batch_data_label in enumerate(TEST_DATALOADER):
        end_points = {}
        if batch_idx % 10 == 0:
            print('Eval batch: %d' % (batch_idx))
        for key in batch_data_label:
            batch_data_label[key] = batch_data_label[key].to(device)

        # Forward pass
        inputs = {'point_clouds': batch_data_label['point_clouds']}
        with torch.no_grad():
            end_points = net(inputs, end_points)

        # Compute loss
        for key in batch_data_label:
            end_points[key] = batch_data_label[key]
        loss, end_points = criterion(inputs, end_points, DATASET_CONFIG)

        # Accumulate statistics and print out
        for key in end_points:
            if 'loss' in key or 'acc' in key or 'ratio' in key:
                if key not in stat_dict: stat_dict[key] = 0
                stat_dict[key] += end_points[key].item()

        batch_pred_map_cls = parse_predictions(
            end_points, CONFIG_DICT, opt_ang=(FLAGS.dataset == 'sunrgbd'))
        batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT)
        ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)

        batch_pred_map_cls = parse_predictions(
            end_points, CONFIG_DICT_L, opt_ang=(FLAGS.dataset == 'sunrgbd'))
        batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT_L)
        ap_calculator_l.step(batch_pred_map_cls, batch_gt_map_cls)

        if FLAGS.dump_results:
            dump_results(end_points, DUMP_DIR + '/result/', DATASET_CONFIG,
                         TEST_DATASET)

    # Log statistics
    for key in sorted(stat_dict.keys()):
        log_string('eval mean %s: %f' % (key, stat_dict[key] /
                                         (float(batch_idx + 1))))

    metrics_dict = ap_calculator.compute_metrics()
    for key in metrics_dict:
        log_string('iou = 0.25, eval %s: %f' % (key, metrics_dict[key]))
    metrics_dict = ap_calculator_l.compute_metrics()
    for key in metrics_dict:
        log_string('iou = 0.5, eval %s: %f' % (key, metrics_dict[key]))

    mean_loss = stat_dict['loss'] / float(batch_idx + 1)
    return mean_loss
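
With the default FLAGS.ap_iou_thresh of 0.25, the two calculators above report mAP at IoU 0.25 and 0.5 (matching the log lines at the end). A condensed variant of the same pattern, as several later examples use (a sketch, not part of this file):

# Condensed two-threshold variant of the pattern above; assumes
# FLAGS.ap_iou_thresh = 0.25, so the thresholds are 0.25 and 0.5.
AP_IOU_THRESHOLDS = [FLAGS.ap_iou_thresh, FLAGS.ap_iou_thresh * 2]
ap_calculator_list = [
    APCalculator(iou_thresh, DATASET_CONFIG.class2type)
    for iou_thresh in AP_IOU_THRESHOLDS
]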
Code Example #2
File: train_with_rn.py  Project: lanlan96/3DRM
def evaluate_one_epoch():
    stat_dict = {}  # collect statistics
    ap_calculator = APCalculator(ap_iou_thresh=FLAGS.ap_iou_thresh,
                                 class2type_map=DATASET_CONFIG.class2type)
    net.eval()  # set model to eval mode (for bn and dp)
    for batch_idx, batch_data_label in enumerate(TEST_DATALOADER):
        if batch_idx % 10 == 0:
            print('Eval batch: %d' % (batch_idx))
        for key in batch_data_label:
            batch_data_label[key] = batch_data_label[key].to(device)

        # Forward pass
        inputs = {'point_clouds': batch_data_label['point_clouds']}
        with torch.no_grad():
            end_points = net(inputs)

        end_points['relation_type'] = FLAGS.relation_type
        end_points['relation_pair'] = FLAGS.relation_pair

        # Compute loss
        for key in batch_data_label:
            assert (key not in end_points)
            end_points[key] = batch_data_label[key]
        loss, end_points = criterion(end_points, DATASET_CONFIG)

        # Accumulate statistics and print out
        for key in end_points:
            if 'loss' in key or 'acc' in key or 'ratio' in key:
                if key not in stat_dict: stat_dict[key] = 0
                stat_dict[key] += end_points[key].item()

        batch_pred_map_cls = parse_predictions(end_points, CONFIG_DICT)
        batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT)
        ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)

        # Dump evaluation results for visualization
        if FLAGS.dump_results and batch_idx == 0 and EPOCH_CNT % 10 == 0:
            MODEL.dump_results(end_points, DUMP_DIR, DATASET_CONFIG)

    # Log statistics
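    # The global step passed to log_scalars below is (EPOCH_CNT + 1) * number of
    # training batches * batch size, i.e. roughly the training samples seen so far.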
    TEST_VISUALIZER.log_scalars(
        {key: stat_dict[key] / float(batch_idx + 1)
         for key in stat_dict},
        (EPOCH_CNT + 1) * len(TRAIN_DATALOADER) * BATCH_SIZE)
    for key in sorted(stat_dict.keys()):
        log_string('eval mean %s: %f' % (key, stat_dict[key] /
                                         (float(batch_idx + 1))))

    # Evaluate average precision
    metrics_dict = ap_calculator.compute_metrics()
    for key in metrics_dict:
        log_string('eval %s: %f' % (key, metrics_dict[key]))

    mean_loss = stat_dict['loss'] / float(batch_idx + 1)
    return mean_loss
Code Example #3
def evaluate_one_epoch():
    stat_dict = {}
    ap_calculator_list = [APCalculator(iou_thresh, DATASET_CONFIG.class2type) \
        for iou_thresh in AP_IOU_THRESHOLDS]
    net.eval()  # set model to eval mode (for bn and dp)
    for batch_idx, batch_data_label in enumerate(TEST_DATALOADER):
        if batch_idx % 10 == 0:
            print('Eval batch: %d' % (batch_idx))
        for key in batch_data_label:
            batch_data_label[key] = batch_data_label[key].to(device)

        # Forward pass
        inputs = {'point_clouds': batch_data_label['point_clouds']}
        with torch.no_grad():
            end_points = net(inputs)

        # Compute loss
        for key in batch_data_label:
            assert (key not in end_points)
            end_points[key] = batch_data_label[key]
        loss, end_points = criterion(end_points, DATASET_CONFIG)

        # Accumulate statistics and print out
        for key in end_points:
            if 'loss' in key or 'acc' in key or 'ratio' in key:
                if key not in stat_dict: stat_dict[key] = 0
                stat_dict[key] += end_points[key].item()

        batch_pred_map_cls = parse_predictions(end_points, CONFIG_DICT)
        batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT)
        for ap_calculator in ap_calculator_list:
            ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)

        # Dump evaluation results for visualization.
        # NOTE: the else-branch breaks out of the loop, so only the first
        # two batches are evaluated (likely a debugging shortcut).
        if batch_idx == 0:
            MODEL.dump_results(end_points, DUMP_DIR, DATASET_CONFIG)
        else:
            break

    # Log statistics
    for key in sorted(stat_dict.keys()):
        log_string('eval mean %s: %f' % (key, stat_dict[key] /
                                         (float(batch_idx + 1))))

    # Evaluate average precision
    for i, ap_calculator in enumerate(ap_calculator_list):
        print('-' * 10, 'iou_thresh: %f' % (AP_IOU_THRESHOLDS[i]), '-' * 10)
        metrics_dict = ap_calculator.compute_metrics()
        for key in metrics_dict:
            log_string('eval %s: %f' % (key, metrics_dict[key]))

    mean_loss = stat_dict['loss'] / float(batch_idx + 1)
    return mean_loss
Code Example #4
def evaluate_one_epoch():
    stat_dict = {}  # collect statistics
    ap_calculator = APCalculator(ap_iou_thresh=FLAGS.ap_iou_thresh,
                                 class2type_map=DATASET_CONFIG.class2type)
    detector.eval()  # set model to eval mode (for bn and dp)
    for batch_idx, batch_data_label in enumerate(TEST_DATALOADER):
        for key in batch_data_label:
            batch_data_label[key] = batch_data_label[key].to(device)

        # Forward pass
        inputs = {'point_clouds': batch_data_label['point_clouds']}
        with torch.no_grad():
            end_points = detector(inputs)

        # Compute loss
        for key in batch_data_label:
            assert (key not in end_points)
            end_points[key] = batch_data_label[key]
        loss, end_points = test_detector_criterion(end_points, DATASET_CONFIG)

        # Accumulate statistics and print out
        for key in end_points:
            if 'loss' in key or 'acc' in key or 'ratio' in key:
                if key not in stat_dict: stat_dict[key] = 0
                stat_dict[key] += end_points[key].item()

        batch_pred_map_cls = parse_predictions(end_points, CONFIG_DICT)
        batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT)
        ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)

    # Log statistics
    TEST_VISUALIZER.log_scalars(
        {key: stat_dict[key] / float(batch_idx + 1)
         for key in stat_dict},
        (EPOCH_CNT + 1) * len(LABELED_DATALOADER) * BATCH_SIZE)
    for key in sorted(stat_dict.keys()):
        log_string('eval mean %s: %f' % (key, stat_dict[key] /
                                         (float(batch_idx + 1))))

    # Evaluate average precision
    metrics_dict = ap_calculator.compute_metrics()
    for key in metrics_dict:
        log_string('eval %s: %f' % (key, metrics_dict[key]))

    TEST_VISUALIZER.log_scalars(
        {
            'mAP': metrics_dict['mAP'],
            'AR': metrics_dict['AR']
        }, (EPOCH_CNT + 1) * len(LABELED_DATALOADER) * BATCH_SIZE)

    mean_loss = stat_dict['detection_loss'] / float(batch_idx + 1)
    return mean_loss
Code Example #5
    def get_detections(self, depth):
        """
        Detection output after applying the confidence threshold and non-maximum suppression.
        Format is a (batch size, # of preds) array of tuples of
        (semantic class, bbox corners, objectness).
        """
        pc = self.depth_to_pc(depth)
        inputs = {'point_clouds': torch.from_numpy(pc).to(self.device)}
        tic = time.time()
        with torch.no_grad():
            end_points = self.net(inputs)
        toc = time.time()
        obj_logits = end_points['objectness_scores'].detach().cpu().numpy()
        obj_prob = softmax(obj_logits)[:, :, 1]  # (B,K)
        if np.sum(obj_prob > self.eval_config_dict['conf_thresh']) == 0:
            return [[]]
        #print('Inference time: %f'%(toc-tic))
        end_points['point_clouds'] = inputs['point_clouds']
        pred_map_cls = parse_predictions(end_points, self.eval_config_dict)
        return pred_map_cls
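
A minimal usage sketch for get_detections (the detector instance and depth input are hypothetical; the tuple layout follows the docstring above, with bbox corners as an (8, 3) array per proposal):

# Hypothetical driver: `detector` is an instance of the class above and
# `depth` is whatever depth representation its depth_to_pc() expects.
detections = detector.get_detections(depth)
for cls_id, bbox_corners, objectness in detections[0]:  # batch element 0
    print('class %s, objectness %.3f' % (str(cls_id), float(objectness)))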
Code Example #6
File: without_voice.py  Project: KeondoPark/luxmea
def _votenet_inference(queue):

    # Set file paths and dataset config
    demo_dir = os.path.join(BASE_DIR, 'demo_files')

    # Use sunrgbd
    sys.path.append(os.path.join(ROOT_DIR, 'sunrgbd'))
    from sunrgbd_detection_dataset import DC  # dataset config
    checkpoint_path = os.path.join(demo_dir,
                                   'pretrained_votenet_on_sunrgbd.tar')

    eval_config_dict = {
        'remove_empty_box': True,
        'use_3d_nms': True,
        'nms_iou': 0.25,
        'use_old_type_nms': False,
        'cls_nms': False,
        'per_class_proposal': False,
        'conf_thresh': 0.5,
        'dataset_config': DC
    }

    # Init the model and optimizer
    MODEL = importlib.import_module('votenet')  # import network module
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net = MODEL.VoteNet(
        num_proposal=256,
        input_feature_dim=1,
        vote_factor=1,
        #sampling='seed_fps', num_class=DC.num_class,
        sampling='vote_fps',
        num_class=DC.num_class,
        num_heading_bin=DC.num_heading_bin,
        num_size_cluster=DC.num_size_cluster,
        mean_size_arr=DC.mean_size_arr).to(device)
    print('Constructed model.')

    # Load checkpoint
    optimizer = optim.Adam(net.parameters(), lr=0.001)
    checkpoint = torch.load(checkpoint_path)
    net.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    epoch = checkpoint['epoch']
    print("Loaded checkpoint %s (epoch: %d)" % (checkpoint_path, epoch))

    # Load and preprocess input point cloud
    net.eval()  # set model to eval mode (for bn and dp)

    filename = queue.get()
    print(filename)
    pc_dir = os.path.join(BASE_DIR, 'point_cloud')
    pc_path = os.path.join(pc_dir, filename)

    point_cloud = read_ply(pc_path)
    pc = preprocess_point_cloud(point_cloud)
    print('Loaded point cloud data: %s' % (pc_path))

    # Model inference
    inputs = {'point_clouds': torch.from_numpy(pc).to(device)}
    tic = time.time()
    with torch.no_grad():
        #with profiler.profile(with_stack=True, profile_memory=True) as prof:
        end_points = net(inputs)
        toc = time.time()
        print('Inference time: %f' % (toc - tic))

    end_points['point_clouds'] = inputs['point_clouds']
    pred_map_cls = parse_predictions(end_points, eval_config_dict)
    print('Finished detection. %d object detected.' % (len(pred_map_cls[0])))

    #dump_dir = os.path.join(demo_dir, '%s_results'%('sunrgbd'))
    #if not os.path.exists(dump_dir): os.mkdir(dump_dir)
    #MODEL.dump_results(end_points, dump_dir, DC, True)
    #print('Dumped detection results to folder %s'%(dump_dir))

    #return pred_map_cls

    queue.put(pred_map_cls)
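
A sketch of how _votenet_inference might be driven, assuming the single multiprocessing.Queue carries the point-cloud filename in and the predictions back out (the filename is illustrative):

import multiprocessing as mp

if __name__ == '__main__':
    queue = mp.Queue()
    worker = mp.Process(target=_votenet_inference, args=(queue,))
    worker.start()
    queue.put('sample_scene.ply')  # hypothetical file under point_cloud/
    pred_map_cls = queue.get()  # NOTE: sharing one queue in both directions can
                                # race with the worker's own get(); separate input
                                # and output queues are safer in real code.
    worker.join()
    print('%d objects in batch element 0' % len(pred_map_cls[0]))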
Code Example #7
def evaluate_one_full_tour(scan_name):
    stat_dict = {}
    ap_calculator_list = [APCalculator(iou_thresh, DATASET_CONFIG.class2type) \
        for iou_thresh in AP_IOU_THRESHOLDS]

    l = os.listdir(os.path.join(DUMP_DIR, 'outputs'))
    l = [x for x in l if scan_name in x]

    end_points = {}

    filename = l[0]
    h5file = h5py.File(os.path.join(DUMP_DIR, 'outputs', filename), 'r')
    for k in output_keys:
        end_points[k] = torch.FloatTensor(h5file[k]).unsqueeze(0)
    h5file.close()

    for filename in l[1:]:
        h5file = h5py.File(os.path.join(DUMP_DIR, 'outputs', filename), 'r')
        for k in output_keys:
            end_points[k] = torch.cat(
                (end_points[k], torch.FloatTensor(h5file[k]).unsqueeze(0)),
                dim=1)
        h5file.close()

    ### -- load input
    dirdir = 'mp3d/votenet_training_data_full_tours_fixed_pc_fixed_votes/val/votenet_inputs/'
    h5file = h5py.File(os.path.join(dirdir, scan_name + '.h5'), 'r')
    mesh_vertices = np.array(h5file['point_cloud'], dtype=np.float32)
    instance_labels = np.array(h5file['instance'], dtype=np.int32)
    semantic_labels = np.array(h5file['semantic'], dtype=np.int32)
    instance_bboxes = np.array(h5file['bboxes'], dtype=np.float32)
    instance_bboxes = instance_bboxes[:, :8]
    h5file.close()

    # convert PC to z is up.
    mesh_vertices[:, [0, 1, 2]] = mesh_vertices[:, [0, 2, 1]]

    # convert annotations to z is up.
    instance_bboxes[:, [0, 1, 2]] = instance_bboxes[:, [0, 2, 1]]
    instance_bboxes[:, [3, 4, 5]] = instance_bboxes[:, [3, 5, 4]]

    if not FLAGS.use_color:
        point_cloud = mesh_vertices[:, 0:3]  # do not use color for now
    else:
        point_cloud = mesh_vertices[:, 0:6]
        MEAN_COLOR_RGB = np.array([109.8, 97.2, 83.8])
        point_cloud[:, 3:] = point_cloud[:, 3:] - (MEAN_COLOR_RGB) / 256.0

    if (not FLAGS.no_height):
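        # NOTE: 0.99 is the 0.99th percentile (close to the minimum z),
        # matching VoteNet's floor-height heuristic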
        floor_height = np.percentile(point_cloud[:, 2], 0.99)
        height = point_cloud[:, 2] - floor_height
        point_cloud = np.concatenate(
            [point_cloud, np.expand_dims(height, 1)], 1)

    # ------------------------------- LABELS ------------------------------
    MAX_NUM_OBJ_FT = 256
    target_bboxes = np.zeros((MAX_NUM_OBJ_FT, 6))
    target_bboxes_mask = np.zeros((MAX_NUM_OBJ_FT))
    angle_classes = np.zeros((MAX_NUM_OBJ_FT, ))
    angle_residuals = np.zeros((MAX_NUM_OBJ_FT, ))
    size_classes = np.zeros((MAX_NUM_OBJ_FT, ))
    size_residuals = np.zeros((MAX_NUM_OBJ_FT, 3))

    target_bboxes_mask[0:instance_bboxes.shape[0]] = 1
    target_bboxes[0:instance_bboxes.shape[0], :] = instance_bboxes[:, 0:6]

    # compute votes *AFTER* augmentation
    # generate votes
    # Note: since there's no map between bbox instance labels and
    # pc instance_labels (it had been filtered
    # in the data preparation step) we'll compute the instance bbox
    # from the points sharing the same instance label.
    num_points = mesh_vertices.shape[0]
    print('total num points: ', num_points)
    point_votes = np.zeros([num_points, 3])
    point_votes_mask = np.zeros(num_points)

    for i_instance in np.unique(instance_labels):
        # ignore points not associated with a box
        #if i_instance not in instance_bboxes_instance_labels: continue

        # find all points belong to that instance
        ind = np.where(instance_labels == i_instance)[0]
        # find the semantic label
        #TODO: change class labels
        if not (semantic_labels[ind[0]] == -1):
            x = point_cloud[ind, :3]
            center = 0.5 * (x.min(0) + x.max(0))
            point_votes[ind, :] = center - x
            point_votes_mask[ind] = 1.0

    point_votes = np.tile(point_votes, (1, 3))  # make 3 votes identical

    # NOTE: set size class as semantic class. Consider use size2class.
    size_classes[0:instance_bboxes.shape[0]] = instance_bboxes[:, -1]
    instance_bboxes_sids = instance_bboxes[:, -1]
    instance_bboxes_sids = instance_bboxes_sids.astype(np.int64)  # np.int was removed in NumPy >= 1.24
    size_residuals[0:instance_bboxes.shape[0], :] = \
        target_bboxes[0:instance_bboxes.shape[0], 3:6] - DATASET_CONFIG.mean_size_arr[instance_bboxes_sids,:]

    #TODO: update angle_classes + residuals
    angle_residuals[0:instance_bboxes.shape[0]] = instance_bboxes[:, 6]

    ret_dict = {}
    ret_dict['point_clouds'] = point_cloud.astype(np.float32)
    ret_dict['center_label'] = target_bboxes.astype(np.float32)[:, 0:3]
    ret_dict['heading_class_label'] = angle_classes.astype(np.int64)
    ret_dict['heading_residual_label'] = angle_residuals.astype(np.float32)
    ret_dict['size_class_label'] = size_classes.astype(np.int64)
    ret_dict['size_residual_label'] = size_residuals.astype(np.float32)
    target_bboxes_semcls = np.zeros((MAX_NUM_OBJ_FT))
    target_bboxes_semcls[0:instance_bboxes.shape[0]] = instance_bboxes[:, -1]
    ret_dict['sem_cls_label'] = target_bboxes_semcls.astype(np.int64)
    ret_dict['box_label_mask'] = target_bboxes_mask.astype(np.float32)
    ret_dict['vote_label'] = point_votes.astype(np.float32)
    ret_dict['vote_label_mask'] = point_votes_mask.astype(np.int64)

    for key in ret_dict:
        ret_dict[key] = torch.FloatTensor(
            ret_dict[key]).to(device).unsqueeze(0)

    ### --  DONE load input

    for key in ret_dict:
        assert (key not in end_points)
        end_points[key] = ret_dict[key]

    print('parse pred')
    batch_pred_map_cls = parse_predictions(end_points, CONFIG_DICT)
    print('parse GT')
    batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT)
    print('print metrics')
    for ap_calculator in ap_calculator_list:
        ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)

    for i, ap_calculator in enumerate(ap_calculator_list):
        print('-' * 10, 'iou_thresh: %f' % (AP_IOU_THRESHOLDS[i]), '-' * 10)
        metrics_dict = ap_calculator.compute_metrics()
        for key in metrics_dict:
            print('eval %s: %f' % (key, metrics_dict[key]))
Code Example #8
File: train.py  Project: cheng052/H3DNet
def evaluate_one_epoch():
    stat_dict = {} # collect statistics
    ap_calculator = APCalculator(ap_iou_thresh=FLAGS.ap_iou_thresh,
        class2type_map=DATASET_CONFIG.class2type)
    ap_calculator_l = APCalculator(ap_iou_thresh=FLAGS.ap_iou_thresh*2,
        class2type_map=DATASET_CONFIG.class2type)

    net.eval() # set model to eval mode (for bn and dp)

    time_file = 'time_file_%s.txt' % FLAGS.dataset
    time_file = os.path.join(DUMP_DIR, time_file)
    f = open(time_file, 'w')
    all_time = 0
    for batch_idx, batch_data_label in enumerate(TEST_DATALOADER):
        if batch_idx % 10 == 0:
            print('Eval batch: %d'%(batch_idx))
        end_points = {}
        for key in batch_data_label:
            batch_data_label[key] = batch_data_label[key].to(device)
        inputs = {'point_clouds': batch_data_label['point_clouds']}

        tic = time.time()
        with torch.no_grad():
            end_points = net(inputs, end_points)
        toc = time.time()
        t = toc - tic
        all_time += t
        f.write('batch_idx:%d, infer time:%f\n' % (batch_idx, t))
        print('Inference time: %f'%(t))

        # Compute loss
        for key in batch_data_label:
            end_points[key] = batch_data_label[key]
        loss, end_points = criterion(inputs, end_points, DATASET_CONFIG)

        # Accumulate statistics and print out
        for key in end_points:
            if 'loss' in key or 'acc' in key or 'ratio' in key:
                if key not in stat_dict: stat_dict[key] = 0
                stat_dict[key] += end_points[key].item()

        batch_pred_map_cls = parse_predictions(end_points, CONFIG_DICT, opt_ang=(FLAGS.dataset == 'sunrgbd'))
        batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT) 
        ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)

        batch_pred_map_cls = parse_predictions(end_points, CONFIG_DICT_L, opt_ang=(FLAGS.dataset == 'sunrgbd'))
        batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT_L) 
        ap_calculator_l.step(batch_pred_map_cls, batch_gt_map_cls)

        if FLAGS.dump_results:
            dump_results(end_points, DUMP_DIR+'/result/', DATASET_CONFIG, TEST_DATASET)

    mean_time = all_time/float(batch_idx+1)
    f.write('Batch number:%d\n' % (batch_idx+1))
    f.write('mean infer time: %f\n' % (mean_time))
    f.close()
    print('Mean inference time: %f'%(mean_time))
    # Log statistics
    TEST_VISUALIZER.log_scalars({key:stat_dict[key]/float(batch_idx+1) for key in stat_dict},
        (EPOCH_CNT+1)*len(TRAIN_DATALOADER)*BATCH_SIZE)
    for key in sorted(stat_dict.keys()):
        log_string('eval mean %s: %f'%(key, stat_dict[key]/(float(batch_idx+1))))

    metrics_dict = ap_calculator.compute_metrics()
    for key in metrics_dict:
        log_string('eval %s: %f'%(key, metrics_dict[key]))
    metrics_dict = ap_calculator_l.compute_metrics()
    for key in metrics_dict:
        log_string('eval %s: %f'%(key, metrics_dict[key]))

    mean_loss = stat_dict['loss']/float(batch_idx+1)
    return mean_loss
Code Example #9
def evaluate_one_epoch():
    stat_dict = {}

    ap_calculator_list = [APCalculator(iou_thresh, DATASET_CONFIG.class2type) \
        for iou_thresh in AP_IOU_THRESHOLDS]

    net.eval() # set model to eval mode (for bn and dp)

    for batch_idx, batch_data_label in enumerate(TEST_DATALOADER):
        if batch_idx % 10 == 0:
            print('Eval batch: %d'%(batch_idx))

        if 'files_crops' in batch_data_label:

            if len(batch_data_label['files_crops']) == 0:
                continue

            batch_data_label['files_crops'] = [x[0] for x in batch_data_label['files_crops']]

        # Forward pass
        with torch.no_grad():
            end_points = net.forward_full_tour(batch_data_label,
                                               DATASET_CONFIG,
                                               device,
                                               files_crops=batch_data_label['files_crops'],
                                               **settings)

        for key in batch_data_label:
            assert(key not in end_points)
            end_points[key] = batch_data_label[key]

        # send data to device
        for key in end_points:
            if (key == 'scan_name') or\
               (key == 'files_crops'): continue
            end_points[key] = end_points[key].to(device)

        # /!\ cannot compute the loss: crops are upsampled, so the numbers of
        # points and of seeds/predicted votes no longer match
        # Compute loss
        # -- loss, end_points = criterion(end_points, DATASET_CONFIG)

        # -- # Accumulate statistics and print out
        # -- for key in end_points:
        # --     if 'loss' in key or 'acc' in key or 'ratio' in key:
        # --         if key not in stat_dict: stat_dict[key] = 0
        # --         stat_dict[key] += end_points[key].item()

        batch_pred_map_cls = parse_predictions(end_points, CONFIG_DICT)
        batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT)
        for ap_calculator in ap_calculator_list:
            ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)

        # Dump evaluation results for visualization
        # -- if batch_idx == 0:
        # --     MODEL.dump_results(end_points,
        # --                        DUMP_DIR,
        # --                        DATASET_CONFIG)


    # Log statistics
    #for key in sorted(stat_dict.keys()):
    #    log_string('eval mean %s: %f'%(key, stat_dict[key]/(float(batch_idx+1))))

    # Evaluate average precision
    for i, ap_calculator in enumerate(ap_calculator_list):
        print('-'*10, 'iou_thresh: %f'%(AP_IOU_THRESHOLDS[i]), '-'*10)
        metrics_dict = ap_calculator.compute_metrics()
        for key in metrics_dict:
            log_string('eval %s: %f'%(key, metrics_dict[key]))

    #mean_loss = stat_dict['loss']/float(batch_idx+1)
    #return mean_loss
    return -1
Code Example #10
def evaluate_one_epoch():
    stat_dict = {}  # collect statistics
    ap_calculator_list = [APCalculator(iou_thresh, DATASET_CONFIG.class2type) \
                          for iou_thresh in AP_IOU_THRESHOLDS]
    net.eval()  # set model to eval mode (for bn and dp)
    for batch_idx, batch_data_label in enumerate(TEST_DATALOADER):
        for key in batch_data_label:
            batch_data_label[key] = batch_data_label[key].to(device)

        # Forward pass
        inputs = {'point_clouds': batch_data_label['point_clouds']}
        with torch.no_grad():
            end_points = net(inputs)

        # Compute loss
        for key in batch_data_label:
            assert (key not in end_points)
            end_points[key] = batch_data_label[key]
        loss, end_points = get_labeled_loss(end_points, DATASET_CONFIG,
                                            CONFIG_DICT)

        # Accumulate statistics and print out
        for key in end_points:
            if 'loss' in key or 'acc' in key or 'ratio' in key:
                if key not in stat_dict: stat_dict[key] = 0
                stat_dict[key] += end_points[key].item()

        batch_pred_map_cls = parse_predictions(end_points, CONFIG_DICT)
        batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT)
        for ap_calculator in ap_calculator_list:
            ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)

        # Dump evaluation results for visualization
        if FLAGS.dump_results and batch_idx == 0 and EPOCH_CNT % 10 == 0:
            dump_results(end_points, DUMP_DIR, DATASET_CONFIG)

    # Log statistics
    TEST_VISUALIZER.log_scalars(
        {
            tb_name(key): stat_dict[key] / float(batch_idx + 1)
            for key in stat_dict
        }, (EPOCH_CNT + 1) * len(TRAIN_DATALOADER) * BATCH_SIZE)
    for key in sorted(stat_dict.keys()):
        log_string('eval mean %s: %f' % (key, stat_dict[key] /
                                         (float(batch_idx + 1))))

    mAP_list = []  # collect mAP at each IoU threshold
    # Evaluate average precision
    for i, ap_calculator in enumerate(ap_calculator_list):
        print('-' * 10, 'iou_thresh: %f' % (AP_IOU_THRESHOLDS[i]), '-' * 10)
        metrics_dict = ap_calculator.compute_metrics()
        for key in metrics_dict:
            log_string('eval %s: %f' % (key, metrics_dict[key]))
        TEST_VISUALIZER.log_scalars(
            {
                'metrics_' + str(AP_IOU_THRESHOLDS[i]) + '/' + key:
                metrics_dict[key]
                for key in metrics_dict if key in ['mAP', 'AR']
            }, (EPOCH_CNT + 1) * len(TRAIN_DATALOADER) * BATCH_SIZE)
        mAP_list.append(metrics_dict['mAP'])

    mean_loss = stat_dict['loss'] / float(batch_idx + 1)
    return mean_loss, mAP_list
Code Example #11
File: eval.py  Project: SirWyver/votenet
def evaluate_one_epoch():
    stat_dict = {}
    ap_calculator_list = [APCalculator(iou_thresh, DATASET_CONFIG.class2type) for iou_thresh in AP_IOU_THRESHOLDS]
    net.eval()  # set model to eval mode (for bn and dp)

    serialize_preds = dict()
    for batch_idx, batch_data_label in enumerate(TEST_DATALOADER):
        if batch_idx % 10 == 0:
            print("Eval batch: %d" % (batch_idx))
        for key in batch_data_label:
            if key not in ["scan_name", "frame_idx"]:
                batch_data_label[key] = batch_data_label[key].to(device)

        # Forward pass
        inputs = {"point_clouds": batch_data_label["point_clouds"]}
        with torch.no_grad():
            end_points = net(inputs)

        # Compute loss
        for key in batch_data_label:
            assert key not in end_points
            end_points[key] = batch_data_label[key]
        loss, end_points = criterion(end_points, DATASET_CONFIG)

        # Accumulate statistics and print out
        for key in end_points:
            if "loss" in key or "acc" in key or "ratio" in key:
                if key not in stat_dict:
                    stat_dict[key] = 0
                stat_dict[key] += end_points[key].item()

        batch_pred_map_cls = parse_predictions(end_points, CONFIG_DICT)
        for i in range(len(batch_data_label["scan_name"])):
            scan_name, frame_idx = batch_data_label["scan_name"][i], batch_data_label["frame_idx"][i]
            # serialize_pred = {"scan_name": scan_name, "frame_idx": frame_idx.item()}
            pred_maps_cls = batch_pred_map_cls[i]
            instances = []
            for pred_map in pred_maps_cls:
                cls_id, pred_corners_3d_upright_camera, obj_prob = pred_map
                instances.append({"cls_id": cls_id, "faabb": pred_corners_3d_upright_camera, "obj_prob": float(obj_prob)})
            # serialize_pred["instances"] = instances
            serialize_preds[(scan_name, frame_idx.item())] = instances
        # (pred_sem_cls[i,j].item(), pred_corners_3d_upright_camera[i,j], obj_prob[i,j])

        batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT)
        for ap_calculator in ap_calculator_list:
            ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)

        # Dump evaluation results for visualization
        if batch_idx == 0:
            MODEL.dump_results(end_points, DUMP_DIR, DATASET_CONFIG)
    Dyme(DUMP_DIR).store("serialize_preds.yaml", serialize_preds)
    # json.dump(serialize_preds, open(DUMP_DIR + "serialize_preds.json", "w"))
    # Log statistics
    for key in sorted(stat_dict.keys()):
        log_string("eval mean %s: %f" % (key, stat_dict[key] / (float(batch_idx + 1))))

    # Evaluate average precision
    for i, ap_calculator in enumerate(ap_calculator_list):
        print("-" * 10, "iou_thresh: %f" % (AP_IOU_THRESHOLDS[i]), "-" * 10)
        metrics_dict = ap_calculator.compute_metrics()
        for key in metrics_dict:
            log_string("eval %s: %f" % (key, metrics_dict[key]))

    mean_loss = stat_dict["loss"] / float(batch_idx + 1)
    return mean_loss
Code Example #12
                with torch.no_grad():
                    end_points = net(inputs)

                # Compute loss
                for key in batch_data_label:
                    assert (key not in end_points)
                    end_points[key] = batch_data_label[key]
                loss, end_points = criterion(end_points, DATASET_CONFIG)

                # Accumulate statistics and print out
                for key in end_points:
                    if 'loss' in key or 'acc' in key or 'ratio' in key:
                        if key not in stat_dict: stat_dict[key] = 0
                        stat_dict[key] += end_points[key].item()

                batch_pred_map_cls = parse_predictions(end_points, CONFIG_DICT)
                batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT)
                ap_Measurement.step(batch_pred_map_cls, batch_gt_map_cls)

                # Dump evaluation results for visualization
                if FLAGS.dump_results and batch_idx == 0 and EPOCH_CNT % 10 == 0:
                    MODEL.dump_results(end_points, DUMP_DIR, DATASET_CONFIG)

            # Log statistics
            TEST_VISUALIZER.log_scalars(
                {
                    key: stat_dict[key] / float(batch_idx + 1)
                    for key in stat_dict
                }, (EPOCH_CNT + 1) * len(TRAIN_DATALOADER) * BATCH_SIZE)
            for key in sorted(stat_dict.keys()):
                print('eval mean %s: %f' % (key, stat_dict[key] /
                                            (float(batch_idx + 1))))
Code Example #13
def evaluate_one_epoch():
    stat_dict = {}
    ap_calculator_list = [APCalculator(iou_thresh, DATASET_CONFIG.class2type) \
        for iou_thresh in AP_IOU_THRESHOLDS]
    net.eval()  # set model to eval mode (for bn and dp)
    for batch_idx, batch_data_label in enumerate(TEST_DATALOADER):
        if batch_idx % 10 == 0:
            print('Eval batch: %d' % (batch_idx))
        for key in batch_data_label:
            batch_data_label[key] = batch_data_label[key].to(device)

        # Forward pass
        inputs = {'point_clouds': batch_data_label['point_clouds']}
        if FLAGS.use_imvotenet:
            inputs.update({
                'scale': batch_data_label['scale'],
                'calib_K': batch_data_label['calib_K'],
                'calib_Rtilt': batch_data_label['calib_Rtilt'],
                'cls_score_feats': batch_data_label['cls_score_feats'],
                'full_img_votes_1d': batch_data_label['full_img_votes_1d'],
                'full_img_1d': batch_data_label['full_img_1d'],
                'full_img_width': batch_data_label['full_img_width'],
            })
            with torch.no_grad():
                end_points = net(inputs, joint_only=True)
        else:
            with torch.no_grad():
                end_points = net(inputs)

        # Compute loss
        for key in batch_data_label:
            if key not in end_points:
                end_points[key] = batch_data_label[key]
        loss, end_points = criterion(end_points, DATASET_CONFIG,
                                     KEY_PREFIX_LIST, TOWER_WEIGHTS)

        # Accumulate statistics and print out
        for key in end_points:
            if 'loss' in key or 'acc' in key or 'ratio' in key:
                if key not in stat_dict: stat_dict[key] = 0
                stat_dict[key] += end_points[key].item()

        batch_pred_map_cls = parse_predictions(end_points, CONFIG_DICT,
                                               KEY_PREFIX_LIST[0])
        batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT)
        for ap_calculator in ap_calculator_list:
            ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)

        # Dump evaluation results for visualization
        if batch_idx == 0:
            MODEL.dump_results(end_points,
                               DUMP_DIR,
                               DATASET_CONFIG,
                               key_prefix=KEY_PREFIX_LIST[-1])

    # Log statistics
    for key in sorted(stat_dict.keys()):
        log_string('eval mean %s: %f' % (key, stat_dict[key] /
                                         (float(batch_idx + 1))))

    # Evaluate average precision
    for i, ap_calculator in enumerate(ap_calculator_list):
        print('-' * 10, 'iou_thresh: %f' % (AP_IOU_THRESHOLDS[i]), '-' * 10)
        metrics_dict = ap_calculator.compute_metrics()
        for key in metrics_dict:
            log_string('eval %s: %f' % (key, metrics_dict[key]))

    mean_loss = stat_dict['loss'] / float(batch_idx + 1)
    return mean_loss
Code Example #14
def evaluate_one_epoch():
    stat_dict = {}
    pred_dict = []
    ap_calculator_list = [APCalculator(iou_thresh, DATASET_CONFIG.class2type) \
        for iou_thresh in AP_IOU_THRESHOLDS]
    net.eval()  # set model to eval mode (for bn and dp)
    for batch_idx, batch_data_label in enumerate(TEST_DATALOADER):
        # if batch_idx > 2:
        #     break

        if batch_idx % 10 == 0:
            print('Eval batch: %d' % (batch_idx))
        for key in batch_data_label:
            batch_data_label[key] = batch_data_label[key].to(device)

        # Forward pass
        inputs = {'point_clouds': batch_data_label['point_clouds']}
        with torch.no_grad():
            end_points = net(inputs)

        end_points['relation_type'] = FLAGS.relation_type
        end_points['relation_pair'] = FLAGS.relation_pair

        # Compute loss
        for key in batch_data_label:
            assert (key not in end_points)
            end_points[key] = batch_data_label[key]
        loss, end_points = criterion(end_points, DATASET_CONFIG)

        # Accumulate statistics and print out
        for key in end_points:
            if 'loss' in key or 'acc' in key or 'ratio' in key:
                if key not in stat_dict: stat_dict[key] = 0
                stat_dict[key] += end_points[key].item()

        batch_pred_map_cls = parse_predictions(end_points, CONFIG_DICT)
        batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT)
        for ap_calculator in ap_calculator_list:
            ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)

        # Dump evaluation results for visualization
        if batch_idx == 0:
            MODEL.dump_results(end_points, DUMP_DIR, DATASET_CONFIG)

        # Record the bbox before/after nms for vis
        # pred_center = end_points['center'].detach().cpu().numpy()  # (B,K,3)
        # pred_heading_class = torch.argmax(end_points['heading_scores'], -1)  # B,num_proposal
        # pred_heading_residual = torch.gather(end_points['heading_residuals'], 2,
        #                                      pred_heading_class.unsqueeze(-1))  # B,num_proposal,1
        # pred_heading_class = pred_heading_class.detach().cpu().numpy()  # B,num_proposal
        # pred_heading_residual = pred_heading_residual.squeeze(2).detach().cpu().numpy()  # B,num_proposal
        # pred_size_class = torch.argmax(end_points['size_scores'], -1)  # B,num_proposal
        # pred_size_residual = torch.gather(end_points['size_residuals'], 2,
        #                                   pred_size_class.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, 1,
        #                                                                                      3))  # B,num_proposal,1,3
        # pred_size_residual = pred_size_residual.squeeze(2).detach().cpu().numpy()  # B,num_proposal,3
        # pred_size_class = pred_size_class.detach().cpu().numpy()
        # for _i in range(len(pred_center)):
        #     pred_dict.append({
        #         'point_clouds': end_points['mesh_vertices'][_i].detach().cpu().numpy(),
        #         'pred_corners_3d_upright_camera': end_points['pred_corners_3d_upright_camera'][_i],
        #         'pred_mask': end_points['pred_mask'][_i],
        #         'pred_center': pred_center[_i],
        #         'pred_heading_class': pred_heading_class[_i],
        #         'pred_heading_residual': pred_heading_residual[_i],
        #         'pred_size_class': pred_size_class[_i],
        #         'pred_size_residual': pred_size_residual[_i],
        #         'nearest_n_index': end_points['nearest_n_index'][_i].detach().cpu().numpy(),
        #         'sem_cls_label': torch.gather(end_points['sem_cls_label'], 1, end_points['object_assignment'])[
        #             _i].detach().cpu().numpy(),
        #         'rn_label': end_points['rn_labels_1'][_i].detach().cpu().numpy(),
        #         # 'scan_name': TEST_DATASET.scan_names[batch_data_label['scan_idx'][_i]]
        #     })

    # draw the bbox
    # for _i in range(len(pred_dict)):
    #     # draw_bbox(pred_dict[_i], DATASET_CONFIG, nms=True)
    #     # draw_bbox2(pred_dict[_i])
    #     draw_relation_pairs(pred_dict[_i], DATASET_CONFIG)

    # Log statistics
    for key in sorted(stat_dict.keys()):
        log_string('eval mean %s: %f' % (key, stat_dict[key] /
                                         (float(batch_idx + 1))))

    class_list = [
        'window', 'bed', 'counter', 'sofa', 'table', 'showercurtrain',
        'garbagebin', 'sink', 'picture', 'chair', 'desk', 'curtain',
        'refrigerator', 'door', 'toilet', 'bookshelf', 'bathtub', 'cabinet',
        'mAP', 'AR'
    ]

    # Evaluate average precision
    for i, ap_calculator in enumerate(ap_calculator_list):
        print('-' * 10, 'iou_thresh: %f' % (AP_IOU_THRESHOLDS[i]), '-' * 10)
        metrics_dict = ap_calculator.compute_metrics()
        for key in metrics_dict:
            log_string('eval %s: %f' % (key, metrics_dict[key]))
    # for i, ap_calculator in enumerate(ap_calculator_list):
    #     print('-'*10, 'iou_thresh: %f'%(AP_IOU_THRESHOLDS[i]), '-'*10)
    #     metrics_dict = ap_calculator.compute_metrics()
    #
    #     for cls in class_list:
    #         for key in metrics_dict:
    #             if cls in key:
    #                 log_string('eval %s: %f'%(key, metrics_dict[key]))

    mean_loss = stat_dict['loss'] / float(batch_idx + 1)
    return mean_loss
Code Example #15
def evaluate_one_epoch():
    stat_dict = {}  # collect statistics
    ap_calculator_dict = {}
    for key_prefix in KEY_PREFIX_LIST:
        ap_calculator_dict[key_prefix + 'ap_calculator'] = APCalculator(
            ap_iou_thresh=FLAGS.ap_iou_thresh,
            class2type_map=DATASET_CONFIG.class2type)
    net.eval()  # set model to eval mode (for bn and dp)
    for batch_idx, batch_data_label in enumerate(TEST_DATALOADER):
        if batch_idx % 10 == 0:
            print('Eval batch: %d' % (batch_idx))
        for key in batch_data_label:
            batch_data_label[key] = batch_data_label[key].to(device)

        # Forward pass
        inputs = {'point_clouds': batch_data_label['point_clouds']}
        if FLAGS.use_imvotenet:
            inputs.update({
                'scale': batch_data_label['scale'],
                'calib_K': batch_data_label['calib_K'],
                'calib_Rtilt': batch_data_label['calib_Rtilt'],
                'cls_score_feats': batch_data_label['cls_score_feats'],
                'full_img_votes_1d': batch_data_label['full_img_votes_1d'],
                'full_img_1d': batch_data_label['full_img_1d'],
                'full_img_width': batch_data_label['full_img_width'],
            })
        with torch.no_grad():
            end_points = net(inputs)

        # Compute loss
        for key in batch_data_label:
            if key not in end_points:
                end_points[key] = batch_data_label[key]
        loss, end_points = criterion(end_points, DATASET_CONFIG,
                                     KEY_PREFIX_LIST, TOWER_WEIGHTS)

        # Accumulate statistics and print out
        for key in end_points:
            if 'loss' in key or 'acc' in key or 'ratio' in key:
                if key not in stat_dict: stat_dict[key] = 0
                stat_dict[key] += end_points[key].item()

        for key_prefix in KEY_PREFIX_LIST:
            batch_pred_map_cls = parse_predictions(end_points, CONFIG_DICT,
                                                   key_prefix)
            batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT)
            ap_calculator_dict[key_prefix + 'ap_calculator'].step(
                batch_pred_map_cls, batch_gt_map_cls)

        # Dump evaluation results for visualization
        if FLAGS.dump_results and batch_idx == 0 and EPOCH_CNT % 10 == 0:
            MODEL.dump_results(end_points,
                               DUMP_DIR,
                               DATASET_CONFIG,
                               key_prefix=KEY_PREFIX_LIST[-1])

    # Log statistics
    TEST_VISUALIZER.log_scalars(
        {key: stat_dict[key] / float(batch_idx + 1)
         for key in stat_dict},
        (EPOCH_CNT + 1) * len(TRAIN_DATALOADER) * BATCH_SIZE)
    for key in sorted(stat_dict.keys()):
        log_string('eval mean %s: %f' % (key, stat_dict[key] /
                                         (float(batch_idx + 1))))

    # Evaluate average precision
    for key_prefix in KEY_PREFIX_LIST:
        metrics_dict = ap_calculator_dict[key_prefix +
                                          'ap_calculator'].compute_metrics()
        for key in metrics_dict:
            log_string('eval %s: %f' % (key, metrics_dict[key]))

    mean_loss = stat_dict['loss'] / float(batch_idx + 1)
    return mean_loss
Code Example #16
    objectness_score_normalized_batch = y_pred[0].astype(
        np.float32)  # B, num_proposal,2
    center_batch = y_pred[1].astype(np.float32)  # B, num_proposal,3
    heading_batch = y_pred[2].astype(np.float32)  # B, num_proposal
    size_batch = y_pred[3].astype(np.float32)  # B, num_proposal,3
    sem_class_normalized_batch = y_pred[4].astype(
        np.float32)  # B, num_proposal,num_class
    seeds_xyz_batch = y_pred[5].astype(np.float32)  # B, num_seeds,3
    votes_xyz_batch = y_pred[6].astype(
        np.float32)  # B, num_seeds*vote_factor,3

    batch_pred_map_cls = parse_predictions(
        objectness_score_normalized_batch,
        center_batch,
        heading_batch,
        size_batch,
        sem_class_normalized_batch,
        conf_thresh,
        nms_iou,
        DC.num_class,
        per_class_proposal=per_class_proposal,
        cls_nms=cls_nms)
    batch_gt_map_cls = parse_groundtruths(
        center_label.astype(np.float32),
        heading_class_label.astype(np.float32),
        heading_residual_label.astype(np.float32),
        size_class_label.astype(np.float32),
        size_residual_label.astype(np.float32),
        sem_cls_label.astype(np.float32), box_label_mask.astype(np.float32),
        DC)
    ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)
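
The example above stops at ap_calculator.step(); reading the metrics out once per epoch would look like the following (a minimal sketch assuming the same APCalculator API used in the other examples on this page, including a reset() method):

# Per-epoch readout; assumes the APCalculator API used throughout this page.
metrics_dict = ap_calculator.compute_metrics()
for key in metrics_dict:
    print('eval %s: %f' % (key, metrics_dict[key]))
ap_calculator.reset()  # assumption: clears accumulated state for the next epoch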
Code Example #17
    def run(self):
        logging.info('Start running...')
        idx = 0
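        # 4x4 homogeneous transform that negates the Y and Z axes,
        # flipping the geometry for display in the Open3D viewer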
        flip_transform = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0],
                          [0, 0, 0, 1]]

        if self.visualize:
            logging.info('Creating visualizer...')
            set_bounding_box = False
            self.vis = o3d.visualization.Visualizer()
            self.vis.create_window(window_name='point cloud',
                                   width=1280,
                                   height=960)
            self.point_cloud = o3d.geometry.PointCloud()
            self.point_cloud.points = o3d.utility.Vector3dVector([])
            self.point_cloud.colors = o3d.utility.Vector3dVector([])
            self.bbox = o3d.geometry.LineSet()
            self.bbox.points = o3d.utility.Vector3dVector([])
            self.bbox.lines = o3d.utility.Vector2iVector([])
            self.bbox.colors = o3d.utility.Vector3dVector([])

        while not self.flag_exit:
            point_cloud = self.get_data(idx)
            if point_cloud is None:
                continue
            pc = self.preprocess_point_cloud(point_cloud)
            inputs = {
                'point_clouds': torch.from_numpy(pc).to(self.torch_device)
            }
            tic = time.time()
            with torch.no_grad():
                end_points = self.net(inputs)
            toc = time.time()
            logging.info('Inference time: %f' % (toc - tic))
            end_points['point_clouds'] = inputs['point_clouds']
            pred_map_cls = parse_predictions(end_points, self.eval_config_dict)
            logging.info('Finished detection. %d object detected.' %
                         (len(pred_map_cls[0])))

            if self.visualize:
                geometries = self.get_geometries(point_cloud, end_points, idx)
                pcd = geometries[0]
                if len(geometries) == 2:
                    bbox = geometries[1]
                else:
                    bbox = o3d.geometry.LineSet()
                    bbox.points = o3d.utility.Vector3dVector([])
                    bbox.lines = o3d.utility.Vector2iVector([])
                    bbox.colors = o3d.utility.Vector3dVector([])
                self.point_cloud.points = pcd.points
                self.point_cloud.colors = pcd.colors
                self.bbox.points = bbox.points
                self.bbox.lines = bbox.lines
                self.bbox.colors = bbox.colors
                self.point_cloud.transform(flip_transform)
                self.bbox.transform(flip_transform)
                if idx == 0:
                    self.vis.add_geometry(self.point_cloud)
                    self.vis.add_geometry(self.bbox)
                self.vis.update_geometry(self.point_cloud)
                self.vis.update_geometry(self.bbox)
                self.vis.poll_events()
                self.vis.update_renderer()

            if self.save_data:
                print(idx)
                if not os.path.exists(self.output_dir):
                    os.mkdir(self.output_dir)
                self.MODEL.dump_results(end_points,
                                        self.output_dir,
                                        DC,
                                        inference_switch=True,
                                        DUMP_CONF_THRESH=self.DUMP_CONF_THRESH,
                                        idx_beg=idx)
                logging.info('Dumped detection results to folder %s' %
                             (self.output_dir))
            idx += 1
        if self.visualize:
            self.vis.destroy_window()
Code Example #18
    # Load checkpoint
    optimizer = optim.Adam(net.parameters(), lr=0.001)
    checkpoint = torch.load(checkpoint_path)
    net.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    epoch = checkpoint['epoch']
    print("Loaded checkpoint %s (epoch: %d)" % (checkpoint_path, epoch))

    # Load and preprocess input point cloud
    net.eval()  # set model to eval mode (for bn and dp)
    point_cloud = read_ply(pc_path)
    pc = preprocess_point_cloud(point_cloud)
    print('Loaded point cloud data: %s' % (pc_path))

    # Model inference
    inputs = {'point_clouds': torch.from_numpy(pc).to(device)}
    tic = time.time()
    with torch.no_grad():
        end_points = net(inputs)
    toc = time.time()
    print('Inference time: %f' % (toc - tic))
    end_points['point_clouds'] = inputs['point_clouds']
    pred_map_cls = parse_predictions(end_points, eval_config_dict)
    print('Finished detection. %d object detected.' % (len(pred_map_cls[0])))

    dump_dir = os.path.join(demo_dir, '%s_results' % (FLAGS.dataset))
    if not os.path.exists(dump_dir): os.mkdir(dump_dir)
    MODEL.dump_results(end_points, dump_dir, DC, True)
    print('Dumped detection results to folder %s' % (dump_dir))
Code Example #19
def evaluate_one_epoch():
    stat_dict = {}
    ap_calculator_list = [APCalculator(iou_thresh, DATASET_CONFIG.class2type) \
        for iou_thresh in AP_IOU_THRESHOLDS]
    net.eval()  # set model to eval mode (for bn and dp)
    for batch_idx, batch_data_label in enumerate(TEST_DATALOADER):

        scan_name_list = batch_data_label['scan_name']
        del batch_data_label['scan_name']

        if batch_idx % 10 == 0:
            print('Eval batch: %d' % (batch_idx))

        for key in batch_data_label:
            batch_data_label[key] = batch_data_label[key].to(device)

        # Forward pass
        inputs = {'point_clouds': batch_data_label['point_clouds']}
        with torch.no_grad():
            end_points = net(inputs)

        # Compute loss
        for key in batch_data_label:
            assert (key not in end_points)
            end_points[key] = batch_data_label[key]
        loss, end_points = criterion(end_points, DATASET_CONFIG)

        # Accumulate statistics and print out
        for key in end_points:
            if 'loss' in key or 'acc' in key or 'ratio' in key:
                if key not in stat_dict: stat_dict[key] = 0
                stat_dict[key] += end_points[key].item()

        batch_pred_map_cls = parse_predictions(end_points, CONFIG_DICT)
        batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT)
        for ap_calculator in ap_calculator_list:
            ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)

        ######## Saving data ########
        save_dir = '/home/sirdome/katefgroup/language_grounding/mlcvnet_dump'

        # INPUT
        point_clouds = end_points['point_clouds'].cpu().numpy()
        batch_size = point_clouds.shape[0]

        # NETWORK OUTPUTS
        seed_xyz = end_points['seed_xyz'].detach().cpu().numpy()  # (B,num_seed,3)
        if 'vote_xyz' in end_points:
            vote_xyz = end_points['vote_xyz'].detach().cpu().numpy()  # (B,num_seed,3)
            aggregated_vote_xyz = end_points['aggregated_vote_xyz'].detach().cpu().numpy()
        objectness_scores = end_points['objectness_scores'].detach().cpu().numpy()  # (B,K,2)
        pred_center = end_points['center'].detach().cpu().numpy()  # (B,K,3)
        pred_heading_class = torch.argmax(end_points['heading_scores'], -1)  # B,num_proposal
        pred_heading_residual = torch.gather(
            end_points['heading_residuals'], 2,
            pred_heading_class.unsqueeze(-1))  # B,num_proposal,1
        pred_heading_class = pred_heading_class.detach().cpu().numpy()  # B,num_proposal
        pred_heading_residual = pred_heading_residual.squeeze(2).detach().cpu().numpy()  # B,num_proposal
        pred_size_class = torch.argmax(end_points['size_scores'], -1)  # B,num_proposal
        pred_size_residual = torch.gather(
            end_points['size_residuals'], 2,
            pred_size_class.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, 1, 3))  # B,num_proposal,1,3
        pred_size_residual = pred_size_residual.squeeze(2).detach().cpu().numpy()  # B,num_proposal,3

        # OTHERS
        pred_mask = end_points['pred_mask']  # B,num_proposal
        idx_beg = 0
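        # flip_axis_to_camera converts depth coordinates (Z up) to
        # upright camera coordinates (Y down)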
        pred_center_upright_camera = flip_axis_to_camera(pred_center)

        for i in range(batch_size):
            objectness_prob = softmax(objectness_scores[i, :, :])[:, 1]  # (K,)

            # Dump predicted bounding boxes
            sr3d_boxes = []  # default when no proposal clears the objectness threshold
            if np.sum(objectness_prob > 0.5) > 0:
                num_proposal = pred_center.shape[1]
                sr3d_boxes = []
                for j in range(num_proposal):

                    heading_angle = CONFIG_DICT['dataset_config'].class2angle(\
                        pred_heading_class[i,j], pred_heading_residual[i,j])
                    box_size = CONFIG_DICT['dataset_config'].class2size(\
                        int(pred_size_class[i,j]), pred_size_residual[i,j])

                    corners_3d_upright_camera = get_3d_box(
                        box_size, heading_angle,
                        pred_center_upright_camera[i, j, :])
                    box3d = corners_3d_upright_camera
                    box3d = flip_axis_to_depth(box3d)

                    sr3d_box = _convert_all_corners_to_end_points(box3d)
                    # sr3d_box = convert_mlcvnetbox_to_sr3d(DATASET_CONFIG, pred_center[i,j,0:3],
                    # pred_size_class[i,j], pred_size_residual[i,j])
                    sr3d_boxes.append(sr3d_box)

                if len(sr3d_boxes) > 0:
                    sr3d_boxes = np.vstack(
                        tuple(sr3d_boxes))  # (num_proposal, 6)
                    # Output boxes according to their semantic labels
                    pred_sem_cls = torch.argmax(end_points['sem_cls_scores'],
                                                -1)  # B,num_proposal
                    pred_sem_cls = pred_sem_cls.detach().cpu().numpy()
                    mask = np.logical_and(objectness_prob > 0.5,
                                          pred_mask[i, :] == 1)
                    sr3d_boxes = sr3d_boxes[mask, :]

                sr3d_boxes = list(sr3d_boxes)

            scan_name = scan_name_list[i]
            class_label_list = [
                DATASET_CONFIG.class2type[p[0]] for p in batch_pred_map_cls[i]
            ]

            print(len(class_label_list))

            assert (len(sr3d_boxes) == len(class_label_list))

            data_dict = {
                "class": class_label_list,
                "box": sr3d_boxes,
                "pc": point_clouds[i]
            }

            np.save(f'{save_dir}/{scan_name}.npy', data_dict)

    # Log statistics
    for key in sorted(stat_dict.keys()):
        log_string('eval mean %s: %f' % (key, stat_dict[key] /
                                         (float(batch_idx + 1))))

    # Evaluate average precision
    for i, ap_calculator in enumerate(ap_calculator_list):
        print('-' * 10, 'iou_thresh: %f' % (AP_IOU_THRESHOLDS[i]), '-' * 10)
        metrics_dict = ap_calculator.compute_metrics()
        for key in metrics_dict:
            log_string('eval %s: %f' % (key, metrics_dict[key]))

    mean_loss = stat_dict['loss'] / float(batch_idx + 1)
    return mean_loss
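
Because np.save is used above to store a Python dict, reading a dump back requires allow_pickle=True (the scan name below is illustrative):

import numpy as np

# Files are written as f'{save_dir}/{scan_name}.npy' in the loop above.
data_dict = np.load('mlcvnet_dump/scene0000_00.npy', allow_pickle=True).item()
print(data_dict['class'])     # predicted class names for the kept proposals
print(len(data_dict['box']))  # corresponding boxes
print(data_dict['pc'].shape)  # input point cloud for this scan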