Example #1
DATASET_DIR = args.dataset_root_dir
if args.dataset_split_file == '':
    DATASET_SPLIT_FILE = os.path.join(
        DATASET_DIR, '3DOP_splits/' + train_config['train_dataset'])
else:
    DATASET_SPLIT_FILE = args.dataset_split_file
config_complete = load_config(args.config_path)
if 'train' in config_complete:
    config = config_complete['train']
else:
    config = config_complete
# input function ==============================================================
dataset = KittiDataset(os.path.join(DATASET_DIR, 'image/training/image_2'),
                       os.path.join(DATASET_DIR,
                                    'velodyne/training/velodyne/'),
                       os.path.join(DATASET_DIR, 'calib/training/calib/'),
                       os.path.join(DATASET_DIR, 'labels/training/label_2'),
                       DATASET_SPLIT_FILE,
                       num_classes=config['num_classes'])
NUM_CLASSES = dataset.num_classes

if 'NUM_TEST_SAMPLE' not in train_config:
    NUM_TEST_SAMPLE = dataset.num_files
else:
    if train_config['NUM_TEST_SAMPLE'] < 0:
        NUM_TEST_SAMPLE = dataset.num_files
    else:
        NUM_TEST_SAMPLE = train_config['NUM_TEST_SAMPLE']

BOX_ENCODING_LEN = get_encoding_len(config['box_encoding_method'])
box_encoding_fn = get_box_encoding_fn(config['box_encoding_method'])
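The NUM_TEST_SAMPLE fallback above treats a missing key and a negative value the same way (use every file in the split); a minimal, behavior-equivalent sketch of the same lookup, not taken from the original project:

NUM_TEST_SAMPLE = train_config.get('NUM_TEST_SAMPLE', -1)
if NUM_TEST_SAMPLE < 0:
    NUM_TEST_SAMPLE = dataset.num_files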
Example #2
else:
    DATASET_SPLIT_FILE = args.dataset_split_file
if args.output_dir == '':
    OUTPUT_DIR = os.path.join(args.checkpoint_path, './eval/')
else:
    OUTPUT_DIR = args.output_dir
CHECKPOINT_PATH = args.checkpoint_path
CONFIG_PATH = os.path.join(CHECKPOINT_PATH, 'config')
assert os.path.isfile(CONFIG_PATH), 'No config file found in %s' % CONFIG_PATH
config = load_config(CONFIG_PATH)
# setup dataset ===============================================================
if IS_TEST:
    dataset = KittiDataset(os.path.join(DATASET_DIR, 'image/testing/image_2'),
                           os.path.join(DATASET_DIR,
                                        'velodyne/testing/velodyne/'),
                           os.path.join(DATASET_DIR, 'calib/testing/calib/'),
                           '',
                           num_classes=config['num_classes'],
                           is_training=False)
else:
    dataset = KittiDataset(os.path.join(DATASET_DIR, 'image/training/image_2'),
                           os.path.join(DATASET_DIR,
                                        'velodyne/training/velodyne/'),
                           os.path.join(DATASET_DIR, 'calib/training/calib/'),
                           os.path.join(DATASET_DIR,
                                        'labels/training/label_2'),
                           DATASET_SPLIT_FILE,
                           num_classes=config['num_classes'])
NUM_TEST_SAMPLE = dataset.num_files
NUM_CLASSES = dataset.num_classes
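OUTPUT_DIR is only resolved in this excerpt; before any detection results can be written there the directory has to exist. A one-line sketch using only the standard library (this call is not part of the scraped snippet):

os.makedirs(OUTPUT_DIR, exist_ok=True)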
Example #3
File: train.py  Project: who1129/Point-GNN
args = parser.parse_args()
train_config = load_train_config(args.train_config_path)
DATASET_DIR = args.dataset_root_dir
if args.dataset_split_file == '':
    DATASET_SPLIT_FILE = os.path.join(
        DATASET_DIR, './3DOP_splits/' + train_config['train_dataset'])
else:
    DATASET_SPLIT_FILE = args.dataset_split_file
config_complete = load_config(args.config_path)
if 'train' in config_complete:
    config = config_complete['train']
else:
    config = config_complete
# input function ==============================================================
dataset = KittiDataset(os.path.join(DATASET_DIR,
                                    'velodyne/training/velodyne/'),
                       os.path.join(DATASET_DIR, 'labels/training/label_2'),
                       num_classes=config['num_classes'])
NUM_CLASSES = dataset.num_classes

if 'NUM_TEST_SAMPLE' not in train_config:
    NUM_TEST_SAMPLE = dataset.num_files
else:
    if train_config['NUM_TEST_SAMPLE'] < 0:
        NUM_TEST_SAMPLE = dataset.num_files
    else:
        NUM_TEST_SAMPLE = train_config['NUM_TEST_SAMPLE']

BOX_ENCODING_LEN = get_encoding_len(config['box_encoding_method'])
box_encoding_fn = get_box_encoding_fn(config['box_encoding_method'])
box_decoding_fn = get_box_decoding_fn(config['box_encoding_method'])
Example #4
                        required=True)
    flags = parser.parse_args()
    with open("semantic_no_color.json") as f:
        hyper_params = json.load(f)

    # Create output dir
    sparse_output_dir = os.path.join("result", "sparse")
    dense_output_dir = os.path.join("result", "dense")
    os.makedirs(sparse_output_dir, exist_ok=True)
    os.makedirs(dense_output_dir, exist_ok=True)

    # Dataset
    dataset = KittiDataset(
        num_points_per_sample=hyper_params["num_point"],
        base_dir=flags.kitti_root,
        dates=["2011_09_26"],
        # drives=["0095", "0001"],
        drives=["0095"],
        box_size_x=hyper_params["box_size_x"],
        box_size_y=hyper_params["box_size_y"],
    )

    # Model
    max_batch_size = 128  # The more the better, limited by memory size
    predictor = PredictInterpolator(
        checkpoint_path=flags.ckpt,
        num_classes=dataset.num_classes,
        hyper_params=hyper_params,
    )

    # Init visualizer
    dense_pcd = open3d.PointCloud()
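open3d.PointCloud() is the pre-0.8 Open3D API. If the example is run against a current Open3D release (an assumption about the environment, not something the original code states), the geometry types live one namespace deeper:

    # Open3D >= 0.8 moved geometry classes into open3d.geometry.
    dense_pcd = open3d.geometry.PointCloud()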
Example #5
                    default='../dataset/kitti/',
                    help='Path to KITTI dataset. Default="../dataset/kitti/"')
args = parser.parse_args()
eval_config = load_train_config(args.eval_config_path)
DATASET_DIR = args.dataset_root_dir

config_path = os.path.join(eval_config['train_dir'],
                           eval_config['config_path'])
while not os.path.isfile(config_path):
    print('No config file found in %s, waiting' % config_path)
    time.sleep(eval_config['eval_every_second'])
config = load_config(config_path)
if 'eval' in config:
    config = config['eval']
dataset = KittiDataset(os.path.join(DATASET_DIR, 'velodyne/val/'),
                       os.path.join(DATASET_DIR, 'labels/val/'),
                       num_classes=config['num_classes'])
NUM_CLASSES = dataset.num_classes
print(dataset)
if 'NUM_TEST_SAMPLE' not in eval_config:
    NUM_TEST_SAMPLE = dataset.num_files
else:
    if eval_config['NUM_TEST_SAMPLE'] < 0:
        NUM_TEST_SAMPLE = dataset.num_files
    else:
        NUM_TEST_SAMPLE = eval_config['NUM_TEST_SAMPLE']

BOX_ENCODING_LEN = get_encoding_len(config['box_encoding_method'])
box_encoding_fn = get_box_encoding_fn(config['box_encoding_method'])
box_decoding_fn = get_box_decoding_fn(config['box_encoding_method'])
Example #6
  def __len__(self):
    return self.num_data

if __name__ == '__main__':

    args = parse_args()

    if torch.cuda.is_available() and not args.cuda:
        print("WARNING: You might want to run with --cuda")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
        
    # dataset
    if args.dataset == 'kitti':
        dataset = KittiDataset(train=True)
        train_size = len(dataset)

        batch_sampler = sampler(train_size, args.bs)

        dataloader = torch.utils.data.DataLoader(dataset, batch_size=args.bs,
                                sampler=batch_sampler, num_workers=args.num_workers)
    elif args.dataset == 'scannet':
        pass

    # tensor placeholders
    img = torch.FloatTensor(1)
    z = torch.FloatTensor(1)
    
    # network initialization
    print('Initializing model...')
    parser.add_argument("--save", action="store_true", default=False)
    flags = parser.parse_args()
    with open("semantic_no_color.json") as f:
        hyper_params = json.load(f)

    # Create output dir
    sparse_output_dir = os.path.join("result", "sparse")
    dense_output_dir = os.path.join("result", "dense")
    os.makedirs(sparse_output_dir, exist_ok=True)
    os.makedirs(dense_output_dir, exist_ok=True)

    # Dataset
    dataset = KittiDataset(
        num_points_per_sample=hyper_params["num_point"],
        base_dir="/home/ylao/data/kitti",
        dates=["2011_09_26"],
        # drives=["0095", "0001"],
        drives=["0095"],
        box_size_x=hyper_params["box_size_x"],
        box_size_y=hyper_params["box_size_y"],
    )

    # Model
    max_batch_size = 128  # The more the better, limited by memory size
    predictor = PredictInterpolator(
        checkpoint_path=flags.ckpt,
        num_classes=dataset.num_classes,
        hyper_params=hyper_params,
    )

    # Init visualizer
    dense_pcd = open3d.PointCloud()
Example #8
File: run.py  Project: who1129/Point-GNN
                    'Default="CHECKPOINT_PATH/eval/"')
args = parser.parse_args()

DATASET_DIR = args.dataset_root_dir
if args.output_dir == '':
    OUTPUT_DIR = os.path.join(args.checkpoint_path, './eval/')
else:
    OUTPUT_DIR = args.output_dir
CHECKPOINT_PATH = args.checkpoint_path
CONFIG_PATH = os.path.join(CHECKPOINT_PATH, 'config')
assert os.path.isfile(CONFIG_PATH), 'No config file found in %s' % CONFIG_PATH
config = load_config(CONFIG_PATH)
# setup dataset ===============================================================

dataset = KittiDataset(os.path.join(DATASET_DIR, 'velodyne/testing/'),
                       "",
                       num_classes=config['num_classes'],
                       is_training=False)

NUM_TEST_SAMPLE = dataset.num_files
NUM_CLASSES = dataset.num_classes
try:
    for frame_idx in tqdm(range(0, NUM_TEST_SAMPLE)):
        start_time = time.time()
        # provide input ======================================================
        cam_points = dataset.get_cam_points_in_image(
            frame_idx, config['downsample_by_voxel_size'])

except Exception as e:
    # Replaces the original bare `except: print("ss")` debug leftover.
    print('Inference loop failed: %s' % e)

Example #9
import numpy as np
import open3d
import os
from dataset.kitti_dataset import KittiDataset
from sklearn.cluster import KMeans
from tqdm import tqdm

dataset = KittiDataset('../dataset/kitti/image/training/image_2',
                       '../dataset/kitti/velodyne/training/velodyne/',
                       '../dataset/kitti/calib/training/calib/',
                       '',
                       '../dataset/kitti/3DOP_splits/val.txt',
                       is_training=False)

downsample_rate = 2
output_dir = '../dataset/kitti/velodyne/training_downsampled_%d/velodyne/' % downsample_rate
for frame_idx in tqdm(range(0, dataset.num_files)):
    velo_points = dataset.get_velo_points(frame_idx)
    filename = dataset.get_filename(frame_idx)
    xyz = velo_points.xyz
    xyz_norm = np.sqrt(np.sum(xyz * xyz, axis=1, keepdims=True))
    z_axis = np.array([[0], [0], [1]])
    cos = xyz.dot(z_axis) / xyz_norm
    kmeans = KMeans(n_clusters=64).fit(cos)  # n_jobs was removed from KMeans in scikit-learn >= 1.0
    centers = np.sort(np.squeeze(kmeans.cluster_centers_))
    centers = [-1] + centers.tolist() + [1]
    cos = np.squeeze(cos)
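
The excerpt stops right after squeezing the elevation cosines. One way to finish the per-frame downsampling, assuming velo_points also carries a per-point attribute array (attr, e.g. reflectance) next to xyz and that the output should be a KITTI-style float32 .bin file (both are assumptions, not taken from the original script):

    # Hypothetical continuation: map each point to the nearest KMeans center,
    # treating the sorted centers as the 64 beam elevation angles.
    ring_centers = np.asarray(centers[1:-1])   # drop the -1 / 1 sentinels
    ring_idx = np.argmin(np.abs(cos[:, np.newaxis] - ring_centers[np.newaxis, :]),
                         axis=1)
    keep = (ring_idx % downsample_rate) == 0   # keep every other ring for rate 2
    kept = np.hstack([xyz[keep], velo_points.attr[keep, :1]]).astype(np.float32)
    os.makedirs(output_dir, exist_ok=True)
    kept.tofile(os.path.join(output_dir, filename))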