def read_config(self):
    config_path = self.config_path
    cfg_from_yaml_file(self.config_path, cfg)
    self.logger = common_utils.create_logger()
    self.demo_dataset = DemoDataset(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        training=False,
        root_path=Path("/home/muzi2045/Documents/project/OpenPCDet/data/kitti/velodyne/000001.bin"),
        ext='.bin')
    self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    self.net = build_network(model_cfg=cfg.MODEL,
                             num_class=len(cfg.CLASS_NAMES),
                             dataset=self.demo_dataset)
    self.net.load_params_from_file(filename=self.model_path,
                                   logger=self.logger,
                                   to_cpu=True)
    self.net = self.net.to(self.device).eval()

    # nuscenes dataset
    lidar2imu_t = np.array([0.985793, 0.0, 1.84019])
    lidar2imu_r = Quaternion([0.706749235, -0.01530099378, 0.0173974518, -0.7070846])
    self.lidar2imu = transform_matrix(lidar2imu_t, lidar2imu_r, inverse=True)
    self.imu2lidar = transform_matrix(lidar2imu_t, lidar2imu_r, inverse=False)
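
# A minimal sketch (not part of the original) of how the 4x4 homogeneous
# matrices built above are typically applied: transform_matrix() from the
# nuscenes-devkit returns a homogeneous transform, so an (N, 3) cloud is
# padded with ones before multiplication. `points_lidar` is hypothetical.
def apply_transform(points, tf):
    # points: (N, 3) array, tf: (4, 4) homogeneous transform
    homo = np.hstack([points, np.ones((points.shape[0], 1))])
    return (tf @ homo.T).T[:, :3]

# e.g. points_imu = apply_transform(points_lidar, self.lidar2imu)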
def parse_config():
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str,
                        default='/home/syang/Data/data_object_velodyne/output/kitti_models/centernet_multihead/0124_single_head/centernet_multihead.yaml',
                        help='specify the config for training')
    parser.add_argument('--batch_size', type=int, default=1, required=False, help='batch size for training')
    parser.add_argument('--workers', type=int, default=16, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='0129_twostage_first_4', help='extra tag for this experiment')
    # os.path.abspath('..') + '/output/robosense_models/robosense_pointpillar/BResampl_LR001/ckpt/checkpoint_epoch_30.pth'
    parser.add_argument('--ckpt', type=str,
                        default='/home/syang/Data/data_object_velodyne/output/kitti_models/centernet_multihead/0124_single_head/ckpt/checkpoint_epoch_80.pth',
                        help='checkpoint to start from')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distributed training')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    parser.add_argument('--max_waiting_mins', type=int, default=30, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    parser.add_argument('--eval_tag', type=str, default='default', help='eval tag for this experiment')
    parser.add_argument('--eval_all', action='store_true', default=False, help='whether to evaluate all checkpoints')
    parser.add_argument('--ckpt_dir', type=str, default=None, help='specify a ckpt directory to be evaluated if needed')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')

    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    np.random.seed(1024)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def parse_config():
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default='cfgs/kitti_models/second.yaml',
                        help='specify the config for demo')  # 1. '--cfg_file': specify the config
    parser.add_argument('--data_path', type=str, default='demo_data',
                        help='specify the point cloud data file or directory')  # 2. '--data_path': specify the point cloud data file or directory
    parser.add_argument('--ckpt', type=str, default=None,
                        help='specify the pretrained model')  # 3. '--ckpt': specify the pretrained model
    parser.add_argument('--ext', type=str, default='.bin',
                        help='specify the extension of your point cloud data file')  # 4. '--ext': specify the extension of the point cloud data files

    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)  # cfg values come from e.g. tools/cfgs/kitti_models/pv-rcnn.yaml
    return args, cfg
def parse_config():
    parser = argparse.ArgumentParser(description='arg parser')
    # model config
    parser.add_argument('--cfg_file', type=str, default='cfgs/kitti_models/second.yaml',
                        help='specify the config for demo')
    # data path
    parser.add_argument('--data_path', type=str, default='demo_data',
                        help='specify the point cloud data file or directory')
    # pretrained model; a checkpoint stores the model weights saved during
    # training so they can be restored for inference
    parser.add_argument('--ckpt', type=str, default=None, help='specify the pretrained model')
    # extension of the point cloud data files
    parser.add_argument('--ext', type=str, default='.bin',
                        help='specify the extension of your point cloud data file')

    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)  # load the YAML configuration into cfg
    return args, cfg
def parse_config():
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    parser.add_argument('--data_path', type=str, default='demo_data',
                        help='specify the dataset or point cloud data directory')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--ext', type=str, default='.bin',
                        help='specify the extension of your point cloud data file')
    parser.add_argument('--save_video_path', type=str, default=None, help='path to save the inference video')

    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    np.random.seed(1024)
    return args, cfg
def parse_config():
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default='cfgs/nuscenes_models/cbgs_pp_multihead.yaml',
                        help='specify the config for demo')
    parser.add_argument('--data_path', type=str, default='/media/javier/HDD_linux/data/nuscenes',
                        help='specify the point cloud data file or directory')
    parser.add_argument('--ckpt', type=str, default=None, help='specify the pretrained model')
    parser.add_argument('--ext', type=str, default='.bin',
                        help='specify the extension of your point cloud data file')
    parser.add_argument('--frames', type=int, default=6019,  # default must be an int; argparse does not apply `type` to defaults
                        help='specify the number of frames to use')

    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    return args, cfg
def parse_config():
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default='cfgs/kitti_models/second.yaml',
                        help='specify the config for demo')
    parser.add_argument('--data_path', type=str, default='demo_data',
                        help='specify the point cloud data file or directory')
    parser.add_argument('--ckpt', type=str, default=None, help='specify the pretrained model')
    parser.add_argument('--ext', type=str, default='.bin',
                        help='specify the extension of your point cloud data file')

    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    return args, cfg
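
# Hedged sketch of how this parse_config() is typically consumed, following
# the same calls the read_config()/setup_model() variants in this collection
# use (DemoDataset, build_network, load_params_from_file); main() itself is
# illustrative, not part of the original.
def main():
    args, config = parse_config()
    logger = common_utils.create_logger()
    demo_dataset = DemoDataset(dataset_cfg=config.DATA_CONFIG,
                               class_names=config.CLASS_NAMES,
                               training=False,
                               root_path=Path(args.data_path),
                               ext=args.ext)
    model = build_network(model_cfg=config.MODEL,
                          num_class=len(config.CLASS_NAMES),
                          dataset=demo_dataset)
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True)
    model.cuda()
    model.eval()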
def parse_config():
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distributed training')
    parser.add_argument('--local_rank', type=int, default=50, help='local rank for distributed training')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    parser.add_argument('--max_waiting_mins', type=int, default=30, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    parser.add_argument('--eval_tag', type=str, default='default', help='eval tag for this experiment')
    parser.add_argument('--eval_all', action='store_true', default=False, help='whether to evaluate all checkpoints')
    parser.add_argument('--ckpt_dir', type=str, default=None, help='specify a ckpt directory to be evaluated if needed')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')

    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    np.random.seed(1024)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def parse_config():
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    parser.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
    parser.add_argument('--workers', type=int, default=8, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distributed training')
    parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    parser.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    parser.add_argument('--ckpt_save_interval', type=int, default=1, help='save a checkpoint every N epochs')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoints')
    parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')

    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
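
# Hedged sketch: how the training flags parsed above typically map onto an
# output layout, following the cfg.TAG / cfg.EXP_GROUP_PATH fields set in
# parse_config(); the directory names are illustrative.
args, cfg = parse_config()
output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
ckpt_dir = output_dir / 'ckpt'
ckpt_dir.mkdir(parents=True, exist_ok=True)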
def __init__(self, cfg_file, logger=False):
    self.logger = False
    if logger:
        self.logger = True
        logging.basicConfig(filename="../results/log2.txt",
                            level=logging.INFO,
                            format='%(levelname)s: %(asctime)s %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S')
    cfg_from_yaml_file(cfg_file, cfg)
    self.model_dir = Path('../results')
    self.dataset = JrdbDataset(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=['Pedestrian'],
        root_path=None,
        training=False,
        logger=None,
    )
    self.num_frames = len(self.dataset.data_infos)
    print(self.num_frames)
    self.class_names = self.dataset.class_names
    self.easy_eval = True
def setup_model(self):
    cfg_from_yaml_file(self.detector_config, cfg)
    self.logger = common_utils.create_logger()
    self.dataset = DummyDataset(dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES)
    self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    self.net = build_network(model_cfg=cfg.MODEL,
                             num_class=len(cfg.CLASS_NAMES),
                             dataset=self.dataset)
    self.net.load_params_from_file(filename=self.model_path, logger=self.logger, to_cpu=True)
    self.net = self.net.to(self.device).eval()
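
# Hedged sketch of a companion inference method for the network built in
# setup_model(), using the standard OpenPCDet dataset/model API
# (prepare_data, collate_batch, load_data_to_gpu); the (N, 4) `points`
# argument and the method itself are illustrative, not from the original.
from pcdet.models import load_data_to_gpu

def run_inference(self, points):
    data_dict = self.dataset.prepare_data(data_dict={'points': points, 'frame_id': 0})
    data_dict = self.dataset.collate_batch([data_dict])
    load_data_to_gpu(data_dict)
    with torch.no_grad():
        pred_dicts, _ = self.net.forward(data_dict)
    return pred_dicts[0]  # contains pred_boxes, pred_scores, pred_labels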
def parse_config():
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--bag_file', type=str, default=None, help='specify the bag file to run inference on')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for inference')
    parser.add_argument('--save_video', default=False, action='store_true')
    parser.add_argument('--save_path', default='../data/plusai/inference_result/',
                        help='path to save the inference result')
    parser.add_argument('--ckpt', type=str, default=None, help='model checkpoint')

    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    np.random.seed(1024)
    return args, cfg
def parse_config():
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str,
                        default='/home/syang/Data/data_object_velodyne/output/kitti_models/onenet_twostage_0130/test/onenet_twostage_0130.yaml',
                        help='specify the config for training')
    parser.add_argument('--ckpt', type=str,
                        default='/home/syang/Data/data_object_velodyne/output/kitti_models/onenet_twostage_0130/test/ckpt/checkpoint_epoch_78.pth',
                        help='checkpoint to start from')
    parser.add_argument('--show_heatmap', action='store_true', default=False, help='')
    parser.add_argument('--batch_size', type=int, default=1, required=False, help='batch size for training')
    parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
    parser.add_argument('--data_path', type=str, default='demo_data',
                        help='specify the point cloud data file or directory')
    parser.add_argument('--ext', type=str, default='.bin',
                        help='specify the extension of your point cloud data file')

    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    np.random.seed(1024)
    return args, cfg
def parse_config():
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--data_path', type=str, default='demo_data',
                        help='specify the scene directory or val info pkl')
    parser.add_argument('--save_path', default='../data/plusai/inference_result/',
                        help='path to save the inference result')
    parser.add_argument('--batch_size', type=int, default=1, required=False, help='batch size for training')
    parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')

    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    np.random.seed(1024)
    return args, cfg
def __init__(self, input_dict):
    # pvrcnn cfg
    cfg_from_yaml_file(input_dict.cfg_file, cfg)

    # create logger
    log_dir = Path(str(input_dict.output_dir)) / 'log'
    log_dir.mkdir(parents=True, exist_ok=True)
    self.logger = common_utils.create_logger(
        log_dir / ('log_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S')),
        rank=cfg.LOCAL_RANK)

    # build dataset and network
    self.demo_dataset = DemoDataset(  # dummy dataset for preprocessing input data
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        training=False,
        root_path=input_dict.dummy_cloud,
        ext='.bin',
        logger=self.logger)
    self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    self.model = build_network(model_cfg=cfg.MODEL,
                               num_class=len(cfg.CLASS_NAMES),
                               dataset=self.demo_dataset)
    self.model.load_params_from_file(filename=input_dict.ckpt_file,
                                     logger=self.logger,
                                     to_cpu=(self.device.type == 'cpu'))  # compare device types, not a device to a str
    self.model.to(self.device)
    self.model.eval()
    self.score_threshold = input_dict.score_threashold

    # for ROS
    self.action_server = actionlib.SimpleActionServer(
        "excavator/lidar_perception/ros_pvrcnn_action",
        detector3dAction,
        execute_cb=self.execute_cb,
        auto_start=False)
    self.action_server.start()
    self.mk_pub = rospy.Publisher("ros_pvrcnn", MarkerArray, queue_size=1)
    self.cls_list = [String(cls) for cls in cfg.CLASS_NAMES]
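
# Hedged skeleton of the execute_cb registered above; the detector3dResult
# name follows the actionlib convention for detector3dAction but is an
# assumption, while set_succeeded()/set_aborted() are the standard
# actionlib.SimpleActionServer API for finishing a goal.
def execute_cb(self, goal):
    result = detector3dResult()
    try:
        # run self.model on the cloud carried by the goal and fill `result`
        self.action_server.set_succeeded(result)
    except Exception:
        self.action_server.set_aborted()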
def read_config(self):
    config_path = self.config_path
    cfg_from_yaml_file(self.config_path, cfg)
    self.logger = common_utils.create_logger()
    self.demo_dataset = DemoDataset(dataset_cfg=cfg.DATA_CONFIG,
                                    class_names=cfg.CLASS_NAMES,
                                    training=False,
                                    root_path=Path("/none"),
                                    ext='.bin')
    self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    self.net = build_network(model_cfg=cfg.MODEL,
                             num_class=len(cfg.CLASS_NAMES),
                             dataset=self.demo_dataset)
    print("Model path: ", self.model_path)
    self.net.load_params_from_file(filename=self.model_path,
                                   logger=self.logger,
                                   to_cpu=True)
    self.net = self.net.to(self.device).eval()
def read_config(self):
    config_path = self.config_path
    cfg_from_yaml_file(self.config_path, cfg)
    self.logger = common_utils.create_logger()
    self.demo_dataset = DemoDataset(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        training=False,
        root_path=Path("/home/muzi2045/Documents/project/OpenPCDet/data/kitti/velodyne/000001.bin"),
        ext='.bin')
    self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    self.net = build_network(model_cfg=cfg.MODEL,
                             num_class=len(cfg.CLASS_NAMES),
                             dataset=self.demo_dataset)
    self.net.load_params_from_file(filename=self.model_path,
                                   logger=self.logger,
                                   to_cpu=True)
    self.net = self.net.to(self.device).eval()
def parse_config():
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default='cfgs/kitti_models/second.yaml',
                        help='specify the config for demo')
    # parser.add_argument('--data_path', type=str, default='demo_data',
    #                     help='specify the point cloud data file or directory')
    parser.add_argument('--data_root', type=str, default='demo_data',
                        help='specify the root of calib, velodyne and image files')
    parser.add_argument('--file_number', type=str, default='000008',
                        help='specify file number to detect objects for')
    parser.add_argument('--ckpt', type=str, default=None, help='specify the pretrained model')
    parser.add_argument('--ext', type=str, default='.bin',
                        help='specify the extension of your point cloud data file')
    parser.add_argument('--res', type=str, default="../results/3dod/vis/",
                        help="specify the results folder of the detection result")

    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    return args, cfg
def parse_config():
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default='cfgs/kitti_models/second.yaml',
                        help='specify the config for demo')
    parser.add_argument('--seq_path', type=str, default='demo_data',
                        help='specify the point cloud data sequence path')
    parser.add_argument('--ckpt', type=str, default=None, help='specify the pretrained model')
    parser.add_argument('--ext', type=str, default='.bin',
                        help='specify the extension of your point cloud data file')
    parser.add_argument('--output_dir', type=str, default='',
                        help='the path to save predictions and output log files for tracking')
    parser.add_argument('--saved_pred', type=str, default='',
                        help='the path to existing saved predictions and output log files for visualizing')

    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    return args, cfg
def parse_config():
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default='cfgs/kitti_models/second.yaml',
                        help='specify the config for demo')
    parser.add_argument('--data_path', type=str, default='demo_data',
                        help='specify the point cloud data file or directory')
    parser.add_argument('--ckpt', type=str, default=None, help='specify the pretrained model')
    parser.add_argument('--ext', type=str, default='.bin',
                        help='specify the extension of your point cloud data file')
    # argparse's type=bool treats any non-empty string (including 'False') as
    # True, so these flags are expressed with action='store_true' instead
    parser.add_argument('--vis', action='store_true', default=False, help='visualize detection results')
    parser.add_argument('--point', action='store_true', default=False, help='save point prediction results')
    parser.add_argument('--save_gt', action='store_true', default=False, help='save point ground truth labels')

    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    return args, cfg
def read_config(self):
    print(self.config_path)
    config_path = self.config_path
    cfg_from_yaml_file(self.config_path, cfg)
    self.logger = common_utils.create_logger()
    self.demo_datasets = DemoDataset(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        training=False,
        root_path=Path('/home/syang/Data/RS_datasets/datasets/ruby119_longzhudadao_1200423181920/npy/ruby119_longzhudadao_1200423181920_755.npy'),
        ext='.npy')
    self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(self.device)
    self.net = build_network(model_cfg=cfg.MODEL,
                             num_class=len(cfg.CLASS_NAMES),
                             dataset=self.demo_datasets)
    self.net.load_params_from_file(filename=self.model_path,
                                   logger=self.logger,
                                   to_cpu=True)
    self.net = self.net.to(self.device).eval()
model_path = 'cfgs/kitti_models/pointpillar_7728.pth'
config_path = 'cfgs/kitti_models/pp_multihead.yaml'
model_path = 'cfgs/kitti_models/pp_multihead_nds5823.pth'
# disabled alternative configuration, kept inert inside the string literal
'''
config_path = 'cfgs/kitti_models/second.yaml'
model_path = 'cfgs/kitti_models/second_7862.pth'
movelidarcenter = 0  # 69.12/2
threshold = 0.4
proc_1 = Processor_ROS(config_path, model_path)
proc_1.initialize()
cfg_from_yaml_file(config_path, cfg)
'''

rospy.init_node('centerpoint_ros_node')
sub_lidar_topic = [
    "/velodyne_points",
    "/carla/ego_vehicle/lidar/lidar1/point_cloud",
    "/kitti_player/hdl64e",
    "/lidar_protector/merged_cloud",
    "/merged_cloud",
    "/lidar_top",
    "/roi_pclouds",
    "/livox/lidar",
    "/SimOneSM_PointCloud_0",
]
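
# Hedged sketch of wiring one of the candidate topics above to a callback;
# `rslidar_callback` is hypothetical, while rospy.Subscriber/rospy.spin are
# the standard rospy API. PointCloud2 comes from sensor_msgs.msg.
from sensor_msgs.msg import PointCloud2

def rslidar_callback(msg):
    # convert `msg` to an (N, 4) numpy array and run the detector on it
    pass

sub = rospy.Subscriber(sub_lidar_topic[0], PointCloud2, rslidar_callback,
                       queue_size=1, buff_size=2 ** 24)
rospy.spin()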
    bag_frame_list = os.listdir(os.path.join(data_path, cur_bag_name, 'pointcloud'))
    bag_frame_list.sort()
    f.write(os.path.join(bag_name, 'pointcloud',
                         bag_frame_list[int(pointcloud_idx[:-4]) + 1]) + '\n')
    f.close()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', help='directory to data path which should contain bag and label')
    parser.add_argument('--lidar_topic', default='/unified/lidar_points')
    parser.add_argument('--odom_topic', default='/navsat/odom')
    parser.add_argument('--cfg_file', type=str,
                        default='/home/jingsen/workspace/OpenPCDet/tools/cfgs/livox_models/pv_rcnn_multiframe.yaml')
    parser.add_argument('--visualize', action='store_true', default=False,
                        help='visualize the multi-frame point cloud')
    parser.add_argument('--num_workers', default=6, help='num workers to process label data')
    args = parser.parse_args()

    cfg_from_yaml_file(args.cfg_file, cfg)
    log_file = os.path.join(args.data_path, 'data_preprocessing_log.txt')
    logger = create_logger(log_file, rank=0)

    # logger.info('=== Start extract point-cloud and annotations from origin bag and label files, this will take a long time ... ===')
    # preprocess_dataset()

    logger.info('\n\n=== Start process multiframe dataset ... ===')
    prepare_multiframe_dataset()

    # logger.info('\n\n=== Start get image sets ... ===')
    # get_images_sets()

    print('log file saved in {}'.format(log_file))
def parse_config():
    parser = argparse.ArgumentParser(description='KITTI Demo Video')
    parser.add_argument('--maxdisp', type=int, default=192, help='maximum disparity')
    parser.add_argument('--loss_weights', type=float, nargs='+', default=[0.25, 0.5, 1., 1.])
    parser.add_argument('--max_disparity', type=int, default=192)
    parser.add_argument('--maxdisplist', type=int, nargs='+', default=[12, 3, 3])
    parser.add_argument('--datatype', default='2015', help='dataset type (e.g. KITTI 2015)')
    parser.add_argument('--datapath', default='data/kitti/training', help='data path')
    parser.add_argument('--epochs', type=int, default=300, help='number of epochs to train')
    parser.add_argument('--train_bsize', type=int, default=6, help='batch size for training (default: 6)')
    parser.add_argument('--test_bsize', type=int, default=8, help='batch size for testing (default: 8)')
    parser.add_argument('--save_path', type=str, default='results/pseudoLidar/',
                        help='the path for saving checkpoints and logs')
    parser.add_argument('--resume', type=str, default=None, help='resume path')
    parser.add_argument('--lr', type=float, default=5e-4, help='learning rate')
    parser.add_argument('--with_spn', action='store_true', help='with spn network or not')
    parser.add_argument('--print_freq', type=int, default=5, help='print frequency')
    parser.add_argument('--init_channels', type=int, default=1, help='initial channels for 2d feature extractor')
    parser.add_argument('--nblocks', type=int, default=2, help='number of layers in each stage')
    parser.add_argument('--channels_3d', type=int, default=4,
                        help='number of initial channels for the 3d feature extractor')
    parser.add_argument('--layers_3d', type=int, default=4, help='number of initial layers in 3d network')
    parser.add_argument('--growth_rate', type=int, nargs='+', default=[4, 1, 1],
                        help='growth rate in the 3d network')
    parser.add_argument('--spn_init_channels', type=int, default=8, help='initial channels for spnet')
    parser.add_argument('--start_epoch_for_spn', type=int, default=121)
    parser.add_argument('--pretrained', type=str, default='configs/checkpoint/kitti2015_ck/checkpoint.tar',
                        help='pretrained model path')
    parser.add_argument('--split_file', type=str, default=None)
    parser.add_argument('--evaluate', action='store_true')
    parser.add_argument('--max_high', type=int, default=1)
    parser.add_argument('--cfg_file', type=str, default=paper.cfg, help='specify the config for demo')
    parser.add_argument('--data_path', type=str, default='data/kitti/training')
    parser.add_argument('--ckpt', type=str, default=paper.model, help='specify the pretrained model')

    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    return args, cfg
def parse_config():
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    parser.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
    parser.add_argument('--workers', type=int, default=8, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distributed training')
    parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    parser.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    parser.add_argument('--ckpt_save_interval', type=int, default=1, help='save a checkpoint every N epochs')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoints')
    parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    parser.add_argument('--adv', action='store_true', default=False, help='adv defense or not')
    parser.add_argument('--norm', type=str, default='inf', help='norm type')
    parser.add_argument('--epsilon', type=float, default=0.01, help='epsilon value')
    parser.add_argument('--rec_type', type=str, default='both',
                        help='both: attack points and reflectance; '
                             'points: attack points only; '
                             'reflectance: attack reflectance only')
    parser.add_argument('--iterations', type=int, default=1, help='iterations of different methods')
    # argparse's type=bool treats any non-empty string as True, so these two
    # mutually exclusive flags are expressed with action='store_true'
    parser.add_argument('--pgd', action='store_true', default=False,
                        help='pgd adversarial type; when pgd is set, momentum should not be set and iterations should be 10')
    parser.add_argument('--momentum', action='store_true', default=False,
                        help='momentum adversarial type; when momentum is set, pgd should not be set and iterations should be 10')
    parser.add_argument('--cfg_root_dir', type=str, default='',
                        help='directory to save the model and related information')

    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
    # with open(test_filename, 'wb') as f:
    #     pickle.dump(kitti_infos_test, f)
    # print('Kitti info test file is saved to %s' % test_filename)

    print('---------------Start create groundtruth database for data augmentation---------------')
    if not cfg.DATA_CONFIG.TS_DATA:
        dataset.set_split(train_split)
        dataset.create_groundtruth_database(train_filename, split=train_split)
    print('---------------Data preparation Done---------------')


if __name__ == '__main__':
    if sys.argv.__len__() > 1 and sys.argv[1] == 'create_kitti_infos':
        cfg_file = sys.argv[2]
        cfg_from_yaml_file(cfg_file, cfg)
        if not cfg.DATA_CONFIG.TS_DATA:
            data_path = cfg.ROOT_DIR / 'data' / 'kitti'
            save_path = cfg.ROOT_DIR / 'data' / 'kitti'
        else:
            data_path = cfg.ROOT_DIR / 'ts_data'
            save_path = cfg.ROOT_DIR / 'ts_data'
        create_kitti_infos(data_path=data_path, save_path=save_path)
    else:
        A = KittiDataset(root_path='data/kitti', class_names=cfg.CLASS_NAMES, split='train', training=True)
        import pdb
        pdb.set_trace()
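
# Example invocation (hedged; the script name and config path are
# illustrative). The first positional argument selects the info-generation
# branch above, and the second is forwarded to cfg_from_yaml_file as
# sys.argv[2]:
#   python kitti_dataset.py create_kitti_infos tools/cfgs/dataset_configs/kitti_dataset.yaml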
from pcdet.datasets.JRDB.jrdb_dataset import JrdbDataset
from pcdet.datasets.kitti.kitti_dataset import KittiDataset
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pathlib import Path
import pdb
import numpy as np

# root_path = Path('data/jrdb')
cfg_from_yaml_file('cfgs/kitti_models/pointrcnn_test.yaml', cfg)
dataset = JrdbDataset(
    dataset_cfg=cfg.DATA_CONFIG,
    class_names=['Pedestrian'],
    root_path=None,
    training=True,
    logger=None,
)
dataset.set_split('train')
train_filename = (Path(__file__).resolve() / '../../').resolve() / 'data' / 'jrdb_temp' / 'jrdb_infos_train.pkl'
dataset.create_groundtruth_database(train_filename, split='train')

# cfg_from_yaml_file('cfgs/kitti_models/pointrcnn.yaml', cfg)
# dataset = KittiDataset(
#     dataset_cfg=cfg.DATA_CONFIG,
# These locations are where the model weights and other misc files are stored.
MODEL_CFG_PATH = '/code/submission/OpenPCDet/tools/cfgs/waymo_models/pv_rcnn.yaml'
MODEL_WEIGHTS = '/code/submission/lib/wod_latency_submission/WAYMO_MODEL_WEIGHTS.pth'

# The names of the lidars and input fields that users might want to use for
# detection.
LIDAR_NAMES = ['TOP', 'REAR', 'FRONT', 'SIDE_LEFT', 'SIDE_RIGHT']
LIDAR_FIELD = 'RANGE_IMAGE_FIRST_RETURN'

# The data fields requested from the evaluation script should be specified in
# this field in the module.
DATA_FIELDS = [lidar_name + '_' + LIDAR_FIELD for lidar_name in LIDAR_NAMES]

# Global variables that hold the models and configurations.
model_cfg = cfg_from_yaml_file(MODEL_CFG_PATH, cfg)
logger = common_utils.create_logger()
dataset_processor = DatasetTemplate(dataset_cfg=model_cfg.DATA_CONFIG,
                                    class_names=model_cfg.CLASS_NAMES,
                                    training=False,
                                    root_path=None,
                                    logger=logger)
model = None


def initialize_model():
    """Method that will be called by the evaluation script to load the model
    and weights.
    """
    global model
    model = build_network(model_cfg=model_cfg.MODEL,
                          num_class=len(model_cfg.CLASS_NAMES),
def parse_config():
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    parser.add_argument('--data_dir', type=str, default=None)
    parser.add_argument('--batch_size', type=int, default=16, required=False, help='batch size for training')
    parser.add_argument('--epochs', type=int, default=80, required=False, help='number of epochs to train for')
    parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--federated', choices=['none', 'sync', 'async'], default='none')  # NOTE: for federated learning
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distributed training')
    parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    parser.add_argument('--fix_random_seed', action='store_true', default=False, help='whether to fix the random seed')
    parser.add_argument('--ckpt_save_interval', type=int, default=2, help='save a checkpoint every N epochs')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoints')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')

    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg