def main():
    """Smoke-test Network: feed random multi-scale point-cloud inputs and print output shapes.

    Builds the hierarchical inputs (xyz / neighbour / pooling / upsample indices)
    RandLA-Net-style networks expect, runs 10 random batches, and prints the
    size of every tensor in the returned end_points dict.
    """
    cfg = ConfigTest  # NOTE(review): class used as a config namespace, not instantiated — confirm intended
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net = Network(cfg).to(device)
    print("model parameters:", sum(param.numel() for param in net.parameters()))

    # Hierarchy constants are loop-invariant: hoisted out of the batch loop.
    n_layers = 4
    sub_s_r = [16, 1, 4, 1]  # per-layer subsampling ratios

    for _ in tqdm(range(10)):  # BUGFIX: outer index previously shadowed by the inner layer loop
        npts = cfg.num_points
        pcld = np.random.rand(1, npts, 3)   # (batch, npts, 3) random coordinates
        feat = np.random.rand(1, 6, npts)   # (batch, channels, npts) random features

        inputs = {'xyz': [], 'neigh_idx': [], 'sub_idx': [], 'interp_idx': []}
        for layer in range(n_layers):
            nei_idx = DP.knn_search(pcld, pcld, 16)
            n_sub = pcld.shape[1] // sub_s_r[layer]  # points kept after this layer's subsampling
            sub_pts = pcld[:, :n_sub, :]
            pool_i = nei_idx[:, :n_sub, :]
            # BUGFIX: was wrapped in torch.LongTensor twice; convert once below.
            up_i = DP.knn_search(sub_pts, pcld, 1)
            inputs['xyz'].append(torch.from_numpy(pcld).float().to(device))
            inputs['neigh_idx'].append(torch.LongTensor(nei_idx).to(device))
            inputs['sub_idx'].append(torch.LongTensor(pool_i).to(device))
            inputs['interp_idx'].append(torch.LongTensor(up_i).to(device))
            pcld = sub_pts  # next layer works on the subsampled cloud

        inputs['features'] = torch.from_numpy(feat).float().to(device)
        end_points = net(inputs)

        for k, v in end_points.items():
            if isinstance(v, list):  # idiom: isinstance instead of type(v) == list
                for ii, item in enumerate(v):
                    print('%s%d' % (k, ii), item.size())
            else:
                print(k, v.size())
type=str, default='None', help='pretrained model path') FLAGS = parser.parse_args() os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ['CUDA_VISIBLE_DEVICES'] = str(FLAGS.gpu) os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' Mode = FLAGS.mode test_area = FLAGS.test_area dataset = SemanticKITTI(test_area) dataset.init_input_pipeline() if Mode == 'train': model = Network(dataset, cfg) model.train(dataset) elif Mode == 'test': cfg.saving = False model = Network(dataset, cfg) if FLAGS.model_path is not 'None': chosen_snap = FLAGS.model_path else: chosen_snapshot = -1 logs = np.sort([ os.path.join('results', f) for f in os.listdir('results') if f.startswith('Log') ]) chosen_folder = logs[-1] snap_path = join(chosen_folder, 'snapshots') snap_steps = [
worker_init_fn=my_worker_init_fn, collate_fn=TRAIN_DATASET.collate_fn) TEST_DATALOADER = DataLoader(TEST_DATASET, batch_size=FLAGS.batch_size, shuffle=True, num_workers=20, worker_init_fn=my_worker_init_fn, collate_fn=TEST_DATASET.collate_fn) print(len(TRAIN_DATALOADER), len(TEST_DATALOADER)) ################################################# network ################################################# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") net = Network(cfg) net.to(device) # Load the Adam optimizer optimizer = optim.Adam(net.parameters(), lr=cfg.learning_rate) # Load checkpoint if there is any it = -1 # for the initialize value of `LambdaLR` and `BNMomentumScheduler` start_epoch = 0 CHECKPOINT_PATH = FLAGS.checkpoint_path if CHECKPOINT_PATH is not None and os.path.isfile(CHECKPOINT_PATH): checkpoint = torch.load(CHECKPOINT_PATH) net.load_state_dict(checkpoint['model_state_dict']) optimizer.load_state_dict(checkpoint['optimizer_state_dict']) start_epoch = checkpoint['epoch'] log_string("-> loaded checkpoint %s (epoch: %d)" %
def launch_action(parameters, verbose=True):
    """Run the train / validate / test / visualize pipeline described by `parameters`.

    :param parameters: dict with keys
        "gpu": int, ID of the GPU to use.
        "mode": str, action to execute: 'train', 'test', 'validation' or anything
            else to fall through to data visualization.
        "model_path": str, path to a model; used for predictions (mode='test')
            or to resume training (mode='train' with trainFromCKP=True).
        "path2data": str, path to the folder containing the data
            (train/*.ply, test/*.ply, fold_x/*.ply).
        "path2output": str, path to the output folder (training only).
        "protocol": str, training protocol: 'xyz', 'field' or 'kfold'.
        "trainFromCKP": bool, resume training from a checkpoint.
    :param verbose: when True, print the chosen snapshot in test mode.
    """
    GPU_ID = parameters["gpu"]
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ['CUDA_VISIBLE_DEVICES'] = str(GPU_ID)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

    Mode = parameters["mode"]
    # BUGFIX: `protocol` and `inputDir` were undefined free names (NameError at
    # runtime); the docstring documents them as entries of `parameters`.
    protocol = parameters["protocol"]
    inputDir = parameters["path2data"]

    dataset = AppleTree(protocol, path2dataset=inputDir)
    dataset.init_input_pipeline(mode=Mode)

    if Mode == 'train' and not parameters["trainFromCKP"]:
        model = Network(dataset, cfg)
        model.train(dataset)
    elif Mode == "train" and parameters["trainFromCKP"]:
        model = Network(dataset, cfg, restore_snap=parameters["model_path"])
        model.train(dataset)
    elif Mode == "validation":
        cfg.saving = False
        chosen_snap = _resolve_snapshot(parameters)
        model = Network(dataset, cfg, restore_snap=chosen_snap)
        model.evaluate(dataset)
    elif Mode == 'test':
        cfg.saving = False
        model = Network(dataset, cfg)
        chosen_snap = _resolve_snapshot(parameters)
        if verbose:
            print("Test snap: ", chosen_snap)
        tester = ModelTester(model, dataset, restore_snap=chosen_snap)
        tester.test(model, dataset)
    else:
        ##################
        # Visualize data #
        ##################
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(dataset.train_init_op)
            while True:
                flat_inputs = sess.run(dataset.flat_inputs)
                pc_xyz = flat_inputs[0]
                sub_pc_xyz = flat_inputs[1]
                labels = flat_inputs[21]
                Plot.draw_pc_sem_ins(pc_xyz[0, :, :], labels[0, :])
                Plot.draw_pc_sem_ins(sub_pc_xyz[0, :, :],
                                     labels[0, 0:np.shape(sub_pc_xyz)[1]])


def _resolve_snapshot(parameters):
    """Return the snapshot path to restore: explicit model_path if given,
    otherwise the newest snapshot under <outputDir>/snapshots.

    Was duplicated verbatim in the 'validation' and 'test' branches.
    """
    # BUGFIX: was `is not 'None'` — identity comparison with a string literal.
    if parameters["model_path"] != 'None':
        return parameters["model_path"]
    # NOTE(review): docstring advertises key 'path2output' but the code reads
    # 'outputDir' — confirm against callers before unifying.
    chosen_folder = parameters["outputDir"]
    snap_path = join(chosen_folder, 'snapshots')
    # Snapshot steps are encoded in TF meta filenames: snap-<step>.meta
    snap_steps = [int(f[:-5].split('-')[-1])
                  for f in os.listdir(snap_path) if f[-5:] == '.meta']
    chosen_step = np.sort(snap_steps)[-1]
    return os.path.join(snap_path, 'snap-{:d}'.format(chosen_step))
# Command-line tail of a test-runner script: parse args, point CUDA/TF env vars
# at the requested GPU, build the dataset + network, and run ModelTester.
parser.add_argument('--path_cls', type=str, help='path to classes')
parser.add_argument('--run', type=str, default='None', help='run folder path')
parser.add_argument('--snap', type=str, default='None', help='snapshot number')
FLAGS = parser.parse_args()

# Make CUDA device numbering match nvidia-smi, then restrict to the chosen GPU.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = str(FLAGS.gpu)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TF info/warning logs

test_name = FLAGS.test_name
data_path = FLAGS.data_path
path_cls = FLAGS.path_cls
dataset = DATA(data_path, path_cls, test_name)
dataset.init_input_pipeline()

cfg.saving = False  # inference only: disable result saving in the config
run = FLAGS.run
snap = FLAGS.snap
# NOTE(review): `run`/`snap` default to the literal string 'None'; no guard here,
# so downstream code presumably handles that sentinel — confirm.
cfg.train_dir = run
model = Network(dataset, cfg)
tester = ModelTester(model, dataset, run, test_name, restore_snap=snap)
tester.test(model, dataset, run, path_cls, test_name)
def launch_training(protocol, inputDir, parameters=None):
    """Run train / validation / test / visualization on the AppleTree dataset.

    :param protocol: training protocol identifier passed to AppleTree.
    :param inputDir: path to the dataset root.
    :param parameters: REQUIRED dict with keys 'gpu', 'mode', 'restoreTrain',
        'model_path' and 'outputDir'. Kept as a keyword with default None for
        interface compatibility, but a None value is rejected immediately.
    :raises ValueError: if `parameters` is None.
    """
    # `parameters` is dereferenced unconditionally below; fail fast with a clear
    # message instead of an opaque TypeError on the None default.
    if parameters is None:
        raise ValueError("launch_training requires a non-None 'parameters' dict")

    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ['CUDA_VISIBLE_DEVICES'] = str(parameters["gpu"])
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

    Mode = parameters["mode"]
    dataset = AppleTree(protocol, path2dataset=inputDir)
    dataset.init_input_pipeline(mode=Mode)

    def pick_snapshot():
        """Explicit model_path wins; otherwise newest snapshot in outputDir/snapshots.

        Was duplicated verbatim in the 'validation' and 'test' branches.
        """
        # BUGFIX: was `is not 'None'` — identity comparison with a string literal
        # (SyntaxWarning on Python >= 3.8); equality is what is meant.
        if parameters["model_path"] != 'None':
            return parameters["model_path"]
        chosen_folder = parameters["outputDir"]
        snap_path = join(chosen_folder, 'snapshots')
        # Snapshot steps are encoded in TF meta filenames: snap-<step>.meta
        snap_steps = [int(f[:-5].split('-')[-1])
                      for f in os.listdir(snap_path) if f[-5:] == '.meta']
        chosen_step = np.sort(snap_steps)[-1]
        return os.path.join(snap_path, 'snap-{:d}'.format(chosen_step))

    if Mode == 'train' and not parameters["restoreTrain"]:
        model = Network(dataset, cfg)
        model.train(dataset)
    elif Mode == "train" and parameters["restoreTrain"]:
        model = Network(dataset, cfg, restore_snap=parameters["model_path"])
        model.train(dataset)
    elif Mode == "validation":
        cfg.saving = False
        chosen_snap = pick_snapshot()
        model = Network(dataset, cfg, restore_snap=chosen_snap)
        model.evaluate(dataset)
    elif Mode == 'test':
        cfg.saving = False
        model = Network(dataset, cfg)
        chosen_snap = pick_snapshot()
        print(chosen_snap)
        tester = ModelTester(model, dataset, restore_snap=chosen_snap)
        tester.test(model, dataset)
    else:
        ##################
        # Visualize data #
        ##################
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(dataset.train_init_op)
            while True:
                flat_inputs = sess.run(dataset.flat_inputs)
                pc_xyz = flat_inputs[0]
                sub_pc_xyz = flat_inputs[1]
                labels = flat_inputs[21]
                Plot.draw_pc_sem_ins(pc_xyz[0, :, :], labels[0, :])
                Plot.draw_pc_sem_ins(sub_pc_xyz[0, :, :],
                                     labels[0, 0:np.shape(sub_pc_xyz)[1]])