def main(args):
    pytorch_device = torch.device('cuda:0')

    config_path = args.config_path

    configs = load_config_data(config_path)

    dataset_config = configs['dataset_params']
    train_dataloader_config = configs['train_data_loader']
    val_dataloader_config = configs['val_data_loader']

    val_batch_size = val_dataloader_config['batch_size']
    train_batch_size = train_dataloader_config['batch_size']

    model_config = configs['model_params']
    train_hypers = configs['train_params']

    grid_size = model_config['output_shape']
    num_class = model_config['num_class']
    ignore_label = dataset_config['ignore_label']

    model_load_path = train_hypers['model_load_path']
    model_save_path = train_hypers['model_save_path']

    SemKITTI_label_name = get_nuScenes_label_name(
        dataset_config["label_mapping"])
    unique_label = np.asarray(sorted(list(SemKITTI_label_name.keys())))[1:] - 1
    unique_label_str = [SemKITTI_label_name[x] for x in unique_label + 1]
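    # The smallest label id (0, the ignore class) is dropped and the rest are
    # shifted down by one so predictions index a 0-based class axis;
    # unique_label + 1 recovers the original ids for the name lookup above.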

    train_dataset_loader, val_dataset_loader = data_builder.build(
        dataset_config,
        train_dataloader_config,
        val_dataloader_config,
        grid_size=grid_size)
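    # Each batch is assumed to unpack as (voxel position, voxel label,
    # grid index, point label, point feature); the debug loop below prints
    # a few of these fields for inspection.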
    for i_iter, (_, train_vox_label, train_grid, _,
                 train_pt_fea) in enumerate(train_dataset_loader):
        print('train_grid:', train_grid, end='\n')
        print('train_pt_fea', train_pt_fea, end='\n')
        print('train_vox_label', train_vox_label, end='\n')

    my_model = model_builder.build_pt(model_config)
    if os.path.exists(model_load_path):
        my_model = load_checkpoint_1b1(model_load_path, my_model)

    my_model.to(pytorch_device)
    optimizer = optim.Adam(my_model.parameters(),
                           lr=train_hypers["learning_rate"])

    loss_func, lovasz_softmax = loss_builder.build(wce=True,
                                                   lovasz=True,
                                                   num_class=num_class,
                                                   ignore_label=ignore_label)
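    # loss_builder.build is assumed to return a weighted cross-entropy
    # criterion plus the Lovasz-softmax loss; during training the two are
    # summed, e.g.
    #   lovasz_softmax(softmax(outputs, dim=1), labels, ignore=0) \
    #       + loss_func(outputs, labels)
    # as in the training loop later in this file.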
Example 2
# The enclosing class statement was lost in the scrape; the name here is an
# assumption.
class Inferencer:
    def __init__(self, config_path):
        self.device = torch.device('cuda:0')

        self.configs = load_config_data(config_path)

        self.val_dataloader_config = self.configs['val_data_loader']

        self.val_batch_size = self.val_dataloader_config['batch_size']

        self.model_config = self.configs['model_params']

        self.model = model_builder.build(self.model_config)

        model_path = self.configs['train_params']['model_load_path']
        self.model = load_checkpoint(model_path, self.model)

        self.model.to(self.device)
        self.model.eval()
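# Hypothetical usage of the helper above (the class name is an assumption,
# since the original class statement did not survive the scrape):
#     runner = Inferencer('config/semantickitti.yaml')
#     with torch.no_grad():
#         preds = runner.model(pt_fea_ten, grid_ten, runner.val_batch_size)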
def main(args):
    pytorch_device = torch.device('cuda:0')

    config_path = args.config_path

    configs = load_config_data(config_path)

    dataset_config = configs['dataset_params']
    test_dataloader_config = configs['test_data_loader']
    val_dataloader_config = configs['val_data_loader']

    val_batch_size = val_dataloader_config['batch_size']
    test_batch_size = test_dataloader_config['batch_size']

    model_config = configs['model_params']
    test_hypers = configs['test_params']

    grid_size = model_config['output_shape']
    num_class = model_config['num_class']
    ignore_label = dataset_config['ignore_label']

    model_load_path = test_hypers['model_load_path']
    # model_save_path = test_hypers['model_save_path']
    output_path = test_hypers['output_save_path']

    SemKITTI_label_name = get_SemKITTI_label_name(dataset_config["label_mapping"])
    unique_label = np.asarray(sorted(list(SemKITTI_label_name.keys())))[1:] - 1
    unique_label_str = [SemKITTI_label_name[x] for x in unique_label + 1]

    my_model = model_builder.build(model_config)
    if os.path.exists(model_load_path):
        my_model = load_checkpoint(model_load_path, my_model)

    my_model.to(pytorch_device)
    
    test_dataset_loader, val_dataset_loader = data_builder.build_valtest(dataset_config,
                                                                  test_dataloader_config,
                                                                  val_dataloader_config,
                                                                  grid_size=grid_size)


    ### Validation inference pipeline starts  
    print('#'*80)
    print("Processing the validation section")
    print('#'*80)
    pbar = tqdm(total=len(val_dataset_loader))
    print("THe length of the validation dataset : {} ".format(len(val_dataset_loader)))
    my_model.eval()
    hist_list = []
    time_list = []

    with torch.no_grad():
        for i_iter_val, (_, val_vox_label, val_grid, val_pt_labs, val_pt_fea) in enumerate(
                            val_dataset_loader):
            print("The processingframe is : {}".format(i_iter_val))


            val_pt_fea_ten = [torch.from_numpy(i).type(torch.FloatTensor).to(pytorch_device) for i in
                                          val_pt_fea]
            val_grid_ten = [torch.from_numpy(i).to(pytorch_device) for i in val_grid]
            val_label_tensor = val_vox_label.type(torch.LongTensor).to(pytorch_device)

            # Timing similar to PolarSeg: synchronize CUDA before and after
            # the forward pass so GPU work is fully counted.
            torch.cuda.synchronize()
            start_time = time.time()
            predict_labels = my_model(val_pt_fea_ten, val_grid_ten, val_batch_size)
            torch.cuda.synchronize()
            time_list.append(time.time() - start_time)

            predict_labels = torch.argmax(predict_labels, dim=1)
            predict_labels = predict_labels.cpu().detach().numpy()
            for count, i_val_grid in enumerate(val_grid):
                hist_list.append(fast_hist_crop(
                    predict_labels[count, val_grid[count][:, 0],
                                   val_grid[count][:, 1],
                                   val_grid[count][:, 2]],
                    val_pt_labs[count], unique_label))
            pbar.update(1)

    iou = per_class_iu(sum(hist_list))
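    # per_class_iu (above) presumably computes, for each class c,
    # TP_c / (TP_c + FP_c + FN_c) from the summed confusion histogram;
    # a minimal sketch: np.diag(h) / (h.sum(1) + h.sum(0) - np.diag(h)).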
    print('*'*80)
    print('Validation per class iou: ')
    print('*'*80)
    for class_name, class_iou in zip(unique_label_str, iou):
        print('%s : %.2f%%' % (class_name, class_iou * 100))
    val_miou = np.nanmean(iou) * 100
    del val_vox_label, val_grid, val_pt_fea, val_grid_ten
    pbar.close()
    
    print('Current val miou is %.3f' % val_miou)
    print('Inference time per batch (size %d) is %.4f seconds\n' %
          (val_batch_size, np.mean(time_list)))


    
    ### Testing inference pipeline starts
    hist_list = []
    time_list = []
    pbar = tqdm(total=len(test_dataset_loader))
    print('#'*80)
    print("Processing the testing pipeline")
    print("The length of the test dataset is {}".format(len(test_dataset_loader)))
    print('#'*80)
    with torch.no_grad():
        for i_iter_test, (_, test_vox_label, test_grid, test_pt_labs, test_pt_fea,
                          test_index, filename) in enumerate(test_dataset_loader):
            test_label_tensor = test_vox_label.type(torch.LongTensor).to(pytorch_device)

            test_pt_fea_ten = [torch.from_numpy(i).type(torch.FloatTensor).to(pytorch_device)
                               for i in test_pt_fea]
            test_grid_ten = [torch.from_numpy(i).to(pytorch_device) for i in test_grid]

            torch.cuda.synchronize()
            start_time = time.time()
            predict_labels = my_model(test_pt_fea_ten, test_grid_ten, test_batch_size)
            torch.cuda.synchronize()
            time_list.append(time.time() - start_time)

            predict_labels = torch.argmax(predict_labels, dim=1)
            predict_labels = predict_labels.cpu().detach().numpy()
            # Write predictions to .label files
            for count, i_test_grid in enumerate(test_grid):
                test_pred_label = predict_labels[count, test_grid[count][:, 0],
                                                 test_grid[count][:, 1],
                                                 test_grid[count][:, 2]]
                test_pred_label = np.expand_dims(test_pred_label, axis=1)
                # filename[0] assumes a test batch size of 1
                _, dir2 = filename[0].split('/sequences/', 1)
                new_save_dir = output_path + '/sequences/' + \
                    dir2.replace('velodyne', 'predictions')[:-3] + 'label'
                # os.makedirs(..., exist_ok=True) would be the modern equivalent
                if not os.path.exists(os.path.dirname(new_save_dir)):
                    try:
                        os.makedirs(os.path.dirname(new_save_dir))
                    except OSError as exc:
                        if exc.errno != errno.EEXIST:
                            raise
                test_pred_label = get_SemKITTI_label_color(dataset_config["label_mapping"],
                                                           test_pred_label)
                test_pred_label = test_pred_label.astype(np.uint32)
                test_pred_label.tofile(new_save_dir)
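                # The file written above can be read back with
                # np.fromfile(new_save_dir, dtype=np.uint32): SemanticKITTI
                # stores one uint32 label per point.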

            ##### Check the predicted results against the available labels
            for count, i_test_grid in enumerate(test_grid):
                hist_list.append(fast_hist_crop(
                    predict_labels[count, test_grid[count][:, 0],
                                   test_grid[count][:, 1],
                                   test_grid[count][:, 2]],
                    test_pt_labs[count], unique_label))

            pbar.update(1)
        iou = per_class_iu(sum(hist_list))
        print('*'*80)
        print('Testing per class iou: ')
        print('*'*80)
        for class_name, class_iou in zip(unique_label_str, iou):
            print('%s : %.2f%%' % (class_name, class_iou * 100))
        test_miou = np.nanmean(iou) * 100
        print('Current test miou is %.3f' % test_miou)
        print('Inference time per batch (size %d) is %.4f seconds\n' %
              (test_batch_size, np.mean(time_list)))
    del test_vox_label, test_grid, test_pt_fea, test_grid_ten, test_index
    pbar.close()
def main(args):
    pytorch_device = torch.device('cuda:0')

    config_path = args.config_path

    configs = load_config_data(config_path)

    dataset_config = configs['dataset_params']
    train_dataloader_config = configs['train_data_loader']
    val_dataloader_config = configs['val_data_loader']

    val_batch_size = val_dataloader_config['batch_size']
    train_batch_size = train_dataloader_config['batch_size']

    model_config = configs['model_params']
    train_hypers = configs['train_params']

    grid_size = model_config['output_shape']
    num_class = model_config['num_class']
    ignore_label = dataset_config['ignore_label']

    model_load_path = train_hypers['model_load_path']
    model_save_path = train_hypers['model_save_path']

    SemKITTI_label_name = get_SemKITTI_label_name(dataset_config["label_mapping"])
    unique_label = np.asarray(sorted(list(SemKITTI_label_name.keys())))[1:] - 1
    unique_label_str = [SemKITTI_label_name[x] for x in unique_label + 1]

    my_model = model_builder.build(model_config)
    if os.path.exists(model_load_path):
        my_model = load_checkpoint(model_load_path, my_model)

    my_model.to(pytorch_device)
    optimizer = optim.Adam(my_model.parameters(), lr=train_hypers["learning_rate"])

    loss_func, lovasz_softmax = loss_builder.build(wce=True, lovasz=True,
                                                   num_class=num_class, ignore_label=ignore_label)

    train_dataset_loader, val_dataset_loader = data_builder.build(dataset_config,
                                                                  train_dataloader_config,
                                                                  val_dataloader_config,
                                                                  grid_size=grid_size)

    # training
    epoch = 0
    best_val_miou = 0
    my_model.train()
    global_iter = 0
    check_iter = train_hypers['eval_every_n_steps']
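    # Validate every `eval_every_n_steps` iterations (skipped during epoch 0)
    # and checkpoint whenever the validation mIoU improves.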

    while epoch < train_hypers['max_num_epochs']:
        loss_list = []
        pbar = tqdm(total=len(train_dataset_loader))
        time.sleep(10)
        # lr_scheduler.step(epoch)
        for i_iter, (_, train_vox_label, train_grid, _, train_pt_fea) in enumerate(train_dataset_loader):
            if global_iter % check_iter == 0 and epoch >= 1:
                my_model.eval()
                hist_list = []
                val_loss_list = []
                with torch.no_grad():
                    for i_iter_val, (_, val_vox_label, val_grid, val_pt_labs, val_pt_fea) in enumerate(
                            val_dataset_loader):

                        val_pt_fea_ten = [torch.from_numpy(i).type(torch.FloatTensor).to(pytorch_device) for i in
                                          val_pt_fea]
                        val_grid_ten   = [torch.from_numpy(i).to(pytorch_device) for i in val_grid]
                        val_label_tensor = val_vox_label.type(torch.LongTensor).to(pytorch_device)

                        predict_labels   = my_model(val_pt_fea_ten, val_grid_ten, val_batch_size)
                        # aux_loss = loss_fun(aux_outputs, point_label_tensor)
                        loss = lovasz_softmax(torch.nn.functional.softmax(predict_labels, dim=1).detach(),
                                              val_label_tensor,
                                              ignore=0) + loss_func(predict_labels.detach(), val_label_tensor)
                        predict_labels = torch.argmax(predict_labels, dim=1)
                        predict_labels = predict_labels.cpu().detach().numpy()
                        for count, i_val_grid in enumerate(val_grid):
                            hist_list.append(fast_hist_crop(
                                predict_labels[count, val_grid[count][:, 0],
                                               val_grid[count][:, 1],
                                               val_grid[count][:, 2]],
                                val_pt_labs[count], unique_label))
                        val_loss_list.append(loss.detach().cpu().numpy())
                # Sets the module in training mode.
                my_model.train()
                iou = per_class_iu(sum(hist_list))
                print('Validation per class iou: ')
                for class_name, class_iou in zip(unique_label_str, iou):
                    print('%s : %.2f%%' % (class_name, class_iou * 100))
                val_miou = np.nanmean(iou) * 100

                del val_vox_label, val_grid, val_pt_fea, val_grid_ten

                # save model if performance is improved
                if best_val_miou < val_miou:
                    best_val_miou = val_miou
                    torch.save(my_model.state_dict(), model_save_path)
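                    # The checkpoint can later be restored with
                    # my_model.load_state_dict(torch.load(model_save_path)).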

                print('Current val miou is %.3f while the best val miou is %.3f' %
                      (val_miou, best_val_miou))
                print('Current val loss is %.3f' %
                      (np.mean(val_loss_list)))

                wandb.log({"val_miou":val_miou, "val_loss_list":np.mean(val_loss_list)})

            train_pt_fea_ten = [torch.from_numpy(i).type(torch.FloatTensor).to(pytorch_device) for i in train_pt_fea]
            # train_grid_ten = [torch.from_numpy(i[:,:2]).to(pytorch_device) for i in train_grid]
            train_vox_ten = [torch.from_numpy(i).to(pytorch_device) for i in train_grid]
            point_label_tensor = train_vox_label.type(torch.LongTensor).to(pytorch_device)

            # forward + backward + optimize
            outputs = my_model(train_pt_fea_ten, train_vox_ten, train_batch_size)
            loss = lovasz_softmax(torch.nn.functional.softmax(outputs, dim=1), point_label_tensor,
                                  ignore=0) + loss_func(outputs, point_label_tensor)
            loss.backward()
            # All optimizers implement a .step() method that updates the parameters.
            optimizer.step()
            loss_list.append(loss.item())

            if global_iter % 1000 == 0:
                if len(loss_list) > 0:
                    print('epoch %d iter %5d, loss: %.3f\n' %
                          (epoch, i_iter, np.mean(loss_list)))
                    wandb.log({"train_loss":np.mean(loss_list)})
                else:
                    print('loss error')

            optimizer.zero_grad()
            pbar.update(1)
            global_iter += 1
            if global_iter % check_iter == 0:
                if len(loss_list) > 0:
                    print('epoch %d iter %5d, loss: %.3f\n' %
                          (epoch, i_iter, np.mean(loss_list)))
                    wandb.log({"train_loss":np.mean(loss_list)})
                else:
                    print('loss error')
        pbar.close()
        epoch += 1
Example 5
def main(args):
    pytorch_device = torch.device('cuda:0')
    config_path = args.config_path
    configs = load_config_data(config_path)
    dataset_config = configs['dataset_params']
    data_dir = args.demo_folder
    demo_label_dir = args.demo_label_folder
    save_dir = args.save_folder + "/"

    demo_batch_size = 1
    model_config = configs['model_params']
    train_hypers = configs['train_params']

    grid_size = model_config['output_shape']
    num_class = model_config['num_class']
    ignore_label = dataset_config['ignore_label']
    model_load_path = train_hypers['model_load_path']

    SemKITTI_label_name = get_SemKITTI_label_name(
        dataset_config["label_mapping"])
    unique_label = np.asarray(sorted(list(SemKITTI_label_name.keys())))[1:] - 1
    unique_label_str = [SemKITTI_label_name[x] for x in unique_label + 1]

    my_model = model_builder.build(model_config)
    if os.path.exists(model_load_path):
        my_model = load_checkpoint(model_load_path, my_model)

    my_model.to(pytorch_device)
    optimizer = optim.Adam(my_model.parameters(),
                           lr=train_hypers["learning_rate"])

    loss_func, lovasz_softmax = loss_builder.build(wce=True,
                                                   lovasz=True,
                                                   num_class=num_class,
                                                   ignore_label=ignore_label)

    demo_dataset_loader = build_dataset(dataset_config,
                                        data_dir,
                                        grid_size=grid_size,
                                        demo_label_dir=demo_label_dir)
    with open(dataset_config["label_mapping"], 'r') as stream:
        semkittiyaml = yaml.safe_load(stream)
    inv_learning_map = semkittiyaml['learning_map_inv']
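    # learning_map_inv maps the compact training ids back to the raw
    # SemanticKITTI label ids (e.g. 1 -> 10 for "car"); np.vectorize applies
    # it pointwise to the predictions below.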

    my_model.eval()
    hist_list = []
    demo_loss_list = []
    with torch.no_grad():
        for i_iter_demo, (_, demo_vox_label, demo_grid, demo_pt_labs,
                          demo_pt_fea) in enumerate(demo_dataset_loader):
            demo_pt_fea_ten = [
                torch.from_numpy(i).type(torch.FloatTensor).to(pytorch_device)
                for i in demo_pt_fea
            ]
            demo_grid_ten = [
                torch.from_numpy(i).to(pytorch_device) for i in demo_grid
            ]
            demo_label_tensor = demo_vox_label.type(
                torch.LongTensor).to(pytorch_device)

            predict_labels = my_model(demo_pt_fea_ten, demo_grid_ten,
                                      demo_batch_size)
            loss = lovasz_softmax(
                torch.nn.functional.softmax(predict_labels, dim=1).detach(),
                demo_label_tensor,
                ignore=0) + loss_func(predict_labels.detach(),
                                      demo_label_tensor)
            predict_labels = torch.argmax(predict_labels, dim=1)
            predict_labels = predict_labels.cpu().detach().numpy()
            for count, i_demo_grid in enumerate(demo_grid):
                hist_list.append(
                    fast_hist_crop(
                        predict_labels[count, demo_grid[count][:, 0],
                                       demo_grid[count][:, 1],
                                       demo_grid[count][:, 2]],
                        demo_pt_labs[count], unique_label))
                inv_labels = np.vectorize(inv_learning_map.__getitem__)(
                    predict_labels[count, demo_grid[count][:, 0],
                                   demo_grid[count][:, 1],
                                   demo_grid[count][:, 2]])
                inv_labels = inv_labels.astype('uint32')
                outputPath = save_dir + str(i_iter_demo).zfill(6) + '.label'
                inv_labels.tofile(outputPath)
                print("save " + outputPath)
            demo_loss_list.append(loss.detach().cpu().numpy())

    if demo_label_dir != '':
        my_model.train()
        iou = per_class_iu(sum(hist_list))
        print('Validation per class iou: ')
        for class_name, class_iou in zip(unique_label_str, iou):
            print('%s : %.2f%%' % (class_name, class_iou * 100))
        val_miou = np.nanmean(iou) * 100
        del demo_vox_label, demo_grid, demo_pt_fea, demo_grid_ten

        print('Current val miou is %.3f' % (val_miou))
        print('Current val loss is %.3f' % (np.mean(demo_loss_list)))