Example #1
def evaluate(args):
    test_data, test_label = load_data(
        'experiment/data/modelnet40_ply_hdf5_2048/', train=False)
    testDataset = ModelNetDataLoader(test_data, test_label)
    testDataLoader = torch.utils.data.DataLoader(testDataset,
                                                 batch_size=args.batch_size,
                                                 shuffle=False)

    log.debug('Building Model', args.model_name)
    if args.model_name == 'pointnet':
        num_class = 40
        model = PointNetCls(num_class, args.feature_transform)
    else:
        model = PointNet2ClsMsg()

    torch.backends.cudnn.benchmark = True
    model = torch.nn.DataParallel(model).cuda()
    log.debug('Using gpu:', args.gpu)

    if args.pretrain is None:
        log.err('No pretrain model')
        return

    log.debug('Loading pretrain model...')
    state_dict = torch.load(args.pretrain)
    model.load_state_dict(state_dict)

    acc = test_clf(model.eval(), testDataLoader)
    log.msg(Test_Accuracy='%.5f' % (acc))
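
The helper test_clf is not shown in this excerpt. As a minimal sketch of what such an evaluation loop might look like, assuming the model returns (log_probs, trans_feat) and the loader yields (points, target) batches as in the other examples on this page:

def test_clf_sketch(model, loader):
    # Hypothetical stand-in for test_clf; not the author's implementation.
    correct, total = 0, 0
    with torch.no_grad():
        for points, target in loader:
            target = target[:, 0]                    # labels arrive as (B, 1)
            points = points.transpose(2, 1).cuda()   # (B, N, 3) -> (B, 3, N)
            pred, _ = model(points)                  # log-probabilities
            pred_choice = pred.data.max(1)[1]
            correct += pred_choice.eq(target.cuda().long()).sum().item()
            total += target.size(0)
    return correct / total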
Example #2
def vis(args):
    test_data, test_label = load_data(root, train=False)
    log.info(test_data=test_data.shape, test_label=test_label.shape)

    log.debug('Building Model', args.model_name)
    if args.model_name == 'pointnet':
        num_class = 40
        model = PointNetCls(num_class, args.feature_transform).cuda()
    else:
        model = PointNet2ClsMsg().cuda()

    torch.backends.cudnn.benchmark = True
    model = torch.nn.DataParallel(model)
    model.cuda()
    log.info('Using multi GPU:', args.gpu)

    if args.pretrain is None:
        log.err('No pretrain model')
        return

    log.debug('Loading pretrain model...')
    checkpoint = torch.load(args.pretrain)
    model.load_state_dict(checkpoint)
    model.eval()

    log.info('Press space to exit, press Q for next frame')

    for idx in range(test_data.shape[0]):
        point_np = test_data[idx:idx + 1]
        gt = test_label[idx][0]

        points = torch.from_numpy(point_np)
        points = points.transpose(2, 1).cuda()

        pred, trans_feat = model(points)
        pred_choice = pred.data.max(1)[1]
        log.info(gt=class_names[gt],
                 pred_choice=class_names[pred_choice.cpu().numpy().item()])

        point_cloud = open3d.geometry.PointCloud()
        point_cloud.points = open3d.utility.Vector3dVector(point_np[0])

        vis = open3d.visualization.VisualizerWithKeyCallback()
        vis.create_window()
        vis.get_render_option().background_color = np.asarray([0, 0, 0])
        vis.add_geometry(point_cloud)
        vis.register_key_callback(32, lambda vis: exit())
        vis.run()
        vis.destroy_window()
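
Note that the loop above creates and destroys a new visualizer window for every sample; pressing Q closes the window (Open3D's default binding), which advances the loop, and space calls exit(). A sketch of the alternative, keeping one window alive and swapping the point buffer each frame (the names here are illustrative, not the author's code):

viewer = open3d.visualization.VisualizerWithKeyCallback()
viewer.create_window()
viewer.get_render_option().background_color = np.asarray([0, 0, 0])
pc = open3d.geometry.PointCloud()
pc.points = open3d.utility.Vector3dVector(test_data[0])
viewer.add_geometry(pc)

state = {'next': False}

def next_frame(v):                  # bound to Q (key code 81)
    state['next'] = True
    return False

viewer.register_key_callback(81, next_frame)
viewer.register_key_callback(32, lambda v: exit())   # space exits, as above

for idx in range(test_data.shape[0]):
    pc.points = open3d.utility.Vector3dVector(test_data[idx])
    viewer.update_geometry(pc)      # older Open3D versions take no argument here
    state['next'] = False
    while not state['next']:
        viewer.poll_events()
        viewer.update_renderer()
viewer.destroy_window()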
Example #3
def main(args):
    ''' --- SELECT DEVICES --- '''
    # Select either gpu or cpu
    device = torch.device("cuda" if args.cuda else "cpu")
    # Select among available GPUs
    if args.cuda:
        os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(
            str(x) for x in args.gpudevice)
    ''' --- CREATE EXPERIMENTS DIRECTORY AND LOGGERS IN TENSORBOARD --- '''
    projdir = sys.path[0]
    # Path for saving and loading the network.
    saveloadpath = os.path.join(projdir, 'experiment\\checkpoints',
                                args.exp_name + '.pth')
    Path(os.path.dirname(saveloadpath)).mkdir(exist_ok=True, parents=True)
    # timestamp = str(datetime.datetime.now().strftime('%Y-%m-%d-%H-%M'))
    tblogdir = os.path.join(projdir, 'experiment\\tensorboardX',
                            args.exp_name)  # + '_' + timestamp )
    Path(tblogdir).mkdir(exist_ok=True, parents=True)
    # Create the TensorBoard writer (used to log training information) with SummaryWriter;
    # flush_secs sets how many seconds to wait between flushes to disk.
    tb_writer = SummaryWriter(logdir=tblogdir,
                              flush_secs=3,
                              write_to_disk=True)
    ''' --- INIT DATASETS AND DATALOADER (FOR SINGLE EPOCH) --- '''
    # Read data from file and split into training and test sets, each consisting of multiple frames. Note that Ts
    # is the per-frame recording period: the automotive radar records one frame every 82 ms (the LSTM needs this timing).
    train_dataset, test_dataset, class_names = read_dataset(
        args.datapath, Ts=0.082, train_test_split=0.8)

    # Prepare the training and testing datasets. Both trainDataset and testDataset hold multiple frames,
    # each resampled to a unified number of detection points (NMAX detection points per frame).

    # Init the test dataset (note: do NOT apply data augmentation to the test set)
    test_dataTransformations = transforms.Compose(
        [NormalizeTime(), Resampling(maxPointsPerFrame=10)])
    testDataset = RadarClassDataset(dataset=test_dataset,
                                    transforms=test_dataTransformations,
                                    sequence_length=1)
    # Init train datasets
    train_dataTransformations = transforms.Compose([
        NormalizeTime(),
        DataAugmentation(),
        Resampling(maxPointsPerFrame=10)
    ])
    trainDataset = RadarClassDataset(dataset=train_dataset,
                                     transforms=train_dataTransformations,
                                     sequence_length=1)
    # Create dataloader for training by using batch_size frames' data in each batch
    trainDataLoader = DataLoader(trainDataset,
                                 batch_size=args.batchsize,
                                 shuffle=True,
                                 num_workers=args.num_workers)
    ''' --- INIT NETWORK MODEL --- '''
    # Load selected network model and put it to right device
    if args.model_name == 'pointnet':
        classifier = PointNetCls(dim=args.pointCoordDim,
                                 num_class=args.numclasses,
                                 feature_transform=args.feature_transform)
    elif args.model_name == 'pointnet2':
        classifier = PointNet2ClsMsg(
            dim=args.pointCoordDim,
            num_class=args.numclasses,
        )
    else:
        raise Exception(
            'Argument "model_name" does not match existent networks')
    classifier = classifier.to(device)
    ''' --- LOAD NETWORK IF EXISTS --- '''
    if os.path.exists(saveloadpath):
        print('Using pretrained model found...')
        checkpoint = torch.load(saveloadpath)
        start_epoch = checkpoint['epoch'] + 1  # so the printed start_epoch counts from 1, 2, ... rather than 0, 1, ...
        iteration = checkpoint['iteration']
        best_test_acc = checkpoint['test_accuracy']
        classifier.load_state_dict(checkpoint['model_state_dict'])
    else:
        print('No existing model, starting training from scratch...')
        start_epoch = 1  # so the printed start_epoch counts from 1, 2, ... rather than 0, 1, ...
        iteration = 1  # so the printed iteration counts from 1, 2, ... rather than 0, 1, ...
        best_test_acc = 0
    ''' --- CREATE OPTIMIZER ---'''
    if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(classifier.parameters(),
                                    lr=args.lr,
                                    momentum=0.9)
    elif args.optimizer == 'ADAM':
        optimizer = torch.optim.Adam(classifier.parameters(),
                                     lr=args.lr,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer, step_size=args.lr_epoch_half,
        gamma=0.5)  # half(0.5) the learning rate every 'step_size' epochs

    # log info
    printparams = 'Model parameters:' + json.dumps(
        vars(args), indent=4, sort_keys=True)
    print(printparams)
    tb_writer.add_text('hyper-parameters', printparams,
                       iteration)  # tb_writer.add_hparam(args)
    tb_writer.add_text(
        'dataset', 'dataset sample size: training: {}, test: {}'.format(
            train_dataset.shape[0], test_dataset.shape[0]), iteration)
    ''' --- START TRAINING ---'''
    for epoch in range(start_epoch, args.epoch + 1):
        print('Epoch %d/%s:' % (epoch, args.epoch))

        # Add the "learning rate" into tensorboard scalar which will be shown in tensorboard
        tb_writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'],
                             iteration)

        for batch_id, data in tqdm(enumerate(trainDataLoader, 0),
                                   total=len(trainDataLoader),
                                   smoothing=0.9):
            points, target = data  # (B:batch x S:seq x C:features x N:points) , (B x S:seq)
            # Squeeze to drop the sequence dimension (equal to 1), convert everything to float (to avoid dtype errors inside the model) and move to device
            points, target = points.squeeze(
                dim=1).float().to(device), target.float().to(
                    device)  # (B:batch x C:features x N:points) , (B)
            # points, target = points.float().to(device), target.float().to(device)
            # Reset gradients
            optimizer.zero_grad()
            # Sets the module in training mode
            classifier = classifier.train()
            # Forward propagation
            pred = classifier(points)
            # Calculate the cross-entropy loss. The pointnet/pointnet2 models output log_softmax, and
            # log_softmax followed by nll_loss equals CrossEntropyLoss, so F.nll_loss suffices here.
            loss = F.nll_loss(pred, target.long())
            if args.model_name == 'pointnet':
                loss += feature_transform_regularizer(classifier.trans) * 0.001
                if args.feature_transform:
                    loss += feature_transform_regularizer(
                        classifier.trans_feat) * 0.001
            # Back propagate
            loss.backward()
            # Update weights
            optimizer.step()
            # Log once every 5 batches: add "train_loss/cross_entropy" as a tensorboard scalar
            if not batch_id % 5:
                tb_writer.add_scalar('train_loss/cross_entropy', loss.item(),
                                     iteration)
            iteration += 1
            # if batch_id> 2: break

        scheduler.step()
        ''' --- TEST AND SAVE NETWORK --- '''
        if not epoch % 10:  # Evaluate and checkpoint every 10 epochs.
            # Perform predictions on the training data.
            train_targ, train_pred = test(classifier,
                                          trainDataset,
                                          device,
                                          num_workers=args.num_workers,
                                          batch_size=1800)
            # Perform predictions on the testing data.
            test_targ, test_pred = test(classifier,
                                        testDataset,
                                        device,
                                        num_workers=args.num_workers,
                                        batch_size=1800)

            # Calculate the accuracy rate for training data.
            train_acc = metrics_accuracy(train_targ, train_pred)
            # Calculate the accuracy rate for testing data.
            test_acc = metrics_accuracy(test_targ, test_pred)
            print('\r Training loss: {}'.format(loss.item()))
            print('Train Accuracy: {}\nTest Accuracy: {}'.format(
                train_acc, test_acc))
            # Add the "train_acc" "test_acc" into tensorboard scalars which will be shown in tensorboard.
            tb_writer.add_scalars('metrics/accuracy', {
                'train': train_acc,
                'test': test_acc
            }, iteration)

            # Calculate confusion matrix.
            confmatrix_test = metrics_confusion_matrix(test_targ, test_pred)
            print('Test confusion matrix: \n', confmatrix_test)
            # Log confusion matrix.
            fig, ax = plot_confusion_matrix(confmatrix_test,
                                            class_names,
                                            normalize=False,
                                            title='Test Confusion Matrix')
            # Log normalized confusion matrix.
            fig_n, ax_n = plot_confusion_matrix(
                confmatrix_test,
                class_names,
                normalize=True,
                title='Test Confusion Matrix - Normalized')
            # Add the "confusion matrix" "normalized confusion matrix" into tensorboard figure which will be shown in tensorboard.
            tb_writer.add_figure('test_confusion_matrix/abs',
                                 fig,
                                 global_step=iteration,
                                 close=True)
            tb_writer.add_figure('test_confusion_matrix/norm',
                                 fig_n,
                                 global_step=iteration,
                                 close=True)

            # Log precision recall curves.
            for idx, clsname in enumerate(class_names):
                # Convert log_softmax to softmax(which is actual probability) and select the desired class.
                test_pred_binary = torch.exp(test_pred[:, idx])
                test_targ_binary = test_targ.eq(idx)
                # Add the "precision recall curves" which will be shown in tensorboard.
                tb_writer.add_pr_curve(tag='pr_curves/' + clsname,
                                       labels=test_targ_binary,
                                       predictions=test_pred_binary,
                                       global_step=iteration)
            ''' --- SAVE NETWORK --- '''
            # if (test_acc >= best_test_acc): # For now lets save every time, since we are only testing in a subset of the test dataset
            best_test_acc = test_acc  # if test_acc > best_test_acc else best_test_acc
            state = {
                'epoch': epoch,
                'iteration': iteration,
                'train_accuracy': train_acc if args.train_metric else 0.0,
                'test_accuracy': best_test_acc,
                'model_state_dict': classifier.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            }
            torch.save(state, saveloadpath)
            print('Model saved!!!')

    print('Best Accuracy: %f' % best_test_acc)

    tb_writer.close()
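
The loss comment above relies on the identity that log_softmax followed by nll_loss equals cross-entropy on raw logits, which is quick to verify:

import torch
import torch.nn.functional as F

logits = torch.randn(4, 40)            # batch of 4, 40 classes
target = torch.randint(0, 40, (4,))
a = F.nll_loss(F.log_softmax(logits, dim=1), target)
b = F.cross_entropy(logits, target)    # CrossEntropyLoss, functional form
assert torch.allclose(a, b)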
Example #4
    worker_init_fn=worker_init_fn,
    collate_fn=my_collate,
)

is_resume = 0
if args.load_model and args.load_epoch != -1:
    is_resume = 1

if is_resume or args.mode == 'test':
    model = torch.load(args.load_model,
                       map_location='cuda:{}'.format(args.gpu))
    model.device_ids = [args.gpu]
    print('load model {}'.format(args.load_model))
else:
    model = PointNetCls(num_points=grasp_points_num,
                        input_chann=point_channel,
                        k=2)
if args.cuda:
    if args.gpu != -1:
        torch.cuda.set_device(args.gpu)
        model = model.cuda()
    else:
        device_id = [0, 1, 2, 3]
        torch.cuda.set_device(device_id[0])
        model = nn.DataParallel(model, device_ids=device_id).cuda()
optimizer = optim.Adam(model.parameters(), lr=args.lr)
scheduler = StepLR(optimizer, step_size=30, gamma=0.5)


def train(model, loader, epoch):
    scheduler.step()
Example #5
import torch
import torch.utils.data
# import torch.nn as nn
# import torch.nn.functional as F
import numpy as np
import sys
from os import path
from model.pointnet import PointNetCls
import torch.multiprocessing as mp

sys.path.append(path.dirname(path.dirname(path.abspath("__file__"))))

# torch.cuda.manual_seed(1)  # don't delete

grasp_points_num = 1024
model = PointNetCls(num_points=grasp_points_num, input_chann=3, k=2)
model.cuda()

model.eval()
torch.set_grad_enabled(False)


def load_weight(weights_path):
    print('[Python] Load weight: {}'.format(weights_path))
    model.load_state_dict(torch.load(weights_path))


def classify_pcs(local_pcs, output_cls=0):
    """ Classify point clouds FPS:650 """
    print("[Python] Classify point clouds...")
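
The body of classify_pcs is truncated in this excerpt. A minimal sketch of how such a function might run the global model defined above, assuming local_pcs is a numpy array of shape (B, N, 3) (both the helper name and the shape are assumptions, not the author's code):

def classify_pcs_sketch(local_pcs, output_cls=0):
    # Gradients are already disabled globally by torch.set_grad_enabled(False).
    points = torch.from_numpy(local_pcs).float().transpose(2, 1).cuda()  # -> (B, 3, N)
    out = model(points)
    pred = out[0] if isinstance(out, tuple) else out   # log-probabilities
    return torch.exp(pred)[:, output_cls].cpu().numpy()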
Example #6
def main():
    ''' --- SELECT DEVICES --- '''
    # Select either gpu or cpu
    device = torch.device("cuda" if args.cuda else "cpu")
    # Select among available GPUs
    if args.cuda:
        os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(
            str(x) for x in args.gpudevice)
    ''' --- INIT NETWORK MODEL --- '''
    projdir = sys.path[0]
    # Path for saving and loading the network.
    saveloadpath = os.path.join(projdir, 'experiment\\checkpoints',
                                args.exp_name + '.pth')
    Path(os.path.dirname(saveloadpath)).mkdir(exist_ok=True, parents=True)
    # Load selected network model and put it to right device
    if args.model_name == 'pointnet':
        classifier = PointNetCls(dim=args.pointCoordDim,
                                 num_class=len(args.categories),
                                 feature_transform=args.feature_transform)
    elif args.model_name == 'pointnet2':
        classifier = PointNet2ClsMsg(dim=args.pointCoordDim,
                                     num_class=len(args.categories))
    else:
        raise Exception(
            'Argument "model_name" does not match existent networks')
    classifier = classifier.to(device)
    ''' --- LOAD NETWORK IF EXISTS --- '''
    if os.path.exists(saveloadpath):
        print('Using pretrained model found...')
        checkpoint = torch.load(saveloadpath)
        start_epoch = checkpoint['epoch'] + 1  # so the printed start_epoch counts from 1, 2, ... rather than 0, 1, ...
        iteration = checkpoint['iteration']
        best_test_acc = checkpoint['test_accuracy']
        classifier.load_state_dict(checkpoint['model_state_dict'])
    else:
        raise Exception('Model in: {} does not exists'.format(saveloadpath))

    # put classifier in evaluation mode
    classifier = classifier.eval()
    ''' --- INIT DATASETS AND DATALOADER (FOR SINGLE EPOCH) --- '''
    # Ideal for PointNet and pointLSTM - dataloader will return (B:batch, S:seq, C:features, N:points)
    dataTransformations = transforms.Compose([
        ToSeries(),
        DataAugmentation(),
        Resampling(maxPointsPerFrame=10),
        ToTensor()
    ])

    # init dataset
    nusc = NuScenes(version=args.nuscenes_eval_dir,
                    dataroot=args.nuscenes_dir,
                    verbose=True)
    ''' Iterate over samples '''
    idx = list(range(len(nusc.sample)))
    random.shuffle(idx)  # shuffle samples
    # for sample_rec in nusc.sample:
    for i in idx:
        sample_rec = nusc.sample[i]
        sample_token = sample_rec['token']

        # read all sensors data and merge them to a common reference frame
        sensorlist = []
        for sensor in args.sensors:
            sensorlist.append(
                MyRadarPointCloud.from_sample_token(nusc, sample_token,
                                                    sensor))

        global radar_pc
        radar_pc = MyRadarPointCloud.merge(sensorlist, 0)

        # get annotations for actual sample
        ann_tokens = nusc.get('sample', sample_token)['anns']

        # filter annotations that are objects of interest for our classifier
        def get_ann_properties(nusc, ann_token: str):
            ann_rec = nusc.get('sample_annotation', ann_token)
            category = ann_rec['category_name']
            attr_tokens = ann_rec['attribute_tokens']
            attr = [
                nusc.get('attribute', attr_token)['name']
                for attr_token in attr_tokens
            ]
            if len(attr) == 0: attr = ['']
            print('object category:{:30} attr:{:40} center:{:20}'.format(
                category, str(attr),
                str(ann_rec['translation'] - radar_pc.A_cs_2_gl[:3, 3])))
            return category, attr[
                0]  # for now lets return only the first attribute (we might have more per object)

        def isInterestingObject(ann_token):
            cat, att = get_ann_properties(nusc, ann_token)
            # category-attribute pair must match at least one of the desired category-attr pair
            for desired_cat, desired_att in zip(args.categories,
                                                args.attributes):
                if cat in desired_cat and att in desired_att:
                    return True
            return False

        print('\n\nGround-truth objects on this frame:')
        ann_tokens = list(filter(isInterestingObject, ann_tokens))

        def getObjLabel(ann_token):
            cat, att = get_ann_properties(nusc, ann_token)
            for idx, (desired_cat, desired_att) in enumerate(
                    zip(args.categories, args.attributes)):
                if cat in desired_cat and att in desired_att:
                    return idx
            return np.NaN

        labels = list(map(getObjLabel, ann_tokens))
        assert np.all(
            ~np.isnan(labels)
        ), 'Something strange happened... object was selected as interesting but we can not find its label'

        # create bounding boxes from annotations
        ann_boxes = []
        for ann_token, label in zip(ann_tokens, labels):
            box = radar_pc.box(ann_token)
            box.label = label
            box.name = args.objectNames[label]
            ann_boxes.append(box)
        ''' APPLY DBSCAN ON EACH SCENE'''
        from sklearn.cluster import DBSCAN
        points_scene = radar_pc.points  # points_gl = <5,N>
        # filter out objects, whose speed is less than 0.3 m/s
        idx_moving = np.linalg.norm(points_scene[2:4, :].T, axis=1) > 0.25
        points_scene = points_scene[:, idx_moving]
        # apply DBSCAN
        clustering = DBSCAN(eps=3, min_samples=1).fit(points_scene[:2, :].T)
        ''' For each cluster, run network and predict class '''
        pred_results = []
        for cluster_idx in range(max(clustering.labels_) + 1):  # +1 so the last cluster is included
            # select points from the current cluster
            points_idx = clustering.labels_ == cluster_idx
            points_obj = points_scene[:, points_idx]
            # apply necessary transformations
            features = dataTransformations(points_obj)
            # convert to torch.tensor
            features = torch.tensor(features).type(
                torch.FloatTensor).unsqueeze(0).to(device)
            # calculate network prediction
            pred = classifier(features).argmax().item()
            # store result
            pred_results.append((points_obj, pred))
        ''' RENDER '''
        OBJCOLORS = ['red', 'green', 'blue', 'grey', 'orange', 'white']
        fig = plt.figure(constrained_layout=True)
        gs = GridSpec(2, 6, figure=fig)
        gs.update(wspace=0, hspace=0)
        axs = [
            fig.add_subplot(gs[1, :3]),
            fig.add_subplot(gs[1, 3:]),
            fig.add_subplot(gs[0, 0]),
            fig.add_subplot(gs[0, 1]),
            fig.add_subplot(gs[0, 2]),
            fig.add_subplot(gs[0, 3]),
            fig.add_subplot(gs[0, 4]),
            fig.add_subplot(gs[0, 5])
        ]

        # render annotation boxes
        for box in ann_boxes:
            color = OBJCOLORS[box.label]
            box.render(axs[0], colors=(color, ) * 3)
            box.render(axs[1], colors=(color, ) * 3)
        # render cars
        radar_pc.car.render(axs[0], colors=('orange', 'k', 'k'))
        radar_pc.car.render(axs[1], colors=('orange', 'k', 'k'))
        # render pointcloud
        radar_pc._render_pc(axs[0], radar_pc.points, color_channel='k')
        for cluster_points, pred in pred_results:
            # radar_pc._render_pc( axs[1], cluster_points, color_channel=OBJCOLORS[pred])
            if pred != 5:
                axs[1].scatter(cluster_points[0, :],
                               cluster_points[1, :],
                               s=30,
                               c=OBJCOLORS[pred],
                               edgecolors='k',
                               linewidths=1,
                               zorder=100)
                # plot ellipses
                if cluster_points.shape[1] > 1:
                    confidence_ellipse(axs[1],
                                       cluster_points[0, :],
                                       cluster_points[1, :],
                                       edgecolor=OBJCOLORS[pred],
                                       facecolor=OBJCOLORS[pred],
                                       alpha=0.5)
        # render camera images
        cameras = [
            'CAM_BACK_LEFT', 'CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT',
            'CAM_BACK_RIGHT', 'CAM_BACK'
        ]
        for ax_idx, cam_name in enumerate(cameras):
            cam_token = sample_rec['data'][cam_name]
            cam_rec = nusc.get('sample_data', cam_token)
            img_filename = os.path.join(nusc.dataroot, cam_rec['filename'])
            img = mpimg.imread(img_filename)
            # axs[ax_idx].set_axis_off()
            axs[ax_idx + 2].get_xaxis().set_visible(False)
            axs[ax_idx + 2].get_yaxis().set_visible(False)
            axs[ax_idx + 2].imshow(img)
            axs[ax_idx + 2].set_title(cam_name)
        # format
        legend_elements = [
            Line2D([0], [0], color=color, lw=4, label=name)
            for color, name in zip(OBJCOLORS, args.objectNames)
        ]
        axs[0].legend(handles=legend_elements,
                      loc='upper right',
                      prop={'size': 8})
        axs[1].legend(handles=legend_elements,
                      loc='upper right',
                      prop={'size': 8})
        axs[0].axis('equal')
        axs[1].axis('equal')
        mng = plt.get_current_fig_manager()
        mng.window.showMaximized()
        plt.tight_layout()
        lims = (-60, 60)
        axs[0].set_xlim(lims)
        axs[0].set_ylim(lims)
        axs[0].set_title(
            'Radar detection points and ground truth bounding boxes of MOVING objects'
        )
        axs[1].set_xlim(lims)
        axs[1].set_ylim(lims)
        axs[1].set_title(
            'Segmentation of detection points (DBSCAN + PointNet classifier)\n3-sigma ellipses for each cluster and ground-truth bbs'
        )
        plt.tight_layout()
        plt.show()

        # plot scene with the standard Nuscenes method
        # nusc.render_sample(sample_token)
        # plt.show()

    # sample_token = nusc.get('sample', sample_token)['next']

    tb_writer.close()
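
With min_samples=1, DBSCAN assigns no noise label, but with other settings label -1 marks noise and must be skipped; iterating over np.unique(labels_) handles both cases and never drops the last cluster. A sketch of the pattern on synthetic data:

import numpy as np
from sklearn.cluster import DBSCAN

xy = np.random.rand(100, 2) * 50
clustering = DBSCAN(eps=3, min_samples=2).fit(xy)
for cluster_idx in np.unique(clustering.labels_):
    if cluster_idx == -1:
        continue                                     # noise points
    members = xy[clustering.labels_ == cluster_idx]  # all points of this cluster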
Example #7
dataloader = torch.utils.data.DataLoader(
    dataset,
    batch_size=args.batch_size,
    num_workers=16,
    pin_memory=True,
    shuffle=True,
    worker_init_fn=worker_init_fn,
    collate_fn=my_collate,
)

is_resume = 0
if args.load_model and args.load_epoch != -1:
    is_resume = 1

model = PointNetCls(num_points=grasp_points_num,
                    input_chann=point_channel,
                    k=2)
if is_resume or args.mode == 'test':
    model.load_state_dict(torch.load(args.load_model))
    print('load weights {}'.format(args.load_model))
if args.cuda:
    base_model = model.cuda()
    model = nn.DataParallel(base_model)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
scheduler = StepLR(optimizer, step_size=30, gamma=0.5)


def train(model, dataset, loader, epoch):
    scheduler.step()
    dataset.train()
    model.train()
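
Both train() stubs above call scheduler.step() at the top of the epoch, which follows the pre-1.1 PyTorch idiom. Since PyTorch 1.1, the documented order is optimizer.step() per batch followed by scheduler.step() once per epoch; a sketch (device handling omitted, and the tuple indexing assumes the model returns (log_probs, ...) as elsewhere on this page):

for epoch in range(args.epoch):
    model.train()
    for points, target in loader:
        optimizer.zero_grad()
        pred = model(points)[0]
        loss = F.nll_loss(pred, target.long())
        loss.backward()
        optimizer.step()        # per-batch parameter update
    scheduler.step()            # once per epoch, after the updates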
Example #8
def main(args):
    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    # datapath = './data/ModelNet/'  
    datapath = './data/objecnn20_data_hdf5_2048/'
    if args.rotation is not None:
        ROTATION = (int(args.rotation[0:2]),int(args.rotation[3:5]))
    else:
        ROTATION = None

    '''CREATE DIR'''
    experiment_dir = Path('./experiment/')
    experiment_dir.mkdir(exist_ok=True)
    file_dir = Path(str(experiment_dir) +'/%sObjectNNClf-'%args.model_name+ str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')))
    file_dir.mkdir(exist_ok=True)
    checkpoints_dir = file_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = file_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)

    '''LOG'''
    args = parse_args()
    logger = logging.getLogger(args.model_name)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler(str(log_dir) + '/train_%s_ObjectNNClf.txt'%args.model_name)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info('---------------------------------------------------TRAINING---------------------------------------------------')
    logger.info('PARAMETER ...')
    logger.info(args)

    '''DATA LOADING'''
    logger.info('Load dataset ...')
    train_data, train_label, test_data, test_label = load_data(datapath, classification=True)
    logger.info("The number of training data is: %d",train_data.shape[0])
    logger.info("The number of test data is: %d", test_data.shape[0])
    trainDataset = ObjectNNDataLoader(train_data, train_label, rotation=ROTATION)
    if ROTATION is not None:
        print('The range of training rotation is',ROTATION)
    testDataset = ObjectNNDataLoader(test_data, test_label, rotation=ROTATION)

    trainDataLoader = torch.utils.data.DataLoader(trainDataset, batch_size=args.batchsize, shuffle=True)
    testDataLoader = torch.utils.data.DataLoader(testDataset, batch_size=args.batchsize, shuffle=False)

    '''MODEL LOADING'''
    num_class = 20
    classifier = PointNetCls(num_class,args.feature_transform).cuda() if args.model_name == 'pointnet' else PointNet2ClsMsg().cuda()
    if args.pretrain is not None:
        print('Use pretrain model...')
        logger.info('Use pretrain model')
        checkpoint = torch.load(args.pretrain)
        start_epoch = checkpoint['epoch']
        # classifier.load_state_dict(checkpoint['model_state_dict'])
        # print(checkpoint['model_state_dict'])
        model_dict = classifier.state_dict()
        pretrained_dict = {k: v for k, v in checkpoint['model_state_dict'].items() if k in model_dict}
        model_dict.update(pretrained_dict)
        classifier.load_state_dict(model_dict)
    else:
        print('No existing model, starting training from scratch...')
        start_epoch = 0


    if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
    elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(
            classifier.parameters(),
            lr=args.learning_rate,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=args.decay_rate
        )
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)  # halve the learning rate every 20 epochs
    global_epoch = 0
    global_step = 0
    best_tst_accuracy = 0.0
    blue = lambda x: '\033[94m' + x + '\033[0m'

    '''TRAINING'''
    logger.info('Start training...')
    for epoch in range(start_epoch,args.epoch):
        print('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch))
        logger.info('Epoch %d (%d/%s):' ,global_epoch + 1, epoch + 1, args.epoch)

        scheduler.step()
        for batch_id, data in tqdm(enumerate(trainDataLoader, 0), total=len(trainDataLoader), smoothing=0.9):
            points, target = data
            target = target[:, 0]
            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()
            classifier = classifier.train()
            pred, trans_feat, global_feature = classifier(points)
            loss = F.nll_loss(pred, target.long())
            if args.feature_transform and args.model_name == 'pointnet':
                loss += feature_transform_reguliarzer(trans_feat) * 0.001
            loss.backward()
            optimizer.step()
            global_step += 1

        train_acc = test(classifier.eval(), trainDataLoader) if args.train_metric else None
        acc = test(classifier, testDataLoader)


        print('\r Loss: %f' % loss.data)
        logger.info('Loss: %.2f', loss.data)
        if args.train_metric:
            print('Train Accuracy: %f' % train_acc)
            logger.info('Train Accuracy: %f', (train_acc))
        print('\r Test %s: %f' % (blue('Accuracy'),acc))
        logger.info('Test Accuracy: %f', acc)

        if (acc >= best_tst_accuracy) and epoch > 5:
            best_tst_accuracy = acc
            logger.info('Save model...')
            save_checkpoint(
                global_epoch + 1,
                train_acc if args.train_metric else 0.0,
                acc,
                classifier,
                optimizer,
                str(checkpoints_dir),
                args.model_name)
            print('Saving model....')
        global_epoch += 1
    print('Best Accuracy: %f'%best_tst_accuracy)

    logger.info('End of training...')
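
The filter-update-reload pattern used above to load partially matching pretrained weights can also be written with load_state_dict(strict=False), which ignores missing and unexpected keys and reports them back:

checkpoint = torch.load(args.pretrain)
incompatible = classifier.load_state_dict(checkpoint['model_state_dict'],
                                          strict=False)
print('missing keys:', incompatible.missing_keys)
print('unexpected keys:', incompatible.unexpected_keys)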
Example #9
def main(testDataset):
    args = parse_args()
    '''HYPER PARAMETER'''
    # os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu if args.multi_gpu is None else '0,1,2,3'

    datapath = './data/ModelNet/'

    if args.rotation is not None:
        ROTATION = (int(args.rotation[0:2]),int(args.rotation[3:5]))
    else:
        ROTATION = None

    '''CREATE DIR'''
    experiment_dir = Path('./experiment/')
    experiment_dir.mkdir(exist_ok=True)
    checkpoints_dir = Path('./experiment/checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = Path('./experiment/logs/')
    log_dir.mkdir(exist_ok=True)

    '''LOG'''
    args = parse_args()
    logger = logging.getLogger("PointNet2")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('./experiment/logs/train_%s_'%args.model_name+ str(datetime.datetime.now().strftime('%Y-%m-%d %H-%M'))+'.txt')
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info('---------------------------------------------------TRAINING---------------------------------------------------')
    logger.info('PARAMETER ...')
    logger.info(args)

    '''DATA LOADING'''
    logger.info('Load dataset ...')
    # train_data, train_label, test_data, test_label = load_data(datapath, classification=True)
    # logger.info("The number of training data is: %d",train_data.shape[0])
    # logger.info("The number of test data is: %d", test_data.shape[0])
    # trainDataset = ModelNetDataLoader(train_data, train_label, rotation=ROTATION)
    #if ROTATION is not None:
    #    print('The range of training rotation is',ROTATION)
    # testDataset = ModelNetDataLoader(test_data, test_label, rotation=ROTATION)
#     trainDataLoader = torch.utils.data.DataLoader(trainDataset, batch_size=args.batchsize, shuffle=True)
    testDataLoader = torch.utils.data.DataLoader(testDataset, batch_size=args.batchsize, shuffle=False)

    '''MODEL LOADING'''
    num_class = 40
    classifier = PointNetCls(num_class,args.feature_transform).cuda() if args.model_name == 'pointnet' else PointNet2ClsMsg().cuda()

    '''GPU selection and multi-GPU'''
    if args.multi_gpu is not None:
        device_ids = [int(x) for x in args.multi_gpu.split(',')]
        torch.backends.cudnn.benchmark = True
        classifier.cuda(device_ids[0])
        classifier = torch.nn.DataParallel(classifier, device_ids=device_ids)
    else:
        classifier.cuda()

    
    
    
    ''' Use pretrained model '''
    if args.pretrain is not None:
        print('Use pretrain model...')
        logger.info('Use pretrain model')
        checkpoint = torch.load(args.pretrain)
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
    else:
        print('No existing model, starting training from scratch...')
        start_epoch = 0


    if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
    elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(
            classifier.parameters(),
            lr=args.learning_rate,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=args.decay_rate
        )
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
    global_epoch = 0
    global_step = 0
    best_tst_accuracy = 0.0
    blue = lambda x: '\033[94m' + x + '\033[0m'
    
    start_epoch = 0
    args.epoch = 1
    '''TRAINING'''
    logger.info('Start training...')
    for epoch in range(start_epoch,args.epoch):
        print('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch))
        logger.info('Epoch %d (%d/%s):' ,global_epoch + 1, epoch + 1, args.epoch)

#         scheduler.step()
#         for batch_id, data in tqdm(enumerate(trainDataLoader, 0), total=len(trainDataLoader), smoothing=0.9):
#             points, target = data
#             target = target[:, 0]
#             points = points.transpose(2, 1)
#             points, target = points.cuda(), target.cuda()
#             optimizer.zero_grad()
#             classifier = classifier.train()
#             pred, trans_feat = classifier(points)
#             loss = F.nll_loss(pred, target.long())
#             if args.feature_transform and args.model_name == 'pointnet':
#                 loss += feature_transform_reguliarzer(trans_feat) * 0.001
#             loss.backward()
#             optimizer.step()
#             global_step += 1

        train_acc = test(classifier.eval(), trainDataLoader) if args.train_metric else None  # NOTE: trainDataLoader is commented out above, so train_metric must be False here
        acc, fts = test(classifier, testDataLoader)
        # return fts
        # print('\r Loss: %f' % loss.data)
        # logger.info('Loss: %.2f', loss.data)
        if args.train_metric:
            print('Train Accuracy: %f' % train_acc)
            logger.info('Train Accuracy: %f', (train_acc))
        print('\r Test %s: %f' % (blue('Accuracy'),acc))
        logger.info('Test Accuracy: %f', acc)
        return fts  # NOTE: returning here makes the checkpoint-saving code below unreachable
        if (acc >= best_tst_accuracy) and epoch > 5:
            best_tst_accuracy = acc
            logger.info('Save model...')
            save_checkpoint(
                global_epoch + 1,
                train_acc if args.train_metric else 0.0,
                acc,
                classifier,
                optimizer,
                str(checkpoints_dir),
                args.model_name)
            print('Saving model....')
        global_epoch += 1
    print('Best Accuracy: %f'%best_tst_accuracy)

    logger.info('End of training...')
Example #10
def main():

    train_loader = torch.utils.data.DataLoader(
        PointGraspOneViewDataset(
            grasp_points_num=grasp_points_num,
            path=args.data_path,
            tag='train',
            grasp_amount_per_file=6500,
            thresh_good=thresh_good,
            thresh_bad=thresh_bad,
        ),
        batch_size=args.batch_size,
        num_workers=32,
        pin_memory=True,
        shuffle=True,
        worker_init_fn=worker_init_fn,
        collate_fn=my_collate,
    )

    test_loader = torch.utils.data.DataLoader(
        PointGraspOneViewDataset(
            grasp_points_num=grasp_points_num,
            path=args.data_path,
            tag='test',
            grasp_amount_per_file=500,
            thresh_good=thresh_good,
            thresh_bad=thresh_bad,
            with_obj=True,
        ),
        batch_size=args.batch_size,
        num_workers=32,
        pin_memory=True,
        shuffle=True,
        worker_init_fn=worker_init_fn,
        collate_fn=my_collate,
    )

    is_resume = 0
    if args.load_model and args.load_epoch != -1:
        is_resume = 1

    if is_resume or args.mode == 'test':
        model = torch.load(args.load_model,
                           map_location='cuda:{}'.format(args.gpu))
        model.device_ids = [args.gpu]
        print('load model {}'.format(args.load_model))
    else:
        model = PointNetCls(num_points=grasp_points_num,
                            input_chann=point_channel,
                            k=2)
    if args.cuda:
        if args.gpu != -1:
            torch.cuda.set_device(args.gpu)
            model = model.cuda()
        else:
            device_id = [0, 1, 2, 3]
            torch.cuda.set_device(device_id[0])
            model = nn.DataParallel(model, device_ids=device_id).cuda()

    if args.mode == 'train':
        for epoch in range(is_resume * args.load_epoch, args.epoch):
            acc_train = train(model, train_loader, epoch)
            print('Train done, acc={}'.format(acc_train))
            acc, loss = test(model, test_loader)
            print('Test done, acc={}, loss={}'.format(acc, loss))
            logger.add_scalar('train_acc', acc_train, epoch)
            logger.add_scalar('test_acc', acc, epoch)
            logger.add_scalar('test_loss', loss, epoch)
            if epoch % args.save_interval == 0:
                path = os.path.join(args.model_path,
                                    args.tag + '_{}.model'.format(epoch))
                torch.save(model, path)
                print('Save model @ {}'.format(path))
    else:
        print('testing...')
        acc, loss = test(model, test_loader)
        print('Test done, acc={}, loss={}'.format(acc, loss))
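
torch.save(model, path) above pickles the entire module, which ties the checkpoint to the exact class definition and module paths at load time. The more portable idiom is to save the state_dict and restore it into a freshly constructed model; a sketch using the constructor from this example:

torch.save(model.state_dict(), path)   # if wrapped in DataParallel, prefer model.module.state_dict()

restored = PointNetCls(num_points=grasp_points_num,
                       input_chann=point_channel,
                       k=2)
restored.load_state_dict(torch.load(path, map_location='cuda:{}'.format(args.gpu)))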
Example #11
def train(args):
    experiment_dir = mkdir('./experiment/')
    checkpoints_dir = mkdir('./experiment/clf/%s/' % (args.model_name))
    train_data, train_label, test_data, test_label = load_data(
        'experiment/data/modelnet40_ply_hdf5_2048/')

    trainDataset = ModelNetDataLoader(train_data,
                                      train_label,
                                      data_augmentation=args.augment)
    trainDataLoader = DataLoader(trainDataset,
                                 batch_size=args.batch_size,
                                 shuffle=True)

    testDataset = ModelNetDataLoader(test_data, test_label)
    testDataLoader = torch.utils.data.DataLoader(testDataset,
                                                 batch_size=args.batch_size,
                                                 shuffle=False)

    log.info('Building Model', args.model_name)
    if args.model_name == 'pointnet':
        num_class = 40
        model = PointNetCls(num_class, args.feature_transform).cuda()
    else:
        model = PointNet2ClsMsg().cuda()

    torch.backends.cudnn.benchmark = True
    model = torch.nn.DataParallel(model).cuda()
    log.debug('Using gpu:', args.gpu)

    if args.pretrain is not None:
        log.info('Use pretrain model...')
        state_dict = torch.load(args.pretrain)
        model.load_state_dict(state_dict)
        init_epoch = int(args.pretrain[:-4].split('-')[-1])
        log.info('start epoch from', init_epoch)
    else:
        log.info('Training from scratch')
        init_epoch = 0

    if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=20,
                                                gamma=0.5)
    LEARNING_RATE_CLIP = 1e-5

    global_epoch = 0
    global_step = 0
    best_tst_accuracy = 0.0

    log.info('Start training...')
    for epoch in range(init_epoch, args.epoch):
        scheduler.step()
        lr = max(optimizer.param_groups[0]['lr'], LEARNING_RATE_CLIP)

        log.debug(job='clf',
                  model=args.model_name,
                  gpu=args.gpu,
                  epoch='%d/%s' % (epoch, args.epoch),
                  lr=lr)

        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

        for batch_id, data in tqdm(enumerate(trainDataLoader, 0),
                                   total=len(trainDataLoader),
                                   smoothing=0.9):
            points, target = data
            target = target[:, 0]
            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()
            model = model.train()
            pred, trans_feat = model(points)
            loss = F.nll_loss(pred, target.long())
            if args.feature_transform and args.model_name == 'pointnet':
                loss += feature_transform_reguliarzer(trans_feat) * 0.001
            loss.backward()
            optimizer.step()
            global_step += 1

        log.debug('clear cuda cache')
        torch.cuda.empty_cache()

        acc = test_clf(model, testDataLoader)
        log.info(loss='%.5f' % (loss.data))
        log.info(Test_Accuracy='%.5f' % acc)

        if acc >= best_tst_accuracy:
            best_tst_accuracy = acc
            fn_pth = 'clf-%s-%.5f-%04d.pth' % (args.model_name, acc, epoch)
            log.debug('Saving model....', fn_pth)
            torch.save(model.state_dict(),
                       os.path.join(checkpoints_dir, fn_pth))
        global_epoch += 1

    log.info(Best_Accuracy=best_tst_accuracy)
    log.info('End of training...')
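
The checkpoint above is taken from a DataParallel wrapper, so every key in the saved state_dict carries a 'module.' prefix. Loading it back into an unwrapped model requires stripping that prefix (or saving model.module.state_dict() in the first place); a sketch:

state_dict = torch.load(fn_pth)        # fn_pth as saved above
state_dict = {k.replace('module.', '', 1): v for k, v in state_dict.items()}
bare_model = PointNetCls(num_class, args.feature_transform)
bare_model.load_state_dict(state_dict)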
Example #12
def main():    

    ''' --- SELECT DEVICES --- '''
    # Select either gpu or cpu
    device = torch.device("cuda" if args.cuda else "cpu")
    # Select among available GPUs
    if args.cuda: os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(x) for x in args.gpudevice)
    

    ''' --- CREATE EXPERIMENTS DIRECTORY AND LOGGERS IN TENSORBOARD --- '''
    projdir = sys.path[0]
    # Path for saving and loading the network.
    saveloadpath = os.path.join( projdir, 'experiment\\checkpoints', args.exp_name+'.pth')
    Path(os.path.dirname(saveloadpath)).mkdir(exist_ok=True, parents=True)
    # timestamp = str(datetime.datetime.now().strftime('%Y-%m-%d-%H-%M'))
    tblogdir = os.path.join( projdir, 'experiment\\tensorboardX', args.exp_name ) # + '_' + timestamp )
    Path(tblogdir).mkdir(exist_ok=True, parents=True)
    # Create the TensorBoard writer (used to log training information) with SummaryWriter;
    # flush_secs sets how many seconds to wait between flushes to disk.
    tb_writer = SummaryWriter( logdir=tblogdir, flush_secs=3, write_to_disk=True)


    ''' --- INIT DATASETS AND DATALOADER (FOR SINGLE EPOCH) --- '''
    # Ideal for PointNet and pointLSTM - dataloader will return (B:batch, S:seq, C:features, N:points)
    dataTransformations = transforms.Compose([
        ToSeries(),
        DataAugmentation(),
        Resampling(maxPointsPerFrame=10),
        ToTensor()
    ])
    # Init nuScenes datasets
    nusc_train = NuScenes(version=args.nuscenes_train_dir, dataroot=args.nuscenes_dir, verbose=True)
    train_dataset = RadarClassDataset(nusc_train, categories=args.categories, sensors=args.sensors, transforms=dataTransformations, sequence_length=1)
    nusc_test = NuScenes(version=args.nuscenes_test_dir, dataroot=args.nuscenes_dir, verbose=True)
    test_dataset = RadarClassDataset(nusc_test, categories=args.categories, sensors=args.sensors, transforms=dataTransformations, sequence_length=1)
    # Init training data loader
    trainDataLoader = DataLoader(train_dataset, batch_size=args.batchsize, shuffle=True, num_workers=args.num_workers)

    ''' --- INIT NETWORK MODEL --- '''
    # Load selected network model and put it to right device
    if args.model_name == 'pointnet':
        classifier = PointNetCls(dim=args.pointCoordDim, num_class=len(args.categories), feature_transform=args.feature_transform)  
    elif args.model_name == 'pointnet2':
        classifier = PointNet2ClsMsg(dim=args.pointCoordDim, num_class=len(args.categories) )
    else:
        raise Exception('Argument "model_name" does not match existent networks')
    classifier = classifier.to(device)

    ''' --- INIT LOSS FUNCTION --- '''
    loss_fun = FocalLoss(gamma=args.focalLoss_gamma, num_classes=len(args.categories), alpha=args.weight_cat).to(device)

    ''' --- LOAD NETWORK IF EXISTS --- '''
    if os.path.exists(saveloadpath):
        print('Using pretrained model found...')
        checkpoint    = torch.load(saveloadpath)
        start_epoch   = checkpoint['epoch'] + 1  # so the printed start_epoch counts from 1, 2, ... rather than 0, 1, ...
        iteration     = checkpoint['iteration']
        best_test_acc = checkpoint['test_accuracy']
        classifier.load_state_dict(checkpoint['model_state_dict'])
    else:
        print('No existing model, starting training from scratch...')
        start_epoch   = 1  # so the printed start_epoch counts from 1, 2, ... rather than 0, 1, ...
        iteration     = 1  # so the printed iteration counts from 1, 2, ... rather than 0, 1, ...
        best_test_acc = 0


    ''' --- CREATE OPTIMIZER ---'''
    if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(
            classifier.parameters(), 
            lr=args.lr, 
            momentum=0.9)
    elif args.optimizer == 'ADAM':
        optimizer = torch.optim.Adam(
            classifier.parameters(),
            lr=args.lr,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=args.decay_rate)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_epoch_half, gamma=0.5) # half(0.5) the learning rate every 'step_size' epochs
    

    # Log info
    printparams = 'Model parameters:' + json.dumps(vars(args), indent=4, sort_keys=True)
    print(printparams)
    tb_writer.add_text('hyper-parameters',printparams,iteration) # tb_writer.add_hparam(args)
    tb_writer.add_text('dataset','dataset sample size: training: {}, test: {}'.format(len(train_dataset),len(test_dataset)),iteration)


    ''' --- START TRAINING ---'''
    for epoch in range(start_epoch, args.epoch+1):
    # epoch = start_epoch
        print('Epoch %d/%s:' % (epoch, args.epoch))
        # Add the "learning rate" into tensorboard scalar which will be shown in tensorboard
        tb_writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], iteration)

        # Beware epochs_left = args.epoch - epoch
        for batch_id, data in tqdm(enumerate(trainDataLoader, 0), total=len(trainDataLoader), smoothing=0.9):
            points, target = data   # (B:batch x S:seq x C:features x N:points) , (B x S:seq) 
            # Squeeze to drop the sequence dimension (equal to 1), convert everything to float (to avoid dtype errors inside the model) and move to device
            points, target = points.squeeze(dim=1).float().to(device), target.float().to(device) # (B:batch x C:features x N:points) , (B)
            # points, target = points.float().to(device), target.float().to(device)
            # Reset gradients
            optimizer.zero_grad()
            # Sets the module in training mode
            classifier = classifier.train()           
            # Forward propagation
            pred = classifier(points)
            # MLE estimator = min (- log (softmax(x)) ) = min nll_loss(log_softmax(x))
            # loss = F.nll_loss(pred, target.long())
            loss = loss_fun(pred, target.long())
            if args.model_name == 'pointnet':
                loss += feature_transform_regularizer(classifier.trans) * 0.001
                if args.feature_transform:
                    loss +=  feature_transform_regularizer(classifier.trans_feat) * 0.001
            # Back propagate
            loss.backward()
            # Update weights
            optimizer.step()            
            # Log once every 5 batches: add "train_loss/cross_entropy" as a tensorboard scalar
            if not batch_id % 5: tb_writer.add_scalar('train_loss/cross_entropy', loss.item(), iteration)
            iteration += 1

            # Plot train confusion matrix every X steps
            if not iteration % 20:
                confmatrix_train = metrics_confusion_matrix(target, pred)
                print('\nTrain confusion matrix: \n',confmatrix_train)

            # We just finished one epoch
            # if not batch_id+1 % int(train_dataset.len__()/args.batchsize):

        ''' --- TEST NETWORK --- '''
        if not epoch % int(args.test_every_X_epochs):  # Evaluate every test_every_X_epochs epochs.
            # Perform predictions on the training data.
            train_targ, train_pred = test(classifier, train_dataset, device, num_workers=0, batch_size=512)
            # Perform predictions on the testing data.
            test_targ,  test_pred  = test(classifier, test_dataset, device,  num_workers=0, batch_size=512)
            
            # Calculate the accuracy rate for training data.
            train_acc = metrics_accuracy(train_targ, train_pred)
            # Calculate the accuracy rate for testing data.
            test_acc  = metrics_accuracy(test_targ,  test_pred)
            print('\r Training loss: {}'.format(loss.item()))
            print('Train Accuracy: {}\nTest Accuracy: {}'.format(train_acc, test_acc) )
            # Add the "train_acc" "test_acc" into tensorboard scalars which will be shown in tensorboard.                       
            tb_writer.add_scalars('metrics/accuracy', {'train':train_acc, 'test':test_acc}, iteration)
            
            # Calculate confusion matrix.
            confmatrix_test = metrics_confusion_matrix(test_targ, test_pred)
            print('Test confusion matrix: \n',confmatrix_test)
            # Log confusion matrix.
            fig,   ax   = plot_confusion_matrix(confmatrix_test, args.categories, normalize=False, title='Test Confusion Matrix')
            # Log normalized confusion matrix.
            fig_n, ax_n = plot_confusion_matrix(confmatrix_test, args.categories, normalize=True,  title='Test Confusion Matrix - Normalized')
            # Add the "confusion matrix" "normalized confusion matrix" into tensorboard figure which will be shown in tensorboard.
            tb_writer.add_figure('test_confusion_matrix/abs',  fig,   global_step=iteration, close=True)
            tb_writer.add_figure('test_confusion_matrix/norm', fig_n, global_step=iteration, close=True)

            # Log precision recall curves.
            for idx, clsname in enumerate(args.categories):
                # Convert log_softmax to softmax(which is actual probability) and select the desired class.
                test_pred_binary = torch.exp(test_pred[:,idx])
                test_targ_binary = test_targ.eq(idx)
                # Add the "precision recall curves" which will be shown in tensorboard.
                tb_writer.add_pr_curve(tag='pr_curves/'+clsname, labels=test_targ_binary, predictions=test_pred_binary, global_step=iteration)

            # Store the best test accuracy
            if (test_acc >= best_test_acc):
                best_test_acc = max([best_test_acc, test_acc])
                # NOTE: we possibly want to save only when when the best test accuracy is surpassed. For now lets save every X epoch
        
        ''' --- SAVE NETWORK --- '''
        if not epoch % int(args.save_every_X_epochs):
            print('Best Accuracy: %f'%best_test_acc)
            state = {
                'epoch': epoch,
                'iteration': iteration,
                'test_accuracy': best_test_acc,
                'model_state_dict': classifier.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            }
            torch.save(state, saveloadpath)
            print('Model saved!!!')
                
            # epoch += 1
            # print('Epoch %d/%s:' % (epoch, args.epoch))
        scheduler.step()
        
    
    tb_writer.close()
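
FocalLoss itself is not defined in this excerpt. Below is a minimal sketch matching the call signature used above (gamma, num_classes, alpha) and the page's convention that the network outputs log-probabilities; the internals are an assumption, not the author's implementation:

import torch
import torch.nn as nn

class FocalLossSketch(nn.Module):
    def __init__(self, gamma=2.0, num_classes=2, alpha=None):
        super().__init__()
        self.gamma = gamma
        alpha = torch.ones(num_classes) if alpha is None else torch.tensor(alpha)
        self.register_buffer('alpha', alpha.float())  # per-class weights

    def forward(self, log_probs, target):
        # FL(p_t) = -alpha_t * (1 - p_t)^gamma * log(p_t)
        logpt = log_probs.gather(1, target.unsqueeze(1)).squeeze(1)
        pt = logpt.exp()
        at = self.alpha[target]
        return (-at * (1 - pt) ** self.gamma * logpt).mean()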
Example #13
def main(args):
    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    # datapath = './data/ModelNet/'
    datapath = './data/modelnet40_ply_hdf5_2048/'
    if args.rotation is not None:
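        # Assumption: args.rotation is a 5-character string such as '00,30'
        # (two 2-digit angles around a single separator character).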
        ROTATION = (int(args.rotation[0:2]), int(args.rotation[3:5]))
    else:
        ROTATION = None
    '''CREATE DIR'''
    experiment_dir = Path('./experiment/')
    experiment_dir.mkdir(exist_ok=True)
    checkpoints_dir = Path('./experiment/checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = Path('./experiment/logs/')
    log_dir.mkdir(exist_ok=True)
    '''LOG'''
    logger = logging.getLogger("PointNet2")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler(
        './experiment/logs/test_%s_' % args.model_name +
        str(datetime.datetime.now().strftime('%Y-%m-%d %H-%M')) + '.txt')
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info(
        '---------------------------------------------------Test---------------------------------------------------'
    )
    logger.info('PARAMETER ...')
    logger.info(args)
    '''DATA LOADING'''
    logger.info('Load dataset ...')
    train_data, train_label, test_data, test_label = load_data(
        datapath, classification=True)
    logger.info("The number of training data is: %d", train_data.shape[0])
    logger.info("The number of test data is: %d", test_data.shape[0])
    trainDataset = ModelNetDataLoader(train_data,
                                      train_label,
                                      rotation=ROTATION)
    if ROTATION is not None:
        print('The range of training rotation is', ROTATION)
    testDataset = ModelNetDataLoader(test_data, test_label, rotation=ROTATION)

    trainDataLoader = torch.utils.data.DataLoader(trainDataset,
                                                  batch_size=args.batchsize,
                                                  shuffle=True)
    testDataLoader = torch.utils.data.DataLoader(testDataset,
                                                 batch_size=args.batchsize,
                                                 shuffle=False)  # keep evaluation order fixed (no shuffling)
    '''MODEL LOADING'''
    num_class = 40
    ###################### PointNetCls ######################
    if args.model_name == 'pointnet':
        classifier = PointNetCls(num_class, args.feature_transform).cuda()
    else:
        classifier = PointNet2ClsMsg().cuda()
    if args.pretrain is not None:
        print('Use pretrain model...')
        logger.info('Use pretrain model')
        checkpoint = torch.load(args.pretrain)
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
    else:
        print('Please specify a pretrained model (***.pth)')
        return

    if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(classifier.parameters(),
                                    lr=0.01,
                                    momentum=0.9)
    elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(classifier.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=20,
                                                gamma=0.5)
    global_epoch = 0
    global_step = 0
    best_tst_accuracy = 0.0
    blue = lambda x: '\033[94m' + x + '\033[0m'
    '''TESTING'''
    logger.info('Start testing...')

    # The optimizer and scheduler defined above are never used in this evaluation-only script, so this step has no effect on the results.
    scheduler.step()

    acc = test(classifier.eval(), testDataLoader)

    print('\r Test %s: %f' % (blue('Accuracy'), acc))
    logger.info('Test Accuracy: %f', acc)

    logger.info('End of testing...')
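
Note: the test helper called above is not shown in this snippet. A minimal sketch, assuming the loader yields (points, target) batches with points shaped (B, N, 3) and the model returns (log-probabilities, trans_feat):

import torch

def test(model, loader):
    # Mean classification accuracy over a DataLoader.
    correct, total = 0, 0
    with torch.no_grad():
        for points, target in loader:
            points = points.transpose(2, 1).float().cuda()   # (B, 3, N) as the model expects
            target = target.view(-1).long().cuda()
            pred, _ = model(points)                          # (B, num_class) log-probabilities
            choice = pred.max(dim=1)[1]                      # predicted class per sample
            correct += (choice == target).sum().item()
            total += target.numel()
    return correct / total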
Exemple #14
0
def main(args):
    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    # datapath = './data/ModelNet/'
    datapath = './data/objecnn20_data_hdf5_2048/'
    if args.rotation is not None:
        ROTATION = (int(args.rotation[0:2]), int(args.rotation[3:5]))
    else:
        ROTATION = None
    '''CREATE DIR'''
    experiment_dir = Path('./experiment/')
    experiment_dir.mkdir(exist_ok=True)
    file_dir = Path(
        str(experiment_dir) + '/Test_%sObjectNNClf-' % args.model_name +
        str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')))
    file_dir.mkdir(exist_ok=True)
    checkpoints_dir = file_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = file_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)
    '''LOG'''
    logger = logging.getLogger(args.model_name)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler(
        str(log_dir) + '/test_%s_ObjectNNClf.txt' % args.model_name)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info(
        '---------------------------------------------------Test---------------------------------------------------'
    )
    logger.info('PARAMETER ...')
    logger.info(args)
    '''DATA LOADING'''
    logger.info('Load dataset ...')
    database_data, database_label, query_data, query_label = load_data(
        datapath, classification=True)

    print(">>>>>>>>>database_data:", database_data.shape)
    print(">>>>>>>>>query_data:", query_data.shape)

    logger.info("The number of database_data data is: %d",
                database_data.shape[0])
    logger.info("The number of query_data data is: %d", query_data.shape[0])

    ###################### Load the database and query sets ######################
    databaseDataset = TestQueryObjectNNDataLoader(database_data,
                                                  database_label,
                                                  rotation=ROTATION)
    if ROTATION is not None:
        print('The range of rotation is', ROTATION)
    queryDataset = TestQueryObjectNNDataLoader(query_data,
                                               query_label,
                                               rotation=ROTATION)

    databaseDataLoader = torch.utils.data.DataLoader(databaseDataset,
                                                     batch_size=args.batchsize,
                                                     shuffle=False)
    queryDataLoader = torch.utils.data.DataLoader(queryDataset,
                                                  batch_size=args.batchsize,
                                                  shuffle=False)  # keep query order fixed (no shuffling)
    '''MODEL LOADING'''
    num_class = 20
    ###################### PointNetCls ######################
    if args.model_name == 'pointnet':
        classifier = PointNetCls(num_class, args.feature_transform).cuda()
    else:
        classifier = PointNet2ClsMsg().cuda()
    # classifier = PointNetCls(num_class,args.feature_transform) if args.model_name == 'pointnet' else PointNet2ClsMsg()

    if args.pretrain is not None:
        print('Use pretrain model...')
        logger.info('Use pretrain model')
        checkpoint = torch.load(args.pretrain)
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
    else:
        print('Please specify a pretrained model (***.pth)')
        return

    if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(classifier.parameters(),
                                    lr=0.01,
                                    momentum=0.9)
    elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(classifier.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=20,
                                                gamma=0.5)
    global_epoch = 0
    global_step = 0
    best_tst_accuracy = 0.0
    blue = lambda x: '\033[94m' + x + '\033[0m'
    '''QUERYING'''
    logger.info('Start query...')

    # The optimizer and scheduler defined above are never used in this query-only script, so this step has no effect on the results.
    scheduler.step()

    ###################### Query ######################
    classifier.eval()

    # eval() returns the module itself, so there is no need to call it again on each use.
    _acc, database_feature_martix = getGlobalFeature('database', classifier,
                                                     databaseDataLoader)

    _acc, query_feature_martix = getGlobalFeature('query', classifier,
                                                  queryDataLoader)
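
Note: getGlobalFeature is a project helper not shown here; from its call signature it appears to return an accuracy and a per-sample global-feature matrix. A minimal sketch of how the two matrices could then be used for retrieval — ranking database entries by cosine similarity to each query — using a hypothetical retrieve helper:

import torch
import torch.nn.functional as F

def retrieve(query_feats, database_feats, topk=5):
    # Assumes both inputs are 2-D float tensors: (num_queries, D) and (num_database, D).
    q = F.normalize(query_feats, dim=1)       # unit-length query features
    d = F.normalize(database_feats, dim=1)    # unit-length database features
    sim = q @ d.t()                           # (num_queries, num_database) cosine similarities
    return sim.topk(topk, dim=1).indices      # top-k database indices per query

# e.g.: neighbours = retrieve(query_feature_martix, database_feature_martix)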