optimizer = optim.SGD(classifier.parameters(), lr=opt.lr, momentum=opt.momentum)
classifier.cuda()

num_batch = len(dataset) // opt.batchSize  # integer batch count for logging

for epoch in range(opt.nepoch):
    for i, data in enumerate(dataloader, 0):
        points, target = data
        target = target[:, 0]  # one class label per point cloud
        points = points.transpose(2, 1)  # (B, N, 3) -> (B, 3, N), channels-first as PointNet expects
        points, target = points.cuda(), target.cuda()
        optimizer.zero_grad()
        classifier.train()
        pred = classifier(points)
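        # F.nll_loss expects log-probabilities; PointNetCls is assumed to end
        # in a log_softmax over the class scores.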
        loss = F.nll_loss(pred, target)
        loss.backward()
        optimizer.step()
        pred_choice = pred.data.max(1)[1]
        correct = pred_choice.eq(target.data).cpu().sum()
        print('[%d: %d/%d] train loss: %f accuracy: %f' % (
            epoch, i, num_batch, loss.item(), correct.item() / float(opt.batchSize)))

        if i % 10 == 0:
            data = next(iter(testdataloader))  # draw a single test batch
            points, target = data
            target = target[:, 0]
            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            classifier.eval()
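            # NOTE: the excerpt is truncated here; a minimal sketch of the
            # evaluation step that typically follows (assuming pred/target
            # keep the same shapes as in the training pass):
            with torch.no_grad():
                pred = classifier(points)
                loss = F.nll_loss(pred, target)
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.data).cpu().sum()
            print('[%d: %d/%d] test loss: %f accuracy: %f' % (
                epoch, i, num_batch, loss.item(), correct.item() / float(opt.batchSize)))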
Example #2
def train(config):
    print('Random seed: %d' % int(config.seed))
    torch.manual_seed(config.seed)
    
    torch.backends.cudnn.benchmark = True

    dset = config.dataset
    if dset == 'modelnet10' or dset == 'modelnet40':
        dataset = ClsDataset(root=config.root, npoints=config.npoints, train=True)
        test_dataset = ClsDataset(root=config.root, npoints=config.npoints, train=False)
    else:
        raise NotImplementedError('Dataset not supported.')
    
    print('Selected %s' % dset)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=config.batchsize, shuffle=True, 
                num_workers=config.workers)
    test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=config.batchsize, shuffle=True, 
        num_workers=config.workers)

    num_classes = dataset.num_classes
    print('number of classes: %d' % num_classes)
    print('train set size: %d | test set size: %d' % (len(dataset), len(test_dataset)))
    os.makedirs(config.outf, exist_ok=True)

    blue = lambda x: '\033[94m' + x + '\033[0m'
    yellow = lambda x: '\033[93m' + x + '\033[0m'
    red = lambda x: '\033[91m' + x + '\033[0m'

    classifier = PointNetCls(k=num_classes)

    if config.model != '':
        classifier.load_state_dict(torch.load(config.model))

    optimizer = optim.SGD(classifier.parameters(), lr=config.lr, momentum=config.momentum)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    classifier.to(device)
    if config.mgpu:
        classifier = torch.nn.DataParallel(classifier, device_ids=config.gpuids)

    num_batch = len(dataset) // config.batchsize

    lera.log_hyperparams({
        'title': dset, 
        'batchsize': config.batchsize, 
        'epochs': config.nepochs, 
        'npoints': config.npoints, 
        'optimizer': 'SGD', 
        'lr': config.lr, 
        })

    for epoch in range(config.nepochs):
        train_acc_epoch, test_acc_epoch = [], []
        for i, data in enumerate(dataloader):
            points, labels = data
            points = points.transpose(2, 1)
            labels = labels[:, 0]
            points, labels = points.to(device), labels.to(device)
            optimizer.zero_grad()
            classifier.train()
            pred, _ = classifier(points)
            pred = pred.view(-1, num_classes)
            loss = F.nll_loss(pred, labels)
            loss.backward()
            optimizer.step()
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(labels.data).cpu().sum()
            train_acc = correct.item() / float(config.batchsize)
            print('epoch %d: %d/%d | train loss: %f | train acc: %f' % (epoch+1, i+1, num_batch+1, loss.item(), train_acc))
            train_acc_epoch.append(train_acc)
            lera.log({
                'train loss': loss.item(), 
                'train acc': train_acc
                })

            if (i+1) % 10 == 0:
                data = next(iter(test_dataloader))  # draw a single test batch
                points, labels = data
                points = points.transpose(2, 1)
                labels = labels[:, 0]
                points, labels = points.to(device), labels.to(device)
                classifier.eval()
                with torch.no_grad():
                    pred, _ = classifier(points)
                pred = pred.view(-1, num_classes)
                loss = F.nll_loss(pred, labels)
                pred_choice = pred.data.max(1)[1]
                correct = pred_choice.eq(labels.data).cpu().sum()
                test_acc = correct.item() / float(config.batchsize)
                print(blue('epoch %d: %d/%d | test loss: %f | test acc: %f') % (epoch+1, i+1, num_batch+1, loss.item(), test_acc))
                test_acc_epoch.append(test_acc)
                lera.log({
                    'test loss': loss.item(), 
                    'test acc': test_acc
                    })
        print(yellow('epoch %d | mean train acc: %f') % (epoch+1, np.mean(train_acc_epoch)))
        print(red('epoch %d | mean test acc: %f') % (epoch+1, np.mean(test_acc_epoch)))
        lera.log({
            'train acc epoch': np.mean(train_acc_epoch), 
            'test acc epoch': np.mean(test_acc_epoch)})
        torch.save(classifier.state_dict(), '%s/%s_model_%d.pth' % (config.outf, config.dataset, epoch))
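        # NOTE (assumption): when config.mgpu is set, classifier is a
        # DataParallel wrapper, so the saved keys carry a 'module.' prefix;
        # saving classifier.module.state_dict() in that case keeps the
        # checkpoint loadable on a single GPU.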
Example #3
File: main.py Project: yuchongY/ddn
def main():
    # Download dataset for point cloud classification
    modelnet_dir = 'modelnet40_ply_hdf5_2048'
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(BASE_DIR)
    DATA_DIR = os.path.join(BASE_DIR, 'data')
    if not os.path.exists(DATA_DIR):
        os.mkdir(DATA_DIR)
    if not os.path.exists(os.path.join(DATA_DIR, modelnet_dir)):
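        # Download and unpack via shell tools; a Unix-like environment with
        # wget/unzip available is assumed.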
        www = 'https://shapenet.cs.stanford.edu/media/' + modelnet_dir + '.zip'
        zipfile = os.path.basename(www)
        os.system('wget %s; unzip %s' % (www, zipfile))
        os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
        os.system('rm %s' % (zipfile))

    datapath = './data/' + modelnet_dir + '/'

    args = parse_args()

    # Map the robust-type flag to its long name; default to 'max'.
    type_map = {'Q': 'quadratic', 'PH': 'pseudohuber', 'H': 'huber',
                'W': 'welsch', 'TQ': 'truncatedquadratic'}
    type_string = type_map.get(args.robust_type, 'max')
    outlier_string = 'outliers_' + str(args.outlier_fraction)

    if args.rotation is not None:
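        # args.rotation is assumed to be a 5-character range string,
        # e.g. '10-20' (two 2-digit bounds split by one separator character),
        # consistent with the slicing below.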
        ROTATION = (int(args.rotation[0:2]), int(args.rotation[3:5]))
    else:
        ROTATION = None
    '''CREATE DIRS'''
    experiment_dir = Path('./tests')
    outlier_dir = experiment_dir / type_string / outlier_string
    outlier_dir.mkdir(parents=True, exist_ok=True)
    checkpoints_dir = outlier_dir
    '''LOG'''
    logger = logging.getLogger("PointNet")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler(
        str(checkpoints_dir) + '/' + 'train_%s_' % args.model_name +
        str(datetime.datetime.now().strftime('%Y-%m-%d-%H-%M')) + '.txt')
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info(
        '---------------------------------------------------TRAINING---------------------------------------------------'
    )
    logger.info('PARAMETER ...')
    logger.info(args)
    '''DATA LOADING'''
    logger.info('Load dataset ...')
    train_data, train_label, test_data, test_label = load_data(
        datapath, classification=True)
    logger.info("The number of training data is: %d", train_data.shape[0])
    logger.info("The number of test data is: %d", test_data.shape[0])

    ## Replace a fraction of the points with outliers drawn uniformly from the unit sphere
    if args.outlier_fraction > 0.0:
        # Training set
        num_outliers = int(args.outlier_fraction * train_data.shape[1])
        print('Number of training set outliers per point cloud: {}'.format(
            num_outliers))
        for i in range(
                train_data.shape[0]):  # For each point cloud in the batch
            random_indices = np.random.choice(train_data.shape[1],
                                              num_outliers,
                                              replace=False)
            for j in range(num_outliers):  # For each point in outlier subset
                random_point = 2.0 * np.random.rand(3) - 1.0
                # Ensure outliers are within unit sphere:
                while np.linalg.norm(random_point) > 1.0:
                    random_point = 2.0 * np.random.rand(3) - 1.0
                train_data[i, random_indices[j], :] = random_point  # outlier, uniform in the unit ball
        # Testing set
        num_outliers = int(args.outlier_fraction * test_data.shape[1])
        print('Number of test set outliers per point cloud: {}'.format(
            num_outliers))
        for i in range(
                test_data.shape[0]):  # For each point cloud in the batch
            random_indices = np.random.choice(test_data.shape[1],
                                              num_outliers,
                                              replace=False)
            for j in range(num_outliers):  # For each point in outlier subset
                random_point = 2.0 * np.random.rand(3) - 1.0
                # Ensure outliers are within unit sphere:
                while np.linalg.norm(random_point) > 1.0:
                    random_point = 2.0 * np.random.rand(3) - 1.0
                test_data[i, random_indices[j], :] = random_point  # outlier, uniform in the unit ball
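
    # A vectorized alternative to the per-point rejection loops above (a
    # sketch, not from the original source): draw candidates in [-1,1]^3 and
    # redraw only those falling outside the unit ball.
    def sample_unit_ball(n):
        pts = 2.0 * np.random.rand(n, 3) - 1.0
        bad = np.linalg.norm(pts, axis=1) > 1.0
        while bad.any():
            pts[bad] = 2.0 * np.random.rand(bad.sum(), 3) - 1.0
            bad = np.linalg.norm(pts, axis=1) > 1.0
        return pts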

    trainDataset = ModelNetDataLoader(train_data,
                                      train_label,
                                      rotation=ROTATION)
    if ROTATION is not None:
        print('The range of training rotation is', ROTATION)
    testDataset = ModelNetDataLoader(test_data, test_label, rotation=ROTATION)
    trainDataLoader = torch.utils.data.DataLoader(trainDataset,
                                                  batch_size=args.batchsize,
                                                  shuffle=True)
    testDataLoader = torch.utils.data.DataLoader(testDataset,
                                                 batch_size=args.batchsize,
                                                 shuffle=False)
    '''MODEL LOADING'''
    num_class = 40
    classifier = PointNetCls(num_class, args.input_transform,
                             args.feature_transform, args.robust_type,
                             args.alpha).cuda()
    if args.pretrain is not None:
        print('Use pretrain model...')
        logger.info('Use pretrain model')
        checkpoint = torch.load(args.pretrain)
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
    else:
        print('No existing model, starting training from scratch...')
        start_epoch = 0

    if args.evaluate:
        acc, map, _ = test(classifier, testDataLoader, do_map=True)
        logger.info('Test Accuracy: %f', acc)
        logger.info('mAP: %f', map)
        logger.info('%f,%f' % (acc, map))
        print('Test Accuracy:\n%f' % acc)
        print('mAP:\n%f' % map)
        # print('%f,%f'%(acc, map))
        return

    if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(classifier.parameters(),
                                    lr=0.01,
                                    momentum=0.9)
    elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(classifier.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=20,
                                                gamma=0.5)
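    # StepLR halves the learning rate (gamma=0.5) every 20 epochs.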
    global_epoch = 0
    global_step = 0
    best_tst_accuracy = 0.0
    blue = lambda x: '\033[94m' + x + '\033[0m'
    '''TRAINING'''
    logger.info('Start training...')
    for epoch in range(start_epoch, args.epoch):
        print('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch))
        logger.info('Epoch %d (%d/%s):', global_epoch + 1, epoch + 1,
                    args.epoch)

        for batch_id, data in tqdm(enumerate(trainDataLoader, 0),
                                   total=len(trainDataLoader),
                                   smoothing=0.9):
            points, target = data
            target = target[:, 0]
            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()
            classifier.train()
            pred, trans_feat = classifier(points)
            loss = F.nll_loss(pred, target.long())
            if args.feature_transform and args.model_name == 'pointnet':
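                # PointNet's regularizer pushes the learned feature transform
                # toward an orthogonal matrix (an ||I - A Aᵀ|| penalty).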
                loss += feature_transform_regularizer(trans_feat) * 0.001
            loss.backward()
            optimizer.step()
            global_step += 1

        # Step the LR schedule once per epoch, after the optimizer updates
        # (PyTorch >= 1.1 expects scheduler.step() to follow optimizer.step()).
        scheduler.step()

        train_acc = test(classifier.eval(),
                         trainDataLoader) if args.train_metric else None
        acc, map, _ = test(classifier, testDataLoader, do_map=True)

        print('\r Loss: %f' % loss.item())
        logger.info('Loss: %f', loss.item())
        if args.train_metric:
            print('Train Accuracy: %f' % train_acc)
            logger.info('Train Accuracy: %f', (train_acc))
        logger.info('Test Accuracy: %f', acc)
        logger.info('Test mAP: %f', map)
        print('\r Test %s: %f' % (blue('Accuracy'), acc))
        print('\r Test %s: %f' % (blue('mAP'), map))
        if args.train_metric:
            logger.info('%f,%f,%f' % (train_acc, acc, map))
            print('\r%f,%f,%f' % (train_acc, acc, map))
        else:
            logger.info('%f,%f' % (acc, map))
            print('\r%f,%f' % (acc, map))

        if acc >= best_tst_accuracy:
            best_tst_accuracy = acc
        # Save every 10
        if (epoch + 1) % 10 == 0:
            logger.info('Save model...')
            save_checkpoint(global_epoch + 1,
                            train_acc if args.train_metric else 0.0,
                            acc, map, classifier, optimizer,
                            str(checkpoints_dir), args.model_name)
            print('Saving model....')
        global_epoch += 1
    print('Best Accuracy: %f' % best_tst_accuracy)

    logger.info('Save final model...')
    save_checkpoint(global_epoch, train_acc if args.train_metric else 0.0, acc,
                    map, classifier, optimizer, str(checkpoints_dir),
                    args.model_name)
    print('Saving final model....')

    logger.info('End of training...')

Example #4

classifier.load_state_dict(torch.load(opt.model))  # resume from a saved checkpoint
optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
classifier.cuda()

num_batch = len(dataset) // opt.batchSize  # integer batch count for logging

for epoch in range(opt.nepoch):
    for i, data in enumerate(dataloader, 0):
        points, target = data
        target = target[:, 0]  # one class label per point cloud
        points = points.transpose(2, 1)  # channels-first, as PointNet expects
        points, target = points.cuda(), target.cuda()
        optimizer.zero_grad()
        classifier.train()
        pred, _ = classifier(points)
        loss = F.nll_loss(pred, target)
        loss.backward()
        optimizer.step()
        pred_choice = pred.data.max(1)[1]
        correct = pred_choice.eq(target.data).cpu().sum()
        print('[%d: %d/%d] train loss: %f accuracy: %f' % (
            epoch, i, num_batch, loss.item(), correct.item() / float(opt.batchSize)))

        if i % 10 == 0:
            data = next(iter(testdataloader))  # draw a single test batch
            points, target = data
            target = target[:, 0]
            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            classifier.eval()
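            # NOTE: the excerpt is truncated here; a minimal sketch of the
            # evaluation step that typically follows (assuming the same
            # tuple output and shapes as in the training pass):
            with torch.no_grad():
                pred, _ = classifier(points)
                loss = F.nll_loss(pred, target)
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.data).cpu().sum()
            print('[%d: %d/%d] test loss: %f accuracy: %f' % (
                epoch, i, num_batch, loss.item(), correct.item() / float(opt.batchSize)))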
Example #5
plots_dir = os.path.join(model_dir, 'plots')
os.makedirs(model_dir, exist_ok=True)
os.makedirs(plots_dir, exist_ok=True)

for epoch in range(opt.nepoch):
    epoch_train_loss = 0
    epoch_test_loss = 0
    test_batch_num = 0

    # NOTE: PyTorch >= 1.1 expects scheduler.step() after the epoch's
    # optimizer updates; stepping here first shifts the decay schedule by
    # one epoch and triggers a warning.
    lr_scheduler.step()

    total_train_correct = 0
    total_train_loss = 0
    classifier.train()
    for i, data in enumerate(dataloader, 0):
        points, target = data
        points, target = points.to(device, non_blocking=True), target[:, 0].to(
            device, non_blocking=True)
        points = points.transpose(2, 1)
        optimizer.zero_grad()
        pred, _ = classifier(points)
        loss = F.nll_loss(pred, target.long())
        epoch_train_loss += loss.item()
        loss.backward()
        optimizer.step()
        pred_choice = pred.data.max(1)[1]
        correct = pred_choice.eq(target.long().data).cpu().sum()
        total_train_correct += correct.item()
        total_train_loss += loss.item()
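
    # The excerpt ends here; a minimal epoch summary (a sketch, assuming
    # accuracy is averaged over the full training set):
    print('epoch %d | train loss: %f | train acc: %f' % (
        epoch, total_train_loss / len(dataloader),
        total_train_correct / float(len(dataloader.dataset))))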
Example #6
# Define the model, optimizer, LR scheduler, and loss
mynet = PointNetCls()
mynet.cuda()  # move the model to the GPU once, outside the training loop
optimizer = torch.optim.Adam(mynet.parameters(), lr=LR)

scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.8)
loss_func = torch.nn.MSELoss()

# train
myepoch = tqdm(range(1, 500))
for epoch in myepoch:
    loss_list = []
    valid_loss_list = []
    for step, (features, targets) in enumerate(train_loader):
        mynet.train()
        features = features.transpose(2, 1)
        features, targets = features.cuda(), targets.cuda()
        predicted_targets, feature_transform_matrix = mynet(features)

        loss = loss_func(predicted_targets, targets)  # MSELoss takes (input, target)
        loss = (loss +
                mynet.feature_transform_regularizer(feature_transform_matrix) *
                0.001)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        loss_list.append(loss.item())

    ave_loss = np.array(loss_list).mean()
    writer.add_scalar("loss", ave_loss, epoch)
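
    # The excerpt initializes valid_loss_list but is truncated before using
    # it; a minimal validation sketch (assuming a valid_loader that mirrors
    # train_loader):
    mynet.eval()
    with torch.no_grad():
        for features, targets in valid_loader:
            features = features.transpose(2, 1).cuda()
            targets = targets.cuda()
            predicted_targets, _ = mynet(features)
            valid_loss_list.append(loss_func(predicted_targets, targets).item())
    writer.add_scalar("valid_loss", np.mean(valid_loss_list), epoch)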