Example #1
def train(net, trainloader, optimizer, criterion, device):
    net.train()
    train_loss = 0
    correct = 0
    total = 0
    time_cost = datetime.datetime.now()
    for batch_idx, (points, targets) in enumerate(trainloader):
        points = points.data.numpy()
        points = provider.random_point_dropout(points)
        points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :, 0:3])
        points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])
        points = torch.Tensor(points)
        points = points.transpose(2, 1)
        points, targets = points.to(device), targets.to(device).long()
        optimizer.zero_grad()
        out = net(points)  # the model returns a dict with (at least) a "logits" entry
        loss = criterion(out, targets)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        _, predicted = out["logits"].max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()

        progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                     % (train_loss / (batch_idx + 1), 100. * correct / total, correct, total))

    time_cost = int((datetime.datetime.now() - time_cost).total_seconds())
    return {
        "loss": float("%.3f" % (train_loss / (batch_idx + 1))),
        "acc": float("%.3f" % (100. * correct / total)),
        "time": time_cost
    }
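The snippet above assumes the provider augmentation module that ships with the PointNet/PointNet++ codebases. As a reference, a minimal sketch of the three helpers it calls, following the widely published provider.py implementations, is:

import numpy as np

def random_point_dropout(batch_pc, max_dropout_ratio=0.875):
    # Per cloud, overwrite a random fraction of points with the first point.
    for b in range(batch_pc.shape[0]):
        dropout_ratio = np.random.random() * max_dropout_ratio
        drop_idx = np.where(np.random.random(batch_pc.shape[1]) <= dropout_ratio)[0]
        if len(drop_idx) > 0:
            batch_pc[b, drop_idx, :] = batch_pc[b, 0, :]
    return batch_pc

def random_scale_point_cloud(batch_data, scale_low=0.8, scale_high=1.25):
    # Scale each cloud by one uniform random factor.
    scales = np.random.uniform(scale_low, scale_high, batch_data.shape[0])
    for b in range(batch_data.shape[0]):
        batch_data[b, :, :] *= scales[b]
    return batch_data

def shift_point_cloud(batch_data, shift_range=0.1):
    # Translate each cloud by one small random offset.
    shifts = np.random.uniform(-shift_range, shift_range, (batch_data.shape[0], 3))
    for b in range(batch_data.shape[0]):
        batch_data[b, :, :] += shifts[b, :]
    return batch_data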
Example #2
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))  # the train files are visited in a different order every epoch
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):  # for each train file
        log_string('----train file' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:, 0:NUM_POINT, :]  # keep the first NUM_POINT points of every cloud in this file
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))  # shuffle the order of the point clouds
        current_label = np.squeeze(current_label)  # drop singleton dimensions

        file_size = current_data.shape[0]  # total number of point clouds
        num_batches = file_size // BATCH_SIZE  # number of batches

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            jittered_data = provider.random_scale_point_cloud(jittered_data)
            jittered_data = provider.rotate_perturbation_point_cloud(
                jittered_data)
            jittered_data = provider.shift_point_cloud(jittered_data)

            feed_dict = {
                ops['pointclouds_pl']: jittered_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }  # every feed_dict key must be a placeholder
            summary, step, _, loss_val, pred_val = sess.run(
                [
                    ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                    ops['pred']
                ],
                feed_dict=feed_dict)
            print("--train file:{}, batch_idx:{},step:{}".format(
                str(fn), str(batch_idx), str(step)))
            train_writer.add_summary(summary, step)  # training curves are only saved during training
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
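Example #2 additionally chains rotation and jitter before scaling and shifting. A sketch of those two provider helpers, again following the commonly published provider.py, is:

import numpy as np

def rotate_point_cloud(batch_data):
    # Rotate each cloud by a random angle around the up (Y) axis.
    rotated = np.zeros(batch_data.shape, dtype=np.float32)
    for k in range(batch_data.shape[0]):
        angle = np.random.uniform() * 2 * np.pi
        c, s = np.cos(angle), np.sin(angle)
        rotation_matrix = np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
        rotated[k, ...] = np.dot(batch_data[k, ...].reshape((-1, 3)), rotation_matrix)
    return rotated

def jitter_point_cloud(batch_data, sigma=0.01, clip=0.05):
    # Add clipped Gaussian noise independently to every point.
    B, N, C = batch_data.shape
    jittered = np.clip(sigma * np.random.randn(B, N, C), -clip, clip)
    return jittered + batch_data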
Example #3
 def _augment_batch_data(self, batch_data):
     jittered_data = provider.random_scale_point_cloud(batch_data[:, :, 0:3])
     jittered_data = provider.shift_point_cloud(jittered_data)
     jittered_data = provider.jitter_point_cloud(jittered_data)
     batch_data[:, :, 0:3] = jittered_data
     return provider.shuffle_points(batch_data)
Example #4
 def _augment_batch_data(self, batch_data):
     rotated_data = provider.rotate_point_cloud(batch_data)
     rotated_data = provider.rotate_perturbation_point_cloud(rotated_data)
     jittered_data = provider.random_scale_point_cloud(rotated_data[:,:,0:3])
     jittered_data = provider.shift_point_cloud(jittered_data)
     jittered_data = provider.jitter_point_cloud(jittered_data)
     rotated_data[:,:,0:3] = jittered_data
     return provider.shuffle_points(rotated_data)
Example #5
def augment_batch_data(batch_data):
    rotated_data = provider.rotate_point_cloud_with_normal(batch_data)
    jittered_data = provider.random_scale_point_cloud(rotated_data[:, :, 0:3])
    jittered_data = provider.rotate_perturbation_point_cloud(jittered_data)
    jittered_data = provider.shift_point_cloud(jittered_data)
    jittered_data = provider.jitter_point_cloud(jittered_data)
    rotated_data[:, :, 0:3] = jittered_data
    return rotated_data
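rotate_point_cloud_with_normal has to rotate the normals with the same matrix as the coordinates, otherwise the two channel groups fall out of sync. A sketch, following the published provider.py:

import numpy as np

def rotate_point_cloud_with_normal(batch_xyz_normal):
    # Rotate xyz (channels 0:3) and normals (channels 3:6) by one random
    # Y-axis rotation per cloud.
    for k in range(batch_xyz_normal.shape[0]):
        angle = np.random.uniform() * 2 * np.pi
        c, s = np.cos(angle), np.sin(angle)
        rotation_matrix = np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
        batch_xyz_normal[k, :, 0:3] = np.dot(
            batch_xyz_normal[k, :, 0:3].reshape((-1, 3)), rotation_matrix)
        batch_xyz_normal[k, :, 3:6] = np.dot(
            batch_xyz_normal[k, :, 3:6].reshape((-1, 3)), rotation_matrix)
    return batch_xyz_normal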
Example #6
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    is_training = False

    # Make sure batch data is of same size
    cur_batch_data = np.zeros((BATCH_SIZE,NUM_POINT,3))
    cur_batch_normals = np.zeros((BATCH_SIZE,NUM_POINT,3))
    cur_batch_label = np.zeros((BATCH_SIZE), dtype=np.int32)

    total_correct = 0
    total_seen = 0
    loss_sum = 0
    batch_idx = 0
    shape_ious = []
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]

    while TEST_DATASET.has_next_batch():
        batch_data, batch_label = TEST_DATASET.next_batch(augment=False)
        bsize = batch_data.shape[0]

        print('Batch: %03d, batch size: %d'%(batch_idx, bsize))

        batch_pred_sum = np.zeros((BATCH_SIZE, NUM_CLASSES)) # score for classes
        cur_batch_label[0:bsize] = batch_label

        for vote_idx in range(num_votes):
            original_data = np.copy(batch_data)
            jittered_data = provider.random_scale_point_cloud(original_data[:,:,:3])
            original_data[:,:,:3] = jittered_data
            shuffled_data = provider.shuffle_points(original_data)

            cur_batch_data[0:bsize,...] = shuffled_data[:,:,:3]
            cur_batch_normals[0:bsize,...] = shuffled_data[:,:,3:]

            feed_dict = {ops['pointclouds_pl']: cur_batch_data,
                         ops['labels_pl']: cur_batch_label,
                         ops['normals_pl']: cur_batch_normals,
                         ops['is_training_pl']: is_training}
            loss_val, pred_val = sess.run([ops['loss'], ops['pred']], feed_dict=feed_dict)
            batch_pred_sum += pred_val
        pred_val = np.argmax(batch_pred_sum, 1)
        correct = np.sum(pred_val[0:bsize] == batch_label[0:bsize])
        total_correct += correct
        total_seen += bsize
        loss_sum += loss_val  # note: only the loss from the last vote is accumulated
        batch_idx += 1
        for i in range(bsize):
            l = batch_label[i]
            total_seen_class[l] += 1
            total_correct_class[l] += (pred_val[i] == l)

    log_string('eval mean loss: %f' % (loss_sum / float(batch_idx)))
    log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(total_correct_class) / np.array(total_seen_class, dtype=np.float64))))

    class_accuracies = np.array(total_correct_class) / np.array(total_seen_class, dtype=np.float64)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
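The voting loop above reshuffles point order on each vote so that farthest-point sampling sees a different seed set. provider.shuffle_points is typically just a shared permutation over the point axis; a sketch:

import numpy as np

def shuffle_points(batch_data):
    # One random permutation of the point axis, applied to every cloud;
    # the label is order-invariant, but FPS and grouping are not.
    idx = np.arange(batch_data.shape[1])
    np.random.shuffle(idx)
    return batch_data[:, idx, :]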
Example #7
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            jittered_data = provider.random_scale_point_cloud(jittered_data)
            jittered_data = provider.rotate_perturbation_point_cloud(
                jittered_data)
            jittered_data = provider.shift_point_cloud(jittered_data)

            feed_dict = {
                ops['pointclouds_pl']: jittered_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }
            summary, step, _, loss_val, pred_val = sess.run(
                [
                    ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                    ops['pred']
                ],
                feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
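rotate_perturbation_point_cloud, used in the loop above, differs from rotate_point_cloud: instead of one large Y-axis rotation it applies a small random rotation about all three axes. A sketch, following the published provider.py:

import numpy as np

def rotate_perturbation_point_cloud(batch_data, angle_sigma=0.06, angle_clip=0.18):
    # Small clipped-Gaussian rotations about X, Y and Z, composed per cloud.
    rotated = np.zeros(batch_data.shape, dtype=np.float32)
    for k in range(batch_data.shape[0]):
        a = np.clip(angle_sigma * np.random.randn(3), -angle_clip, angle_clip)
        Rx = np.array([[1, 0, 0],
                       [0, np.cos(a[0]), -np.sin(a[0])],
                       [0, np.sin(a[0]), np.cos(a[0])]])
        Ry = np.array([[np.cos(a[1]), 0, np.sin(a[1])],
                       [0, 1, 0],
                       [-np.sin(a[1]), 0, np.cos(a[1])]])
        Rz = np.array([[np.cos(a[2]), -np.sin(a[2]), 0],
                       [np.sin(a[2]), np.cos(a[2]), 0],
                       [0, 0, 1]])
        R = np.dot(Rz, np.dot(Ry, Rx))
        rotated[k, ...] = np.dot(batch_data[k, ...].reshape((-1, 3)), R)
    return rotated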
Example #9
 def _augment_batch_data(self, batch_data):
     if self.normal_channel:
         rotated_data = provider.rotate_point_cloud_with_normal(batch_data)
         rotated_data = provider.rotate_perturbation_point_cloud_with_normal(rotated_data)
     else:
         rotated_data = provider.rotate_point_cloud(batch_data)
         rotated_data = provider.rotate_perturbation_point_cloud(rotated_data)
 
     jittered_data = provider.random_scale_point_cloud(rotated_data[:,:,0:3])
     jittered_data = provider.shift_point_cloud(jittered_data)
     jittered_data = provider.jitter_point_cloud(jittered_data)
     rotated_data[:,:,0:3] = jittered_data
     return provider.shuffle_points(rotated_data)
Example #10
def train_init_class(classifier, criterion, trainDataLoader, num_classes,
                     num_part):
    """ Pre-train the classifier layer using logistic regression """
    optim = torch.optim.SGD(classifier.conv2.parameters(),
                            lr=0.1,
                            momentum=0.5)
    num_epoch = 500

    for epoch in range(num_epoch):
        print('Init Classifier: Epoch (%d/%d):' % (epoch + 1, num_epoch))
        mean_correct = []
        mean_loss = []
        for batch_id, (points, label, target) in enumerate(trainDataLoader):
            cur_batch_size, NUM_POINT, _ = points.size()
            points = points.data.numpy()
        points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :, 0:3])
            points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])
            points = torch.Tensor(points)
            points, label, target = points.float().cuda(), label.long().cuda(), target.long().cuda()
            points = points.transpose(2, 1)
            optim.zero_grad()

            classifier = classifier.eval()  # batch stats aren't updated
            '''applying supervised cross-entropy loss'''
            seg_pred, trans_feat, feat = classifier(
                points, to_categorical(label, num_classes))
            seg_pred = seg_pred.contiguous().view(-1, num_part)
            target = target.view(-1, 1)[:, 0]
            pred_choice = seg_pred.data.max(1)[1]
            correct = pred_choice.eq(target.data).cpu().sum()

            mean_correct.append(correct.item() / (cur_batch_size * NUM_POINT))

            loss = criterion(seg_pred, target, trans_feat)
            loss.backward()
            optim.step()
            mean_loss.append(loss.item())
            print('classifier: batch (%d/%s) Loss: %f Acc:%f' %
                  (batch_id, len(trainDataLoader), loss.item(),
                   mean_correct[-1]))

        log_value('init_cls_loss', np.mean(mean_loss), epoch)
        log_value('init_cls_acc', np.mean(mean_correct), epoch)

        # print('Epoch: %d Accuracy: %f' % (epoch+1, np.mean(mean_correct)))
    classifier = classifier.train()

    return classifier
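to_categorical, used above to condition the part-segmentation network on the object class, is a small one-hot helper in the common PointNet++ PyTorch ports; a sketch:

import torch

def to_categorical(y, num_classes):
    # One-hot encode an integer label tensor, staying on y's device.
    new_y = torch.eye(num_classes)[y.cpu().data.numpy(),]
    if y.is_cuda:
        return new_y.cuda()
    return new_y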
Example #11
 def _augment_batch_data(self, batch_data, augment, rotate=0):
     if augment:
         # augment points
         jittered_data = provider.random_scale_point_cloud(batch_data[:, :, 0:3])
         jittered_data = provider.shift_point_cloud(jittered_data)
         jittered_data = provider.jitter_point_cloud(jittered_data)
         batch_data[:, :, 0:3] = jittered_data
     if rotate == 2:
         # rotate points and normals together
         batch_data = provider.rotate_point_cloud_with_normal(batch_data)
     elif rotate == 3:
         batch_data = provider.rotate_perturbation_point_cloud_with_normal(
             batch_data)
     return provider.shuffle_points(batch_data)
Example #12
    def _augment_batch_data(self, batch_data, batch_normal, batch_seg,
                            batch_direc):
        batch_data_nor = np.concatenate([batch_data, batch_normal], axis=2)
        rotated_data, rotated_dir = batch_data_nor, batch_direc  # no rotation applied; the names are kept from a rotated variant

        jittered_data = provider.random_scale_point_cloud(rotated_data[:, :, 0:3])
        jittered_data = provider.jitter_point_cloud(jittered_data)
        rotated_data[:, :, 0:3] = jittered_data
        idx = np.arange(batch_data.shape[1])
        return (rotated_data[:, idx, 0:3], rotated_data[:, idx, 3:6],
                batch_seg[:, idx], rotated_dir[:, idx, :])
Example #13
def train(net, opt, scheduler, train_loader, dev):

    net.train()

    total_loss = 0
    num_batches = 0
    total_correct = 0
    count = 0
    loss_f = nn.CrossEntropyLoss()
    start_time = time.time()
    with tqdm.tqdm(train_loader, ascii=True) as tq:
        for data, label in tq:
            data = data.data.numpy()
            data = provider.random_point_dropout(data)
            data[:, :, 0:3] = provider.random_scale_point_cloud(data[:, :, 0:3])
            data[:, :, 0:3] = provider.jitter_point_cloud(data[:, :, 0:3])
            data[:, :, 0:3] = provider.shift_point_cloud(data[:, :, 0:3])
            data = torch.tensor(data)
            label = label[:, 0]

            num_examples = label.shape[0]
            data, label = data.to(dev), label.to(dev).squeeze().long()
            opt.zero_grad()
            logits = net(data)
            loss = loss_f(logits, label)
            loss.backward()
            opt.step()

            _, preds = logits.max(1)

            num_batches += 1
            count += num_examples
            loss = loss.item()
            correct = (preds == label).sum().item()
            total_loss += loss
            total_correct += correct

            tq.set_postfix({
                'AvgLoss': '%.5f' % (total_loss / num_batches),
                'AvgAcc': '%.5f' % (total_correct / count)
            })
    print("[Train] AvgLoss: {:.5}, AvgAcc: {:.5}, Time: {:.5}s".format(
        total_loss / num_batches, total_correct / count,
        time.time() - start_time))
    scheduler.step()
Example #14
def augment_batch_data_MODELNET(batch_data, is_include_normal):
    '''
    is_include_normal=False: channels are xyz
    is_include_normal=True: channels are xyz plus normals (nx, ny, nz)
    '''
    if is_include_normal:
        rotated_data = provider.rotate_point_cloud_with_normal(batch_data)
        rotated_data = provider.rotate_perturbation_point_cloud_with_normal(
            rotated_data)
    else:
        rotated_data = provider.rotate_point_cloud(batch_data)
        rotated_data = provider.rotate_perturbation_point_cloud(rotated_data)

    jittered_data = provider.random_scale_point_cloud(rotated_data[:, :, 0:3])
    jittered_data = provider.shift_point_cloud(jittered_data)
    jittered_data = provider.jitter_point_cloud(jittered_data)
    rotated_data[:, :, 0:3] = jittered_data
    return provider.shuffle_points(rotated_data)
Example #15
 def aug_data_batch(self,
                    data_batch,
                    scale_low=0.8,
                    scale_high=1.25,
                    rot=True,
                    snap2ground=True,
                    trans=0.1):
     # scaling is always applied
     res_batch = provider.random_scale_point_cloud(
         data_batch, scale_low=scale_low, scale_high=scale_high)
     if rot:
         res_batch = provider.rotate_point_cloud(res_batch)
     if trans is not None:
         res_batch = provider.shift_point_cloud(res_batch,
                                                shift_range=trans)
     if snap2ground:
         res_batch = provider.lift_point_cloud_to_ground(res_batch)
     return res_batch
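provider.lift_point_cloud_to_ground is not part of the standard provider.py; assuming it snaps each cloud onto the ground plane, a plausible sketch (the behavior is an assumption, not taken from the source) is:

import numpy as np

def lift_point_cloud_to_ground(batch_data):
    # Hypothetical: translate each cloud along z so its lowest point
    # sits on the plane z = 0.
    for b in range(batch_data.shape[0]):
        batch_data[b, :, 2] -= batch_data[b, :, 2].min()
    return batch_data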
Example #16
 def get_example(self, i):
     """Return i-th data"""
     if self.augment:
         rotated_data = provider.rotate_point_cloud(self.data[i:i + 1, :, :])
         jittered_data = provider.jitter_point_cloud(rotated_data)
         jittered_data = provider.random_scale_point_cloud(jittered_data)
         jittered_data = provider.rotate_perturbation_point_cloud(
             jittered_data)
         jittered_data = provider.shift_point_cloud(jittered_data)
         point_data = jittered_data[0]
     else:
         point_data = self.data[i]
     # point_data (2048, 3): (num_point, k) --> convert to (k, num_point, 1)
     point_data = np.transpose(point_data.astype(np.float32), (1, 0))[:, :, None]
     assert point_data.dtype == np.float32
     assert self.label[i].dtype == np.int32
     return point_data, self.label[i]
Example #17
def get_features_from_encoder(encoder, loader):
    raw_model = pointnet2_cls_msg_raw.get_model(num_class=40,
                                                normal_channel=True).cuda()
    x_train = []
    y_train = []
    print(type(loader))
    # get the features from the pre-trained model
    for batch_id, data in tqdm(enumerate(loader, 0),
                               total=len(loader),
                               smoothing=0.9):
        points, target = data
        points = points.data.numpy()
        points = provider.random_point_dropout(points)
        points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :, 0:3])
        points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])
        points = torch.Tensor(points)
        target = target[:, 0]
        points = points.transpose(2, 1)
        points, target = points.cuda(), target.cuda()
        with torch.no_grad():
            raw_feature, raw_cls = raw_model(points)
            feature_vector, cls = encoder(points)
            feature_vector = torch.cat((raw_feature, feature_vector), 1)
            # use extend here: append would insert the whole [batch, 128] block as a single element
            x_train.extend(feature_vector.cpu().numpy())
            y_train.extend(target.cpu().numpy())
    x_train = np.array(x_train)
    y_train = torch.tensor(y_train)
    print("success feature")

    # for i, (x,y) in enumerate(loader):
    #     # i=i.to(device)
    #     # x=x.to(device)
    #     # y=y.to(device)
    #     x1=torch.tensor([item.cpu().detach().numpy() for item in x1]).cuda()
    #     with torch.no_grad():
    #         feature_vector = encoder(x1)
    #         x_train.extend(feature_vector)
    #         y_train.extend(y.numpy())
    return x_train, y_train
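The returned features are typically used to fit a lightweight classifier on top of the frozen encoders. A minimal sketch with scikit-learn (the probe and the held-out x_test/y_test split are assumptions, not part of this snippet):

from sklearn.linear_model import LogisticRegression

probe = LogisticRegression(max_iter=1000)
probe.fit(x_train, y_train.numpy())
print('probe accuracy:', probe.score(x_test, y_test.numpy()))  # x_test/y_test assumed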
Example #18
 def __data_generation(self, batch_idx):
     x = np.zeros((self.batch_size, self.npoints, 3))
     y = np.zeros((self.batch_size, ))
     for i, idx in enumerate(batch_idx, 0):
         x[i] = self.datas[idx, 0:self.npoints, :]  # take the first n points. TODO: random choice
         y[i] = self.labels[idx]
     if self.augment and np.random.rand() > 0.5:
         # implement data augmentation to the whole BATCH
         rotated_x = provider.rotate_point_cloud(x)  # rotate around x-axis
         rotated_x = provider.rotate_perturbation_point_cloud(
             rotated_x)  # slightly rotate around every axis
         jittered_x = provider.random_scale_point_cloud(
             rotated_x)  # random scale a little bit
         jittered_x = provider.shift_point_cloud(
             jittered_x)  # shift a little
         jittered_x = provider.jitter_point_cloud(
             jittered_x)  # add random noise (jitter)
         jittered_x = provider.shuffle_points(
             jittered_x)  # shuffle the point. for FPS
         x = jittered_x
     return x, keras.utils.to_categorical(y, num_classes=len(self.cat))
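A batch generator like this is usually wrapped in a keras.utils.Sequence whose __getitem__ delegates to __data_generation; a hypothetical usage sketch (the class name and constructor arguments are assumptions):

train_gen = PointCloudSequence(datas, labels, batch_size=32, npoints=1024, augment=True)  # hypothetical class
model.fit(train_gen, epochs=50)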
Example #19
    def _augment_batch_data(self, batch_data):
        if self.normal_channel:
            rotated_data = provider.rotate_point_cloud_with_normal(batch_data)
            rotated_data = provider.rotate_perturbation_point_cloud_with_normal(
                rotated_data)
        else:
            rotated_data = provider.rotate_point_cloud(batch_data)
            rotated_data = provider.rotate_perturbation_point_cloud(
                rotated_data)

        jittered_data = provider.random_scale_point_cloud(
            rotated_data[:, :, 0:3],
            scale_low=self.scale_low,
            scale_high=self.scale_high)
        jittered_data = provider.shift_point_cloud(
            jittered_data, shift_range=self.shift_range)
        jittered_data = provider.jitter_point_cloud(jittered_data,
                                                    sigma=self.jitter_sigma,
                                                    clip=0.1)
        rotated_data[:, :, 0:3] = jittered_data
        if self.shuffle_points:
            return provider.shuffle_points(rotated_data)
        else:
            return rotated_data
Example #20
def main(args):
    def log_string(str):
        logger.info(str)
        # print(str)

    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu  # e.g. GPU '0'
    '''CREATE DIR'''  # create the log directory tree under ./log
    timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
    experiment_dir = Path('./log/')
    experiment_dir.mkdir(exist_ok=True)
    experiment_dir = experiment_dir.joinpath('part_seg')
    experiment_dir.mkdir(exist_ok=True)
    if args.log_dir is None:
        experiment_dir = experiment_dir.joinpath(timestr)
    else:
        experiment_dir = experiment_dir.joinpath(args.log_dir)
    experiment_dir.mkdir(exist_ok=True)
    checkpoints_dir = experiment_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = experiment_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)
    '''LOG'''
    args = parse_args()
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(
        args
    )  # Namespace(batch_size=4, decay_rate=0.0001, epoch=251, gpu='0', learning_rate=0.001, log_dir='pointnet2_part_seg_msg', lr_decay=0.5, model='pointnet2_part_seg_msg', normal=True, npoint=2048, optimizer='Adam', step_size=20)

    root = 'data/shapenetcore_partanno_segmentation_benchmark_v0_normal/'

    # Load the dataset: each sample yields npoints normalized points;
    # the train/test split is predefined.
    TRAIN_DATASET = PartNormalDataset(root=root,
                                      npoints=args.npoint,
                                      split='trainval',
                                      normal_channel=args.normal)

    # assemble the data into batches
    trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET,
                                                  batch_size=args.batch_size,
                                                  shuffle=True,
                                                  num_workers=4)
    # the test data is handled the same way
    TEST_DATASET = PartNormalDataset(root=root,
                                     npoints=args.npoint,
                                     split='test',
                                     normal_channel=args.normal)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=4)

    log_string("The number of training data is: %d" %
               len(TRAIN_DATASET))  # 训练数据 13998
    log_string("The number of test data is: %d" %
               len(TEST_DATASET))  # 测试数据 2874
    num_classes = 16
    num_part = 50
    '''MODEL LOADING'''
    MODEL = importlib.import_module(args.model)
    # copy the model and utility files into the log directory
    shutil.copy('models/%s.py' % args.model, str(experiment_dir))
    shutil.copy('models/pointnet_util.py', str(experiment_dir))
    # segmentation head: classify each of the 2048 points into one of 50 part classes
    classifier = MODEL.get_model(num_part, normal_channel=args.normal).cuda()
    criterion = MODEL.get_loss().cuda()  # loss function

    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv2d') != -1:
            torch.nn.init.xavier_normal_(m.weight.data)
            torch.nn.init.constant_(m.bias.data, 0.0)
        elif classname.find('Linear') != -1:
            torch.nn.init.xavier_normal_(m.weight.data)
            torch.nn.init.constant_(m.bias.data, 0.0)

    try:  # load a pretrained model if one exists
        checkpoint = torch.load(
            str(experiment_dir) + '/checkpoints/best_model.pth')
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
        log_string('Use pretrain model')
    except Exception:
        log_string('No existing model, starting training from scratch...')
        start_epoch = 0
        classifier = classifier.apply(weights_init)

    if args.optimizer == 'Adam':  # TODO: study these parameters
        optimizer = torch.optim.Adam(classifier.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    else:
        optimizer = torch.optim.SGD(classifier.parameters(),
                                    lr=args.learning_rate,
                                    momentum=0.9)
    # adjust BatchNorm momentum
    def bn_momentum_adjust(m, momentum):
        if isinstance(m, torch.nn.BatchNorm2d) or isinstance(
                m, torch.nn.BatchNorm1d):
            m.momentum = momentum

    LEARNING_RATE_CLIP = 1e-5
    MOMENTUM_ORIGINAL = 0.1
    MOMENTUM_DECAY = 0.5
    MOMENTUM_DECAY_STEP = args.step_size

    best_acc = 0
    global_epoch = 0
    best_class_avg_iou = 0
    best_instance_avg_iou = 0

    # training loop
    for epoch in range(start_epoch, args.epoch):
        log_string('Epoch %d (%d/%s):' %
                   (global_epoch + 1, epoch + 1, args.epoch))
        '''Adjust learning rate and BN momentum'''
        lr = max(
            args.learning_rate * (args.lr_decay**(epoch // args.step_size)),
            LEARNING_RATE_CLIP)
        log_string('Learning rate:%f' % lr)
        # param_groups is a list of dicts; write the new lr into every group
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        mean_correct = []
        # 0.1 * (0.5 ** (epoch // step_size)): BN momentum shrinks every MOMENTUM_DECAY_STEP epochs
        momentum = MOMENTUM_ORIGINAL * (MOMENTUM_DECAY **
                                        (epoch // MOMENTUM_DECAY_STEP))
        if momentum < 0.01:
            momentum = 0.01
        print('BN momentum updated to: %f' % momentum)  # e.g. 0.100000
        classifier = classifier.apply(
            lambda x: bn_momentum_adjust(x, momentum))
        '''learning one epoch'''
        for i, data in tqdm(enumerate(trainDataLoader),
                            total=len(trainDataLoader),
                            smoothing=0.9):
            points, label, target = data
            # print(points.shape)  # (4, 2048, 6)
            # data augmentation: small random perturbations
            points = points.data.numpy()
            # print(points.shape)  # (4, 2048, 6)
            points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :, 0:3])
            points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])
            points = torch.Tensor(points)
            points, label, target = points.float().cuda(), label.long().cuda(), target.long().cuda()
            # print(points.shape)  # torch.Size([4, 2048, 6])
            # print(label.shape)   # torch.Size([4, 1])    per-sample category label
            # print(target.shape)  # torch.Size([4, 2048]) per-point part label
            points = points.transpose(2, 1)
            # print(points.shape)  # torch.Size([4, 6, 2048])
            optimizer.zero_grad()
            classifier = classifier.train()
            seg_pred, trans_feat = classifier(
                points, to_categorical(label, num_classes)
            )  # seg_pred: torch.Size([4, 2048, 50]); trans_feat: torch.Size([4, 1024, 1])
            seg_pred = seg_pred.contiguous().view(-1, num_part)  # torch.Size([8192, 50])
            target = target.view(-1, 1)[:, 0]  # 8192
            pred_choice = seg_pred.data.max(1)[1]  # predicted part label per point (8192)
            correct = pred_choice.eq(target.data).cpu().sum()  # e.g. tensor(249): 249 points correct
            mean_correct.append(correct.item() /
                                (args.batch_size * args.npoint))  # e.g. 0.0304
            loss = criterion(seg_pred, target, trans_feat)
            loss.backward()
            optimizer.step()
        train_instance_acc = np.mean(mean_correct)
        # epoch accuracy: mean_correct holds one value per step
        # (13998 samples / batch size 4 = 3500 steps)
        log_string('Train accuracy is: %.5f' %
                   train_instance_acc)  # instance accuracy, e.g. 0.85023
        # evaluation
        with torch.no_grad():  # eval only: no computation graph or gradients needed
            test_metrics = {}
            total_correct = 0
            total_seen = 0
            total_seen_class = [0 for _ in range(num_part)]  # num_part zeros
            total_correct_class = [0 for _ in range(num_part)]
            shape_ious = {cat: [] for cat in seg_classes.keys()}
            seg_label_to_cat = {}  # {0:Airplane, 1:Airplane, ...49:Table}
            for cat in seg_classes.keys():
                for label in seg_classes[cat]:  # every part label of this category
                    seg_label_to_cat[label] = cat

            for batch_id, (points, label,
                           target) in tqdm(enumerate(testDataLoader),
                                           total=len(testDataLoader),
                                           smoothing=0.9):
                cur_batch_size, NUM_POINT, _ = points.size()  # torch.Size([4, 2048, 6])
                points, label, target = points.float().cuda(), label.long().cuda(), target.long().cuda()
                points = points.transpose(2, 1)  # torch.Size([4, 6, 2048])
                classifier = classifier.eval()
                seg_pred, _ = classifier(
                    points,
                    to_categorical(label,
                                   num_classes))  # torch.Size([4, 2048, 50])
                cur_pred_val = seg_pred.cpu().data.numpy()  # (4, 2048, 50)
                cur_pred_val_logits = cur_pred_val
                cur_pred_val = np.zeros((cur_batch_size, NUM_POINT)).astype(np.int32)  # (4, 2048)
                target = target.cpu().data.numpy()  # part labels of every point in the batch, (4, 2048)
                for i in range(cur_batch_size):  # for each sample
                    cat = seg_label_to_cat[target[i, 0]]  # the sample's category, from any of its point labels
                    logits = cur_pred_val_logits[i, :, :]  # (2048, 50)
                    cur_pred_val[i, :] = (np.argmax(logits[:, seg_classes[cat]], 1)
                                          + seg_classes[cat][0])  # argmax restricted to this category's parts
                correct = np.sum(cur_pred_val == target)  # e.g. 7200
                total_correct += correct  # running count of correct points
                total_seen += (cur_batch_size * NUM_POINT)  # running count of evaluated points
                # per-part statistics
                for l in range(num_part):
                    total_seen_class[l] += np.sum(target == l)  # points of part l to be judged
                    total_correct_class[l] += (np.sum((cur_pred_val == l)
                                                      & (target == l)))  # correctly predicted points of part l

                for i in range(cur_batch_size):
                    segp = cur_pred_val[i, :]  # predicted part labels of sample i
                    segl = target[i, :]  # ground-truth part labels of sample i
                    cat = seg_label_to_cat[segl[0]]  # any part label determines the category
                    part_ious = [0.0 for _ in range(len(seg_classes[cat]))]  # one IoU slot per part of this category
                    for l in seg_classes[cat]:
                        if (np.sum(segl == l) == 0) and (
                                np.sum(segp == l) == 0
                        ):  # part is not present, no prediction as well
                            part_ious[l - seg_classes[cat][0]] = 1.0
                        else:
                            part_ious[l - seg_classes[cat][0]] = np.sum(
                                (segl == l) & (segp == l)) / float(
                                    np.sum((segl == l) | (segp == l)))
                    shape_ious[cat].append(np.mean(part_ious))  # mean part IoU of this sample

            all_shape_ious = []
            for cat in shape_ious.keys():  # collect the per-shape instance IoUs
                for iou in shape_ious[cat]:
                    all_shape_ious.append(iou)
                shape_ious[cat] = np.mean(shape_ious[cat])  # mean instance IoU per category
            mean_shape_ious = np.mean(list(shape_ious.values()))
            test_metrics['accuracy'] = total_correct / float(total_seen)
            test_metrics['class_avg_accuracy'] = np.mean(
                np.array(total_correct_class) /
                np.array(total_seen_class, dtype=np.float64))
            for cat in sorted(shape_ious.keys()):
                log_string('eval mIoU of %s %f' %
                           (cat + ' ' * (14 - len(cat)), shape_ious[cat]))
            test_metrics['class_avg_iou'] = mean_shape_ious
            test_metrics['instance_avg_iou'] = np.mean(all_shape_ious)

        log_string(
            'Epoch %d test Accuracy: %f  Class avg mIOU: %f   Instance avg mIOU: %f'
            %
            (epoch + 1, test_metrics['accuracy'],
             test_metrics['class_avg_iou'], test_metrics['instance_avg_iou']))
        if (test_metrics['instance_avg_iou'] >= best_instance_avg_iou):
            logger.info('Save model...')
            savepath = str(checkpoints_dir) + '/best_model.pth'
            log_string('Saving at %s' % savepath)
            state = {
                'epoch': epoch,
                'train_acc': train_instance_acc,
                'test_acc': test_metrics['accuracy'],
                'class_avg_iou': test_metrics['class_avg_iou'],
                'instance_avg_iou': test_metrics['instance_avg_iou'],
                'model_state_dict': classifier.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            }
            torch.save(state, savepath)
            log_string('Saving model....')

        if test_metrics['accuracy'] > best_acc:
            best_acc = test_metrics['accuracy']
        if test_metrics['class_avg_iou'] > best_class_avg_iou:
            best_class_avg_iou = test_metrics['class_avg_iou']
        if test_metrics['instance_avg_iou'] > best_instance_avg_iou:
            best_instance_avg_iou = test_metrics['instance_avg_iou']
        log_string('Best accuracy is: %.5f' % best_acc)
        log_string('Best class avg mIOU is: %.5f' % best_class_avg_iou)
        log_string('Best instance avg mIOU is: %.5f' % best_instance_avg_iou)
        global_epoch += 1
Example #21
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')
    np.random.seed(101)
    for fn in range(len(TEST_FILES)):
        log_string('----' + str(fn) + '----')
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        if FLAGS.random_pc_order:
            current_data = change_point_cloud_order(current_data)
        current_data = current_data[:, 0:NUM_POINT, :]
        current_label = np.squeeze(current_label)
        print(current_data.shape)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        print(file_size)

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE
            cur_batch_size = end_idx - start_idx

            # Aggregating BEG
            batch_loss_sum = 0  # sum of losses for the batch
            batch_pred_sum = np.zeros(
                (cur_batch_size, NUM_CLASSES))  # score for classes
            batch_pred_max = np.ones(
                (cur_batch_size, NUM_CLASSES)) * (-999999)  # score for classes
            batch_pred_classes = np.zeros(
                (cur_batch_size, NUM_CLASSES))  # 0/1 for classes
            for vote_idx in range(num_votes):
                # Shuffle point order to achieve different farthest samplings
                shuffled_indices = np.arange(NUM_POINT)
                np.random.shuffle(shuffled_indices)
                rotated_data = provider.rotate_point_cloud_by_angle(
                    current_data[start_idx:end_idx, shuffled_indices, :],
                    vote_idx / float(num_votes) * np.pi * 2)
                jittered_data = provider.random_scale_point_cloud(rotated_data)
                jittered_data = provider.rotate_perturbation_point_cloud(
                    jittered_data)
                jittered_data = provider.jitter_point_cloud(jittered_data)
                feed_dict = {
                    ops['pointclouds_pl']: rotated_data,  # note: jittered_data is computed but never fed
                    ops['labels_pl']: current_label[start_idx:end_idx],
                    ops['is_training_pl']: is_training
                }
                loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                              feed_dict=feed_dict)
                batch_pred_sum += pred_val
                batch_pred_val = np.argmax(pred_val, 1)
                for el_idx in range(cur_batch_size):
                    batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
                batch_loss_sum += (loss_val * cur_batch_size /
                                   float(num_votes))
            # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
            # pred_val = np.argmax(batch_pred_classes, 1)
            pred_val = np.argmax(batch_pred_sum, 1)
            # Aggregating END

            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
            total_correct += correct
            total_seen += cur_batch_size
            loss_sum += batch_loss_sum

            for i in range(start_idx, end_idx):
                l = current_label[i]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_val[i - start_idx] == l)
                fout.write('%d, %d\n' % (pred_val[i - start_idx], l))

                if pred_val[i - start_idx] != l and FLAGS.visu:  # ERROR CASE, DUMP!
                    img_filename = '%d_label_%s_pred_%s.jpg' % (
                        error_cnt, SHAPE_NAMES[l],
                        SHAPE_NAMES[pred_val[i - start_idx]])
                    img_filename = os.path.join(DUMP_DIR, img_filename)
                    output_img = pc_util.point_cloud_three_views(
                        np.squeeze(current_data[i, :, :]))
                    scipy.misc.imsave(img_filename, output_img)  # removed in SciPy >= 1.2; use imageio.imwrite there
                    error_cnt += 1

    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(total_correct_class) /
        np.array(total_seen_class, dtype=np.float64))))

    class_accuracies = np.array(total_correct_class) / np.array(
        total_seen_class, dtype=np.float64)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
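The voting evaluation above relies on rotate_point_cloud_by_angle to present each cloud at num_votes evenly spaced orientations. A sketch, following the published provider.py:

import numpy as np

def rotate_point_cloud_by_angle(batch_data, rotation_angle):
    # Rotate every cloud by the same fixed angle around the Y axis.
    rotated = np.zeros(batch_data.shape, dtype=np.float32)
    c, s = np.cos(rotation_angle), np.sin(rotation_angle)
    rotation_matrix = np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
    for k in range(batch_data.shape[0]):
        rotated[k, ...] = np.dot(batch_data[k, ...].reshape((-1, 3)), rotation_matrix)
    return rotated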
Example #22
def main(args):
    def log_string(str):
        logger.info(str)
        print(str)

    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    '''CREATE DIR'''
    timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
    exp_dir = Path('./log/')
    exp_dir.mkdir(exist_ok=True)
    exp_dir = exp_dir.joinpath('part_seg')
    exp_dir.mkdir(exist_ok=True)
    if args.log_dir is None:
        exp_dir = exp_dir.joinpath(timestr)
    else:
        exp_dir = exp_dir.joinpath(args.log_dir)
    exp_dir.mkdir(exist_ok=True)
    checkpoints_dir = exp_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = exp_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)

    '''LOG'''
    args = parse_args()
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(args)

    root = 'data/shapenetcore_partanno_segmentation_benchmark_v0_normal/'

    TRAIN_DATASET = PartNormalDataset(root=root, npoints=args.npoint, split='trainval', normal_channel=args.normal)
    trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET, batch_size=args.batch_size, shuffle=True, num_workers=10, drop_last=True)
    TEST_DATASET = PartNormalDataset(root=root, npoints=args.npoint, split='test', normal_channel=args.normal)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False, num_workers=10)
    log_string("The number of training data is: %d" % len(TRAIN_DATASET))
    log_string("The number of test data is: %d" % len(TEST_DATASET))

    num_classes = 16
    num_part = 50

    '''MODEL LOADING'''
    MODEL = importlib.import_module(args.model)
    shutil.copy('models/%s.py' % args.model, str(exp_dir))
    shutil.copy('models/pointnet2_utils.py', str(exp_dir))

    classifier = MODEL.get_model(num_part, normal_channel=args.normal).cuda()
    criterion = MODEL.get_loss().cuda()
    classifier.apply(inplace_relu)

    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv2d') != -1:
            torch.nn.init.xavier_normal_(m.weight.data)
            if m.bias is not None:
                torch.nn.init.constant_(m.bias.data, 0.0)
        elif classname.find('Linear') != -1:
            torch.nn.init.xavier_normal_(m.weight.data)
            if m.bias is not None:
                torch.nn.init.constant_(m.bias.data, 0.0)

    try:
        checkpoint = torch.load(str(exp_dir) + '/checkpoints/best_model.pth')
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
        log_string('Use pretrain model')
    except Exception:
        log_string('No existing model, starting training from scratch...')
        start_epoch = 0
        classifier = classifier.apply(weights_init)

    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(
            classifier.parameters(),
            lr=args.learning_rate,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=args.decay_rate
        )
    else:
        optimizer = torch.optim.SGD(classifier.parameters(), lr=args.learning_rate, momentum=0.9)

    def bn_momentum_adjust(m, momentum):
        if isinstance(m, torch.nn.BatchNorm2d) or isinstance(m, torch.nn.BatchNorm1d):
            m.momentum = momentum

    LEARNING_RATE_CLIP = 1e-5
    MOMENTUM_ORIGINAL = 0.1
    MOMENTUM_DECAY = 0.5
    MOMENTUM_DECAY_STEP = args.step_size

    best_acc = 0
    global_epoch = 0
    best_class_avg_iou = 0
    best_instance_avg_iou = 0

    for epoch in range(start_epoch, args.epoch):
        mean_correct = []

        log_string('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch))
        '''Adjust learning rate and BN momentum'''
        lr = max(args.learning_rate * (args.lr_decay ** (epoch // args.step_size)), LEARNING_RATE_CLIP)
        log_string('Learning rate:%f' % lr)
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        momentum = MOMENTUM_ORIGINAL * (MOMENTUM_DECAY ** (epoch // MOMENTUM_DECAY_STEP))
        if momentum < 0.01:
            momentum = 0.01
        print('BN momentum updated to: %f' % momentum)
        classifier = classifier.apply(lambda x: bn_momentum_adjust(x, momentum))
        classifier = classifier.train()

        '''learning one epoch'''
        for i, (points, label, target) in tqdm(enumerate(trainDataLoader), total=len(trainDataLoader), smoothing=0.9):
            optimizer.zero_grad()

            points = points.data.numpy()
            points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :, 0:3])
            points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])
            points = torch.Tensor(points)
            points, label, target = points.float().cuda(), label.long().cuda(), target.long().cuda()
            points = points.transpose(2, 1)

            seg_pred, trans_feat = classifier(points, to_categorical(label, num_classes))
            seg_pred = seg_pred.contiguous().view(-1, num_part)
            target = target.view(-1, 1)[:, 0]
            pred_choice = seg_pred.data.max(1)[1]

            correct = pred_choice.eq(target.data).cpu().sum()
            mean_correct.append(correct.item() / (args.batch_size * args.npoint))
            loss = criterion(seg_pred, target, trans_feat)
            loss.backward()
            optimizer.step()

        train_instance_acc = np.mean(mean_correct)
        log_string('Train accuracy is: %.5f' % train_instance_acc)

        with torch.no_grad():
            test_metrics = {}
            total_correct = 0
            total_seen = 0
            total_seen_class = [0 for _ in range(num_part)]
            total_correct_class = [0 for _ in range(num_part)]
            shape_ious = {cat: [] for cat in seg_classes.keys()}
            seg_label_to_cat = {}  # {0:Airplane, 1:Airplane, ...49:Table}

            for cat in seg_classes.keys():
                for label in seg_classes[cat]:
                    seg_label_to_cat[label] = cat

            classifier = classifier.eval()

            for batch_id, (points, label, target) in tqdm(enumerate(testDataLoader), total=len(testDataLoader), smoothing=0.9):
                cur_batch_size, NUM_POINT, _ = points.size()
                points, label, target = points.float().cuda(), label.long().cuda(), target.long().cuda()
                points = points.transpose(2, 1)
                seg_pred, _ = classifier(points, to_categorical(label, num_classes))
                cur_pred_val = seg_pred.cpu().data.numpy()
                cur_pred_val_logits = cur_pred_val
                cur_pred_val = np.zeros((cur_batch_size, NUM_POINT)).astype(np.int32)
                target = target.cpu().data.numpy()

                for i in range(cur_batch_size):
                    cat = seg_label_to_cat[target[i, 0]]
                    logits = cur_pred_val_logits[i, :, :]
                    cur_pred_val[i, :] = np.argmax(logits[:, seg_classes[cat]], 1) + seg_classes[cat][0]

                correct = np.sum(cur_pred_val == target)
                total_correct += correct
                total_seen += (cur_batch_size * NUM_POINT)

                for l in range(num_part):
                    total_seen_class[l] += np.sum(target == l)
                    total_correct_class[l] += (np.sum((cur_pred_val == l) & (target == l)))

                for i in range(cur_batch_size):
                    segp = cur_pred_val[i, :]
                    segl = target[i, :]
                    cat = seg_label_to_cat[segl[0]]
                    part_ious = [0.0 for _ in range(len(seg_classes[cat]))]
                    for l in seg_classes[cat]:
                        if (np.sum(segl == l) == 0) and (
                                np.sum(segp == l) == 0):  # part is not present, no prediction as well
                            part_ious[l - seg_classes[cat][0]] = 1.0
                        else:
                            part_ious[l - seg_classes[cat][0]] = np.sum((segl == l) & (segp == l)) / float(
                                np.sum((segl == l) | (segp == l)))
                    shape_ious[cat].append(np.mean(part_ious))

            all_shape_ious = []
            for cat in shape_ious.keys():
                for iou in shape_ious[cat]:
                    all_shape_ious.append(iou)
                shape_ious[cat] = np.mean(shape_ious[cat])
            mean_shape_ious = np.mean(list(shape_ious.values()))
            test_metrics['accuracy'] = total_correct / float(total_seen)
            test_metrics['class_avg_accuracy'] = np.mean(
                np.array(total_correct_class) / np.array(total_seen_class, dtype=np.float64))
            for cat in sorted(shape_ious.keys()):
                log_string('eval mIoU of %s %f' % (cat + ' ' * (14 - len(cat)), shape_ious[cat]))
            test_metrics['class_avg_iou'] = mean_shape_ious
            test_metrics['instance_avg_iou'] = np.mean(all_shape_ious)

        log_string('Epoch %d test Accuracy: %f  Class avg mIOU: %f   Instance avg mIOU: %f' % (
            epoch + 1, test_metrics['accuracy'], test_metrics['class_avg_iou'], test_metrics['instance_avg_iou']))
        if (test_metrics['instance_avg_iou'] >= best_instance_avg_iou):
            logger.info('Save model...')
            savepath = str(checkpoints_dir) + '/best_model.pth'
            log_string('Saving at %s' % savepath)
            state = {
                'epoch': epoch,
                'train_acc': train_instance_acc,
                'test_acc': test_metrics['accuracy'],
                'class_avg_iou': test_metrics['class_avg_iou'],
                'instance_avg_iou': test_metrics['instance_avg_iou'],
                'model_state_dict': classifier.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            }
            torch.save(state, savepath)
            log_string('Saving model....')

        if test_metrics['accuracy'] > best_acc:
            best_acc = test_metrics['accuracy']
        if test_metrics['class_avg_iou'] > best_class_avg_iou:
            best_class_avg_iou = test_metrics['class_avg_iou']
        if test_metrics['instance_avg_iou'] > best_instance_avg_iou:
            best_instance_avg_iou = test_metrics['instance_avg_iou']
        log_string('Best accuracy is: %.5f' % best_acc)
        log_string('Best class avg mIOU is: %.5f' % best_class_avg_iou)
        log_string('Best instance avg mIOU is: %.5f' % best_instance_avg_iou)
        global_epoch += 1
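classifier.apply(inplace_relu) above walks the module tree and switches every ReLU to in-place mode to save activation memory; in the common PointNet++ PyTorch ports it is just:

def inplace_relu(m):
    # Enable in-place ReLU to reduce activation memory.
    classname = m.__class__.__name__
    if classname.find('ReLU') != -1:
        m.inplace = True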
Example #23
def main(args):
    omegaconf.OmegaConf.set_struct(args, False)
    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    logger = logging.getLogger(__name__)

    print(args.pretty())
    '''DATA LOADING'''
    logger.info('Load dataset ...')
    DATA_PATH = hydra.utils.to_absolute_path('modelnet40_normal_resampled/')

    TRAIN_DATASET = ModelNetDataLoader(root=DATA_PATH,
                                       npoint=args.num_point,
                                       split='train',
                                       normal_channel=args.normal)
    TEST_DATASET = ModelNetDataLoader(root=DATA_PATH,
                                      npoint=args.num_point,
                                      split='test',
                                      normal_channel=args.normal)
    trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET,
                                                  batch_size=args.batch_size,
                                                  shuffle=True,
                                                  num_workers=4)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=4)
    '''MODEL LOADING'''
    args.num_class = 40
    args.input_dim = 6 if args.normal else 3
    shutil.copy(
        hydra.utils.to_absolute_path('models/{}/model.py'.format(
            args.model.name)), '.')

    classifier = getattr(
        importlib.import_module('models.{}.model'.format(args.model.name)),
        'PointTransformer')(args).cuda()
    criterion = torch.nn.CrossEntropyLoss()

    try:
        checkpoint = torch.load('best_model.pth')
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
        logger.info('Use pretrain model')
    except Exception:
        logger.info('No existing model, starting training from scratch...')
        start_epoch = 0

    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(classifier.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.weight_decay)
    else:
        optimizer = torch.optim.SGD(classifier.parameters(),
                                    lr=0.01,
                                    momentum=0.9)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=50,
                                                gamma=0.3)
    global_epoch = 0
    global_step = 0
    best_instance_acc = 0.0
    best_class_acc = 0.0
    best_epoch = 0
    mean_correct = []
    '''TRAINING'''
    logger.info('Start training...')
    for epoch in range(start_epoch, args.epoch):
        logger.info('Epoch %d (%d/%s):' %
                    (global_epoch + 1, epoch + 1, args.epoch))

        classifier.train()
        mean_correct = []  # reset per epoch so the train accuracy is per-epoch, not cumulative
        for batch_id, data in tqdm(enumerate(trainDataLoader, 0),
                                   total=len(trainDataLoader),
                                   smoothing=0.9):
            points, target = data
            points = points.data.numpy()
            points = provider.random_point_dropout(points)
            points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :, 0:3])
            points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])
            points = torch.Tensor(points)
            target = target[:, 0]

            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()

            pred = classifier(points)
            loss = criterion(pred, target.long())
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.long().data).cpu().sum()
            mean_correct.append(correct.item() / float(points.size()[0]))
            loss.backward()
            optimizer.step()
            global_step += 1

        scheduler.step()

        train_instance_acc = np.mean(mean_correct)
        logger.info('Train Instance Accuracy: %f' % train_instance_acc)

        with torch.no_grad():
            instance_acc, class_acc = test(classifier.eval(), testDataLoader)

            if (instance_acc >= best_instance_acc):
                best_instance_acc = instance_acc
                best_epoch = epoch + 1

            if (class_acc >= best_class_acc):
                best_class_acc = class_acc
            logger.info('Test Instance Accuracy: %f, Class Accuracy: %f' %
                        (instance_acc, class_acc))
            logger.info('Best Instance Accuracy: %f, Class Accuracy: %f' %
                        (best_instance_acc, best_class_acc))

            if (instance_acc >= best_instance_acc):
                logger.info('Save model...')
                savepath = 'best_model.pth'
                logger.info('Saving at %s' % savepath)
                state = {
                    'epoch': best_epoch,
                    'instance_acc': instance_acc,
                    'class_acc': class_acc,
                    'model_state_dict': classifier.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                }
                torch.save(state, savepath)
            global_epoch += 1

    logger.info('End of training...')
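The test() helper called in the loop above is not shown. A minimal sketch of a compatible evaluator, assuming it returns (instance_acc, class_acc) over ModelNet40's 40 classes (illustrative, not the repo's exact code):

import numpy as np
import torch

def test(model, loader, num_class=40):
    # Per-instance accuracy plus accuracy averaged over the 40 categories.
    mean_correct = []
    class_acc = np.zeros((num_class, 3))
    for points, target in loader:
        target = target[:, 0]
        points, target = points.cuda(), target.cuda()
        pred = model(points)
        pred_choice = pred.data.max(1)[1]
        for cat in np.unique(target.cpu()):
            mask = (target == cat)
            acc = pred_choice[mask].eq(target[mask]).cpu().sum()
            class_acc[cat, 0] += acc.item() / float(mask.sum().item())
            class_acc[cat, 1] += 1
        correct = pred_choice.eq(target.long()).cpu().sum()
        mean_correct.append(correct.item() / float(points.size(0)))
    class_acc[:, 2] = class_acc[:, 0] / np.maximum(class_acc[:, 1], 1)
    return np.mean(mean_correct), np.mean(class_acc[:, 2])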
Example #24
def main():
    args = parser.parse_args()
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)

    if args.remark is None:
        args.remark = args.dataset + "-" + args.task + "-" + args.norm

    if args.dataset == "shapenet":
        args.num_class = 16
    else:
        args.num_class = 40

    def log_string(str):
        logger.info(str)
        print(str)

    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    '''CREATE DIR'''
    timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
    # experiment_dir = Path('./exp/v1/')
    experiment_dir = Path('/data-x/g12/zhangjie/3dIP/exp/v1')
    experiment_dir.mkdir(exist_ok=True)
    experiment_dir = experiment_dir.joinpath('classification')
    experiment_dir.mkdir(exist_ok=True)
    experiment_dir = experiment_dir.joinpath(args.remark + "_" + timestr)
    experiment_dir.mkdir(exist_ok=True)
    checkpoints_dir = experiment_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = experiment_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)
    '''LOG_curve'''
    title = args.dataset + "-" + args.task + "-" + args.norm
    logger_loss = Logger(os.path.join(log_dir, 'log_loss_v1.txt'), title=title)
    logger_loss.set_names(
        ['Train Loss', 'Valid Clean Loss', 'Valid Trigger Loss'])
    logger_acc = Logger(os.path.join(log_dir, 'log_acc_v1.txt'), title=title)
    logger_acc.set_names(
        ['Train  Acc.', 'Valid Clean Acc.', 'Valid Trigger Acc.'])
    '''LOG'''  # create the log file
    logger = logging.getLogger("Model")  # logger name
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
    file_handler.setLevel(logging.INFO)  # minimum level written to the log file
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)  # attach the file handler
    log_string('PARAMETER ...')
    log_string(args)
    '''DATA LOADING'''
    log_string('Load dataset ...')
    if args.dataset == "shapenet":
        trainDataLoader = getData.get_dataLoader(train=True,
                                                 Shapenet=True,
                                                 batchsize=args.batch_size)
        testDataLoader = getData.get_dataLoader(train=False,
                                                Shapenet=True,
                                                batchsize=args.batch_size)
        triggerDataLoader = getData2.get_dataLoader(Shapenet=True,
                                                    batchsize=args.batch_size)
    else:
        trainDataLoader = getData.get_dataLoader(train=True,
                                                 Shapenet=False,
                                                 batchsize=args.batch_size)
        testDataLoader = getData.get_dataLoader(train=False,
                                                Shapenet=False,
                                                batchsize=args.batch_size)
        triggerDataLoader = getData2.get_dataLoader(Shapenet=False,
                                                    batchsize=args.batch_size)

    wminputs, wmtargets = [], []
    for wm_idx, (wminput, wmtarget) in enumerate(triggerDataLoader):
        wminputs.append(wminput)
        wmtargets.append(wmtarget)
    '''MODEL LOADING'''
    num_class = args.num_class
    MODEL = importlib.import_module(args.model)

    shutil.copy('./models/%s.py' % args.model, str(experiment_dir))
    shutil.copy('./models/pointnet_util.py', str(experiment_dir))
    shutil.copy('train_1_cls.py', str(experiment_dir))
    shutil.copy('./data/getData.py', str(experiment_dir))
    shutil.copy('./data/getData2.py', str(experiment_dir))
    shutil.copytree('./models/layers', str(experiment_dir) + "/layers")

    classifier = MODEL.get_model(num_class, channel=3).cuda()
    # classifier = MODEL.get_model(num_class,normal_channel=args.normal).cuda()
    criterion = MODEL.get_loss().cuda()

    pprint(classifier)

    try:
        checkpoint = torch.load(
            str(experiment_dir) + '/checkpoints/best_model.pth')
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
        log_string('Use pretrained model')
    except Exception:
        log_string('No existing model, starting training from scratch...')
        start_epoch = 0

    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(classifier.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    else:
        optimizer = torch.optim.SGD(classifier.parameters(),
                                    lr=0.01,
                                    momentum=0.9)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=20,
                                                gamma=0.7)
    global_epoch = 0
    global_step = 0
    best_instance_acc = 0.0
    best_class_acc = 0.0
    mean_correct = []
    mean_loss = []
    '''TRAINING'''
    logger.info('Start training...')
    for epoch in range(start_epoch, args.epoch):
        time_start = datetime.datetime.now()
        log_string('Epoch %d (%d/%s):' %
                   (global_epoch + 1, epoch + 1, args.epoch))

        mean_correct = []  # reset per epoch so the reported train averages are per-epoch
        mean_loss = []
        scheduler.step()
        for batch_id, data in tqdm(enumerate(trainDataLoader, 0),
                                   total=len(trainDataLoader),
                                   smoothing=0.9):
            points, target = data
            wm_id = np.random.randint(len(wminputs))
            points = torch.cat(
                [points, wminputs[(wm_id + batch_id) % len(wminputs)]],
                dim=0)  # concatenate a randomly chosen watermark batch with the clean inputs
            target = torch.cat(
                [target, wmtargets[(wm_id + batch_id) % len(wminputs)]], dim=0)

            points = points.data.numpy()
            # provider holds hand-written point-cloud ops; dropout replaces the
            # dropped points with the value of the first point
            points = provider.random_point_dropout(points)
            points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :, 0:3])  # random scaling
            points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])  # random shift
            points = torch.Tensor(points)
            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()

            optimizer.zero_grad()
            classifier = classifier.train()
            pred, trans_feat = classifier(points)
            loss = criterion(pred, target.long(), trans_feat)
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.long().data).cpu().sum()
            mean_correct.append(correct.item() / float(points.size()[0]))
            loss.backward()
            optimizer.step()
            global_step += 1

            mean_loss.append(loss.item() / float(points.size()[0]))

        train_loss = np.mean(mean_loss)
        train_instance_acc = np.mean(mean_correct)
        log_string('Train Instance Accuracy: %f' % train_instance_acc)

        with torch.no_grad():
            val_loss, instance_acc, class_acc = test(classifier,
                                                     testDataLoader,
                                                     num_class=args.num_class)
            val_loss2, instance_acc2, class_acc2 = test(
                classifier, triggerDataLoader, num_class=args.num_class)

            if (instance_acc >= best_instance_acc):
                best_instance_acc = instance_acc
                best_epoch = epoch + 1

            if (class_acc >= best_class_acc):
                best_class_acc = class_acc
            log_string('Test Clean Instance Accuracy: %f, Class Accuracy: %f' %
                       (instance_acc, class_acc))
            log_string('Best Clean Instance Accuracy: %f, Class Accuracy: %f' %
                       (best_instance_acc, best_class_acc))
            log_string(
                'Test Trigger Accuracy: %f, Trigger Class Accuracy: %f' %
                (instance_acc2, class_acc2))

            if (instance_acc >= best_instance_acc):
                logger.info('Save model...')
                savepath = str(checkpoints_dir) + '/best_model.pth'
                log_string('Saving at %s' % savepath)
                log_string('best_epoch %s' % str(best_epoch))
                state = {
                    'epoch': best_epoch,
                    'clean instance_acc': instance_acc,
                    'clean class_acc': class_acc,
                    'trigger instance_acc': instance_acc2,
                    'trigger class_acc': class_acc2,
                    'model_state_dict': classifier.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                }
                torch.save(state, savepath)
            global_epoch += 1

        logger_loss.append([train_loss, val_loss, val_loss2])
        logger_acc.append([train_instance_acc, instance_acc, instance_acc2])

        time_end = datetime.datetime.now()
        time_span_str = str(int((time_end - time_start).total_seconds()))
        log_string('Epoch time: %s s' % time_span_str)

    logger_loss.close()
    logger_loss.plot()
    savefig(os.path.join(log_dir, 'log_loss_v3.eps'))
    logger_acc.close()
    logger_acc.plot()
    savefig(os.path.join(log_dir, 'log_acc_v3.eps'))

    log_string('best_epoch %s' % str(best_epoch))
    logger.info('End of training...')
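Several of these scripts share the same provider.py augmentations. Typical implementations, following the widely copied PointNet++ provider (shown here as a reference sketch; parameter defaults may differ between repos):

import numpy as np

def random_scale_point_cloud(batch_data, scale_low=0.8, scale_high=1.25):
    # Multiply each cloud in the batch by one uniform random scale factor.
    scales = np.random.uniform(scale_low, scale_high, batch_data.shape[0])
    for b in range(batch_data.shape[0]):
        batch_data[b, :, :] *= scales[b]
    return batch_data

def shift_point_cloud(batch_data, shift_range=0.1):
    # Translate each cloud by one random offset per axis.
    shifts = np.random.uniform(-shift_range, shift_range, (batch_data.shape[0], 3))
    for b in range(batch_data.shape[0]):
        batch_data[b, :, :] += shifts[b, :]
    return batch_data

def random_point_dropout(batch_pc, max_dropout_ratio=0.875):
    # Replace a random subset of points with the first point, keeping the array shape fixed.
    for b in range(batch_pc.shape[0]):
        dropout_ratio = np.random.random() * max_dropout_ratio
        drop_idx = np.where(np.random.random(batch_pc.shape[1]) <= dropout_ratio)[0]
        if len(drop_idx) > 0:
            batch_pc[b, drop_idx, :] = batch_pc[b, 0, :]
    return batch_pc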
Example #25
def main(args):
    def log_string(str):
        logger.info(str)
        print(str)

    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    '''CREATE DIR'''
    timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
    exp_dir = Path('./log/')
    exp_dir.mkdir(exist_ok=True)
    exp_dir = exp_dir.joinpath('classification')
    exp_dir.mkdir(exist_ok=True)
    if args.log_dir is None:
        exp_dir = exp_dir.joinpath(timestr)
    else:
        exp_dir = exp_dir.joinpath(args.log_dir)
    exp_dir.mkdir(exist_ok=True)
    checkpoints_dir = exp_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = exp_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)
    '''LOG'''
    args = parse_args()
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(args)
    '''DATA LOADING'''
    log_string('Load dataset ...')
    # dataset path
    data_path = '/home/wgk/dataset/Pointnet_Pointnet2_pytorch/modelnet40_normal_resampled/'
    # training dataset
    train_dataset = ModelNetDataLoader(root=data_path,
                                       args=args,
                                       split='train',
                                       process_data=args.process_data)
    # test dataset
    test_dataset = ModelNetDataLoader(root=data_path,
                                      args=args,
                                      split='test',
                                      process_data=args.process_data)
    # training data loader
    trainDataLoader = torch.utils.data.DataLoader(train_dataset,
                                                  batch_size=args.batch_size,
                                                  shuffle=True,
                                                  num_workers=1,
                                                  drop_last=True)
    testDataLoader = torch.utils.data.DataLoader(test_dataset,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=1)
    '''MODEL LOADING'''
    num_class = args.num_category
    # by default this imports pointnet2_cls_msg.py; on the importlib usage see https://www.bilibili.com/read/cv5891176/
    model = importlib.import_module(args.model)

    shutil.copy('./models/%s.py' % args.model, str(exp_dir))
    shutil.copy('models/pointnet2_utils.py', str(exp_dir))
    shutil.copy('./train_classification.py', str(exp_dir))
    # build the classifier (instantiate the get_model class)
    classifier = model.get_model(num_class, normal_channel=args.use_normals)
    # instantiate the loss
    criterion = model.get_loss()
    # .apply() runs inplace_relu on every submodule of the classifier
    classifier.apply(inplace_relu)

    if not args.use_cpu:
        # move the model to the GPU
        classifier = classifier.cuda()
        criterion = criterion.cuda()

    try:
        checkpoint = torch.load(str(exp_dir) + '/checkpoints/best_model.pth')
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
        log_string('Use pretrained model')
    except Exception:
        log_string('No existing model, starting training from scratch...')
        start_epoch = 0

    # configure the optimizer
    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(classifier.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    else:
        optimizer = torch.optim.SGD(classifier.parameters(),
                                    lr=0.01,
                                    momentum=0.9)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=20,
                                                gamma=0.7)
    global_epoch = 0
    global_step = 0
    best_instance_acc = 0.0
    best_class_acc = 0.0
    '''TRAINING'''
    logger.info('Start training...')
    # train for 200 epochs by default
    for epoch in range(start_epoch, args.epoch):
        log_string('Epoch %d (%d/%s):' %
                   (global_epoch + 1, epoch + 1, args.epoch))
        mean_correct = []
        classifier = classifier.train()

        scheduler.step()
        for batch_id, (points, target) in tqdm(enumerate(trainDataLoader, 0),
                                               total=len(trainDataLoader),
                                               smoothing=0.9):
            optimizer.zero_grad()

            # one batch of data, shaped (batch_size, num_points=1024, channel)
            points = points.data.numpy()
            #print(points.shape)
            points = provider.random_point_dropout(points)
            # randomly scale the point cloud
            points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :, 0:3])
            # randomly shift the point cloud
            points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])
            # convert the ndarray to a tensor
            points = torch.Tensor(points)
            # transpose to (batch_size, channel, num_points)
            points = points.transpose(2, 1)

            if not args.use_cpu:
                # move the data to the GPU first
                points, target = points.cuda(), target.cuda()
            # forward pass, returning the predictions
            # pred.shape=[batchsize,40]  trans_feat.shape=[batchsize,1024,1]
            pred, trans_feat = classifier(points)
            #print(pred.shape,trans_feat.shape)
            # the loss is nominally supposed to include a regularization term, but here it actually does not
            loss = criterion(pred, target.long(), trans_feat)
            pred_choice = pred.data.max(1)[1]
            #print(pred_choice)

            # compare predictions against ground truth on the CPU and count the matches
            correct = pred_choice.eq(target.long().data).cpu().sum()
            # fraction of correct predictions; points.size()[0] == batch_size
            mean_correct.append(correct.item() / float(points.size()[0]))
            # loss is a tensor here, so backward() can be applied
            loss.backward()
            optimizer.step()
            global_step += 1

        train_instance_acc = np.mean(mean_correct)
        log_string('Train Instance Accuracy: %f' % train_instance_acc)

        with torch.no_grad():
            instance_acc, class_acc = test(classifier.eval(),
                                           testDataLoader,
                                           num_class=num_class)

            if (instance_acc >= best_instance_acc):
                best_instance_acc = instance_acc
                best_epoch = epoch + 1

            if (class_acc >= best_class_acc):
                best_class_acc = class_acc
            log_string('Test Instance Accuracy: %f, Class Accuracy: %f' %
                       (instance_acc, class_acc))
            log_string('Best Instance Accuracy: %f, Class Accuracy: %f' %
                       (best_instance_acc, best_class_acc))

            if (instance_acc >= best_instance_acc):
                logger.info('Save model...')
                savepath = str(checkpoints_dir) + '/best_model.pth'
                log_string('Saving at %s' % savepath)
                state = {
                    'epoch': best_epoch,
                    'instance_acc': instance_acc,
                    'class_acc': class_acc,
                    'model_state_dict': classifier.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                }
                torch.save(state, savepath)
            global_epoch += 1

    logger.info('End of training...')
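inplace_relu, applied to the classifier above, is a small memory optimization; in Pointnet_Pointnet2_pytorch it is essentially:

def inplace_relu(m):
    # Switch every ReLU module to in-place mode to save activation memory.
    classname = m.__class__.__name__
    if classname.find('ReLU') != -1:
        m.inplace = True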
Example #26
def main(args):
    def log_string(str):
        logger.info(str)
        print(str)

    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    '''CREATE DIR'''
    timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
    experiment_dir = Path('./log/')
    experiment_dir.mkdir(exist_ok=True)
    experiment_dir = experiment_dir.joinpath('sem_seg')
    experiment_dir.mkdir(exist_ok=True)
    if args.log_dir is None:
        experiment_dir = experiment_dir.joinpath(timestr)
    else:
        experiment_dir = experiment_dir.joinpath(args.log_dir)
    experiment_dir.mkdir(exist_ok=True)
    checkpoints_dir = experiment_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = experiment_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)

    '''LOG'''
    args = parse_args()
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(args)

    root = 'data/stanford_indoor3d/'

    NUM_CLASSES = 13
    NUM_POINT = args.npoint
    BATCH_SIZE = args.batch_size
    FEATURE_CHANNEL = 3 if args.with_rgb else 0

    print("start loading training data ...")
    TRAIN_DATASET = S3DISDataset(root, split='train', with_rgb=args.with_rgb, test_area=args.test_area, block_points=NUM_POINT)
    print("start loading test data ...")
    TEST_DATASET = S3DISDataset(root, split='test', with_rgb=args.with_rgb, test_area=args.test_area, block_points=NUM_POINT)
    print("start loading whole scene validation data ...")
    TEST_DATASET_WHOLE_SCENE = S3DISDatasetWholeScene(root, split='test', with_rgb=args.with_rgb, test_area=args.test_area, block_points=NUM_POINT)
    trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=BATCH_SIZE, shuffle=False, num_workers=4)
    weights = TRAIN_DATASET.labelweights
    weights = torch.Tensor(weights).cuda()

    log_string("The number of training data is: %d" % len(TRAIN_DATASET))
    log_string("The number of test data is: %d" %  len(TEST_DATASET_WHOLE_SCENE))

    '''MODEL LOADING'''
    MODEL = importlib.import_module(args.model)
    shutil.copy('models/%s.py' % args.model, str(experiment_dir))
    shutil.copy('models/pointnet_util.py', str(experiment_dir))

    classifier = MODEL.get_model(NUM_CLASSES, with_rgb=args.with_rgb).cuda()
    criterion = MODEL.get_loss().cuda()

    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv2d') != -1:
            torch.nn.init.xavier_normal_(m.weight.data)
            torch.nn.init.constant_(m.bias.data, 0.0)
        elif classname.find('Linear') != -1:
            torch.nn.init.xavier_normal_(m.weight.data)
            torch.nn.init.constant_(m.bias.data, 0.0)

    try:
        checkpoint = torch.load(str(experiment_dir) + '/checkpoints/best_model.pth')
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
        log_string('Use pretrained model')
    except Exception:
        log_string('No existing model, starting training from scratch...')
        start_epoch = 0
        classifier = classifier.apply(weights_init)

    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(
            classifier.parameters(),
            lr=args.learning_rate,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=args.decay_rate
        )
    else:
        optimizer = torch.optim.SGD(classifier.parameters(), lr=args.learning_rate, momentum=0.9)

    def bn_momentum_adjust(m, momentum):
        if isinstance(m, torch.nn.BatchNorm2d) or isinstance(m, torch.nn.BatchNorm1d):
            m.momentum = momentum

    LEARNING_RATE_CLIP = 1e-5
    MOMENTUM_ORIGINAL = 0.1
    MOMENTUM_DECAY = 0.5
    MOMENTUM_DECAY_STEP = args.step_size

    global_epoch = 0
    best_iou = 0

    for epoch in range(start_epoch, args.epoch):
        '''Train on chopped scenes'''
        log_string('**** Epoch %d (%d/%s) ****' % (global_epoch + 1, epoch + 1, args.epoch))
        lr = max(args.learning_rate * (args.lr_decay ** (epoch // args.step_size)), LEARNING_RATE_CLIP)
        log_string('Learning rate:%f' % lr)
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        momentum = MOMENTUM_ORIGINAL * (MOMENTUM_DECAY ** (epoch // MOMENTUM_DECAY_STEP))
        if momentum < 0.01:
            momentum = 0.01
        print('BN momentum updated to: %f' % momentum)
        classifier = classifier.apply(lambda x: bn_momentum_adjust(x,momentum))
        num_batches = len(trainDataLoader)
        total_correct = 0
        total_seen = 0
        loss_sum = 0
        for i, data in tqdm(enumerate(trainDataLoader), total=len(trainDataLoader), smoothing=0.9):
            points, target, _ = data
            points = points.data.numpy()
            points[:, :, :3] = provider.normalize_data(points[:, :, :3])
            points[:, :, :3] = provider.random_scale_point_cloud(points[:, :, :3])
            points[:, :, :3] = provider.rotate_point_cloud_z(points[:, :, :3])
            points = torch.Tensor(points)
            points, target = points.float().cuda(),target.long().cuda()
            points = points.transpose(2, 1)
            optimizer.zero_grad()
            classifier = classifier.train()
            seg_pred, trans_feat = classifier(points)
            seg_pred = seg_pred.contiguous().view(-1, NUM_CLASSES)
            batch_label = target.view(-1, 1)[:, 0].cpu().data.numpy()
            target = target.view(-1, 1)[:, 0]
            loss = criterion(seg_pred, target, trans_feat, weights)
            loss.backward()
            optimizer.step()
            pred_choice = seg_pred.cpu().data.max(1)[1].numpy()
            correct = np.sum(pred_choice == batch_label)
            total_correct += correct
            total_seen += (BATCH_SIZE * NUM_POINT)
            loss_sum += loss.item()  # accumulate a Python float, not the tensor
        log_string('Training mean loss: %f' % (loss_sum / num_batches))
        log_string('Training accuracy: %f' % (total_correct / float(total_seen)))

        if epoch % 10 == 0 and epoch < 800:
            logger.info('Save model...')
            savepath = str(checkpoints_dir) + '/best_model.pth'
            log_string('Saving at %s' % savepath)
            state = {
                'epoch': epoch,
                'model_state_dict': classifier.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            }
            torch.save(state, savepath)
            log_string('Saving model....')

        '''Evaluate on chopped scenes'''
        with torch.no_grad():
            num_batches = len(testDataLoader)
            total_correct = 0
            total_seen = 0
            loss_sum = 0
            log_string('---- EPOCH %03d EVALUATION ----' % (global_epoch + 1))
            for i, data in tqdm(enumerate(testDataLoader), total=len(testDataLoader), smoothing=0.9):
                points, target, _ = data
                points = points.data.numpy()
                points[:, :, :3] = provider.normalize_data(points[:, :, :3])
                points = torch.Tensor(points)
                points, target = points.float().cuda(), target.long().cuda()
                points = points.transpose(2, 1)
                classifier = classifier.eval()
                seg_pred, trans_feat = classifier(points)
                seg_pred = seg_pred.contiguous().view(-1, NUM_CLASSES)
                target = target.view(-1, 1)[:, 0]
                loss = criterion(seg_pred, target, trans_feat, weights)
                loss_sum += loss.item()
                batch_label = target.cpu().data.numpy()
                pred_choice = seg_pred.cpu().data.max(1)[1].numpy()
                correct = np.sum(pred_choice == batch_label)
                total_correct += correct
                total_seen += (BATCH_SIZE * NUM_POINT)
            log_string('Eval mean loss: %f' % (loss_sum / num_batches))
            log_string('Eval accuracy: %f' % (total_correct / float(total_seen)))

        '''Evaluate on whole scenes'''
        if epoch % 5 == 0 and epoch > 800:
            with torch.no_grad():
                num_batches = len(TEST_DATASET_WHOLE_SCENE)
                log_string('---- EPOCH %03d EVALUATION WHOLE SCENE----' % (global_epoch + 1))
                total_correct = 0
                total_seen = 0
                loss_sum = 0
                total_seen_class = [0 for _ in range(NUM_CLASSES)]
                total_correct_class = [0 for _ in range(NUM_CLASSES)]
                total_iou_deno_class = [0 for _ in range(NUM_CLASSES)]

                labelweights = np.zeros(NUM_CLASSES)
                is_continue_batch = False

                extra_batch_data = np.zeros((0, NUM_POINT, 3 + FEATURE_CHANNEL))
                extra_batch_label = np.zeros((0, NUM_POINT))
                extra_batch_smpw = np.zeros((0, NUM_POINT))
                for batch_idx in tqdm(range(num_batches),total=num_batches):
                    if not is_continue_batch:
                        batch_data, batch_label, batch_smpw = TEST_DATASET_WHOLE_SCENE[batch_idx]
                        batch_data = np.concatenate((batch_data, extra_batch_data), axis=0)
                        batch_label = np.concatenate((batch_label, extra_batch_label), axis=0)
                        batch_smpw = np.concatenate((batch_smpw, extra_batch_smpw), axis=0)
                    else:
                        batch_data_tmp, batch_label_tmp, batch_smpw_tmp = TEST_DATASET_WHOLE_SCENE[batch_idx]
                        batch_data = np.concatenate((batch_data, batch_data_tmp), axis=0)
                        batch_label = np.concatenate((batch_label, batch_label_tmp), axis=0)
                        batch_smpw = np.concatenate((batch_smpw, batch_smpw_tmp), axis=0)
                    if batch_data.shape[0] < BATCH_SIZE:
                        is_continue_batch = True
                        continue
                    elif batch_data.shape[0] == BATCH_SIZE:
                        is_continue_batch = False
                        extra_batch_data = np.zeros((0, NUM_POINT, 3 + FEATURE_CHANNEL))
                        extra_batch_label = np.zeros((0, NUM_POINT))
                        extra_batch_smpw = np.zeros((0, NUM_POINT))
                    else:
                        is_continue_batch = False
                        extra_batch_data = batch_data[BATCH_SIZE:, :, :]
                        extra_batch_label = batch_label[BATCH_SIZE:, :]
                        extra_batch_smpw = batch_smpw[BATCH_SIZE:, :]
                        batch_data = batch_data[:BATCH_SIZE, :, :]
                        batch_label = batch_label[:BATCH_SIZE, :]
                        batch_smpw = batch_smpw[:BATCH_SIZE, :]

                    batch_data[:, :, :3] = provider.normalize_data(batch_data[:, :, :3])
                    batch_label = torch.Tensor(batch_label)
                    batch_data = torch.Tensor(batch_data)
                    batch_data, batch_label = batch_data.float().cuda(), batch_label.long().cuda()
                    batch_data = batch_data.transpose(2, 1)
                    classifier = classifier.eval()
                    seg_pred, _ = classifier(batch_data)
                    seg_pred = seg_pred.contiguous()
                    batch_label = batch_label.cpu().data.numpy()
                    pred_val = seg_pred.cpu().data.max(2)[1].numpy()
                    correct = np.sum((pred_val == batch_label) & (batch_smpw > 0))
                    total_correct += correct
                    total_seen += np.sum(batch_smpw > 0)
                    tmp, _ = np.histogram(batch_label, range(NUM_CLASSES + 1))
                    labelweights += tmp
                    for l in range(NUM_CLASSES):
                        total_seen_class[l] += np.sum((batch_label == l) & (batch_smpw > 0))
                        total_correct_class[l] += np.sum((pred_val == l) & (batch_label == l) & (batch_smpw > 0))
                        total_iou_deno_class[l] += np.sum(((pred_val == l) | (batch_label == l)) & (batch_smpw > 0))

                mIoU = np.mean(np.array(total_correct_class) / (np.array(total_iou_deno_class, dtype=np.float64) + 1e-6))
                log_string('eval whole scene mean loss: %f' % (loss_sum / float(num_batches)))
                log_string('eval point avg class IoU: %f' % mIoU)
                log_string('eval whole scene point accuracy: %f' % (total_correct / float(total_seen)))
                log_string('eval whole scene point avg class acc: %f' % (
                    np.mean(np.array(total_correct_class) / (np.array(total_seen_class, dtype=np.float64) + 1e-6))))
                labelweights = labelweights.astype(np.float32) / np.sum(labelweights.astype(np.float32))

                iou_per_class_str = '------- IoU --------\n'
                for l in range(NUM_CLASSES):
                    iou_per_class_str += 'class %s weight: %.3f, IoU: %.3f \n' % (
                        seg_label_to_cat[l] + ' ' * (14 - len(seg_label_to_cat[l])), labelweights[l],
                        total_correct_class[l] / float(total_iou_deno_class[l]))
                log_string(iou_per_class_str)

                if (mIoU >= best_iou):
                    logger.info('Save model...')
                    savepath = str(checkpoints_dir) + '/best_model.pth'
                    log_string('Saving at %s' % savepath)
                    state = {
                        'epoch': epoch,
                        'class_avg_iou': mIoU,
                        'model_state_dict': classifier.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict(),
                    }
                    torch.save(state, savepath)
                    log_string('Saving model....')

        global_epoch += 1
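The hand-rolled schedules above decay the learning rate and the BatchNorm momentum geometrically and clip both from below. A quick check of the values they produce, assuming the common defaults learning_rate=0.001, lr_decay=0.7 and step_size=10 (the actual values come from args):

LEARNING_RATE_CLIP = 1e-5
MOMENTUM_ORIGINAL, MOMENTUM_DECAY, STEP = 0.1, 0.5, 10
for epoch in (0, 10, 20, 50):
    lr = max(0.001 * (0.7 ** (epoch // STEP)), LEARNING_RATE_CLIP)
    momentum = max(MOMENTUM_ORIGINAL * (MOMENTUM_DECAY ** (epoch // STEP)), 0.01)
    print(epoch, lr, momentum)
# epoch 0:  lr=0.001      momentum=0.1
# epoch 10: lr=0.0007     momentum=0.05
# epoch 20: lr=0.00049    momentum=0.025
# epoch 50: lr=0.00016807 momentum=0.01 (0.1 * 0.5**5 = 0.003125, floored at 0.01)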
Example #27
def main(args):
    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    '''CREATE DIR'''
    experiment_dir = Path('./experiment/')
    experiment_dir.mkdir(exist_ok=True)
    file_dir = Path(str(experiment_dir) + '/%s_ModelNet40-' % args.model_name + str(
        datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')))
    file_dir.mkdir(exist_ok=True)
    checkpoints_dir = file_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = file_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)

    '''LOG'''
    args = parse_args()
    logger = logging.getLogger(args.model_name)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler(str(log_dir) + '/train_%s_cls.txt' % args.model_name)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info(
        '---------------------------------------------------TRAINING---------------------------------------------------')
    logger.info('PARAMETER ...')
    logger.info(args)

    '''DATA LOADING'''
    logger.info('Load dataset ...')
    DATA_PATH = './data/modelnet40_normal_resampled/'

    TRAIN_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='train', normal_channel=args.normal)
    TEST_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='test', normal_channel=args.normal)
    trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET, batch_size=args.batchsize, shuffle=True,
                                                  num_workers=args.num_workers)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batchsize, shuffle=False,
                                                 num_workers=args.num_workers)

    logger.info("The number of training data is: %d", len(TRAIN_DATASET))
    logger.info("The number of test data is: %d", len(TEST_DATASET))

    seed = 3
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

    '''MODEL LOADING'''
    num_class = 40
    classifier = PointConvClsSsg(num_class).cuda()
    if args.pretrain is not None:
        print('Use pretrain model...')
        logger.info('Use pretrain model')
        checkpoint = torch.load(args.pretrain)
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
    else:
        print('No existing model, starting training from scratch...')
        start_epoch = 0

    if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
    elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(
            classifier.parameters(),
            lr=args.learning_rate,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=args.decay_rate
        )
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.7)
    global_epoch = 0
    global_step = 0
    best_tst_accuracy = 0.0
    blue = lambda x: '\033[94m' + x + '\033[0m'

    '''TRAINING'''
    logger.info('Start training...')
    for epoch in range(start_epoch, args.epoch):
        print('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch))
        logger.info('Epoch %d (%d/%s):', global_epoch + 1, epoch + 1, args.epoch)
        mean_correct = []

        scheduler.step()
        for batch_id, data in tqdm(enumerate(trainDataLoader, 0), total=len(trainDataLoader), smoothing=0.9):
        # for batch_id, data in enumerate(trainDataLoader, 0):
            points, target = data
            points = points.data.numpy()
            # augment the data: randomly scale and shift the point clouds and randomly drop some points
            jittered_data = provider.random_scale_point_cloud(points[:, :, 0:3], scale_low=2.0 / 3, scale_high=3 / 2.0)
            jittered_data = provider.shift_point_cloud(jittered_data, shift_range=0.2)
            points[:, :, 0:3] = jittered_data
            points = provider.random_point_dropout_v2(points)
            points = provider.shuffle_points(points)  # shuffle_points returns a new array; the result must be kept
            points = torch.Tensor(points)
            target = target[:, 0]

            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()

            classifier = classifier.train()
            # pred = classifier(points[:, :3, :], points[:, 3:, :])
            pred = classifier(points[:, :3, :], None)
            loss = F.nll_loss(pred, target.long())
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.long().data).cpu().sum()
            mean_correct.append(correct.item() / float(points.size()[0]))
            loss.backward()
            optimizer.step()
            global_step += 1

        train_acc = np.mean(mean_correct)
        print('Train Accuracy: %f' % train_acc)
        logger.info('Train Accuracy: %f' % train_acc)

        acc = test(classifier, testDataLoader)

        if (acc >= best_tst_accuracy) and epoch > 5:
            best_tst_accuracy = acc
            logger.info('Save model...')
            save_checkpoint(
                global_epoch + 1,
                train_acc,
                acc,
                classifier,
                optimizer,
                str(checkpoints_dir),
                args.model_name)
            print('Saving model....')

        print('\r Loss: %f' % loss.item())
        logger.info('Loss: %.2f', loss.item())
        print('\r Test %s: %f   ***  %s: %f' % (blue('Accuracy'), acc, blue('Best Accuracy'), best_tst_accuracy))
        logger.info('Test Accuracy: %f  *** Best Test Accuracy: %f', acc, best_tst_accuracy)

        global_epoch += 1
    print('Best Accuracy: %f' % best_tst_accuracy)

    logger.info('End of training...')
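save_checkpoint is a repo-specific helper rather than a torch API. A compatible sketch matching the call above (the filename pattern and state-dict fields are assumptions):

import torch

def save_checkpoint(epoch, train_accuracy, test_accuracy, model, optimizer, path, modelnet='checkpoint'):
    # Persist model + optimizer state under a name encoding accuracy and epoch.
    savepath = path + '/%s-%f-%04d.pth' % (modelnet, test_accuracy, epoch)
    state = {
        'epoch': epoch,
        'train_accuracy': train_accuracy,
        'test_accuracy': test_accuracy,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
    }
    torch.save(state, savepath)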
Example #28
def main(args):
    def log_string(str):
        logger.info(str)
        print(str)

    '''CREATE DIR'''
    timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
    exp_dir = Path('./log/')
    exp_dir.mkdir(exist_ok=True)
    exp_dir = exp_dir.joinpath('reg_seg_heatmap_v3')
    exp_dir.mkdir(exist_ok=True)
    if args.log_dir is None:
        exp_dir = exp_dir.joinpath(timestr)
    else:
        exp_dir = exp_dir.joinpath(args.log_dir)
    exp_dir.mkdir(exist_ok=True)
    checkpoints_dir = exp_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = exp_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)
    '''LOG'''
    args = parse_args()
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(args)
    '''DATA LOADING'''
    log_string('Load dataset ...')
    # Construct the dataset
    train_dataset, train_config = construct_dataset(is_train=True)
    # Random split
    train_set_size = int(len(train_dataset) * 0.8)
    valid_set_size = len(train_dataset) - train_set_size
    train_dataset, valid_dataset = torch.utils.data.random_split(
        train_dataset, [train_set_size, valid_set_size])
    # And the dataloader
    trainDataLoader = DataLoader(dataset=train_dataset,
                                 batch_size=args.batch_size,
                                 shuffle=True,
                                 num_workers=4)
    validDataLoader = DataLoader(dataset=valid_dataset,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 num_workers=4)
    '''MODEL LOADING'''
    out_channel = args.out_channel
    model = importlib.import_module(args.model)
    shutil.copy('./models/%s.py' % args.model, str(exp_dir))
    shutil.copy('models/pointnet2_utils.py', str(exp_dir))
    shutil.copy('./train_pointnet2_reg_seg_heatmap_stepsize.py', str(exp_dir))

    #network = model.get_model(out_channel, normal_channel=args.use_normals)
    network = model.get_model(out_channel)
    criterion_rmse = RMSELoss()
    criterion_cos = torch.nn.CosineSimilarity(dim=1)
    criterion_bce = torch.nn.BCELoss()

    network.apply(inplace_relu)

    if not args.use_cpu:
        network = network.cuda()
        criterion_rmse = criterion_rmse.cuda()
        criterion_cos = criterion_cos.cuda()
    try:
        checkpoint = torch.load(str(exp_dir) + '/checkpoints/best_model.pth')
        start_epoch = checkpoint['epoch']
        network.load_state_dict(checkpoint['model_state_dict'])
        log_string('Use pretrained model')
    except Exception:
        log_string('No existing model, starting training from scratch...')
        start_epoch = 0

    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(network.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    else:
        optimizer = torch.optim.SGD(network.parameters(),
                                    lr=0.01,
                                    momentum=0.9)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=20,
                                                gamma=0.7)
    global_epoch = 0
    global_step = 0
    best_rot_error = 99.9
    best_xyz_error = 99.9
    best_heatmap_error = 99.9
    best_step_size_error = 99.9
    '''TRAINING'''
    logger.info('Start training...')
    for epoch in range(start_epoch, args.epoch):
        log_string('Epoch %d (%d/%s):' %
                   (global_epoch + 1, epoch + 1, args.epoch))
        train_rot_error = []
        train_xyz_error = []
        train_heatmap_error = []
        train_step_size_error = []
        network = network.train()

        scheduler.step()
        for batch_id, data in tqdm(enumerate(trainDataLoader, 0),
                                   total=len(trainDataLoader),
                                   smoothing=0.9):
            optimizer.zero_grad()

            points = data[parameter.pcd_key].numpy()
            points = provider.normalize_data(points)
            points = provider.random_point_dropout(points)
            points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :, 0:3])
            points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])
            points = torch.Tensor(points)
            points = points.transpose(2, 1)
            heatmap_target = data[parameter.heatmap_key]
            segmentation_target = data[parameter.segmentation_key]
            #print('heatmap size', heatmap_target.size())
            #print('segmentation', segmentation_target.size())
            delta_rot = data[parameter.delta_rot_key]
            delta_xyz = data[parameter.delta_xyz_key]
            unit_delta_xyz = data[parameter.unit_delta_xyz_key]
            step_size = data[parameter.step_size_key]

            if not args.use_cpu:
                points = points.cuda()
                delta_rot = delta_rot.cuda()
                delta_xyz = delta_xyz.cuda()
                heatmap_target = heatmap_target.cuda()
                unit_delta_xyz = unit_delta_xyz.cuda()
                step_size = step_size.cuda()

            heatmap_pred, action_pred, step_size_pred = network(points)
            # action control
            delta_rot_pred_6d = action_pred[:, 0:6]
            delta_rot_pred = compute_rotation_matrix_from_ortho6d(
                delta_rot_pred_6d, args.use_cpu)  # batch*3*3
            delta_xyz_pred = action_pred[:, 6:9].view(-1, 3)  # batch*3

            # loss computation
            loss_heatmap = criterion_rmse(heatmap_pred, heatmap_target)
            loss_r = criterion_rmse(delta_rot_pred, delta_rot)
            #loss_t = (1-criterion_cos(delta_xyz_pred, delta_xyz)).mean() + criterion_rmse(delta_xyz_pred, delta_xyz)
            loss_t = (1 - criterion_cos(delta_xyz_pred, unit_delta_xyz)).mean()
            loss_step_size = criterion_bce(step_size_pred, step_size)
            loss = loss_r + loss_t + loss_heatmap + loss_step_size
            loss.backward()
            optimizer.step()
            global_step += 1

            train_rot_error.append(loss_r.item())
            train_xyz_error.append(loss_t.item())
            train_heatmap_error.append(loss_heatmap.item())
            train_step_size_error.append(loss_step_size.item())

        train_rot_error = sum(train_rot_error) / len(train_rot_error)
        train_xyz_error = sum(train_xyz_error) / len(train_xyz_error)
        train_heatmap_error = sum(train_heatmap_error) / len(
            train_heatmap_error)
        train_step_size_error = sum(train_step_size_error) / len(
            train_step_size_error)
        log_string('Train Rotation Error: %f' % train_rot_error)
        log_string('Train Translation Error: %f' % train_xyz_error)
        log_string('Train Heatmap Error: %f' % train_heatmap_error)
        log_string('Train Step size Error: %f' % train_step_size_error)

        with torch.no_grad():
            rot_error, xyz_error, heatmap_error, step_size_error = test(
                network.eval(), validDataLoader, out_channel, criterion_rmse,
                criterion_cos, criterion_bce)

            log_string(
                'Test Rotation Error: %f, Translation Error: %f, Heatmap Error: %f, Step size Error: %f'
                % (rot_error, xyz_error, heatmap_error, step_size_error))
            log_string(
                'Best Rotation Error: %f, Translation Error: %f, Heatmap Error: %f, Step size Error: %f'
                % (best_rot_error, best_xyz_error, best_heatmap_error,
                   best_step_size_error))

            if (rot_error + xyz_error + heatmap_error + step_size_error) < (
                    best_rot_error + best_xyz_error + best_heatmap_error +
                    best_step_size_error):
                best_rot_error = rot_error
                best_xyz_error = xyz_error
                best_heatmap_error = heatmap_error
                best_step_size_error = step_size_error
                best_epoch = epoch + 1
                logger.info('Save model...')
                savepath = str(checkpoints_dir) + '/best_model.pth'
                log_string('Saving at %s' % savepath)
                state = {
                    'epoch': best_epoch,
                    'rot_error': rot_error,
                    'xyz_error': xyz_error,
                    'heatmap_error': heatmap_error,
                    'step_size_error': step_size_error,
                    'model_state_dict': network.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                }
                torch.save(state, savepath)
            global_epoch += 1

    logger.info('End of training...')
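RMSELoss is not a built-in torch loss. A minimal implementation consistent with how it is used above (the eps term is an assumption, added for gradient stability at zero error):

import torch

class RMSELoss(torch.nn.Module):
    # Root-mean-square error: the square root of MSE.
    def __init__(self, eps=1e-8):
        super().__init__()
        self.mse = torch.nn.MSELoss()
        self.eps = eps

    def forward(self, pred, target):
        return torch.sqrt(self.mse(pred, target) + self.eps)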
Example #29
for epoch in range(MAX_EPOCH):

    loss_mean = 0.
    correct = 0.
    total = 0.

    net.train()  # switch to training mode
    for i, data in enumerate(trainloader):

        # read a batch and augment it
        points, labels = data['points'], data['label'].squeeze().long()  # labels must be Long
        points = points.data.numpy()
        points = provider.random_point_dropout(points)  # random dropout
        points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :, 0:3])  # random scaling
        points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])  # random shift
        points = torch.Tensor(points)
        points = points.transpose(2, 1)  # [Batchsize, N, C] -> [Batchsize, C, N]

        # forward
        points = points.to(device)
        labels = labels.to(device)
        if args.pointnet:
            outputs, trans_feat = net(points)
        else:
            outputs = net(points)

        # backward; criterion and optimizer are defined outside this snippet,
        # and the criterion signatures below are assumed
        if args.pointnet:
            loss = criterion(outputs, labels, trans_feat)
        else:
            loss = criterion(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # bookkeeping for the running statistics initialized above
        _, predicted = outputs.max(1)
        total += labels.size(0)
        correct += predicted.eq(labels).sum().item()
        loss_mean += loss.item()
Example #30
def main(args):
    def log_string(str):
        logger.info(str)
        print(str)

    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    '''CREATE DIR'''
    timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
    experiment_dir = Path('./log/')
    experiment_dir.mkdir(exist_ok=True)
    experiment_dir = experiment_dir.joinpath('classification')
    experiment_dir.mkdir(exist_ok=True)
    if args.log_dir is None:
        experiment_dir = experiment_dir.joinpath(timestr)
    else:
        experiment_dir = experiment_dir.joinpath(args.log_dir)
    experiment_dir.mkdir(exist_ok=True)
    checkpoints_dir = experiment_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = experiment_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)

    '''LOG'''
    args = parse_args()
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(args)

    '''DATA LOADING'''
    log_string('Load dataset ...')
    DATA_PATH = 'data/modelnet40_normal_resampled/'

    TRAIN_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='train',
                                                     normal_channel=args.normal)
    TEST_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='test',
                                                    normal_channel=args.normal)
    trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET, batch_size=args.batch_size, shuffle=True, num_workers=4)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False, num_workers=4)

    '''MODEL LOADING'''
    num_class = 40
    MODEL = importlib.import_module(args.model)
    shutil.copy('./models/%s.py' % args.model, str(experiment_dir))
    shutil.copy('./models/pointnet_util_psn.py', str(experiment_dir))

    classifier = MODEL.get_model(num_class,normal_channel=args.normal).cuda()
    criterion = MODEL.get_loss().cuda()

    try:
        checkpoint = torch.load(str(experiment_dir) + '/checkpoints/best_model.pth')
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
        log_string('Use pretrained model')
    except Exception:
        log_string('No existing model, starting training from scratch...')
        start_epoch = 0


    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(
            classifier.parameters(),
            lr=args.learning_rate,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=args.decay_rate
        )
    else:
        optimizer = torch.optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.7)
    global_epoch = 0
    global_step = 0
    best_instance_acc = 0.0
    best_class_acc = 0.0
    mean_correct = []
    best_epoch = 0

    '''TRAINING'''
    logger.info('Start training...')
    for epoch in range(start_epoch, args.epoch):
        log_string('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch))

        mean_correct = []  # reset per epoch so the train accuracy is per-epoch, not cumulative
        scheduler.step()
        for batch_id, data in tqdm(enumerate(trainDataLoader, 0), total=len(trainDataLoader), smoothing=0.9):
            points, target = data
            points = points.data.numpy()
            points = provider.random_point_dropout(points)
            points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :, 0:3])
            points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])
            points = torch.Tensor(points)
            target = target[:, 0]

            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()

            classifier = classifier.train()
            pred, trans_feat = classifier(points, False)
            loss = criterion(pred, target.long(), trans_feat)
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.long().data).cpu().sum()
            mean_correct.append(correct.item() / float(points.size()[0]))
            loss.backward()
            optimizer.step()
            global_step += 1

        train_instance_acc = np.mean(mean_correct)
        log_string('Train Instance Accuracy: %f' % train_instance_acc)


        with torch.no_grad():
            instance_acc, class_acc = test(classifier.eval(), testDataLoader)

            if (instance_acc >= best_instance_acc):
                best_instance_acc = instance_acc
                best_epoch = epoch + 1

            if (class_acc >= best_class_acc):
                best_class_acc = class_acc
            log_string('Test Instance Accuracy: %f, Class Accuracy: %f'% (instance_acc, class_acc))
            log_string('Best Instance Accuracy: %f, Class Accuracy: %f'% (best_instance_acc, best_class_acc))

            if (instance_acc >= best_instance_acc):
                logger.info('Save model...')
                savepath = str(checkpoints_dir) + '/best_model.pth'
                log_string('Saving at %s'% savepath)
                state = {
                    'epoch': best_epoch,
                    'instance_acc': instance_acc,
                    'class_acc': class_acc,
                    'model_state_dict': classifier.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                }
                torch.save(state, savepath)
            global_epoch += 1

    logger.info('End of training...')
Example #31
def eval_one_epoch(sess, ops, test_writer):
    """ ops: dict mapping from string to tf ops """
    global EPOCH_CNT
    is_training = False

    # Fixed-size buffers so every feed_dict has the same batch shape; for the
    # final partial batch only the first `bsize` rows are overwritten.
    cur_batch_data = np.zeros((BATCH_SIZE, NUM_POINT, TEST_DATASET.num_channel()))
    cur_batch_data_ORIGIN = np.zeros((BATCH_SIZE, NUM_POINT, 3))
    cur_batch_label = np.zeros((BATCH_SIZE,), dtype=np.int32)

    total_correct = 0
    total_seen = 0
    loss_sum = 0
    batch_idx = 0
    shape_ious = []
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    pred_set = []
    label_set = []
    reconstruction_set = []
    original_set = []
    while TEST_DATASET.has_next_batch():
        batch_data, batch_label = TEST_DATASET.next_batch(augment=False)
        bsize = batch_data.shape[0]
        # For the last (partial) batch, rows [bsize:] still hold data from the
        # previous batch; the accuracy counts below only use the first bsize rows.
        cur_batch_data[0:bsize,...] = batch_data
        cur_batch_label[0:bsize] = batch_label
        cur_batch_data_ORIGIN[0:bsize,...] = batch_data[:,:,0:3]


        num_votes = 1
        # Average class scores over num_votes passes, each on a differently
        # shuffled/augmented copy of the batch.
        batch_pred_sum = np.zeros((BATCH_SIZE, NUM_CLASSES))  # summed class scores
        for vote_idx in range(num_votes):
            # Shuffle point order to achieve different farthest samplings
            shuffled_indices = np.arange(NUM_POINT)
            np.random.shuffle(shuffled_indices)
            if FLAGS.normal:
                # Alternative augmentations (rotation by the vote angle, random
                # dropout, random scaling) are possible here; this variant only
                # reorders the points.
                rotated_data = cur_batch_data[:, shuffled_indices, :]
            else:
                rotated_data = provider.random_scale_point_cloud(
                    cur_batch_data[:, shuffled_indices, :],
                    scale_low=0.9, scale_high=1.2)
            # Feed the shuffled/augmented copy so each vote sees different input;
            # feeding cur_batch_data here would make every vote identical.
            feed_dict = {ops['pointclouds_pl']: rotated_data,
                         ops['labels_pl']: cur_batch_label,
                         ops['is_training_pl']: is_training,
                         ops['pointclouds_pl_ORIGIN']: cur_batch_data_ORIGIN}
            summary, step, loss_val, pred_val, reconstruction = sess.run([ops['merged'], ops['step'],
                ops['loss'], ops['pred'], ops['reconstruction']], feed_dict=feed_dict)
            batch_pred_sum += pred_val
        pred_val = batch_pred_sum / float(num_votes)
        #sess.run(ops['reset_b_IJ'])
        pred_set.append(copy.deepcopy(pred_val))
        label_set.append(copy.deepcopy(cur_batch_label))
        reconstruction_set.append(copy.deepcopy(reconstruction))
        original_set.append(copy.deepcopy(batch_data))
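        # Final prediction: argmax over the vote-averaged class scores.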
        pred_val = np.argmax(pred_val, 1)
        correct = np.sum(pred_val[0:bsize] == batch_label[0:bsize])
        total_correct += correct
        total_seen += bsize
        loss_sum += loss_val
        batch_idx += 1
        for i in range(0, bsize):
            l = batch_label[i]
            total_seen_class[l] += 1
            total_correct_class[l] += (pred_val[i] == l)

    print(str(datetime.now()))
    print('---- EPOCH %03d EVALUATION ----'%(EPOCH_CNT))
    print('MODEL: %s'%(MODEL_NAME))
    print('eval mean loss: %f' % (loss_sum / float(batch_idx)))
    print('eval accuracy: %f'% (total_correct / float(total_seen)))
    print('eval avg class acc: %f' % (np.mean(np.array(total_correct_class) / np.array(total_seen_class, dtype=np.float64))))

    pred_set = np.concatenate(pred_set, axis=0)
    label_set = np.concatenate(label_set, axis=0)
    reconstruction_set = np.concatenate(reconstruction_set, axis=0)
    original_set = np.concatenate(original_set, axis=0)
    
    np.save(os.path.join(FEATURE_DIR, 'test_features.npy'), pred_set)
    np.save(os.path.join(FEATURE_DIR, 'test_labels.npy'), label_set)
    np.save(os.path.join(FEATURE_DIR, 'reconstruction.npy'), reconstruction_set)
    mAPs, _ = retrival_results(os.path.join(FEATURE_DIR, "test_features.npy"),
                 os.path.join(FEATURE_DIR, "test_labels.npy"),
                 os.path.join(FEATURE_DIR, "test_features.npy"),
                 os.path.join(FEATURE_DIR, "test_labels.npy"),
                 save_dir=FEATURE_DIR)
    print('eval test2test mAP: %.5f'%(mAPs[0]))
    EPOCH_CNT += 1

    # Optional reconstruction visualization, left disabled. Note it relies on
    # scipy.misc.imsave, which was removed in SciPy 1.2 (use imageio.imwrite).
    '''
    for i in range(40):
        file_original = '%d_original.jpg' % (i)
        file_reconstruct = '%d_reconstruct.jpg' % (i)
        file_original = os.path.join(FEATURE_DIR, file_original)
        file_reconstruct = os.path.join(FEATURE_DIR, file_reconstruct)
        reconstruct_img = pc_util.point_cloud_three_views(np.squeeze(reconstruction_set[i*20, :, :]))
        original_img = pc_util.point_cloud_three_views(np.squeeze(original_set[i*20, :, :]))
        scipy.misc.imsave(file_reconstruct, reconstruct_img)
        scipy.misc.imsave(file_original, original_img)
        
        f_xyz_original = open(os.path.join(FEATURE_DIR, '%d_original.ply' % (i)),'w')
        f_xyz_original.write(HEADER)
        f_xyz_reconstruct = open(os.path.join(FEATURE_DIR, '%d_reconstruct.ply' % (i)),'w')
        f_xyz_reconstruct.write(HEADER)
        for j in range(1024):
            xyz = np.squeeze(original_set[i*30, :, :])
            f_xyz_original.write('%f %f %f\n'%(xyz[j][0],xyz[j][1],xyz[j][2]))

            xyz = np.squeeze(reconstruction_set[i*30, :, :])
            f_xyz_reconstruct.write('%f %f %f\n'%(xyz[j][0],xyz[j][1],xyz[j][2]))
        f_xyz_original.close()
        f_xyz_reconstruct.close()
    '''
    TEST_DATASET.reset()
    return total_correct / float(total_seen)
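
Note: `retrival_results` (defined elsewhere in this project) computes retrieval mAP
from the saved feature/label arrays. As a rough illustration of the test-to-test
case reported above, the sketch below ranks the gallery by cosine similarity and
averages per-query average precision; the function name `retrieval_map`, the
similarity metric, and the self-exclusion rule are assumptions, not the project's
actual implementation.

import numpy as np

def retrieval_map(query_feats, query_labels, gallery_feats, gallery_labels,
                  exclude_self=False):
    # L2-normalize so the dot product equals cosine similarity.
    q = query_feats / np.linalg.norm(query_feats, axis=1, keepdims=True)
    g = gallery_feats / np.linalg.norm(gallery_feats, axis=1, keepdims=True)
    sims = q @ g.T  # (num_query, num_gallery)
    aps = []
    for i in range(sims.shape[0]):
        order = np.argsort(-sims[i])   # best match first
        if exclude_self:
            order = order[order != i]  # drop the query itself (test-to-test)
        rel = (gallery_labels[order] == query_labels[i]).astype(np.float64)
        if rel.sum() == 0:
            continue
        prec_at_k = np.cumsum(rel) / (np.arange(len(rel)) + 1)
        aps.append((prec_at_k * rel).sum() / rel.sum())
    return float(np.mean(aps))

# e.g. on the arrays saved by eval_one_epoch:
# feats = np.load(os.path.join(FEATURE_DIR, 'test_features.npy'))
# labels = np.load(os.path.join(FEATURE_DIR, 'test_labels.npy'))
# print('test2test mAP: %.5f' % retrieval_map(feats, labels, feats, labels, True))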