Example #1
def object_aug(object_data, object_labels, object_seg):
    object_data1 = provider.jitter_point_cloud(object_data,
                                               sigma=0.001,
                                               clip=0.005)  # first jittered copy of the data
    object_data2 = provider.jitter_point_cloud(object_data,
                                               sigma=0.001,
                                               clip=0.005)  # second jittered copy of the data
    object_data = np.vstack((object_data1, object_data2))
    object_labels = np.vstack((object_labels, object_labels))
    object_seg = np.vstack((object_seg, object_seg))

    # merge in all of the original training data

    for i in range(num_train_file):
        cur_train_filename = os.path.join(hdf5_data_dir, train_file_list[i])
        cur_data, cur_labels, cur_seg = provider.loadDataFile_with_seg(
            cur_train_filename)
        object_data = np.vstack((object_data, cur_data))
        object_labels = np.vstack((object_labels, cur_labels))
        object_seg = np.vstack((object_seg, cur_seg))
    object_data, object_labels, object_seg = provider.shuffle_data_with_seg(
        object_data, object_labels, object_seg)
    # split the data into several files
    n_object = object_data.shape[0]
    num_every_file = n_object // 8
    for i in range(8):
        f = h5py.File(hdf5_data_dir + '/object_aug' + str(i) + '.h5', 'w')
        f['data'] = object_data[i * (num_every_file):(i + 1) *
                                num_every_file, :, :]
        f['label'] = object_labels[i * (num_every_file):(i + 1) *
                                   num_every_file]
        f['pid'] = object_seg[i * (num_every_file):(i + 1) * num_every_file, :]
        f.close()
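
Every example on this page calls the same helper. For reference, a minimal sketch of jitter_point_cloud as it typically appears in PointNet-style provider modules (per-point Gaussian noise, clipped to a maximum magnitude); an illustration of the technique, not the exact code of any one project:

import numpy as np

def jitter_point_cloud(batch_data, sigma=0.01, clip=0.05):
    """ Jitter a BxNxC batch of point clouds with clipped Gaussian noise. """
    B, N, C = batch_data.shape
    assert clip > 0
    noise = np.clip(sigma * np.random.randn(B, N, C), -clip, clip)
    return batch_data + noise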
Example #2
    def train_one_epoch(self, sess, ops, train_writer):
        """ ops: dict mapping from string to tf ops """
        is_training = True

        # Shuffle train samples
        train_idxs = np.arange(0, len(self.train_data))
        # after np.random.shuffle below, train_idxs is a random permutation,
        # e.g. [1263, 396, 1309, ..., 41, 398, 495]
        np.random.shuffle(train_idxs)
        num_batches = int(math.ceil((1.0 * self.train_sz) / self.batch_sz))

        self.log_string(str(datetime.now()))

        total_correct = 0
        total_seen = 0
        loss_sum = 0
        for batch_idx in range(num_batches):
            start_idx = batch_idx * self.batch_sz
            end_idx = (batch_idx + 1) * self.batch_sz
            batch_data, batch_label, batch_smpw = self.get_batch_wdp(
                self.train_data, train_idxs, start_idx, end_idx)
            # Augment batched point clouds by rotation and jitter
            if FLAGS.extra_dims:
                aug_data = np.concatenate((provider.rotate_point_cloud_z(
                    batch_data[:, :, 0:3]), batch_data[:, :, 3:]),
                                          axis=2)
                aug_data = np.concatenate((provider.jitter_point_cloud(
                    aug_data[:, :, 0:3]), aug_data[:, :, 3:]),
                                          axis=2)
            else:
                aug_data = provider.rotate_point_cloud_z(batch_data)
                aug_data = provider.jitter_point_cloud(aug_data)

            feed_dict = {
                ops['pointclouds_pl']: aug_data,
                ops['labels_pl']: batch_label,
                ops['smpws_pl']: batch_smpw,
                ops['is_training_pl']: is_training,
            }
            summary, step, _, loss_val, pred_val = sess.run(
                [
                    ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                    ops['pred']
                ],
                feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            # pred_val = np.argmax(pred_val, 2)
            correct = np.sum(pred_val == batch_label)
            total_correct += correct
            total_seen += (self.batch_sz * self.point_sz)
            loss_sum += loss_val
            if (batch_idx + 1) % 5 == 0:
                self.log_string(' -- %03d / %03d --' %
                                (batch_idx + 1, num_batches))
                self.log_string('mean loss: %f' % (loss_sum / 5))
                self.log_string('accuracy: %f' %
                                (total_correct / float(total_seen)))
                total_correct = 0
                total_seen = 0
                loss_sum = 0
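
Example #2 rotates only around the vertical (z) axis before jittering, which preserves gravity-aligned scenes. A sketch of rotate_point_cloud_z under the usual PointNet++-style provider semantics (one random yaw per cloud; assumed, not taken from this project):

import numpy as np

def rotate_point_cloud_z(batch_data):
    """ Rotate each BxNx3 cloud by a random angle about the z axis. """
    rotated = np.zeros(batch_data.shape, dtype=np.float32)
    for k in range(batch_data.shape[0]):
        angle = np.random.uniform() * 2 * np.pi
        c, s = np.cos(angle), np.sin(angle)
        rotation = np.array([[c, s, 0], [-s, c, 0], [0, 0, 1]])
        rotated[k] = np.dot(batch_data[k], rotation)  # row vectors x R
    return rotated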
Example #3
def train_one_epoch(sess, ops, train_writer, epoch):
    """ ops: dict mapping from string to tf ops """
    is_training = True
    batch_size = cfg.training.batch_size

    train_idxs = copy.deepcopy(TRAIN_INDICES)
    np.random.shuffle(train_idxs)
    num_batches = len(train_idxs) // batch_size

    loss_sum = 0

    pbar = tqdm(range(num_batches),
                desc='train',
                postfix=dict(last_loss_str=''))
    for batch_idx in pbar:
        #  logger.info('----- batch ' + str(batch_idx) + ' -----')
        start_idx = batch_idx * batch_size
        end_idx = (batch_idx + 1) * batch_size

        pcs1, pcs2, translations, rel_angles, pc1centers, pc2centers, pc1angles, pc2angles = provider.load_batch(
            train_idxs[start_idx:end_idx])

        # Augment batched point clouds by jittering
        pcs1 = provider.jitter_point_cloud(pcs1)
        pcs2 = provider.jitter_point_cloud(pcs2)
        feed_dict = {
            ops['pcs1']: pcs1,
            ops['pcs2']: pcs2,
            ops['translations']: translations,
            ops['rel_angles']: rel_angles,
            ops['is_training_pl']: is_training,
            ops['pc1centers']: pc1centers,
            ops['pc2centers']: pc2centers,
            ops['pc1angles']: pc1angles,
            ops['pc2angles']: pc2angles,
        }
        summary, step, _, loss_val, pred_translations, pred_remaining_angle_logits = sess.run(
            [
                ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                ops['pred_translations'], ops['pred_remaining_angle_logits']
            ],
            feed_dict=feed_dict)
        #  step_in_epochs = float(epoch) + float(end_idx / len(train_idxs))
        train_writer.add_summary(summary, step)

        #  pred_val = np.argmax(pred_val, 1)
        #  correct = np.sum(pred_val == current_label[start_idx:end_idx])
        #  total_correct += correct
        #  total_seen += cfg.training.batch_size
        loss_sum += loss_val
        pbar.set_postfix(last_loss_str=f'{loss_val:.5f}')
        #  if batch_idx == 0:
        #  logger.info(np.concatenate([pred_val, transforms], axis=1)[:5,:])

    logger.info('train mean loss: %f' % (loss_sum / float(num_batches)))
    #  logger.info('accuracy: %f' % (total_correct / float(total_seen)))
    train_writer.flush()
Example #4
 def _augment_batch_data(self, batch_data):
     jittered_data = provider.random_scale_point_cloud(batch_data[:, :,
                                                                  0:3])
     jittered_data = provider.shift_point_cloud(jittered_data)
     jittered_data = provider.jitter_point_cloud(jittered_data)
     batch_data[:, :, 0:3] = jittered_data
     return provider.shuffle_points(batch_data)
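
Example #4 chains scaling and shifting before the jitter. Minimal sketches of the two helpers, assuming the common PointNet++ provider semantics (one random scale factor and one random offset per cloud):

import numpy as np

def random_scale_point_cloud(batch_data, scale_low=0.8, scale_high=1.25):
    """ Scale each BxNx3 cloud by its own random factor. """
    scales = np.random.uniform(scale_low, scale_high, batch_data.shape[0])
    for k in range(batch_data.shape[0]):
        batch_data[k] *= scales[k]
    return batch_data

def shift_point_cloud(batch_data, shift_range=0.1):
    """ Translate each BxNx3 cloud by its own random offset. """
    shifts = np.random.uniform(-shift_range, shift_range, (batch_data.shape[0], 3))
    for k in range(batch_data.shape[0]):
        batch_data[k] += shifts[k]
    return batch_data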
Example #5
def provide_data():
    while True:
        BATCH_SIZE = 32
        current_data, current_label = provider.loadDataFile(
            './data/modelnet40_ply_hdf5_2048/train_all.h5')
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # manipulate the data
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            # build the one-hot label
            one_hot_labe = np.zeros((BATCH_SIZE, 40))
            one_hot_labe[np.arange(BATCH_SIZE),
                         current_label[start_idx:end_idx]] = 1

            #out['data'] = jittered_data
            #out['labe'] = one_hot_labe
            yield jittered_data, one_hot_labe
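
Because Example #5 wraps the whole dataset in an endless while True, the generator yields batches forever and the caller decides when to stop. A hypothetical consumer:

gen = provide_data()
for _ in range(10):                    # e.g. ten training steps
    points, one_hot = next(gen)        # (32, N, 3) data, (32, 40) one-hot labels
    # ... feed points / one_hot to the model ...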
Example #6
    def get_batch(self, data_aug=False):
        data, sem_label, ins_label = self.data_queue.get()

        if data_aug and self.split == 'train':
            data[:, :, 0:3] = provider.jitter_point_cloud(data[:, :, 0:3])

        return data, sem_label, ins_label
Example #7
def eval_one_epoch(sess, ops, test_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    #np.random.shuffle(train_file_idxs)

    for fn in range(1):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[3]])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss = 0

        for batch_idx in range(1):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE
            orig_pcs = current_data[start_idx:end_idx, :, :]
            orig_pcs = np.asarray([pc3nr1, pc3nr2])
            # Augment batched point clouds by rotation and jittering
            #rotated_data = provider.rotate_point_cloud(orig_pcs)
            jittered_data = provider.jitter_point_cloud(orig_pcs)
            used_pcs = jittered_data

            feed_dict = {
                ops['pointclouds_pl']: orig_pcs,
                ops['goal_pcs']: orig_pcs,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }
            summary, step, _, loss_val, pred_val = sess.run(
                [
                    ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                    ops['pred']
                ],
                feed_dict=feed_dict)
            test_writer.add_summary(summary, step)
            #pred_val = tf.convert_to_tensor(pred_val)
            #print(pred_val.get_shape().as_list())

            #used_pcs_tensor = tf.convert_to_tensor(used_pcs)
            #print(used_pcs_tensor.get_shape().as_list())
            #loss = tf.reduce_sum(tf.square(tf.subtract(pred_val, used_pcs_tensor)))
            #loss = tf.reduce_sum(tf.square(tf.subtract(pred_val, used_pcs_tensor)))
            plotPC(used_pcs[0])
            plotPC(pred_val[0])
            plotPC(used_pcs[1])
            plotPC(pred_val[1])
            loss = np.sum(np.square(np.subtract(pred_val, orig_pcs)))
            #print(loss.get_shape().as_list())
            #tf.Print(loss, [loss])
    log_string('mean loss: %f' % loss)
Example #8
def train_one_epoch(sess, ops, train_writer, adv=True):
    """ ops: dict mapping from string to tf ops """
    is_training = True
    
    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)
    
    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:,0:NUM_POINT,:]  # [:,0:NUM_POINT,:]
        
        current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))            
        current_label = np.squeeze(current_label)
        
        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        
        total_correct = 0
        total_seen = 0
        loss_sum = 0
       
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE
            
            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)

            if adv:
                adv_data = perturb(jittered_data, current_label[start_idx:end_idx],
                                   sess, ops, EPS, ADV_STEP, EPS / 10)
                feed_dict_adv = {ops['pointclouds_pl']: adv_data,
                             ops['labels_pl']: current_label[start_idx:end_idx],
                             ops['is_training_pl']: is_training, ops['is_outer']: True}

                summary, step, _, loss_val, pred_val, bn_decay = sess.run([ops['merged'], ops['step'],
                    ops['train_op'], ops['loss'], ops['pred'], ops['bn_decay']], feed_dict=feed_dict_adv)
            else:
                feed_dict = {ops['pointclouds_pl']: jittered_data,
                             ops['labels_pl']: current_label[start_idx:end_idx],
                             ops['is_training_pl']: is_training,ops['is_outer']: True}

                summary, step, _, loss_val, pred_val, bn_decay = sess.run([ops['merged'], ops['step'],
                        ops['train_op'], ops['loss'], ops['pred'], ops['bn_decay']], feed_dict=feed_dict)


            # log_string('bn_decay: %f' % bn_decay)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        
        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
Example #9
    def _augment_pc(self, pc):
        """
        Augments point cloud with jitter and dropout according to config

        Arguments:
            pc {np.ndarray} -- Nx3 point cloud

        Returns:
            np.ndarray -- augmented point cloud
        """

        # not used because no artificial occlusion
        if 'occlusion_nclusters' in self._pc_augm_config and self._pc_augm_config[
                'occlusion_nclusters'] > 0:
            pc = self.apply_dropout(
                pc, self._pc_augm_config['occlusion_nclusters'],
                self._pc_augm_config['occlusion_dropout_rate'])

        if 'sigma' in self._pc_augm_config and self._pc_augm_config[
                'sigma'] > 0:
            pc = provider.jitter_point_cloud(
                pc[np.newaxis, :, :],
                sigma=self._pc_augm_config['sigma'],
                clip=self._pc_augm_config['clip'])[0]

        return pc[:, :3]
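
A hypothetical _pc_augm_config that exercises the branches above (the key names come from the code; the values are illustrative only):

pc_augm_config = {
    'occlusion_nclusters': 0,      # 0 disables the occlusion/dropout branch
    'occlusion_dropout_rate': 0.0,
    'sigma': 0.001,                # per-point jitter stddev
    'clip': 0.005,                 # jitter clipped to +/- clip
}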
Example #10
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    current_data, current_label, current_mask = data_utils.get_current_data_withmask_h5(
        TRAIN_DATA, TRAIN_LABELS, TRAIN_MASKS, NUM_POINT)

    current_label = np.squeeze(current_label)
    current_mask = np.squeeze(current_mask)

    num_batches = current_data.shape[0] // BATCH_SIZE

    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_correct_seg = 0
    classify_loss_sum = 0
    seg_loss_sum = 0
    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        # Augment batched point clouds by rotation and jittering
        rotated_data = provider.rotate_point_cloud(
            current_data[start_idx:end_idx, :, :])
        jittered_data = provider.jitter_point_cloud(rotated_data)
        feed_dict = {
            ops['pointclouds_pl']: jittered_data,
            ops['labels_pl']: current_label[start_idx:end_idx],
            ops['masks_pl']: current_mask[start_idx:end_idx],
            ops['is_training_pl']: is_training,
        }
        summary, step, _, loss_val, pred_val, seg_val, classify_loss, seg_loss = sess.run(
            [
                ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                ops['pred'], ops['seg_pred'], ops['classify_loss'],
                ops['seg_loss']
            ],
            feed_dict=feed_dict)
        train_writer.add_summary(summary, step)
        pred_val = np.argmax(pred_val, 1)
        correct = np.sum(pred_val == current_label[start_idx:end_idx])

        seg_val = np.argmax(seg_val, 2)
        seg_correct = np.sum(seg_val == current_mask[start_idx:end_idx])
        total_correct_seg += seg_correct

        total_correct += correct
        total_seen += BATCH_SIZE
        loss_sum += loss_val
        classify_loss_sum += classify_loss
        seg_loss_sum += seg_loss

    log_string('mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('classify mean loss: %f' %
               (classify_loss_sum / float(num_batches)))
    log_string('seg mean loss: %f' % (seg_loss_sum / float(num_batches)))
    log_string('accuracy: %f' % (total_correct / float(total_seen)))
    log_string('seg accuracy: %f' % (total_correct_seg /
                                     (float(total_seen) * NUM_POINT)))
Example #11
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))  # the file order differs on every epoch
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):  # for each train file
        log_string('----train file' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:, 0:NUM_POINT, :]  # keep the first NUM_POINT points; current_data holds all point clouds in this file
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))  # shuffle the order of the point clouds
        current_label = np.squeeze(current_label)  # remove singleton dimensions

        file_size = current_data.shape[0]  # total number of point clouds
        num_batches = file_size // BATCH_SIZE  # number of batches required

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            jittered_data = provider.random_scale_point_cloud(jittered_data)
            jittered_data = provider.rotate_perturbation_point_cloud(
                jittered_data)
            jittered_data = provider.shift_point_cloud(jittered_data)

            feed_dict = {
                ops['pointclouds_pl']: jittered_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }  # every feed_dict key must be a placeholder
            summary, step, _, loss_val, pred_val = sess.run(
                [
                    ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                    ops['pred']
                ],
                feed_dict=feed_dict)
            print("--train file:{}, batch_idx:{},step:{}".format(
                str(fn), str(batch_idx), str(step)))
            train_writer.add_summary(summary, step)  # only training writes the summary curves
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
Example #12
def train_one_epoch(sess, ops, train_writer, dataset, verbose=True):
  """
  Train model for one epoch
  """
  global EPOCH_CNT
  is_training = True

  # Shuffle train samples
  train_idxs = np.arange(0, len(dataset))
  np.random.shuffle(train_idxs)

  num_batches = len(dataset) // FLAGS['BATCH_SIZE'] # discards samples if dataset not divisible by batch size

  log_string('[' + str(datetime.now()) + ' | EPOCH ' + str(EPOCH_CNT) + '] Starting training.', printout=False)

  loss_sum, batch_print_steps = 0, 10
  for batch_idx in range(num_batches):
    start_idx, end_idx = batch_idx * FLAGS['BATCH_SIZE'], (batch_idx + 1) * FLAGS['BATCH_SIZE']
    batch_data, batch_label = get_batch(dataset, train_idxs, start_idx, end_idx)
    # Perturb point clouds:
    batch_data[:,:,:3] = provider.jitter_point_cloud(batch_data[:,:,:3])
    batch_data[:,:,:3] = provider.rotate_perturbation_point_cloud(batch_data[:,:,:3])
    batch_data[:,:,:3] = provider.shift_point_cloud(batch_data[:,:,:3])
    batch_data[:,:,:3] = provider.random_point_dropout(batch_data[:,:,:3],
                                                       max_dropout_ratio=FLAGS['MAX_POINT_DROPOUT_RATIO'])
    feed_dict = {ops['pointclouds_pl']: batch_data,
                 ops['labels_pl']: batch_label,
                 ops['is_training_pl']: is_training}
    summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'], ops['train_op'],
                                                     ops['loss'], ops['pred']], feed_dict=feed_dict)
    train_writer.add_summary(summary, step)
    loss_sum += loss_val
    if batch_idx % batch_print_steps == 0:
      log_string('[Batch %03d] Mean Loss: %f' % ((batch_idx + 1), (loss_sum / batch_print_steps)), printout=verbose)
      loss_sum = 0
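
Example #12 also applies random point dropout. A sketch of random_point_dropout in the PointNet++-style provider form, where dropped points are overwritten with the first point so the tensor shape is preserved (assumed semantics):

import numpy as np

def random_point_dropout(batch_pc, max_dropout_ratio=0.875):
    """ batch_pc: BxNx3; randomly duplicate the first point over others. """
    for b in range(batch_pc.shape[0]):
        dropout_ratio = np.random.random() * max_dropout_ratio
        drop_idx = np.where(np.random.random(batch_pc.shape[1]) <= dropout_ratio)[0]
        if len(drop_idx) > 0:
            batch_pc[b, drop_idx, :] = batch_pc[b, 0, :]  # keep shape fixed
    return batch_pc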
Example #13
File: train.py  Project: jtpils/SpiderCNN
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label, normal_data = provider.loadDataFile_with_normal(
            TRAIN_FILES[train_file_idxs[fn]])
        normal_data = normal_data[:, 0:NUM_POINT, :]
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, shuffle_idx = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)
        normal_data = normal_data[shuffle_idx, ...]

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            input_data = np.concatenate(
                (jittered_data, normal_data[start_idx:end_idx, :, :]), 2)
            #random point dropout
            input_data = provider.random_point_dropout(input_data)

            feed_dict = {
                ops['pointclouds_pl']: input_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }
            summary, step, _, loss_val, pred_val = sess.run(
                [
                    ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                    ops['pred']
                ],
                feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
Example #14
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    # randomly shuffle the training files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        # Within one epoch, train mini-batch by mini-batch until the training
        # set has been traversed once, accumulating the number of correct
        # classifications (total_correct), the number of samples seen
        # (total_seen), and the total loss (loss_sum).
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            # call provider.rotate_point_cloud
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            feed_dict = {
                ops['pointclouds_pl']: jittered_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }
            summary, step, _, loss_val, pred_val = sess.run(
                [
                    ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                    ops['pred']
                ],
                feed_dict=feed_dict)
            # run the graph in the tf session: ops['pred'] is the network output, feed_dict supplies its inputs
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        # Log the mean loss and mean accuracy.
        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
Example #15
def augment_batch_data(batch_data):
    rotated_data = provider.rotate_point_cloud_with_normal(batch_data)
    jittered_data = provider.random_scale_point_cloud(rotated_data[:, :, 0:3])
    jittered_data = provider.rotate_perturbation_point_cloud(jittered_data)
    jittered_data = provider.shift_point_cloud(jittered_data)
    jittered_data = provider.jitter_point_cloud(jittered_data)
    rotated_data[:, :, 0:3] = jittered_data
    return rotated_data
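
Example #15 carries normals alongside XYZ, so the random rotation has to transform both with the same matrix. A sketch of rotate_point_cloud_with_normal assuming BxNx6 input with normals in channels 3:6 (PointNet++-style; not taken verbatim from this project):

import numpy as np

def rotate_point_cloud_with_normal(batch_xyz_normal):
    """ Rotate XYZ and normals by one random rotation about the y axis. """
    for k in range(batch_xyz_normal.shape[0]):
        angle = np.random.uniform() * 2 * np.pi
        c, s = np.cos(angle), np.sin(angle)
        rotation = np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
        batch_xyz_normal[k, :, 0:3] = np.dot(batch_xyz_normal[k, :, 0:3], rotation)
        batch_xyz_normal[k, :, 3:6] = np.dot(batch_xyz_normal[k, :, 3:6], rotation)
    return batch_xyz_normal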
Example #16
 def _augment_batch_data(self, batch_data):
     rotated_data = provider.rotate_point_cloud(batch_data)
     rotated_data = provider.rotate_perturbation_point_cloud(rotated_data)
     jittered_data = provider.random_scale_point_cloud(rotated_data[:,:,0:3])
     jittered_data = provider.shift_point_cloud(jittered_data)
     jittered_data = provider.jitter_point_cloud(jittered_data)
     rotated_data[:,:,0:3] = jittered_data
     return provider.shuffle_points(rotated_data)
Example #17
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE
            """Mixup"""
            print("Batch: %d", batch_idx)
            batch_data, batch_label_a, batch_label_b,lam = \
                mixup_data(current_data[start_idx:end_idx, :, :], current_label[start_idx:end_idx], FLAGS.alpha)

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(batch_data)
            jittered_data = provider.jitter_point_cloud(rotated_data)
            feed_dict = {
                ops['pointclouds_pl']: jittered_data,
                ops['labels_a_pl']: batch_label_a,
                ops['labels_b_pl']: batch_label_b,
                ops['is_training_pl']: is_training,
                ops['lam_pl']: lam
            }
            summary, step, _, loss_val, pred_val = sess.run(
                [
                    ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                    ops['pred']
                ],
                feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct_a = np.sum(pred_val == batch_label_a)
            correct_b = np.sum(pred_val == batch_label_b)
            total_correct += (lam * correct_a + (1 - lam) * correct_b)
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
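
mixup_data itself is not shown in Example #17. A plausible point-cloud mixup helper matching the call site, i.e. standard mixup with a Beta-distributed mixing weight (hypothetical sketch, not this project's code):

import numpy as np

def mixup_data(batch_data, batch_label, alpha=1.0):
    """ Convexly mix each cloud with a randomly paired cloud in the batch. """
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1.0
    index = np.random.permutation(batch_data.shape[0])
    mixed = lam * batch_data + (1.0 - lam) * batch_data[index]
    return mixed, batch_label, batch_label[index], lam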
Example #18
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            feed_dict = {
                ops['pointclouds_pl']: jittered_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }
            summary, step, _, loss_val, pred_val, centroids = sess.run(
                [
                    ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                    ops['pred'], ops['centroids']
                ],
                feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val
            if np.random.rand() <= 0.001:
                h5r = h5py.File(
                    (LOG_DIR + '/demo/centroids' + str(step).zfill(8) + '.h5'),
                    'w')
                h5r.create_dataset('data', data=centroids)
                h5r.close()

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
Example #19
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    start_time = time.time()

    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string("----" + str(fn) + "-----")
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]]
        )
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label)
        )
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :]
            )
            jittered_data = provider.jitter_point_cloud(rotated_data)
            feed_dict = {
                ops["pointclouds_pl"]: jittered_data,
                ops["labels_pl"]: current_label[start_idx:end_idx],
                ops["is_training_pl"]: is_training,
            }
            summary, step, _, loss_val, pred_val = sess.run(
                [ops["merged"], ops["step"], ops["train_op"], ops["loss"], ops["pred"]],
                feed_dict=feed_dict,
            )
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        log_string("mean loss: %f" % (loss_sum / float(num_batches)))
        log_string("accuracy: %f" % (total_correct / float(total_seen)))

    duration = time.time() - start_time
    log_string("epoch duration (minutes): %.4f" % (duration / 60.0))
Example #20
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            feed_dict = {
                ops['pointclouds_pl']: jittered_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }
            summary, step, _, loss_val, pred_val = sess.run(
                [
                    ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                    ops['pred']
                ],
                feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        meanlosslogstr = str(loss_sum / float(num_batches))
        accuracylogstr = str(total_correct / float(total_seen))
        with open(os.path.join(LOG_DIR, 'trainlog.txt'), 'a') as myfile:
            myfile.write(meanlosslogstr + ',' + accuracylogstr + '\n')

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
Example #21
def augment(i, cur_data, cur_label, cur_meta, points, labels, meta):
    # rotated_data = provider.rotate_point_cloud(np.expand_dims(cur_data[i], axis=0))
    jittered_data = provider.jitter_point_cloud(
        np.expand_dims(cur_data[i], axis=0))
    translated_data = provider.translate_point_cloud(jittered_data)
    points.append(np.squeeze(translated_data))
    labels.append(cur_label[i])
    meta.append(cur_meta[i])
    return points, labels, meta
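
translate_point_cloud is not part of the original PointNet provider; a plausible implementation consistent with how Examples #21, #22, and #26 call it (hypothetical):

import numpy as np

def translate_point_cloud(batch_data, tval=0.2):
    """ Shift each BxNx3 cloud by one offset drawn from [-tval, tval]. """
    offsets = np.random.uniform(-tval, tval, (batch_data.shape[0], 1, 3))
    return batch_data + offsets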
Example #22
def train_one_epoch(sess, ops, gmm, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]], compensate = False)
        # points_idx = range(0,NUM_POINT)
        points_idx = np.random.choice(range(0,2048),NUM_POINT)
        current_data = current_data[:, points_idx, :]
        current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering

            augmented_data = current_data[start_idx:end_idx, :, :]
            if augment_scale:
                augmented_data = provider.scale_point_cloud(augmented_data, smin=0.66, smax=1.5)
            if augment_rotation:
                augmented_data = provider.rotate_point_cloud(augmented_data)
            if augment_translation:
                augmented_data = provider.translate_point_cloud(augmented_data, tval = 0.2)
            if augment_jitter:
                augmented_data = provider.jitter_point_cloud(augmented_data, sigma=0.01,
                                                        clip=0.05)  # default sigma=0.01, clip=0.05
            if augment_outlier:
                augmented_data = provider.insert_outliers_to_point_cloud(augmented_data, outlier_ratio=0.02)

            feed_dict = {ops['points_pl']: augmented_data,
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['w_pl']: gmm.weights_,
                         ops['mu_pl']: gmm.means_,
                         ops['sigma_pl']: np.sqrt(gmm.covariances_),
                         ops['is_training_pl']: is_training, }
            summary, step, _, loss_val, reconstructed_points_val = sess.run([ops['merged'], ops['step'],
                                                             ops['train_op'], ops['loss'], ops['reconstructed_points']],
                                                            feed_dict=feed_dict)
            train_writer.add_summary(summary, step)

            loss_sum += loss_val

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
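
Example #22 feeds a fitted Gaussian mixture (gmm.weights_, gmm.means_, gmm.covariances_) into the graph, as in 3DmFV-style networks. A minimal sketch of producing such a gmm with scikit-learn; the component count and the stand-in points are assumptions:

import numpy as np
from sklearn.mixture import GaussianMixture

points = np.random.rand(10000, 3)  # stand-in for real training points
gmm = GaussianMixture(n_components=8, covariance_type='diag')
gmm.fit(points)
# gmm.weights_, gmm.means_, np.sqrt(gmm.covariances_) feed w_pl / mu_pl / sigma_pl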
Example #23
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True
    
    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)
    
    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:,0:NUM_POINT,:]
        current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))            
        current_label = np.squeeze(current_label)
        
        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        
        total_correct = 0
        total_seen = 0
        loss_sum = 0
       
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE
            
            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            jittered_data_json = {'point_clouds': jittered_data.tolist()}

            # Extract features with PointNet
            response_pointnet = requests.post(pointnet_url, json=jittered_data_json)
            pointnet_features = np.array(response_pointnet.json()['features'])

            # Extract features with DGCNN
            response_dgcnn  = requests.post(dgcnn_url, json=jittered_data_json)
            dgcnn_features = np.array(response_dgcnn.json()['features'])

            # Concatenate
            point_features = np.concatenate((pointnet_features, dgcnn_features), axis=-1)

            # Train
            feed_dict = {ops['features_pl']: point_features,
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['is_training_pl']: is_training,}
            summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
                ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val
        
        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
Example #24
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train samples
    train_idxs = np.arange(0, len(TRAIN_DATASET))
    np.random.shuffle(train_idxs)
    num_batches = int(len(TRAIN_DATASET) / BATCH_SIZE)

    log_string(str(datetime.now()))

    total_correct = 0
    total_seen = 0
    loss_sum = 0
    pp_loss_sum = 0
    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        batch_data, batch_label, batch_cls_label, batch_pp_idx, batch_pp_label = get_batch(
            TRAIN_DATASET, train_idxs, start_idx, end_idx)
        # Augment batched point clouds by rotation and jittering
        #aug_data = batch_data
        #aug_data = provider.random_scale_point_cloud(batch_data)
        batch_data[:, :, 0:3] = provider.jitter_point_cloud(batch_data[:, :,
                                                                       0:3])
        feed_dict = {
            ops['pointclouds_pl']: batch_data,
            ops['labels_pl']: batch_label,
            ops['cls_labels_pl']: batch_cls_label,
            ops['pp_idx_pl']: batch_pp_idx,
            ops['pp_labels_pl']: batch_pp_label,
            ops['is_training_pl']: is_training,
        }
        summary, step, _, loss_val, pp_loss_val, pred_val, _ = sess.run(
            [
                ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                ops['pp_loss'], ops['pred'], ops['pp_pred']
            ],
            feed_dict=feed_dict)
        train_writer.add_summary(summary, step)
        pred_val = np.argmax(pred_val, 2)
        correct = np.sum(pred_val == batch_label)
        total_correct += correct
        total_seen += (BATCH_SIZE * NUM_POINT)
        loss_sum += loss_val
        pp_loss_sum += pp_loss_val

        if (batch_idx + 1) % 10 == 0:
            log_string(' -- %03d / %03d --' % (batch_idx + 1, num_batches))
            log_string('mean total_loss: %f' % (loss_sum / 10))
            log_string('mean pp_loss: %f' % (pp_loss_sum / 10))
            log_string('accuracy: %f' % (total_correct / float(total_seen)))
            total_correct = 0
            total_seen = 0
            loss_sum = 0
            pp_loss_sum = 0
Example #25
File: train.py  Project: ajhamdi/AdvPC
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    #train_file_idxs = np.arange(0, len(TRAIN_FILES))
    #np.random.shuffle(train_file_idxs)

    current_data, current_label = provider.loadDataFile(TRAIN_FILES)

    print(current_data.shape, current_label.shape)
    #current_data = current_data[:,0:NUM_POINT,:]
    current_data, current_label, _ = provider.shuffle_data(
        current_data, np.squeeze(current_label))
    #print('2')
    current_label = np.squeeze(current_label)
    #print('3')
    file_size = current_data.shape[0]
    num_batches = file_size // BATCH_SIZE

    total_correct = 0
    total_seen = 0
    loss_sum = 0
    #import pdb; pdb.set_trace()
    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        # Augment batched point clouds by rotation and jittering
        rotated_data = provider.rotate_point_cloud(
            current_data[start_idx:end_idx, :, :])
        jittered_data = provider.jitter_point_cloud(rotated_data)
        jittered_data = provider.random_scale_point_cloud(jittered_data)
        jittered_data = provider.rotate_perturbation_point_cloud(jittered_data)
        jittered_data = provider.shift_point_cloud(jittered_data)
        #np.save('outfile.npy', jittered_data[0,...])
        feed_dict = {
            ops['pointclouds_pl']: jittered_data,
            ops['labels_pl']: current_label[start_idx:end_idx],
            ops['is_training_pl']: is_training,
        }
        summary, step, _, loss_val, pred_val = sess.run([
            ops['merged'], ops['step'], ops['train_op'], ops['loss'],
            ops['pred']
        ],
                                                        feed_dict=feed_dict)
        train_writer.add_summary(summary, step)
        pred_val = np.argmax(pred_val, 1)
        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        total_correct += correct
        total_seen += BATCH_SIZE
        loss_sum += loss_val
    print('-' * 10, 'train', '-' * 10)
    #import pdb; pdb.set_trace()
    log_string('mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('accuracy: %f' % (total_correct / float(total_seen)))
Example #26
def train_one_epoch(sess, ops, gmm, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    if (".h5" in TRAIN_FILE):
        current_data, current_label = data_utils.get_current_data_h5(TRAIN_DATA, TRAIN_LABELS, NUM_POINT)
    else:
        current_data, current_label = data_utils.get_current_data(TRAIN_DATA, TRAIN_LABELS, NUM_POINT)


    current_label = np.squeeze(current_label)

    num_batches = current_data.shape[0]//BATCH_SIZE

    total_correct = 0
    total_seen = 0
    loss_sum = 0

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        # Augment batched point clouds by rotation and jittering

        augmented_data = current_data[start_idx:end_idx, :, :]
        if augment_scale:
            augmented_data = provider.scale_point_cloud(augmented_data, smin=0.66, smax=1.5)
        if augment_rotation:
            augmented_data = provider.rotate_point_cloud(augmented_data)
        if augment_translation:
            augmented_data = provider.translate_point_cloud(augmented_data, tval = 0.2)
        if augment_jitter:
            augmented_data = provider.jitter_point_cloud(augmented_data, sigma=0.01,
                                                    clip=0.05)  # default sigma=0.01, clip=0.05
        if augment_outlier:
            augmented_data = provider.insert_outliers_to_point_cloud(augmented_data, outlier_ratio=0.02)

        feed_dict = {ops['points_pl']: augmented_data,
                     ops['labels_pl']: current_label[start_idx:end_idx],
                     ops['w_pl']: gmm.weights_,
                     ops['mu_pl']: gmm.means_,
                     ops['sigma_pl']: np.sqrt(gmm.covariances_),
                     ops['is_training_pl']: is_training, }
        summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
                                                         ops['train_op'], ops['loss'], ops['pred']],
                                                        feed_dict=feed_dict)

        train_writer.add_summary(summary, step)
        pred_val = np.argmax(pred_val, 1)
        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        total_correct += correct
        total_seen += BATCH_SIZE
        loss_sum += loss_val

    log_string('mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('accuracy: %f' % (total_correct / float(total_seen)))
Example #27
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True
    
    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)
    
    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        # Load data and labels from the files.
        current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:,0:NUM_POINT,:]
        # Shuffle the data in the training set.
        current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))            
        current_label = np.squeeze(current_label)
        
        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        
        total_correct = 0
        total_seen = 0
        loss_sum = 0
       
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE
            
            # Augment batched point clouds by rotating, jittering, shifting, 
            # and scaling.
            rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            jittered_data = provider.random_scale_point_cloud(jittered_data)
            jittered_data = provider.rotate_perturbation_point_cloud(jittered_data)
            jittered_data = provider.shift_point_cloud(jittered_data)
            
            # Input the augmented point cloud and labels to the graph.
            feed_dict = {ops['pointclouds_pl']: jittered_data,
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['is_training_pl']: is_training,}
            
            # Calculate the loss and accuracy of the input batch data.            
            summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
                ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
            
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val
        
        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
Example #28
def train_one_epoch(CURR_DATA, CURR_LABELS, sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    # train_file_idxs = np.arange(0, len(TRAIN_FILES))
    # np.random.shuffle(train_file_idxs)

    train_files_idx = np.arange(0, CURR_DATA.shape[0])
    np.random.shuffle(train_files_idx)

    CURR_DATA = CURR_DATA[train_files_idx, :, :]
    CURR_LABELS = CURR_LABELS[train_files_idx]
    # for fn in range(len(TRAIN_FILES)):
    #     log_string('----' + str(fn) + '-----')
    #     current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
    #     current_data = current_data[:,0:NUM_POINT,:]
    #     current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
    #     current_label = np.squeeze(current_label)

    file_size = CURR_DATA.shape[0]
    num_batches = file_size // BATCH_SIZE

    total_correct = 0
    total_seen = 0
    loss_sum = 0

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        # Augment batched point clouds by rotation and jittering
        rotated_data = provider.rotate_point_cloud(
            CURR_DATA[start_idx:end_idx, :, :])
        jittered_data = provider.jitter_point_cloud(rotated_data)
        feed_dict = {
            ops['pointclouds_pl']: jittered_data,
            ops['labels_pl']: CURR_LABELS[start_idx:end_idx],
            ops['is_training_pl']: is_training,
        }
        summary, step, _, loss_val, pred_val = sess.run([
            ops['merged'], ops['step'], ops['train_op'], ops['loss'],
            ops['pred']
        ],
                                                        feed_dict=feed_dict)
        train_writer.add_summary(summary, step)
        pred_val = np.argmax(pred_val, 1)
        correct = np.sum(pred_val == CURR_LABELS[start_idx:end_idx])
        total_correct += correct
        total_seen += BATCH_SIZE
        loss_sum += loss_val

    log_string('mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('accuracy: %f' % (total_correct / float(total_seen)))
Example #29
def train_one_epoch(config, sess, ops, epoch):
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:, 0:config.num_points, :]
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)
        file_size = current_data.shape[0]
        num_batches = file_size // config.batch_size

        total_correct = 0
        total_seen = 0
        losses = []

        for batch_idx in range(num_batches):
            start_idx = batch_idx * config.batch_size
            end_idx = (batch_idx + 1) * config.batch_size

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)

            feed_dict = {
                ops['pointclouds_pl']: jittered_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }
            step, _, loss_val, pred_val = sess.run(
                [ops['step'], ops['train_op'], ops['loss'], ops['pred']],
                feed_dict=feed_dict)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += config.batch_size
            losses.append(loss_val)
            if batch_idx % max(config.train_log_frq / config.batch_size,
                               1) == 0:
                acc = total_correct / float(total_seen)
                loss = np.mean(losses)
                losses = []
                log(
                    config.log_file,
                    'TRAINING EPOCH {} - accuracy: {}    loss: {}'.format(
                        epoch, acc, loss))
                LOSS_LOGGER.log(loss, epoch, "train_loss")
                ACC_LOGGER.log(acc, epoch, "train_accuracy")
Example #30
 def get_example(self, i):
     if self.augment:
         rotated_data = provider.rotate_point_cloud(self.data[i:i +
                                                              1, :, :])
         jittered_data = provider.jitter_point_cloud(rotated_data)
         point_data = jittered_data[0]
     else:
         point_data = self.data[i]
     point_data = np.transpose(point_data.astype(np.float32), (1, 0))[:, :,
                                                                      None]
     return point_data, self.label[i]
Example #31
 def _augment_batch_data(self, batch_data):
     if self.normal_channel:
         rotated_data = provider.rotate_point_cloud_with_normal(batch_data)
         rotated_data = provider.rotate_perturbation_point_cloud_with_normal(rotated_data)
     else:
         rotated_data = provider.rotate_point_cloud(batch_data)
         rotated_data = provider.rotate_perturbation_point_cloud(rotated_data)
 
     jittered_data = provider.random_scale_point_cloud(rotated_data[:,:,0:3])
     jittered_data = provider.shift_point_cloud(jittered_data)
     jittered_data = provider.jitter_point_cloud(jittered_data)
     rotated_data[:,:,0:3] = jittered_data
     return provider.shuffle_points(rotated_data)
Example #32
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True
    
    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)
    
    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:,0:NUM_POINT,:]
        current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))            
        current_label = np.squeeze(current_label)
        
        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        
        total_correct = 0
        total_seen = 0
        loss_sum = 0
       
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE
            
            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            feed_dict = {ops['pointclouds_pl']: jittered_data,
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['is_training_pl']: is_training,}
            summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
                ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val
        
        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
Example #33
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True
    
    # Shuffle train samples
    train_idxs = np.arange(0, len(TRAIN_DATASET))
    np.random.shuffle(train_idxs)
    num_batches = len(TRAIN_DATASET) // BATCH_SIZE
    
    log_string(str(datetime.now()))

    total_correct = 0
    total_seen = 0
    loss_sum = 0
    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx+1) * BATCH_SIZE
        batch_data, batch_label, batch_cls_label = get_batch(TRAIN_DATASET, train_idxs, start_idx, end_idx)
        # Augment batched point clouds by rotation and jittering
        #aug_data = batch_data
        #aug_data = provider.random_scale_point_cloud(batch_data)
        batch_data[:,:,0:3] = provider.jitter_point_cloud(batch_data[:,:,0:3])
        feed_dict = {ops['pointclouds_pl']: batch_data,
                     ops['labels_pl']: batch_label,
                     ops['cls_labels_pl']: batch_cls_label,
                     ops['is_training_pl']: is_training,}
        summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
            ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
        train_writer.add_summary(summary, step)
        pred_val = np.argmax(pred_val, 2)
        correct = np.sum(pred_val == batch_label)
        total_correct += correct
        total_seen += (BATCH_SIZE*NUM_POINT)
        loss_sum += loss_val

        if (batch_idx+1)%10 == 0:
            log_string(' -- %03d / %03d --' % (batch_idx+1, num_batches))
            log_string('mean loss: %f' % (loss_sum / 10))
            log_string('accuracy: %f' % (total_correct / float(total_seen)))
            total_correct = 0
            total_seen = 0
            loss_sum = 0