def train_one_epoch(sess, ops, train_writer, adv=True):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)

            if adv:
                adv_data = perturb(jittered_data, current_label[start_idx:end_idx],
                                   sess, ops, EPS, ADV_STEP, EPS / 10)
                feed_dict_adv = {ops['pointclouds_pl']: adv_data,
                                 ops['labels_pl']: current_label[start_idx:end_idx],
                                 ops['is_training_pl']: is_training,
                                 ops['is_outer']: True}
                summary, step, _, loss_val, pred_val, bn_decay = sess.run(
                    [ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                     ops['pred'], ops['bn_decay']], feed_dict=feed_dict_adv)
            else:
                feed_dict = {ops['pointclouds_pl']: jittered_data,
                             ops['labels_pl']: current_label[start_idx:end_idx],
                             ops['is_training_pl']: is_training,
                             ops['is_outer']: True}
                summary, step, _, loss_val, pred_val, bn_decay = sess.run(
                    [ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                     ops['pred'], ops['bn_decay']], feed_dict=feed_dict)
            # log_string('bn_decay: %f' % bn_decay)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    current_data, current_label, current_mask = data_utils.get_current_data_withmask_h5(
        TRAIN_DATA, TRAIN_LABELS, TRAIN_MASKS, NUM_POINT)

    current_label = np.squeeze(current_label)
    current_mask = np.squeeze(current_mask)

    num_batches = current_data.shape[0] // BATCH_SIZE

    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_correct_seg = 0
    classify_loss_sum = 0
    seg_loss_sum = 0

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        # Augment batched point clouds by rotation and jittering
        rotated_data = provider.rotate_point_cloud(
            current_data[start_idx:end_idx, :, :])
        jittered_data = provider.jitter_point_cloud(rotated_data)

        feed_dict = {
            ops['pointclouds_pl']: jittered_data,
            ops['labels_pl']: current_label[start_idx:end_idx],
            ops['masks_pl']: current_mask[start_idx:end_idx],
            ops['is_training_pl']: is_training,
        }
        summary, step, _, loss_val, pred_val, seg_val, classify_loss, seg_loss = sess.run(
            [ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred'],
             ops['seg_pred'], ops['classify_loss'], ops['seg_loss']],
            feed_dict=feed_dict)

        train_writer.add_summary(summary, step)
        pred_val = np.argmax(pred_val, 1)
        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        seg_val = np.argmax(seg_val, 2)
        seg_correct = np.sum(seg_val == current_mask[start_idx:end_idx])

        total_correct_seg += seg_correct
        total_correct += correct
        total_seen += BATCH_SIZE
        loss_sum += loss_val
        classify_loss_sum += classify_loss
        seg_loss_sum += seg_loss

    log_string('mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('classify mean loss: %f' % (classify_loss_sum / float(num_batches)))
    log_string('seg mean loss: %f' % (seg_loss_sum / float(num_batches)))
    log_string('accuracy: %f' % (total_correct / float(total_seen)))
    log_string('seg accuracy: %f' % (total_correct_seg / (float(total_seen) * NUM_POINT)))
def provide_data():
    while True:
        BATCH_SIZE = 32
        current_data, current_label = provider.loadDataFile(
            './data/modelnet40_ply_hdf5_2048/train_all.h5')
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment the batch by rotation and jittering
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)

            # Convert the labels to one-hot vectors (40 ModelNet40 classes)
            one_hot_labe = np.zeros((BATCH_SIZE, 40))
            one_hot_labe[np.arange(BATCH_SIZE), current_label[start_idx:end_idx]] = 1

            yield jittered_data, one_hot_labe
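# Usage sketch (an assumption, not taken from the snippets in this file):
# provide_data() is an infinite generator, so it can be consumed directly by a
# generator-based training loop. `model` and `steps` are hypothetical names.
#
#     gen = provide_data()
#     model.fit_generator(gen, steps_per_epoch=steps, epochs=10)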
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))  # the order of the train files differs every epoch
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):  # for each train file
        log_string('----train file' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:, 0:NUM_POINT, :]  # sample NUM_POINT (1024) points; current_data holds all point clouds in this file
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))  # shuffle the order of the point clouds
        current_label = np.squeeze(current_label)  # remove singleton dimensions

        file_size = current_data.shape[0]  # total number of point clouds
        num_batches = file_size // BATCH_SIZE  # number of batches needed

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            jittered_data = provider.random_scale_point_cloud(jittered_data)
            jittered_data = provider.rotate_perturbation_point_cloud(jittered_data)
            jittered_data = provider.shift_point_cloud(jittered_data)

            feed_dict = {
                ops['pointclouds_pl']: jittered_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }  # feed_dict keys must be placeholders
            summary, step, _, loss_val, pred_val = sess.run(
                [ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred']],
                feed_dict=feed_dict)
            print("--train file:{}, batch_idx:{},step:{}".format(
                str(fn), str(batch_idx), str(step)))
            train_writer.add_summary(summary, step)  # only training saves the summary curves
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label, normal_data = provider.loadDataFile_with_normal(
            TRAIN_FILES[train_file_idxs[fn]])
        normal_data = normal_data[:, 0:NUM_POINT, :]
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, shuffle_idx = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)
        normal_data = normal_data[shuffle_idx, ...]

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            input_data = np.concatenate(
                (jittered_data, normal_data[start_idx:end_idx, :, :]), 2)

            # Random point dropout
            input_data = provider.random_point_dropout(input_data)

            feed_dict = {
                ops['pointclouds_pl']: input_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }
            summary, step, _, loss_val, pred_val = sess.run(
                [ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred']],
                feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    # Randomly shuffle the order of the training files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        # Within one epoch, train mini-batch by mini-batch until the whole training
        # set has been traversed, accumulating the number of correct classifications
        # (total_correct), the number of samples seen (total_seen), and the total
        # loss (loss_sum).
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            # (rotate_point_cloud comes from the provider module)
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)

            feed_dict = {
                ops['pointclouds_pl']: jittered_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }
            # Train: run the graph in a tf session; ops['pred'] is the network
            # output and feed_dict supplies its input data.
            summary, step, _, loss_val, pred_val = sess.run(
                [ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred']],
                feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        # Log the mean loss and mean accuracy.
        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
def _augment_batch_data(self, batch_data):
    rotated_data = provider.rotate_point_cloud(batch_data)
    rotated_data = provider.rotate_perturbation_point_cloud(rotated_data)
    jittered_data = provider.random_scale_point_cloud(rotated_data[:, :, 0:3])
    jittered_data = provider.shift_point_cloud(jittered_data)
    jittered_data = provider.jitter_point_cloud(jittered_data)
    rotated_data[:, :, 0:3] = jittered_data
    return provider.shuffle_points(rotated_data)
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)

            feed_dict = {
                ops['pointclouds_pl']: jittered_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }
            summary, step, _, loss_val, pred_val, centroids = sess.run(
                [ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                 ops['pred'], ops['centroids']], feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

            # Occasionally (p <= 0.001) dump the current centroids for inspection
            if np.random.rand() <= 0.001:
                h5r = h5py.File(
                    (LOG_DIR + '/demo/centroids' + str(step).zfill(8) + '.h5'), 'w')
                h5r.create_dataset('data', data=centroids)
                h5r.close()

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    start_time = time.time()
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string("----" + str(fn) + "-----")
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]]
        )
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label)
        )
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :]
            )
            jittered_data = provider.jitter_point_cloud(rotated_data)

            feed_dict = {
                ops["pointclouds_pl"]: jittered_data,
                ops["labels_pl"]: current_label[start_idx:end_idx],
                ops["is_training_pl"]: is_training,
            }
            summary, step, _, loss_val, pred_val = sess.run(
                [ops["merged"], ops["step"], ops["train_op"], ops["loss"], ops["pred"]],
                feed_dict=feed_dict,
            )
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        log_string("mean loss: %f" % (loss_sum / float(num_batches)))
        log_string("accuracy: %f" % (total_correct / float(total_seen)))

    duration = time.time() - start_time
    log_string("epoch duration (minutes): %.4f" % (duration / 60.0))
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Mixup
            print("Batch: %d" % batch_idx)
            batch_data, batch_label_a, batch_label_b, lam = \
                mixup_data(current_data[start_idx:end_idx, :, :],
                           current_label[start_idx:end_idx], FLAGS.alpha)

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(batch_data)
            jittered_data = provider.jitter_point_cloud(rotated_data)

            feed_dict = {
                ops['pointclouds_pl']: jittered_data,
                ops['labels_a_pl']: batch_label_a,
                ops['labels_b_pl']: batch_label_b,
                ops['is_training_pl']: is_training,
                ops['lam_pl']: lam
            }
            summary, step, _, loss_val, pred_val = sess.run(
                [ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred']],
                feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            # Mixup accuracy: weight correctness against both label sets by lam
            correct_a = np.sum(pred_val == batch_label_a)
            correct_b = np.sum(pred_val == batch_label_b)
            total_correct += (lam * correct_a + (1 - lam) * correct_b)
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
def train_one_epoch(sess, ops, gmm, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]], compensate=False)
        # points_idx = range(0, NUM_POINT)
        points_idx = np.random.choice(range(0, 2048), NUM_POINT)
        current_data = current_data[:, points_idx, :]
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE  # integer division so range() gets an int

        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            augmented_data = current_data[start_idx:end_idx, :, :]
            if augment_scale:
                augmented_data = provider.scale_point_cloud(augmented_data, smin=0.66, smax=1.5)
            if augment_rotation:
                augmented_data = provider.rotate_point_cloud(augmented_data)
            if augment_translation:
                augmented_data = provider.translate_point_cloud(augmented_data, tval=0.2)
            if augment_jitter:
                augmented_data = provider.jitter_point_cloud(
                    augmented_data, sigma=0.01, clip=0.05)  # default sigma=0.01, clip=0.05
            if augment_outlier:
                augmented_data = provider.insert_outliers_to_point_cloud(
                    augmented_data, outlier_ratio=0.02)

            feed_dict = {ops['points_pl']: augmented_data,
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['w_pl']: gmm.weights_,
                         ops['mu_pl']: gmm.means_,
                         ops['sigma_pl']: np.sqrt(gmm.covariances_),
                         ops['is_training_pl']: is_training,
                         }
            summary, step, _, loss_val, reconstructed_points_val = sess.run(
                [ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                 ops['reconstructed_points']], feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            loss_sum += loss_val

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)

            feed_dict = {
                ops['pointclouds_pl']: jittered_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }
            summary, step, _, loss_val, pred_val = sess.run(
                [ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred']],
                feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        meanlosslogstr = str(loss_sum / float(num_batches))
        accuracylogstr = str(total_correct / float(total_seen))
        with open(os.path.join(LOG_DIR, 'trainlog.txt'), 'a') as myfile:
            myfile.write(meanlosslogstr + ',' + accuracylogstr + '\n')
        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            jittered_data_json = {'point_clouds': jittered_data.tolist()}

            # Extract features from the PointNet service
            response_pointnet = requests.post(pointnet_url, json=jittered_data_json)
            pointnet_features = np.array(response_pointnet.json()['features'])

            # Extract features from the DGCNN service
            response_dgcnn = requests.post(dgcnn_url, json=jittered_data_json)
            dgcnn_features = np.array(response_dgcnn.json()['features'])

            # Concatenate both feature sets
            point_features = np.concatenate((pointnet_features, dgcnn_features), axis=-1)

            # Train
            feed_dict = {ops['features_pl']: point_features,
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['is_training_pl']: is_training, }
            summary, step, _, loss_val, pred_val = sess.run(
                [ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred']],
                feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
def eval_one_epoch(sess, ops, test_writer, train_config):
    global BATCH_CNT, EXCLUDE_TIME
    """ ops: dict mapping from string to tf ops """
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]

    for fn in range(len(TEST_FILES)):
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Evaluate with different transformations applied to the data
            for _ in range(5):
                t = time()
                rotated_data = provider.rotate_point_cloud(
                    current_data[start_idx:end_idx, :, :])
                if train_config[1]:
                    mat_data = provider.get_MAT(rotated_data)
                else:
                    mat_data = rotated_data
                jittered_data = mat_data
                EXCLUDE_TIME += time() - t  # exclude augmentation time from timing

                feed_dict = {
                    ops['pointclouds_pl']: jittered_data,
                    ops['labels_pl']: current_label[start_idx:end_idx],
                    ops['is_training_pl']: is_training
                }
                summary, step, loss_val, pred_val = sess.run(
                    [ops['merged'], ops['step'], ops['loss'], ops['pred']],
                    feed_dict=feed_dict)
                pred_val = np.argmax(pred_val, 1)
                correct = np.sum(pred_val == current_label[start_idx:end_idx])
                total_correct += correct
                total_seen += BATCH_SIZE
                loss_sum += (loss_val * BATCH_SIZE)
                for i in range(start_idx, end_idx):
                    l = current_label[i]
                    total_seen_class[l] += 1
                    total_correct_class[l] += (pred_val[i - start_idx] == l)

    log_string(
        'Batch %-5d, eval mean loss: %-10.2f, eval accuracy: %-10.2f, eval avg class acc: %-10.2f'
        % (BATCH_CNT, loss_sum / float(total_seen),
           total_correct / float(total_seen),
           np.mean(np.array(total_correct_class) /
                   np.array(total_seen_class, dtype=float))))
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    # train_file_idxs = np.arange(0, len(TRAIN_FILES))
    # np.random.shuffle(train_file_idxs)
    current_data, current_label = provider.loadDataFile(TRAIN_FILES)
    print(current_data.shape, current_label.shape)
    # current_data = current_data[:, 0:NUM_POINT, :]
    current_data, current_label, _ = provider.shuffle_data(
        current_data, np.squeeze(current_label))
    current_label = np.squeeze(current_label)

    file_size = current_data.shape[0]
    num_batches = file_size // BATCH_SIZE

    total_correct = 0
    total_seen = 0
    loss_sum = 0

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        # Augment batched point clouds by rotation, jittering, scaling, and shifting
        rotated_data = provider.rotate_point_cloud(
            current_data[start_idx:end_idx, :, :])
        jittered_data = provider.jitter_point_cloud(rotated_data)
        jittered_data = provider.random_scale_point_cloud(jittered_data)
        jittered_data = provider.rotate_perturbation_point_cloud(jittered_data)
        jittered_data = provider.shift_point_cloud(jittered_data)
        # np.save('outfile.npy', jittered_data[0, ...])

        feed_dict = {
            ops['pointclouds_pl']: jittered_data,
            ops['labels_pl']: current_label[start_idx:end_idx],
            ops['is_training_pl']: is_training,
        }
        summary, step, _, loss_val, pred_val = sess.run([
            ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred']
        ], feed_dict=feed_dict)
        train_writer.add_summary(summary, step)
        pred_val = np.argmax(pred_val, 1)
        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        total_correct += correct
        total_seen += BATCH_SIZE
        loss_sum += loss_val

    print('-' * 10, 'train', '-' * 10)
    log_string('mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('accuracy: %f' % (total_correct / float(total_seen)))
def train_one_epoch(sess, ops, gmm, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    if ".h5" in TRAIN_FILE:
        current_data, current_label = data_utils.get_current_data_h5(TRAIN_DATA, TRAIN_LABELS, NUM_POINT)
    else:
        current_data, current_label = data_utils.get_current_data(TRAIN_DATA, TRAIN_LABELS, NUM_POINT)

    current_label = np.squeeze(current_label)
    num_batches = current_data.shape[0] // BATCH_SIZE

    total_correct = 0
    total_seen = 0
    loss_sum = 0

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        # Augment batched point clouds by rotation and jittering
        augmented_data = current_data[start_idx:end_idx, :, :]
        if augment_scale:
            augmented_data = provider.scale_point_cloud(augmented_data, smin=0.66, smax=1.5)
        if augment_rotation:
            augmented_data = provider.rotate_point_cloud(augmented_data)
        if augment_translation:
            augmented_data = provider.translate_point_cloud(augmented_data, tval=0.2)
        if augment_jitter:
            augmented_data = provider.jitter_point_cloud(
                augmented_data, sigma=0.01, clip=0.05)  # default sigma=0.01, clip=0.05
        if augment_outlier:
            augmented_data = provider.insert_outliers_to_point_cloud(
                augmented_data, outlier_ratio=0.02)

        feed_dict = {ops['points_pl']: augmented_data,
                     ops['labels_pl']: current_label[start_idx:end_idx],
                     ops['w_pl']: gmm.weights_,
                     ops['mu_pl']: gmm.means_,
                     ops['sigma_pl']: np.sqrt(gmm.covariances_),
                     ops['is_training_pl']: is_training,
                     }
        summary, step, _, loss_val, pred_val = sess.run(
            [ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred']],
            feed_dict=feed_dict)
        train_writer.add_summary(summary, step)
        pred_val = np.argmax(pred_val, 1)
        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        total_correct += correct
        total_seen += BATCH_SIZE
        loss_sum += loss_val

    log_string('mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('accuracy: %f' % (total_correct / float(total_seen)))
def trainDataPreHandle(train_file_idxs):
    current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs])
    current_data = current_data[:, 0:NUM_POINT, :]  # select the first NUM_POINT points
    current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
    current_data = provider.rotate_point_cloud(current_data)
    current_data = provider.jitter_point_cloud(current_data)
    current_data = current_data[:, :, :, np.newaxis]

    current_label = np.squeeze(current_label)  # labels
    current_label = keras.utils.to_categorical(current_label, num_classes=40)  # 40 classes
    print("load data", current_data.shape, current_label.shape)
    return current_data, current_label
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        # Load data and labels from the files.
        current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:, 0:NUM_POINT, :]
        # Shuffle the data in the training set.
        current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotating, jittering, shifting,
            # and scaling.
            rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            jittered_data = provider.random_scale_point_cloud(jittered_data)
            jittered_data = provider.rotate_perturbation_point_cloud(jittered_data)
            jittered_data = provider.shift_point_cloud(jittered_data)

            # Input the augmented point cloud and labels to the graph.
            feed_dict = {ops['pointclouds_pl']: jittered_data,
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['is_training_pl']: is_training, }
            # Calculate the loss and accuracy of the input batch data.
            summary, step, _, loss_val, pred_val = sess.run(
                [ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred']],
                feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
def train_one_epoch(CURR_DATA, CURR_LABELS, sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    # train_file_idxs = np.arange(0, len(TRAIN_FILES))
    # np.random.shuffle(train_file_idxs)
    train_files_idx = np.arange(0, CURR_DATA.shape[0])
    np.random.shuffle(train_files_idx)
    CURR_DATA, CURR_LABELS = CURR_DATA[train_files_idx, :, :], CURR_LABELS[train_files_idx]

    # for fn in range(len(TRAIN_FILES)):
    #     log_string('----' + str(fn) + '-----')
    #     current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
    #     current_data = current_data[:,0:NUM_POINT,:]
    #     current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
    #     current_label = np.squeeze(current_label)

    file_size = CURR_DATA.shape[0]
    num_batches = file_size // BATCH_SIZE

    total_correct = 0
    total_seen = 0
    loss_sum = 0

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        # Augment batched point clouds by rotation and jittering
        rotated_data = provider.rotate_point_cloud(CURR_DATA[start_idx:end_idx, :, :])
        jittered_data = provider.jitter_point_cloud(rotated_data)

        feed_dict = {
            ops['pointclouds_pl']: jittered_data,
            ops['labels_pl']: CURR_LABELS[start_idx:end_idx],
            ops['is_training_pl']: is_training,
        }
        summary, step, _, loss_val, pred_val = sess.run([
            ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred']
        ], feed_dict=feed_dict)
        train_writer.add_summary(summary, step)
        pred_val = np.argmax(pred_val, 1)
        correct = np.sum(pred_val == CURR_LABELS[start_idx:end_idx])
        total_correct += correct
        total_seen += BATCH_SIZE
        loss_sum += loss_val

    log_string('mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('accuracy: %f' % (total_correct / float(total_seen)))
def get_example(self, i):
    if self.augment:
        rotated_data = provider.rotate_point_cloud(self.data[i:i + 1, :, :])
        jittered_data = provider.jitter_point_cloud(rotated_data)
        point_data = jittered_data[0]
    else:
        point_data = self.data[i]
    point_data = np.transpose(point_data.astype(np.float32), (1, 0))[:, :, None]
    return point_data, self.label[i]
def train_one_epoch(config, sess, ops, epoch):
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:, 0:config.num_points, :]
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // config.batch_size

        total_correct = 0
        total_seen = 0
        losses = []

        for batch_idx in range(num_batches):
            start_idx = batch_idx * config.batch_size
            end_idx = (batch_idx + 1) * config.batch_size

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)

            feed_dict = {
                ops['pointclouds_pl']: jittered_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }
            step, _, loss_val, pred_val = sess.run(
                [ops['step'], ops['train_op'], ops['loss'], ops['pred']],
                feed_dict=feed_dict)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += config.batch_size
            losses.append(loss_val)

            # Log roughly every train_log_frq samples (integer division keeps the period an int)
            if batch_idx % max(config.train_log_frq // config.batch_size, 1) == 0:
                acc = total_correct / float(total_seen)
                loss = np.mean(losses)
                losses = []
                log(config.log_file,
                    'TRAINING EPOCH {} - accuracy: {} loss: {}'.format(epoch, acc, loss))
                LOSS_LOGGER.log(loss, epoch, "train_loss")
                ACC_LOGGER.log(acc, epoch, "train_accuracy")
def _augment_batch_data(self, batch_data):
    if self.normal_channel:
        rotated_data = provider.rotate_point_cloud_with_normal(batch_data)
        rotated_data = provider.rotate_perturbation_point_cloud_with_normal(rotated_data)
    else:
        rotated_data = provider.rotate_point_cloud(batch_data)
        rotated_data = provider.rotate_perturbation_point_cloud(rotated_data)

    jittered_data = provider.random_scale_point_cloud(rotated_data[:, :, 0:3])
    jittered_data = provider.shift_point_cloud(jittered_data)
    jittered_data = provider.jitter_point_cloud(jittered_data)
    rotated_data[:, :, 0:3] = jittered_data
    return provider.shuffle_points(rotated_data)
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    loss_sum = 0
    total_seen = 0

    for fn in range(len(TRAIN_FILES)):
    # for fn in range(1):  # use only first file for less data
        # log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]])
        # current_data, current_label = provider.loadDataFile(TRAIN_FILES[0])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0

        # Sort the goal point cloud
        current_data_orig = np.copy(current_data)
        for i in range(len(current_data_orig)):
            current_data_orig[i] = current_data_orig[i][np.lexsort(
                np.fliplr(current_data_orig[i]).T)]

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :])
            # jittered_data = provider.jitter_point_cloud(current_data[start_idx:end_idx, :, :])
            # jittered_data = current_data[start_idx:end_idx, :, :]
            jittered_data = rotated_data

            feed_dict = {
                ops['pointclouds_pl']: jittered_data,
                ops['goal_pcs']: current_data_orig[start_idx:end_idx, :, :],
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }
            summary, step, _, loss_val, pred_val, encoding = sess.run(
                [ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                 ops['pred'], ops['enc']], feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
def train_one_epoch():
    # Shuffle training files to vary the order of training files (hdf5) at each epoch
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----', log_file)
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)
        # sess.run(iterator.initializer, feed_dict={data_pl: current_data, label_pl: current_label})

        total_num_samples = current_data.shape[0]
        num_batches = total_num_samples // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        all_total_loss = 0
        all_classify_loss = 0

        for batch_idx in range(num_batches):
            total_seen += BATCH_SIZE
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            current_batch_data = jittered_data
            current_batch_label = current_label[start_idx:end_idx]
            # display_point(current_batch_data[0], 127 * np.ones_like(current_batch_data[0]))

            _, step, total_loss_train, classify_loss_train, mat_diff_loss_train, summary_train, logits_train = \
                sess.run([optim_op, batch, total_loss_ts, classify_loss_ts,
                          mat_diff_loss_ts, merged_summary, logits_ts],
                         feed_dict={pts_pl: current_batch_data,
                                    labels_pl: current_batch_label,
                                    is_training_pl: True,
                                    keepprob_pl: 0.7})
            train_writer.add_summary(summary_train, step)
            all_total_loss += total_loss_train
            all_classify_loss += classify_loss_train
            total_correct += np.sum(np.argmax(logits_train, 1) == current_batch_label)

        log_string('mean total loss: {:.4f}'.format(all_total_loss / float(num_batches)), log_file)
        log_string('mean classify loss: {:.4f}'.format(all_classify_loss / float(num_batches)), log_file)
        log_string('accuracy: {:.4f}'.format(total_correct / float(total_seen)), log_file)
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(
            os.path.join('../data/modelnet40_ply_hdf5_2048/',
                         TRAIN_FILES[train_file_idxs[fn]]))
        current_data = current_data[:, 0:NUM_POINT, :]
        # shuffle_data also returns the shuffle indices; discard them
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Rotation about the z axis
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :])

            feed_dict = {
                ops['pointclouds_pl']: rotated_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }
            summary, step, _, loss_val, pred_val = sess.run(
                [ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                 ops['prediction_op']], feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            # prediction_op is assumed to yield class indices, so no argmax here
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    log_string(str(datetime.now()))

    # Shuffle data
    # data_utils.shuffle_points(TRAIN_DATA)

    # Get current data, shuffle, and set to numpy array with the desired num_point
    # current_data, current_label = data_utils.get_current_data(TRAIN_DATA, TRAIN_LABELS, NUM_POINT)
    if ".h5" in TRAIN_FILE:
        current_data, current_label = data_utils.get_current_data_h5(
            TRAIN_DATA, TRAIN_LABELS, NUM_POINT)
    else:
        current_data, current_label = data_utils.get_current_data(
            TRAIN_DATA, TRAIN_LABELS, NUM_POINT)

    current_label = np.squeeze(current_label)
    num_batches = current_data.shape[0] // BATCH_SIZE

    total_correct = 0
    total_seen = 0
    loss_sum = 0

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        # Augment batched point clouds by rotation and jittering
        rotated_data = provider.rotate_point_cloud(
            current_data[start_idx:end_idx, :, :])
        jittered_data = provider.jitter_point_cloud(rotated_data)

        feed_dict = {
            ops['pointclouds_pl']: jittered_data,
            ops['labels_pl']: current_label[start_idx:end_idx],
            ops['is_training_pl']: is_training,
        }
        summary, step, _, loss_val, pred_val = sess.run([
            ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred']
        ], feed_dict=feed_dict)
        train_writer.add_summary(summary, step)
        pred_val = np.argmax(pred_val, 1)
        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        total_correct += correct
        total_seen += BATCH_SIZE
        loss_sum += loss_val

    log_string('mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('accuracy: %f' % (total_correct / float(total_seen)))
def provide_data(sess2):
    BATCH_SIZE = FLAGS.batch_size
    current_data, current_label = provider.loadDataFile('./data/h5/traincompleteall.h5')
    current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
    current_label = np.squeeze(current_label.astype(int))

    partial_data, partial_label = provider.loadDataFile('./data/h5/trainall.h5')
    partial_data, partial_label, _ = provider.shuffle_data(partial_data, np.squeeze(partial_label))
    partial_label = np.squeeze(partial_label.astype(int))

    file_size = current_data.shape[0]
    num_batches = file_size // BATCH_SIZE

    pointclouds_pl = sess2.graph.get_tensor_by_name('Placeholder:0')
    labels_pl = sess2.graph.get_tensor_by_name('Placeholder_1:0')
    is_train_pl = sess2.graph.get_tensor_by_name('Placeholder_2:0')

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        # Augment the batch by rotation and jittering
        rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
        jittered_data = provider.jitter_point_cloud(rotated_data)

        # Convert both label sets to one-hot vectors (41 classes)
        one_hot_labe1 = np.zeros((BATCH_SIZE, 41))
        one_hot_labe1[np.arange(BATCH_SIZE), current_label[start_idx:end_idx]] = 1
        one_hot_labe2 = np.zeros((BATCH_SIZE, 41))
        one_hot_labe2[np.arange(BATCH_SIZE), partial_label[start_idx:end_idx]] = 1

        # Extract global features for the partial clouds from the pretrained graph
        is_train = False
        feed_dict = {pointclouds_pl: provider.rotate_point_cloud(partial_data[start_idx:end_idx, :, :]),
                     labels_pl: partial_label[start_idx:end_idx],
                     is_train_pl: is_train}
        G_features = sess2.run(sess2.graph.get_tensor_by_name('maxpool/maxpool:0'),
                               feed_dict=feed_dict)

        yield (jittered_data, one_hot_labe1, current_label[start_idx:end_idx],
               np.squeeze(G_features), one_hot_labe2, partial_label[start_idx:end_idx])
def get_batch(self, start_idx, end_idx):
    if self.is_train:
        # Augment batched point clouds by rotation and jittering
        rotated_data = provider.rotate_point_cloud(self.current_data[start_idx:end_idx, :, :])
        jittered_data = provider.jitter_point_cloud(rotated_data)
        label = self.current_label[start_idx:end_idx]
        return mx.io.DataBatch(data=[nd.array(jittered_data)],
                               label=[nd.array(label)],
                               provide_data=self.provide_data,
                               provide_label=self.provide_label)
    else:
        data = self.current_data[start_idx:end_idx, :, :]
        label = self.current_label[start_idx:end_idx]
        return mx.io.DataBatch(data=[nd.array(data)],
                               label=[nd.array(label)],
                               provide_data=self.provide_data,
                               provide_label=self.provide_label)
def augment_batch_data(batch_data):
    if FLAGS.normal:
        rotated_data = provider.rotate_point_cloud_with_normal(batch_data)
        rotated_data = provider.rotate_perturbation_point_cloud_with_normal(rotated_data)
    else:
        rotated_data = provider.rotate_point_cloud(batch_data)
        rotated_data = provider.rotate_perturbation_point_cloud(rotated_data)

    jittered_data = provider.random_scale_point_cloud(rotated_data[:, :, 0:3])
    jittered_data = provider.shift_point_cloud(jittered_data)
    jittered_data = provider.jitter_point_cloud(jittered_data)
    rotated_data[:, :, 0:3] = jittered_data
    return rotated_data
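# Usage sketch (an assumption, not from the snippets themselves): a helper
# like augment_batch_data replaces the inline rotate/jitter calls seen in the
# training loops above. The slicing mirrors the batch loops in this file.
#
#     batch_data = current_data[start_idx:end_idx, :, :]
#     batch_data = augment_batch_data(batch_data)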
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)

            # Crop out part of each cloud according to the chosen cropout type
            if CROPOUT_TYPE == 'bounding_sphere':
                cropped_data = provider.cropout_point_cloud(
                    jittered_data, FLAGS.max_trans_dist,
                    random_trans_dist=True, close=FLAGS.close)
            elif CROPOUT_TYPE == 'bubble':
                cropped_data = provider.bubble_cropout(
                    jittered_data, FLAGS.max_trans_dist,
                    random_bubble_radius=False, close=FLAGS.close)
            else:
                print("cropout type does not exist")
                return

            feed_dict = {ops['pointclouds_pl']: cropped_data,
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['is_training_pl']: is_training, }
            summary, step, _, loss_val, pred_val = sess.run(
                [ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred']],
                feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)

            feed_dict = {ops['pointclouds_pl']: jittered_data,
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['is_training_pl']: is_training, }
            summary, step, _, loss_val, pred_val = sess.run(
                [ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred']],
                feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
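# Usage sketch (an assumption, mirroring the PointNet-style train.py scripts
# these loops derive from): an outer driver builds the graph once, bundles the
# tensors into `ops`, then calls train_one_epoch once per epoch. MAX_EPOCH,
# sess, ops, train_writer, test_writer, and eval_one_epoch are assumed to be
# defined by that driver.
#
#     for epoch in range(MAX_EPOCH):
#         log_string('**** EPOCH %03d ****' % epoch)
#         sys.stdout.flush()
#         train_one_epoch(sess, ops, train_writer)
#         eval_one_epoch(sess, ops, test_writer)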