Example #1
def load_data(
    train_files,
    test_files,
    num_points=1024,
    shuffle=False,
    rotate=False,
    rotate_val=False,
):
    data = []
    label = []

    train_file_num = np.arange(len(train_files))

    for file_num in train_file_num:
        current_data, current_label = provider.loadDataFile(
            train_files[file_num])
        current_data = current_data[:, :num_points, :]

        if shuffle:
            current_data, current_label, _ = provider.shuffle_data(
                current_data, np.squeeze(current_label))
            current_label = np.expand_dims(current_label, axis=-1)

        data.append(current_data)
        label.append(current_label)

    data = np.concatenate(data, axis=0)
    label = np.concatenate(label, axis=0)

    test_data = []
    test_label = []

    test_file_num = np.arange(len(test_files))

    for file_num in test_file_num:
        current_data, current_label = provider.loadDataFile(
            test_files[file_num])
        current_data = current_data[:, :num_points, :]

        if shuffle:
            current_data, current_label, _ = provider.shuffle_data(
                current_data, np.squeeze(current_label))
            current_label = np.expand_dims(current_label, axis=-1)

        test_data.append(current_data)
        test_label.append(current_label)

    test_data = np.concatenate(test_data, axis=0)
    test_label = np.concatenate(test_label, axis=0)

    if rotate:
        data = rotate_point_cloud(data)
    if rotate_val:
        test_data = rotate_point_cloud(test_data)

    return (data, label), (test_data, test_label)
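A hypothetical usage sketch for load_data, assuming the ModelNet40 HDF5 file lists that the surrounding examples obtain through provider.getDataFiles (the paths below are placeholders, not part of the original snippet):

import provider  # assumed helper module used throughout these examples

train_files = provider.getDataFiles('data/modelnet40_ply_hdf5_2048/train_files.txt')
test_files = provider.getDataFiles('data/modelnet40_ply_hdf5_2048/test_files.txt')
(train_x, train_y), (test_x, test_y) = load_data(
    train_files, test_files, num_points=1024, shuffle=True, rotate=True)
print(train_x.shape, train_y.shape)
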
def data_aug(method=0):
    ''' Parameters
        method: 0 -- add noise
                1 -- rotate the objects
    '''
    object_data = np.zeros((1, 2048, 3))
    object_labels = np.zeros((1), np.int32)
    for fn in range(len(TRAIN_FILES)):
        print('loading file ' + str(fn))
        current_data, current_label = provider.loadDataFile(TRAIN_FILES[fn])
        for label in range(current_label.shape[0]):
            if current_label[label] == 10 or current_label[
                    label] == 38 or current_label[label] == 32:
                object_data = np.vstack(
                    (object_data,
                     current_data[label, :, :].reshape(1, 2048, 3)))
                object_labels = np.vstack(
                    (object_labels, current_label[label]))

    object_data = np.delete(object_data, 0, axis=0)
    object_labels = np.delete(object_labels, 0, axis=0)
    # data with added noise
    if method == 0:
        object_data = provider.jitter_point_cloud(object_data,
                                                  sigma=0.001,
                                                  clip=0.005)
    elif method == 1:
        need_to_rotate_labels = object_labels
        # rotate through 6 angles
        for i in range(6):
            print(i)
            rotation_angle = i * (np.pi / 3.)
            rotate_data = provider.rotate_point_cloud_by_angle(
                object_data, rotation_angle)
            object_data = np.vstack((object_data, rotate_data))
            object_labels = np.vstack((object_labels, need_to_rotate_labels))

    for i in range(len(TRAIN_FILES)):
        current_data, current_label = provider.loadDataFile(TRAIN_FILES[i])
        object_data = np.vstack((object_data, current_data))
        object_labels = np.vstack((object_labels, current_label))
        object_data, object_labels, _ = provider.shuffle_data(
            object_data, object_labels)
        n_object = object_data.shape[0]
        num_each_file = n_object // 6
        for i in range(6):
            f = h5py.File(data_dir + '/object_aug_rotate' + str(i) + '.h5',
                          'w')
            f['data'] = object_data[(i * num_each_file):(i + 1) *
                                    num_each_file, :, :]
            f['label'] = object_labels[i * (num_each_file):(i + 1) *
                                       num_each_file]
            f.close()
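provider.jitter_point_cloud is called above with explicit sigma/clip arguments but is not defined in this excerpt; a minimal numpy sketch of the usual clipped-Gaussian jitter (an assumption, not the original implementation):

import numpy as np

def jitter_point_cloud(batch_data, sigma=0.01, clip=0.05):
    # Add zero-mean Gaussian noise, clipped to [-clip, clip], to every coordinate.
    assert clip > 0
    noise = np.clip(sigma * np.random.randn(*batch_data.shape), -clip, clip)
    return batch_data + noise
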
Example #3
def load_data(train_filepath, test_filepath):
    train_x, train_y = loadDataFile(train_filepath)
    test_x, test_y = loadDataFile(test_filepath)

    train_loader = torch.utils.data.DataLoader(dataset=MyDataset(
        train_x, train_y),
                                               batch_size=BATCH_SIZE,
                                               shuffle=True,
                                               drop_last=True)
    test_loader = torch.utils.data.DataLoader(dataset=MyDataset(
        test_x, test_y),
                                              batch_size=BATCH_SIZE,
                                              shuffle=False,
                                              drop_last=True)
    return train_loader, test_loader
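MyDataset is not defined in this snippet; a minimal sketch, assuming it simply wraps the arrays returned by loadDataFile as tensors:

import numpy as np
import torch
from torch.utils.data import Dataset

class MyDataset(Dataset):
    # Wraps (N, num_points, 3) point clouds and (N,) integer labels.
    def __init__(self, x, y):
        self.x = torch.from_numpy(np.asarray(x, dtype=np.float32))
        self.y = torch.from_numpy(np.asarray(y, dtype=np.int64)).view(-1)

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        return self.x[idx], self.y[idx]
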
Example #4
def eval_one_epoch(sess, ops):
    """ ops: dict mapping from string to tf ops """
    start_time = time.time()

    is_training_sampler = False
    is_training_classifier = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]

    for fn in range(len(TEST_FILES)):
        log_string("----" + str(fn) + "-----")
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:, 0:NUM_IN_POINTS, :]
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            feed_dict = {
                ops["pointclouds_pl"]: current_data[start_idx:end_idx, :, :],
                ops["labels_pl"]: current_label[start_idx:end_idx],
                ops["is_training_sampler_pl"]: is_training_sampler,
                ops["is_training_classifier_pl"]: is_training_classifier,
            }
            summary, step, loss_val, pred_val = sess.run(
                [ops["merged"], ops["step"], ops["loss"], ops["pred"]],
                feed_dict=feed_dict,
            )
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val * BATCH_SIZE
            for i in range(start_idx, end_idx):
                l = current_label[i]
                total_seen_class[l] += 1
                total_correct_class[l] += pred_val[i - start_idx] == l

    log_string("eval mean loss: %f" % (loss_sum / float(total_seen)))
    log_string("eval accuracy: %f" % (total_correct / float(total_seen)))
    log_string(
        "eval avg class acc: %f"
        % (
            np.mean(
                np.array(total_correct_class)
                / np.array(total_seen_class, dtype=np.float)
            )
        )
    )
    log_string("total_seen: %f" % (total_seen))

    duration = time.time() - start_time
    log_string("eval duration (minutes): %.4f" % (duration / 60.0))
def train_adversarial_one_epoch(sess, ops, train_writer, need_label, target_label):
    is_training = False
    # Merge all train file
    #train_file_idxs = np.arange(0,len(TRAIN_FILES))
    all_train_data, all_train_label = provider.loadDataFile(TRAIN_FILES[0])#None, None

    #for fn in range(len(TRAIN_FILES)):
    #    tmp_data, tmp_label = provider.loadDataFile(TRAIN_FILES[fn])
    #    all_train_data = merge(all_train_data,tmp_data)
    #    all_train_label = merge(all_train_label,tmp_label)

    need_data = all_train_data[np.reshape(all_train_label == need_label,(-1)),...]
    need_data = need_data[:BATCH_SIZE,:NUM_POINT,:]

    target_label = np.tile(np.array([target_label]),[BATCH_SIZE,1])
    target_label = np.squeeze(target_label)

    loss_sum = 0
    feed_dict = {ops['pointclouds_pl']: need_data,
                 ops['labels_pl']: target_label,
                 ops['is_training_pl']: is_training,}

    summary, step, _, loss_val, pert_loss_val, pred_val, pert_vec, lr_val = sess.run(
        [ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pert_loss'],
         ops['pred'], ops['pert_vec'], ops['lr']],
        feed_dict=feed_dict)
    train_writer.add_summary(summary, step)
    pred_val = np.argmax(pred_val, 1)
    correct = np.sum(pred_val == target_label)
    loss_sum += loss_val

    log_string('mean loss: %f' % (loss_sum / 1.0))
    log_string('pert loss: %f' % (pert_loss_val / 1.0))
    log_string('accuracy: %f' % (correct / float(BATCH_SIZE)))
    log_string(' lr: %f' % (lr_val / 1.0))
    log_string(' batch: %f' % (step / 1.0))
    return need_data, pert_vec
Example #6
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    myGen = provider.myDataGenerator(2048)

    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        print()
        current_data, current_label = provider.loadDataFile(myGen)
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        for batch_idx in tqdm(range(num_batches)):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            rotated_data = current_data[start_idx:end_idx, :, :]
            # rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
            # jittered_data = provider.jitter_point_cloud(rotated_data)
            # jittered_data = provider.random_scale_point_cloud(jittered_data)
            # jittered_data = provider.rotate_perturbation_point_cloud(jittered_data)
            # jittered_data = provider.shift_point_cloud(jittered_data)

            feed_dict = {
                ops['pointclouds_pl']: rotated_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }
            summary, step, _, loss_val, pred_val = sess.run(
                [
                    ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                    ops['pred']
                ],
                feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

            #print(loss_sum)

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
Example #7
def validate(sess2, test_writer, epoch):
    log_string('validate VVVVVVVVVVVVVVVVVVVVVVVVVV')
    val_data, val_label = provider.loadDataFile(LOG_DIR + '/for_validate.h5')
    pointclouds_pl = sess2.graph.get_tensor_by_name('Placeholder:0')
    labels_pl = sess2.graph.get_tensor_by_name('Placeholder_1:0')
    is_train_pl = sess2.graph.get_tensor_by_name('Placeholder_2:0')

    num_batches = val_data.shape[0] // BATCH_SIZE
    acc = 0
    los = 0
    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        feed_dict = {pointclouds_pl: val_data[start_idx:end_idx, :, :],
                     labels_pl: val_label[start_idx:end_idx],
                     is_train_pl: False, }
        accuracy, loss = sess2.run([sess2.graph.get_tensor_by_name('div:0'),
                                  sess2.graph.get_tensor_by_name('add:0')],
                                  feed_dict=feed_dict)
        acc += accuracy
        los += loss

    summary = tf.Summary(value=[tf.Summary.Value(tag='accuracy', simple_value=acc)])
    test_writer.add_summary(summary, epoch)
    summary = tf.Summary(value=[tf.Summary.Value(tag='loss', simple_value=los)])
    test_writer.add_summary(summary, epoch)
Example #8
def eval_all_pointcloud(sess, ops, num_votes=1, topk=1):
    '''
    Code to change....
    '''
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    global_features = []
    labels = np.array([])

    for fn in range(len(TEST_FILES)):
        log_string('----' + str(fn) + '----')
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_label = np.squeeze(current_label)
        labels = np.append(labels, current_label)
        print(labels)
        print(current_data.shape)

        file_size = current_data.shape[0]
        print(file_size)

        for pc_idx in range(file_size):
            #print(id_count)
            for vote_idx in range(num_votes):
                rotated_data = provider.rotate_point_cloud_by_angle(
                    current_data[pc_idx:pc_idx + 1, :, :],
                    vote_idx / float(num_votes) * np.pi * 2)

                feed_dict = {
                    ops['pointclouds_pl']: rotated_data,
                    ops['labels_pl']: current_label[pc_idx:pc_idx + 1],
                    ops['is_training_pl']: is_training
                }

                loss_val, net_val = sess.run([ops['loss'], ops['net']],
                                             feed_dict=feed_dict)

                global_features.append(np.squeeze(net_val['pc_maxpool']))

    global_features = np.array(global_features)
    print "global_features :: ", global_features.shape
    print "labels :: ", labels.shape

    global_features = tsne.tsne(global_features, 2, global_features.shape[1])
    Plot.scatter(global_features[:, 0],
                 global_features[:, 1],
                 30,
                 c=4 * labels,
                 cmap='jet')

    for i, txt in enumerate(labels):
        if i % 10 == 0:
            Plot.annotate(txt, (global_features[i, 0], global_features[i, 1]))

    Plot.show()
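The tsne module and the Plot alias are not part of this excerpt; a rough stand-in using scikit-learn's TSNE and matplotlib, with random arrays in place of the pooled features and labels collected above:

import numpy as np
import matplotlib.pyplot as Plot
from sklearn.manifold import TSNE

global_features = np.random.randn(200, 1024)   # stand-in for the pooled max-pool features
labels = np.random.randint(0, 40, size=200)    # stand-in for the class labels

embedded = TSNE(n_components=2).fit_transform(global_features)
Plot.scatter(embedded[:, 0], embedded[:, 1], 30, c=labels, cmap='jet')
Plot.show()
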
 def loadModelNet40(self):
     file_path_name = askopenfilenames()
     self.path_labelText.set(file_path_name[0].split('/')[-1])
     self.display_count = 0
     # ModelNet40 official train/test split
     TRAIN_FILES = provider.getDataFiles(file_path_name[0])
     # Shuffle train files
     train_file_idxs = np.arange(0, len(TRAIN_FILES))
     np.random.shuffle(train_file_idxs)
     for fn in range(len(TRAIN_FILES)):
         print("目前讀取的檔案是: %s" % (TRAIN_FILES[train_file_idxs[fn]], ))
         current_data, current_label = provider.loadDataFile(
             TRAIN_FILES[train_file_idxs[fn]])
         current_data, current_label, _ = provider.shuffle_data(
             current_data, np.squeeze(current_label))
         if fn == 0:
             point_cloud_collection = current_data
         else:
             point_cloud_collection = np.vstack(
                 (point_cloud_collection, current_data))
     self.POINT_CLOUD_SET_LIST = point_cloud_collection
     self.idxs = np.arange(0, len(self.POINT_CLOUD_SET_LIST))
     print(self.idxs)
     np.random.shuffle(self.idxs)
     # After the model is loaded, disable this button
     self.btn_loadModelNet40.configure(state='disabled')
     self.btn_loadMyOwnData.configure(state='normal')
     self.Confirm_btn.configure(state='normal')
     self.Confirm_btn_own_data.configure(state='disabled')
Example #10
    def __init__(self, train_test):
        all_files = provider.getDataFiles(
            'indoor3d_sem_seg_hdf5_data/all_files.txt')
        room_filelist = [
            line.rstrip()
            for line in open('indoor3d_sem_seg_hdf5_data/room_filelist.txt')
        ]
        self.data = []
        self.label = []
        for h5_filename in all_files:
            data_batch, label_batch = provider.loadDataFile(h5_filename)
            self.data.append(data_batch)
            self.label.append(label_batch)
        self.data = np.concatenate(self.data, 0)
        self.label = np.concatenate(self.label, 0)
        print(self.data.shape)
        print(self.label.shape)

        test_area = 'Area_' + str(6)
        train_idxs = []
        test_idxs = []
        for i, room_name in enumerate(room_filelist):
            if test_area in room_name:
                test_idxs.append(i)
            else:
                train_idxs.append(i)
        if train_test == "test":
            self.data = self.data[test_idxs, ...]
            self.label = self.label[test_idxs]
        else:
            self.data = self.data[train_idxs, ...]
            self.label = self.label[train_idxs]
        self.label = torch.tensor(self.label, dtype=torch.long)
Example #11
def forward_batch2get_min_max(pb_file, input_node, inp_quant, num=10):
    maxes = {}
    mins = {}

    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        print('----' + str(fn) + '----')
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_label = np.squeeze(current_label)
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        print(current_data.shape)

        file_size = current_data.shape[0]
        print(file_size)

        num_votes = 1
        for f_idx in range(file_size):
            if f_idx > num:
                break
            for vote_idx in range(num_votes):

                data = current_data[f_idx:f_idx + 1, :, :]
                label = current_label[f_idx]
                print(label, end=',')
                forward_graph(pb_file, input_node, data, inp_quant, maxes,
                              mins)

    return mins, maxes
Example #12
def provide_data():
    while (True):
        BATCH_SIZE = 32
        current_data, current_label = provider.loadDataFile(
            './data/modelnet40_ply_hdf5_2048/train_all.h5')
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # manipulate the data
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            # manipulate the label
            one_hot_labe = np.zeros((BATCH_SIZE, 40))
            one_hot_labe[np.arange(BATCH_SIZE),
                         current_label[start_idx:end_idx]] = 1

            #out['data'] = jittered_data
            #out['labe'] = one_hot_labe
            yield jittered_data, one_hot_labe
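The one-hot labels above are built with NumPy fancy indexing; a standalone illustration with toy labels (40 classes, matching ModelNet40):

import numpy as np

BATCH_SIZE = 4
NUM_CLASSES = 40
labels = np.array([3, 0, 39, 7])            # toy class ids for one batch

one_hot = np.zeros((BATCH_SIZE, NUM_CLASSES))
one_hot[np.arange(BATCH_SIZE), labels] = 1  # row i gets a 1 in column labels[i]
print(one_hot.argmax(axis=1))               # [ 3  0 39  7]
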
Example #13
def load(files, points=1024, shuffle=False, rotate=False):
    data = []
    label = []

    file_idxs = np.arange(len(files))

    for file_num in file_idxs:
        current_data, current_label = provider.loadDataFile(files[file_num])
        current_data = current_data[:, :points, :]

        if shuffle:
            current_data, current_label, _ = provider.shuffle_data(
                current_data, np.squeeze(current_label)
            )
            current_label = np.expand_dims(current_label, axis=-1)

        data.append(current_data)
        label.append(current_label)

    data = np.concatenate(data, axis=0)
    label = np.concatenate(label, axis=0)

    if rotate:
        data = rotate_point_cloud(data)

    return (data, label)
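rotate_point_cloud is called here (and in Example #1) but not defined in the excerpt; a minimal sketch, assuming the usual PointNet-style rotation of each cloud by a random angle about the up (Y) axis:

import numpy as np

def rotate_point_cloud(batch_data):
    # batch_data: (B, N, 3); each cloud gets its own random rotation about Y.
    rotated = np.zeros_like(batch_data)
    for k in range(batch_data.shape[0]):
        angle = np.random.uniform() * 2 * np.pi
        c, s = np.cos(angle), np.sin(angle)
        rot = np.array([[c, 0.0, s],
                        [0.0, 1.0, 0.0],
                        [-s, 0.0, c]])
        rotated[k] = batch_data[k] @ rot
    return rotated
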
Example #14
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        #for fn in range(1): #use only first file for less data
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]])
        #current_data, current_label = provider.loadDataFile(TRAIN_FILES[0])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0
        current_data_orig = np.copy(current_data)
        #sort the goal pointcloud
        for i in range(len(current_data_orig)):
            current_data_orig[i] = current_data_orig[i][np.lexsort(
                np.fliplr(current_data_orig[i]).T)]

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            #rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
            #jittered_data = provider.jitter_point_cloud(current_data[start_idx:end_idx, :, :])
            jittered_data = current_data[start_idx:end_idx, :, :]
            feed_dict = {
                ops['pointclouds_pl']: jittered_data,
                ops['goal_pcs']: current_data_orig[start_idx:end_idx, :, :],
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }
            summary, step, _, loss_val, pred_val, encoding = sess.run(
                [
                    ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                    ops['pred'], ops['enc']
                ],
                feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            #total_correct += correct
            #total_seen += BATCH_SIZE
            loss_sum = loss_sum + np.sum(
                np.square(
                    np.subtract(pred_val,
                                current_data_orig[start_idx:end_idx, :, :])))

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
Example #15
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))  # the order of the train files differs every epoch
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):  # for each train file
        log_string('----train file' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]])
        # keep the first NUM_POINT points; current_data holds all point clouds in this file
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))  # shuffle the order of the point clouds
        current_label = np.squeeze(current_label)  # remove singleton dimensions

        file_size = current_data.shape[0]  # total number of point clouds
        num_batches = file_size // BATCH_SIZE  # number of batches needed

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            jittered_data = provider.random_scale_point_cloud(jittered_data)
            jittered_data = provider.rotate_perturbation_point_cloud(
                jittered_data)
            jittered_data = provider.shift_point_cloud(jittered_data)

            feed_dict = {
                ops['pointclouds_pl']: jittered_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }  # every key in feed_dict must be a placeholder
            summary, step, _, loss_val, pred_val = sess.run(
                [
                    ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                    ops['pred']
                ],
                feed_dict=feed_dict)
            print("--train file:{}, batch_idx:{},step:{}".format(
                str(fn), str(batch_idx), str(step)))
            train_writer.add_summary(summary, step)  # only training logs the summary curves
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
Example #16
def eval_one_epoch(sess, ops, test_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    #np.random.shuffle(train_file_idxs)

    for fn in range(1):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[3]])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss = 0

        for batch_idx in range(1):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE
            orig_pcs = current_data[start_idx:end_idx, :, :]
            orig_pcs = np.asarray([pc3nr1, pc3nr2])
            # Augment batched point clouds by rotation and jittering
            #rotated_data = provider.rotate_point_cloud(orig_pcs)
            jittered_data = provider.jitter_point_cloud(orig_pcs)
            used_pcs = jittered_data

            feed_dict = {
                ops['pointclouds_pl']: orig_pcs,
                ops['goal_pcs']: orig_pcs,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }
            summary, step, _, loss_val, pred_val = sess.run(
                [
                    ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                    ops['pred']
                ],
                feed_dict=feed_dict)
            test_writer.add_summary(summary, step)
            #pred_val = tf.convert_to_tensor(pred_val)
            #print(pred_val.get_shape().as_list())

            #used_pcs_tensor = tf.convert_to_tensor(used_pcs)
            #print(used_pcs_tensor.get_shape().as_list())
            #loss = tf.reduce_sum(tf.square(tf.subtract(pred_val, used_pcs_tensor)))
            #loss = tf.reduce_sum(tf.square(tf.subtract(pred_val, used_pcs_tensor)))
            plotPC(used_pcs[0])
            plotPC(pred_val[0])
            plotPC(used_pcs[1])
            plotPC(pred_val[1])
            loss = np.sum(np.square(np.subtract(pred_val, orig_pcs)))
            #print(loss.get_shape().as_list())
            #tf.Print(loss, [loss])
        log_string('mean loss: %f' % loss)
Example #17
def train_one_epoch(sess, ops, train_writer, adv=True):
    """ ops: dict mapping from string to tf ops """
    is_training = True
    
    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)
    
    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:,0:NUM_POINT,:]  # [:,0:NUM_POINT,:]
        
        current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))            
        current_label = np.squeeze(current_label)
        
        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        
        total_correct = 0
        total_seen = 0
        loss_sum = 0
       
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE
            
            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)

            if adv == True:
                adv_data = perturb(jittered_data,current_label[start_idx:end_idx],sess,ops,EPS,ADV_STEP,EPS/10)
                feed_dict_adv = {ops['pointclouds_pl']: adv_data,
                             ops['labels_pl']: current_label[start_idx:end_idx],
                             ops['is_training_pl']: is_training, ops['is_outer']: True}

                summary, step, _, loss_val, pred_val, bn_decay = sess.run([ops['merged'], ops['step'],
                    ops['train_op'], ops['loss'], ops['pred'], ops['bn_decay']], feed_dict=feed_dict_adv)
            else:
            
                feed_dict = {ops['pointclouds_pl']: jittered_data,
                             ops['labels_pl']: current_label[start_idx:end_idx],
                             ops['is_training_pl']: is_training,ops['is_outer']: True}

                summary, step, _, loss_val, pred_val, bn_decay = sess.run([ops['merged'], ops['step'],
                        ops['train_op'], ops['loss'], ops['pred'], ops['bn_decay']], feed_dict=feed_dict)


            # log_string('bn_decay: %f' % bn_decay)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        
        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
Example #18
def get_data():

    ALL_FILES = provider.getDataFiles(
        'indoor3d_sem_seg_hdf5_data/all_files.txt')
    room_filelist = [
        line.rstrip()
        for line in open('indoor3d_sem_seg_hdf5_data/room_filelist.txt')
    ]

    # Load ALL data
    data_batch_list = []
    label_batch_list = []
    for h5_filename in ALL_FILES:
        data_batch, label_batch = provider.loadDataFile(h5_filename)
        data_batch_list.append(data_batch)
        label_batch_list.append(label_batch)

    test_area = 'Area_' + str(FLAGS.test_area)
    train_idxs = []
    test_idxs = []
    for i, room_name in enumerate(room_filelist):
        if test_area in room_name:
            test_idxs.append(i)
        else:
            train_idxs.append(i)

    data_batches = np.concatenate(data_batch_list, 0)
    label_batches = np.concatenate(label_batch_list, 0)

    train_data = data_batches[train_idxs, ...]
    train_label = label_batches[train_idxs]
    test_data = data_batches[test_idxs, ...]
    test_label = label_batches[test_idxs]

    return train_data, train_label, test_data, test_label
Example #19
def eval_my_data(sess,ops,num_votes=1):

    log_string('----'+str("Predict ply_data_test0.h5[0:10]")+'----')
    current_data, current_label = provider.loadDataFile(TEST_FILES[0])
    current_data = current_data[:,0:NUM_POINT,:]    # current_data (2048, 1024, 3)
    current_label = np.squeeze(current_label)       # drop singleton dims; current_label (2048,)

    start_idx=0
    end_idx=10
    vote_idx=0
    is_training = False
    rotated_data = provider.rotate_point_cloud_by_angle(current_data[start_idx:end_idx, :, :],
                                                        vote_idx/float(num_votes) * np.pi * 2)      # B*N*3 point cloud batch
    feed_dict = {ops['pointclouds_pl']: rotated_data,
                 ops['labels_pl']: current_label[start_idx:end_idx],
                 ops['is_training_pl']: is_training}
    loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                  feed_dict=feed_dict)

    # get the predicted classes
    pred_val = np.argmax(pred_val, 1)     # predicted classes
    print("pred_val:{}".format(pred_val))  # should be 10 predicted labels
    print("current_label:{}".format(current_label[start_idx:end_idx]))   # 10 ground-truth labels

    # visualize the point clouds and print the results
    visAnd_pred(current_data[start_idx:end_idx,:,:],pred_val,current_label[start_idx:end_idx] )

    print("--------------END---------------")
Example #20
def test_ensemble(num_votes):
    with tf.Graph().as_default():
        total_pred_first_value = evaluate_first_model(num_votes)
    with tf.Graph().as_default():
        total_pred_second_value = evaluate_second_model(num_votes)
    #ensemble_pred = np.maximum(total_pred_first_value,total_pred_second_value)
    ensemble_pred = (total_pred_first_value + total_pred_second_value) / 2
    #ensemble_pred = total_pred_first_value
    pred_val = np.argmax(ensemble_pred, 1)  # class with the highest ensembled score
    test_size = pred_val.shape[0] - 1  # minus the leading all-zero row
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    total_correct = 0
    start = 1
    for fn in range(len(TEST_FILES)):
        log_string('----' + str(fn) + '----')
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_label = np.squeeze(current_label)
        current_size = current_label.shape[0]
        end = start + current_size
        correct = np.sum(pred_val[start:end] == current_label[0:current_size])
        for i in range(current_size):
            l = current_label[i]
            total_seen_class[l] += 1
            total_correct_class[l] += (pred_val[start + i] == l)
        start = end
        total_correct += correct
        # compute the average per-class accuracy

    log_string('eval accuracy: %f' % (total_correct / float(test_size)))
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(total_correct_class) /
        np.array(total_seen_class, dtype=np.float))))
def eval_one_epoch_modelnet(sess, ops, test_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]

    for fn in range(len(TEST_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_label = np.squeeze(current_label)

        total_correct, total_seen, loss_sum, total_seen_class, total_correct_class = eval_batches(
            current_data, current_label, sess, ops, is_training, test_writer,
            total_correct, total_seen, loss_sum, total_seen_class,
            total_correct_class)

    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(total_correct_class) /
        np.array(total_seen_class, dtype=np.float))))
Example #22
def find_models(category, model, templates, case):
    # model:		No of models to be stored for a particular category.
    # category: 	Name of the category to be stored.
    # templates:	Array having templates (BxNx3)
    # case:			Which files to be used? (test/train)

    if case == 'test':
        FILES = TEST_FILES
    if case == 'train':
        FILES = TRAIN_FILES
    print(FILES)
    count = 0  # Counter to find number of models.
    for train_idx in range(len(
            FILES)):  # Loop over all the training files from ModelNet40 data.
        current_data, current_label = provider.loadDataFile(
            FILES[train_idx])  # Load data of from a file.
        for i in range(current_data.shape[0]):
            if count < model and shapes.index(category) == current_label[i]:
                # import transforms3d.euler as t3d
                # rot = t3d.euler2mat(0*np.pi/180, 0*np.pi/180, 90*np.pi/180, 'szyx')
                # templates.append((np.dot(rot, current_data[i].T).T))
                templates.append(
                    current_data[i] / 2.0
                )  # Append data if it belongs to the category and less than given number of models.
                count += 1
    return templates
Example #23
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True
    num_batches = NUM_TRAIN // BATCH_SIZE
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    random.shuffle(TRAIN_KEYS)
    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        keys_batch = TRAIN_KEYS[start_idx:end_idx]
        batch_data, batch_label = provider.loadDataFile(keys_batch)
        feed_dict = {
            ops['img_pl']: batch_data,
            ops['labels_pl']: batch_label,
            ops['is_training_pl']: is_training,
        }
        summary, step, _, loss_val, pred_val = sess.run([
            ops['merged'], ops['step'], ops['train_op'], ops['loss'],
            ops['pred']
        ],
                                                        feed_dict=feed_dict)
        train_writer.add_summary(summary, step)
        pred_val = np.argmax(pred_val, 1)
        correct = np.sum(pred_val == batch_label)
        total_correct += correct
        total_seen += BATCH_SIZE
        loss_sum += loss_val

    log_string('mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('accuracy: %f' % (total_correct / float(total_seen)))
Example #24
 def load_datafile(self, file_idx):
     self.current_data, self.current_label = provider.loadDataFile(self.files[self.train_file_idxs[file_idx]])
     self.current_data = self.current_data[:, 0:self.num_point, :]
     self.current_data, self.current_label, _ = provider.shuffle_data(self.current_data, np.squeeze(self.current_label))
     self.current_label = np.squeeze(self.current_label)
     file_size = self.current_data.shape[0]
     self.num_batches = file_size // self.batch_size
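provider.shuffle_data appears in nearly every example but is never shown; a minimal sketch of what it presumably does (shuffle clouds and labels with one permutation and return that permutation as well):

import numpy as np

def shuffle_data(data, labels):
    # data: (N, num_points, 3), labels: (N,); returns shuffled copies plus the index order.
    idx = np.arange(len(labels))
    np.random.shuffle(idx)
    return data[idx, ...], labels[idx], idx
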
Example #25
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    # randomly shuffle the training files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        # Within one epoch, iterate mini-batch by mini-batch over the training set,
        # accumulating the number of correct predictions (total_correct), the number of
        # samples seen (total_seen), and the total loss (loss_sum).
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            # call provider.rotate_point_cloud
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            feed_dict = {
                ops['pointclouds_pl']: jittered_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }
            summary, step, _, loss_val, pred_val = sess.run(
                [
                    ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                    ops['pred']
                ],
                feed_dict=feed_dict)
            # run the graph in the TF session; ops['pred'] is the network output and feed_dict supplies its inputs
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        # log the mean loss and mean accuracy
        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
Example #26
def eval_one_epoch(sess, ops, pc_size, topk=1):
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, "pred_label.txt"), "w")
    for fn in range(len(TEST_FILES)):
        filename = os.path.split(TEST_FILES[fn])
        file_path = DATA_PATH + "/" + filename[1]
        current_data, current_label = provider.loadDataFile(file_path)
        current_data = current_data[:, 0:pc_size, :]
        current_label = np.squeeze(current_label)
        print(current_data.shape)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        print(file_size)

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE
            cur_batch_size = end_idx - start_idx

            # Aggregating BEG
            batch_loss_sum = 0  # sum of losses for the batch
            batch_pred_sum = np.zeros(
                (cur_batch_size, NUM_CLASSES))  # score for classes
            batch_pred_classes = np.zeros(
                (cur_batch_size, NUM_CLASSES))  # 0/1 for classes
            feed_dict = {
                ops["pointclouds_pl"]: current_data[start_idx:end_idx, :, :],
                ops["labels_pl"]: current_label[start_idx:end_idx],
                ops["is_training_pl"]: is_training,
            }
            loss_val, pred_val = sess.run([ops["loss"], ops["pred"]],
                                          feed_dict=feed_dict)
            batch_pred_sum += pred_val
            batch_pred_val = np.argmax(pred_val, 1)
            for el_idx in range(cur_batch_size):
                batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
            batch_loss_sum += loss_val * cur_batch_size / float(1)
            pred_val = np.argmax(batch_pred_sum, 1)
            # Aggregating END

            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += cur_batch_size
            loss_sum += batch_loss_sum

            for i in range(start_idx, end_idx):
                l = current_label[i]
                total_seen_class[l] += 1
                total_correct_class[l] += pred_val[i - start_idx] == l
                fout.write("%d, %d\n" % (pred_val[i - start_idx], l))

    log_string("sample size: %d, eval accuracy: %f" %
               (pc_size, total_correct / float(total_seen)))
Example #27
def eval_one_epoch(sess, ops, eval_writer):
    # Arguments:
    # sess: 		Tensorflow session to handle tensors.
    # ops:		Dictionary for tensors of Network_L
    # ops19: 		Dictionary for tensors of Network19
    # templates:	Training Point Cloud data.
    # poses: 		Training pose data.

    is_training = False
    display_ptClouds = False

    test_file_idxs = np.arange(0, len(TEST_FILES))
    np.random.shuffle(test_file_idxs)

    for fn in range(len(TEST_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(
            TEST_FILES[test_file_idxs[fn]])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, _, _ = provider.shuffle_data(current_data,
                                                   np.squeeze(current_label))

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        loss_sum = 0  # Total Loss in each batch.

        for fn in range(num_batches):
            start_idx = fn * BATCH_SIZE  # Start index of poses.
            end_idx = (fn + 1) * BATCH_SIZE  # End index of poses.

            template_data = current_data[start_idx:end_idx, :, :]

            # To visualize the source and point clouds:
            if display_ptClouds:
                helper.display_clouds_data(template_data[0])

            # Feed the placeholders of Network_L with source data and template data obtained from N-Iterations.
            feed_dict = {
                ops['source_pointclouds_pl']: template_data,
                ops['is_training_pl']: is_training
            }

            # Ask the network to predict transformation, calculate loss using distance between actual points.
            summary, step, loss_val = sess.run(
                [ops['merged'], ops['step'], ops['loss']], feed_dict=feed_dict)
            eval_writer.add_summary(
                summary, step)  # Add all the summary to the tensorboard.

            # Display Loss Value.
            print("Batch: {} & Loss: {}\r".format(fn, loss_val)),
            sys.stdout.flush()

            # Add loss for each batch.
            loss_sum += loss_val

        log_string(
            'Eval Mean loss: %f' %
            (loss_sum / num_batches))  # Store and display mean loss of epoch.
Example #28
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            feed_dict = {
                ops['pointclouds_pl']: jittered_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }
            summary, step, _, loss_val, pred_val, centroids = sess.run(
                [
                    ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                    ops['pred'], ops['centroids']
                ],
                feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val
            if np.random.rand() <= 0.001:
                h5r = h5py.File(
                    (LOG_DIR + '/demo/centroids' + str(step).zfill(8) + '.h5'),
                    'w')
                h5r.create_dataset('data', data=centroids)
                h5r.close()

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
Example #29
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE
            """Mixup"""
            print("Batch: %d", batch_idx)
            batch_data, batch_label_a, batch_label_b,lam = \
                mixup_data(current_data[start_idx:end_idx, :, :], current_label[start_idx:end_idx], FLAGS.alpha)

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(batch_data)
            jittered_data = provider.jitter_point_cloud(rotated_data)
            feed_dict = {
                ops['pointclouds_pl']: jittered_data,
                ops['labels_a_pl']: batch_label_a,
                ops['labels_b_pl']: batch_label_b,
                ops['is_training_pl']: is_training,
                ops['lam_pl']: lam
            }
            summary, step, _, loss_val, pred_val = sess.run(
                [
                    ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                    ops['pred']
                ],
                feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct_a = np.sum(pred_val == batch_label_a)
            correct_b = np.sum(pred_val == batch_label_b)
            total_correct += (lam * correct_a + (1 - lam) * correct_b)
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
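mixup_data is not included in this example; a minimal numpy sketch of the standard mixup recipe that the accuracy bookkeeping above implies (a convex combination with a shuffled partner, returning both label sets and the mixing coefficient lam):

import numpy as np

def mixup_data(batch_data, batch_label, alpha=1.0):
    # lam ~ Beta(alpha, alpha); each cloud is mixed with a randomly chosen partner.
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1.0
    idx = np.random.permutation(batch_data.shape[0])
    mixed = lam * batch_data + (1.0 - lam) * batch_data[idx]
    return mixed, batch_label, batch_label[idx], lam
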
Example #30
def eval_one_epoch(sess, ops, test_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')
    for fn in range(len(TEST_FILES)):
        log_string('----test file ' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:, :, 0:NUM_EVENTS, :]
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = start_idx + BATCH_SIZE

            batch_data = current_data[start_idx:end_idx, ...]
            batch_label = current_label[start_idx:end_idx, ...]

            feed_dict = {
                ops['eventclouds']: batch_data,
                ops['labels']: batch_label,
                ops['is_training']: is_training
            }
            summary, step, loss_val, pred_val = sess.run(
                [ops['merged'], ops['step'], ops['loss'], ops['pred']],
                feed_dict=feed_dict)

            test_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == batch_label)
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

            for i in range(BATCH_SIZE):
                l = batch_label[i]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_val[i] == l)
                fout.write('%d, %d\n' % (pred_val[i], l))

    log_string('eval mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(total_correct_class) /
        np.array(total_seen_class, dtype=np.float))))

    class_accuracies = np.array(total_correct_class) / np.array(
        total_seen_class, dtype=np.float)
    for i in range(NUM_CLASSES):
        name = GESTURE_NAMES[i]
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
Example #31
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True
    
    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)
    
    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:,0:NUM_POINT,:]
        current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))            
        current_label = np.squeeze(current_label)
        
        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        
        total_correct = 0
        total_seen = 0
        loss_sum = 0
       
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE
            
            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            feed_dict = {ops['pointclouds_pl']: jittered_data,
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['is_training_pl']: is_training,}
            summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
                ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val
        
        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
Example #32
def eval_one_epoch(sess, ops, test_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    
    for fn in range(len(TEST_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:,0:NUM_POINT,:]
        current_label = np.squeeze(current_label)
        
        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE

            feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['is_training_pl']: is_training}
            summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
                ops['loss'], ops['pred']], feed_dict=feed_dict)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += (loss_val*BATCH_SIZE)
            for i in range(start_idx, end_idx):
                l = current_label[i]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_val[i-start_idx] == l)
            
    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))
Example #33
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
#BN_DECAY_DECAY_STEP = float(DECAY_STEP * 2)
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99

HOSTNAME = socket.gethostname()

ALL_FILES = provider.getDataFiles('indoor3d_sem_seg_hdf5_data/all_files.txt')
room_filelist = [line.rstrip() for line in open('indoor3d_sem_seg_hdf5_data/room_filelist.txt')]

# Load ALL data
data_batch_list = []
label_batch_list = []
for h5_filename in ALL_FILES:
    data_batch, label_batch = provider.loadDataFile(h5_filename)
    data_batch_list.append(data_batch)
    label_batch_list.append(label_batch)
data_batches = np.concatenate(data_batch_list, 0)
label_batches = np.concatenate(label_batch_list, 0)
print(data_batches.shape)
print(label_batches.shape)

test_area = 'Area_'+str(FLAGS.test_area)
train_idxs = []
test_idxs = []
for i,room_name in enumerate(room_filelist):
    if test_area in room_name:
        test_idxs.append(i)
    else:
        train_idxs.append(i)
Example #34
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')
    for fn in range(len(TEST_FILES)):
        log_string('----'+str(fn)+'----')
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:,0:NUM_POINT,:]
        current_label = np.squeeze(current_label)
        print(current_data.shape)
        
        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        print(file_size)
        
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE
            cur_batch_size = end_idx - start_idx
            
            # Aggregating BEG
            batch_loss_sum = 0 # sum of losses for the batch
            batch_pred_sum = np.zeros((cur_batch_size, NUM_CLASSES)) # score for classes
            batch_pred_classes = np.zeros((cur_batch_size, NUM_CLASSES)) # 0/1 for classes
            for vote_idx in range(num_votes):
                rotated_data = provider.rotate_point_cloud_by_angle(current_data[start_idx:end_idx, :, :],
                                                  vote_idx/float(num_votes) * np.pi * 2)
                feed_dict = {ops['pointclouds_pl']: rotated_data,
                             ops['labels_pl']: current_label[start_idx:end_idx],
                             ops['is_training_pl']: is_training}
                loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                          feed_dict=feed_dict)
                batch_pred_sum += pred_val
                batch_pred_val = np.argmax(pred_val, 1)
                for el_idx in range(cur_batch_size):
                    batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
                batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
            # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
            # pred_val = np.argmax(batch_pred_classes, 1)
            pred_val = np.argmax(batch_pred_sum, 1)
            # Aggregating END
            
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
            total_correct += correct
            total_seen += cur_batch_size
            loss_sum += batch_loss_sum

            for i in range(start_idx, end_idx):
                l = current_label[i]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_val[i-start_idx] == l)
                fout.write('%d, %d\n' % (pred_val[i-start_idx], l))
                
                if pred_val[i-start_idx] != l and FLAGS.visu: # ERROR CASE, DUMP!
                    img_filename = '%d_label_%s_pred_%s.jpg' % (error_cnt, SHAPE_NAMES[l],
                                                           SHAPE_NAMES[pred_val[i-start_idx]])
                    img_filename = os.path.join(DUMP_DIR, img_filename)
                    output_img = pc_util.point_cloud_three_views(np.squeeze(current_data[i, :, :]))
                    scipy.misc.imsave(img_filename, output_img)
                    error_cnt += 1
                
    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))
    
    class_accuracies = np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))