Example #1
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    is_training = False

    # Make sure batch data is of same size
    cur_batch_data = np.zeros((BATCH_SIZE,NUM_POINT,TEST_DATASET.num_channel()))
    cur_batch_label = np.zeros((BATCH_SIZE), dtype=np.int32)

    total_correct = 0
    total_seen = 0
    loss_sum = 0
    batch_idx = 0
    shape_ious = []
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]

    while TEST_DATASET.has_next_batch():
        batch_data, batch_label = TEST_DATASET.next_batch(augment=False)
        bsize = batch_data.shape[0]
        print('Batch: %03d, batch size: %d'%(batch_idx, bsize))
        # for the last batch in the epoch, the bsize:end are from last batch
        cur_batch_data[0:bsize,...] = batch_data
        cur_batch_label[0:bsize] = batch_label

        batch_pred_sum = np.zeros((BATCH_SIZE, NUM_CLASSES)) # score for classes
        for vote_idx in range(num_votes):
            # Shuffle point order to achieve different farthest samplings
            shuffled_indices = np.arange(NUM_POINT)
            np.random.shuffle(shuffled_indices)
            if FLAGS.normal:
                rotated_data = provider.rotate_point_cloud_by_angle_with_normal(cur_batch_data[:, shuffled_indices, :],
                    vote_idx/float(num_votes) * np.pi * 2)
            else:
                rotated_data = provider.rotate_point_cloud_by_angle(cur_batch_data[:, shuffled_indices, :],
                    vote_idx/float(num_votes) * np.pi * 2)
            feed_dict = {ops['pointclouds_pl']: rotated_data,
                         ops['labels_pl']: cur_batch_label,
                         ops['is_training_pl']: is_training}
            loss_val, pred_val = sess.run([ops['loss'], ops['pred']], feed_dict=feed_dict)
            batch_pred_sum += pred_val
        pred_val = np.argmax(batch_pred_sum, 1)
        correct = np.sum(pred_val[0:bsize] == batch_label[0:bsize])
        total_correct += correct
        total_seen += bsize
        loss_sum += loss_val
        batch_idx += 1
        for i in range(bsize):
            l = batch_label[i]
            total_seen_class[l] += 1
            total_correct_class[l] += (pred_val[i] == l)
    
    log_string('eval mean loss: %f' % (loss_sum / float(batch_idx)))
    log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float64))))

    class_accuracies = np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float64)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
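
Every example in this collection repeats the same test-time voting scheme: each batch is rotated about the up axis num_votes times, the raw class scores are summed over the rotations, and the argmax of the summed scores is the final prediction. A minimal NumPy sketch of that pattern, with run_model standing in for the sess.run call and rotate_z mirroring what provider.rotate_point_cloud_by_angle is assumed to do:

import numpy as np

def rotate_z(points, angle):
    # Rotate a (B, N, 3) batch of point clouds about the up (y) axis.
    c, s = np.cos(angle), np.sin(angle)
    rot = np.array([[c, 0.0, s],
                    [0.0, 1.0, 0.0],
                    [-s, 0.0, c]])
    return points @ rot

def vote_predict(run_model, points, num_classes, num_votes=12):
    # Sum raw class scores over evenly spaced rotations, then take argmax.
    scores = np.zeros((points.shape[0], num_classes))
    for vote_idx in range(num_votes):
        angle = vote_idx / float(num_votes) * 2.0 * np.pi
        scores += run_model(rotate_z(points, angle))
    return np.argmax(scores, axis=1)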
Example #2
def eval_one_epoch(config, sess, ops, topk=1, epoch=0):
    is_training = False

    # Make sure batch data is of same size
    cur_batch_data = np.zeros(
        (config.batch_size, config.num_points, TEST_DATASET.num_channel()))
    cur_batch_label = np.zeros((config.batch_size), dtype=np.int32)

    total_correct = 0
    total_seen = 0
    loss_sum = 0
    batch_idx = 0
    shape_ious = []

    predictions = []
    labels = []

    while TEST_DATASET.has_next_batch():
        batch_data, batch_label = TEST_DATASET.next_batch(augment=False)
        bsize = batch_data.shape[0]
        # for the last batch in the epoch, the bsize:end are from last batch
        cur_batch_data[0:bsize, ...] = batch_data
        cur_batch_label[0:bsize] = batch_label

        batch_pred_sum = np.zeros(
            (config.batch_size, config.num_classes))  # score for classes
        for vote_idx in range(config.num_votes):
            # Shuffle point order to achieve different farthest samplings
            shuffled_indices = np.arange(config.num_points)
            np.random.shuffle(shuffled_indices)
            if config.normal:
                rotated_data = provider.rotate_point_cloud_by_angle_with_normal(
                    cur_batch_data[:, shuffled_indices, :],
                    vote_idx / float(config.num_votes) * np.pi * 2)
            else:
                rotated_data = provider.rotate_point_cloud_by_angle(
                    cur_batch_data[:, shuffled_indices, :],
                    vote_idx / float(config.num_votes) * np.pi * 2)
            feed_dict = {
                ops['pointclouds_pl']: rotated_data,
                ops['labels_pl']: cur_batch_label,
                ops['is_training_pl']: is_training
            }
            loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                          feed_dict=feed_dict)
            batch_pred_sum += pred_val
        pred_val = np.argmax(batch_pred_sum, 1)
        correct = np.sum(pred_val[0:bsize] == batch_label[0:bsize])

        predictions += pred_val[0:bsize].tolist()
        labels += batch_label[0:bsize].tolist()

        total_correct += correct
        total_seen += bsize
        loss_sum += loss_val
        batch_idx += 1

    loss = (loss_sum / float(batch_idx))
    acc = (total_correct / float(total_seen))
    log(config.log_file,
        "EVALUATING epoch {} - loss: {} acc: {} ".format(epoch, loss, acc))
    if config.test:
        import Evaluation_tools as et
        eval_file = os.path.join(config.log_dir, '{}.txt'.format(config.name))
        et.write_eval_file(config.data, eval_file, predictions, labels,
                           config.name)
        et.make_matrix(config.data, eval_file, config.log_dir)
    else:
        LOSS_LOGGER.log(loss, epoch, "eval_loss")
        ACC_LOGGER.log(acc, epoch, "eval_accuracy")
        TEST_DATASET.reset()
        return total_correct / float(total_seen)
Example #3
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, "pred_label_%s.txt" % INFER_SET), "w")
    for fn in range(len(INFER_FILES)):
        log_string("----" + str(fn) + "----")
        current_data, current_label = provider.loadDataFile(INFER_FILES[fn])

        pred_label = np.zeros_like(current_label)

        current_data = current_data[:, 0:NUM_POINT, :]
        current_label = np.squeeze(current_label)
        print(current_data.shape)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        print(file_size)

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE
            cur_batch_size = end_idx - start_idx

            # Aggregating BEG
            batch_loss_sum = 0  # sum of losses for the batch
            batch_pred_sum = np.zeros(
                (cur_batch_size, NUM_CLASSES)
            )  # score for classes
            batch_pred_classes = np.zeros(
                (cur_batch_size, NUM_CLASSES)
            )  # 0/1 for classes
            for vote_idx in range(num_votes):
                rotated_data = provider.rotate_point_cloud_by_angle(
                    current_data[start_idx:end_idx, :, :],
                    vote_idx / float(num_votes) * np.pi * 2,
                )
                feed_dict = {
                    ops["pointclouds_pl"]: rotated_data,
                    ops["labels_pl"]: current_label[start_idx:end_idx],
                    ops["is_training_pl"]: is_training,
                }
                loss_val, pred_val = sess.run(
                    [ops["loss"], ops["pred"]], feed_dict=feed_dict
                )
                batch_pred_sum += pred_val
                batch_pred_val = np.argmax(pred_val, 1)
                for el_idx in range(cur_batch_size):
                    batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
                batch_loss_sum += loss_val * cur_batch_size / float(num_votes)
            # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
            # pred_val = np.argmax(batch_pred_classes, 1)
            pred_val = np.argmax(batch_pred_sum, 1)
            # Aggregating END

            pred_label[start_idx:end_idx] = np.expand_dims(pred_val, axis=1)

            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
            total_correct += correct
            total_seen += cur_batch_size
            loss_sum += batch_loss_sum

            for i in range(start_idx, end_idx):
                l = current_label[i]
                total_seen_class[l] += 1
                total_correct_class[l] += pred_val[i - start_idx] == l
                fout.write("%d, %d\n" % (pred_val[i - start_idx], l))

        file_name = os.path.split(INFER_FILES[fn])
        data_prep_util.save_h5_label(
            os.path.join(PRED_LABEL_DIR, file_name[1]), pred_label, label_dtype
        )

    log_string("eval mean loss: %f" % (loss_sum / float(total_seen)))
    log_string("eval accuracy: %f" % (total_correct / float(total_seen)))
    log_string(
        "eval avg class acc: %f"
        % (
            np.mean(
                np.array(total_correct_class)
                / np.array(total_seen_class, dtype=np.float64)
            )
        )
    )

    class_accuracies = np.array(total_correct_class) / np.array(
        total_seen_class, dtype=np.float64
    )
    for i, name in enumerate(SHAPE_NAMES):
        log_string("%10s:\t%0.3f" % (name, class_accuracies[i]))
Example #4
    def eval_one_time(self, topk=1, data=0):
        is_training = False

        # Make sure batch data is of same size
        #cur_batch_data = np.zeros((BATCH_SIZE,NUM_POINT,TEST_DATASET.num_channel()))
        #cur_batch_label = np.zeros((BATCH_SIZE), dtype=np.int32)
        cur_batch_data = np.zeros((1, data.shape[0], data.shape[1]))
        cur_batch_label = np.zeros((1), dtype=np.int32)

        total_correct = 0
        total_seen = 0
        loss_sum = 0
        batch_idx = 0
        shape_ious = []
        total_seen_class = [0 for _ in range(self.NUM_CLASSES)]
        total_correct_class = [0 for _ in range(self.NUM_CLASSES)]

        batch_data, batch_label = data, 4  #TEST_DATASET.next_batch(augment=False)
        bsize = batch_data.shape[0]

        #print('Batch: %03d, batch size: %d'%(batch_idx, bsize))
        # for the last batch in the epoch, the bsize:end are from last batch
        cur_batch_data[0:bsize, ...] = batch_data
        cur_batch_label[0] = batch_label

        batch_pred_sum = np.zeros(
            (self.BATCH_SIZE, self.NUM_CLASSES))  # score for classes

        for vote_idx in range(self.NUM_VOTES):
            # Shuffle point order to achieve different farthest samplings
            shuffled_indices = np.arange(self.NUM_POINT)
            np.random.shuffle(shuffled_indices)
            if self.NORMAL:
                rotated_data = provider.rotate_point_cloud_by_angle_with_normal(
                    cur_batch_data[:, shuffled_indices, :],
                    vote_idx / float(self.NUM_VOTES) * np.pi * 2)
            else:
                rotated_data = provider.rotate_point_cloud_by_angle(
                    cur_batch_data[:, shuffled_indices, :],
                    vote_idx / float(self.NUM_VOTES) * np.pi * 2)
            feed_dict = {
                self.ops['pointclouds_pl']: rotated_data,
                self.ops['labels_pl']: cur_batch_label,
                self.ops['is_training_pl']: is_training
            }
            loss_val, pred_val = self.sess.run(
                [self.ops['loss'], self.ops['pred']], feed_dict=feed_dict)
            batch_pred_sum += pred_val
        min_pred_val = np.argmin(batch_pred_sum, 1)
        pred_val = np.argmax(batch_pred_sum, 1)
        """
        batch_pred_sum[0][pred_val[0]] = -100000000000000000000
        second_pred_val = np.argmax(batch_pred_sum, 1)
        batch_pred_sum[0][second_pred_val[0]] = -100000000000000000000
        third_pred_val = np.argmax(batch_pred_sum, 1)
        """
        #print heapq.nlargest(3, range(len(batch_pred_sum)), batch_pred_sum.take)
        print(Fore.RED + self.SHAPE_NAMES[pred_val[0]])
        print(Style.RESET_ALL)

        #print self.print_letters(self.SHAPE_NAMES[second_pred_val[0]])# self.SHAPE_NAMES[third_pred_val[0]]#, self.SHAPE_NAMES[min_pred_val[0]]
        """
Example #5
def eval_one_epoch(sess, ops, test_writer, testp_writer):
    global EPOCH_CNT
    is_training = False
    num_votes = 4

    cur_batch_data = np.zeros((BATCH_SIZE, NUM_POINT, TEST_DATASET.num_channel()))
    cur_batch_label = np.zeros((BATCH_SIZE), dtype=np.int32)
    cur_batch_dir = np.zeros((BATCH_SIZE, NUM_POINT, 6))

    total_correct = 0
    total_seen = 0
    loss_sum = 0
    batch_idx = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]

    log_string(str(datetime.now()))
    log_string('---- EPOCH %03d EVALUATION ----' % (EPOCH_CNT))

    while TEST_DATASET.has_next_batch():
        batch_data, batch_label, batch_dir = TEST_DATASET.next_batch(augment=False)
        bsize = batch_data.shape[0]

        cur_batch_data[0:bsize, ...] = batch_data
        cur_batch_label[0:bsize] = batch_label
        cur_batch_dir[0:bsize, ...] = batch_dir

        batch_pred_sum = np.zeros((BATCH_SIZE, NUM_CLASSES))
        for vote_idx in range(num_votes):
            shuffled_indices = np.arange(NUM_POINT)
            np.random.shuffle(shuffled_indices)
            if FLAGS.normal:
                rotated_data, rotated_dir = provider.rotate_point_cloud_by_angle_with_normal(cur_batch_data[:, shuffled_indices, :],
                                                                                             cur_batch_dir[:, shuffled_indices, :],
                                                                                             vote_idx / float(num_votes) * np.pi * 2)
            else:
                rotated_data, rotated_dir = provider.rotate_point_cloud_by_angle(cur_batch_data[:, shuffled_indices, :],
                                                                                 cur_batch_dir[:, shuffled_indices, :],
                                                                                 vote_idx / float(num_votes) * np.pi * 2)
            feed_dict = {ops['pointclouds_pl']: rotated_data,
                         ops['dir_pl']: rotated_dir,
                         ops['labels_pl']: cur_batch_label,
                         ops['is_training_pl']: is_training}
            summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
                                                          ops['loss'], ops['pred']], feed_dict=feed_dict)
            batch_pred_sum += pred_val
        test_writer.add_summary(summary, step)
        pred_val = np.argmax(batch_pred_sum, 1)
        correct = np.sum(pred_val[0:bsize] == batch_label[0:bsize])
        total_correct += correct
        total_seen += bsize
        loss_sum += loss_val
        batch_idx += 1
        for i in range(0, bsize):
            l = batch_label[i]
            total_seen_class[l] += 1
            total_correct_class[l] += (pred_val[i] == l)

    dev_summary = tf.Summary()
    dev_summary.value.add(tag="loss", simple_value=loss_sum / float(batch_idx))
    dev_summary.value.add(tag="accuracy", simple_value=total_correct / float(total_seen))
    dev_summary.value.add(tag="avg class acc", simple_value=np.mean(
        np.array(total_correct_class) / np.array(total_seen_class, dtype=np.float)))
    testp_writer.add_summary(dev_summary, step)

    log_string('eval mean loss: %f' % (loss_sum / float(batch_idx)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (
        np.mean(np.array(total_correct_class) / np.array(total_seen_class, dtype=np.float64))))
    EPOCH_CNT += 1

    TEST_DATASET.reset()
    return total_correct / float(total_seen)
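
Example #5 writes its evaluation scalars by building a tf.Summary protobuf by hand instead of running a merged summary op. The same idiom isolated, assuming the TF1-style API the snippets already use:

import tensorflow.compat.v1 as tf

def write_scalar(writer, tag, value, step):
    # Build a one-field scalar summary and hand it to the FileWriter,
    # as done above for loss, accuracy, and avg class acc.
    summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
    writer.add_summary(summary, step)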
Example #6
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')

    if (".h5" in TEST_FILE):
        current_data, current_label = data_utils.get_current_data_h5(
            TEST_DATA, TEST_LABELS, NUM_POINT)
    else:
        current_data, current_label = data_utils.get_current_data(
            TEST_DATA, TEST_LABELS, NUM_POINT)

    current_label = np.squeeze(current_label)

    num_batches = current_data.shape[0] // BATCH_SIZE

    current_pred = []

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        cur_batch_size = end_idx - start_idx

        # Aggregating BEG
        batch_loss_sum = 0  # sum of losses for the batch
        batch_pred_sum = np.zeros(
            (cur_batch_size, NUM_CLASSES))  # score for classes
        batch_pred_classes = np.zeros(
            (cur_batch_size, NUM_CLASSES))  # 0/1 for classes
        for vote_idx in range(num_votes):
            rotated_data = provider.rotate_point_cloud_by_angle(
                current_data[start_idx:end_idx, :, :],
                vote_idx / float(num_votes) * np.pi * 2)

            xforms_np, rotations_np = pf.get_xforms(
                BATCH_SIZE,
                rotation_range=rotation_range_val,
                scaling_range=scaling_range_val,
                order=setting.rotation_order)

            # Augment batched point clouds by rotation and jittering
            feed_dict = {
                ops['pointclouds_pl']: rotated_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
                ops['xforms']: xforms_np,
                ops['rotations']: rotations_np,
                ops['jitter_range']: np.array([jitter_val])
            }

            loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                          feed_dict=feed_dict)

            pred_val = np.sum(pred_val, axis=1)
            # pred_val = np.argmax(pred_val, 1)

            batch_pred_sum += pred_val
            batch_pred_val = np.argmax(pred_val, 1)
            for el_idx in range(cur_batch_size):
                batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
            batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
        # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
        # pred_val = np.argmax(batch_pred_classes, 1)
        pred_val = np.argmax(batch_pred_sum, 1)
        # Aggregating END

        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
        total_correct += correct
        total_seen += cur_batch_size
        loss_sum += batch_loss_sum

        for i in range(start_idx, end_idx):
            l = current_label[i]
            total_seen_class[l] += 1
            total_correct_class[l] += (pred_val[i - start_idx] == l)
            fout.write('%s, %s\n' %
                       (SHAPE_NAMES[pred_val[i - start_idx]], SHAPE_NAMES[l]))

            current_pred.append(pred_val[i - start_idx])

    log_string('total seen: %d' % (total_seen))
    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(total_correct_class) /
        np.array(total_seen_class, dtype=np.float64))))

    class_accuracies = np.array(total_correct_class) / np.array(
        total_seen_class, dtype=np.float64)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))

    #Plot confusion matrix
    current_pred = np.array(current_pred)
    groundtruth = current_label.flatten()
    predictions = current_pred.flatten()

    mat = confusion_matrix(groundtruth, predictions)

    plt.style.use('seaborn-paper')
    plt.rcParams["figure.figsize"] = (10, 10)
    ax = plt.subplot(111)
    cmap = plt.cm.Reds
    mat = mat.astype('float') / mat.sum(axis=1)[:, np.newaxis]
    mat = np.nan_to_num(mat, copy=True)

    plt.imshow(mat, interpolation='nearest', cmap=cmap)
    # cbar = plt.colorbar(fraction=0.03, pad=0.05, aspect=30)
    # cbar.ax.tick_params(labelsize=10)
    tick_marks = np.arange(len(SHAPE_NAMES))
    plt.xticks(tick_marks, SHAPE_NAMES, rotation=90)
    plt.yticks(tick_marks, SHAPE_NAMES)

    plt.ylabel('Ground truth')
    plt.xlabel('Prediction')

    for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
                 ax.get_xticklabels() + ax.get_yticklabels()):
        item.set_fontsize(36)

    plt.tight_layout()
    plt.savefig(os.path.join(DUMP_DIR, 'matrix.pdf'))
    plt.show()
Example #7
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')
    for fn in range(len(TEST_FILES)):
        log_string('----' + str(fn) + '----')
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_label = np.squeeze(current_label)
        print(current_data.shape)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        print(file_size)

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE
            cur_batch_size = end_idx - start_idx

            # Aggregating BEG
            batch_loss_sum = 0  # sum of losses for the batch
            batch_pred_sum = np.zeros(
                (cur_batch_size, NUM_CLASSES))  # score for classes
            batch_pred_classes = np.zeros(
                (cur_batch_size, NUM_CLASSES))  # 0/1 for classes
            for vote_idx in range(num_votes):
                rotated_data = provider.rotate_point_cloud_by_angle(
                    current_data[start_idx:end_idx, :, :],
                    vote_idx / float(num_votes) * np.pi * 2)
                feed_dict = {
                    ops['pointclouds_pl']: rotated_data,
                    ops['labels_pl']: current_label[start_idx:end_idx],
                    ops['is_training_pl']: is_training
                }
                loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                              feed_dict=feed_dict)
                batch_pred_sum += pred_val
                batch_pred_val = np.argmax(pred_val, 1)
                for el_idx in range(cur_batch_size):
                    batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
                batch_loss_sum += (loss_val * cur_batch_size /
                                   float(num_votes))
            # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
            # pred_val = np.argmax(batch_pred_classes, 1)
            pred_val = np.argmax(batch_pred_sum, 1)
            # Aggregating END

            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
            total_correct += correct
            total_seen += cur_batch_size
            loss_sum += batch_loss_sum

            for i in range(start_idx, end_idx):
                l = current_label[i]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_val[i - start_idx] == l)
                fout.write('%d, %d\n' % (pred_val[i - start_idx], l))

    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(total_correct_class) /
        np.array(total_seen_class, dtype=np.float64))))

    class_accuracies = np.array(total_correct_class) / np.array(
        total_seen_class, dtype=np.float64)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
Example #8
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    is_training = False

    # Make sure batch data is of same size
    cur_batch_data = np.zeros(
        (BATCH_SIZE, NUM_POINT, TEST_DATASET.num_channel()))
    cur_batch_label = np.zeros((BATCH_SIZE), dtype=np.int32)

    total_correct = 0
    total_seen = 0
    loss_sum = 0
    batch_idx = 0
    shape_ious = []
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]

    while TEST_DATASET.has_next_batch():
        batch_data, batch_label = TEST_DATASET.next_batch(augment=False)
        bsize = batch_data.shape[0]
        print('Batch: %03d, batch size: %d' % (batch_idx, bsize))
        # for the last batch in the epoch, the bsize:end are from last batch
        cur_batch_data[0:bsize, ...] = batch_data
        cur_batch_label[0:bsize] = batch_label

        batch_pred_sum = np.zeros(
            (BATCH_SIZE, NUM_CLASSES))  # score for classes
        for vote_idx in range(num_votes):
            # Shuffle point order to achieve different farthest samplings
            shuffled_indices = np.arange(NUM_POINT)
            np.random.shuffle(shuffled_indices)
            if FLAGS.normal:
                rotated_data = provider.rotate_point_cloud_by_angle_with_normal(
                    cur_batch_data[:, shuffled_indices, :],
                    vote_idx / float(num_votes) * np.pi * 2)
            else:
                rotated_data = provider.rotate_point_cloud_by_angle(
                    cur_batch_data[:, shuffled_indices, :],
                    vote_idx / float(num_votes) * np.pi * 2)
            feed_dict = {
                ops['pointclouds_pl']: rotated_data,
                ops['labels_pl']: cur_batch_label,
                ops['is_training_pl']: is_training
            }
            loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                          feed_dict=feed_dict)
            batch_pred_sum += pred_val
        pred_val = np.argmax(batch_pred_sum, 1)
        correct = np.sum(pred_val[0:bsize] == batch_label[0:bsize])
        total_correct += correct
        total_seen += bsize
        loss_sum += loss_val
        batch_idx += 1
        for i in range(bsize):
            l = batch_label[i]
            total_seen_class[l] += 1
            total_correct_class[l] += (pred_val[i] == l)

    log_string('eval mean loss: %f' % (loss_sum / float(batch_idx)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(total_correct_class) /
        np.array(total_seen_class, dtype=np.float64))))

    class_accuracies = np.array(total_correct_class) / np.array(
        total_seen_class, dtype=np.float64)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
Example #9
def eval_one_epoch(sess, ops, test_writer):
    """ ops: dict mapping from string to tf ops """
    global EPOCH_CNT
    global BEST_ACC
    global BEST_CLS_ACC

    is_training = False

    # Make sure batch data is of same size
    cur_batch_data = np.zeros(
        (BATCH_SIZE, NUM_POINT, TEST_DATASET.num_channel()))
    cur_batch_label = np.zeros((BATCH_SIZE), dtype=np.int32)

    total_correct = 0
    total_seen = 0
    loss_sum = 0
    batch_idx = 0
    shape_ious = []
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]

    log_string(str(datetime.now()))
    log_string('---- EPOCH %03d EVALUATION ----' % (EPOCH_CNT))

    while TEST_DATASET.has_next_batch():
        batch_data, batch_label = TEST_DATASET.next_batch(augment=False)
        bsize = batch_data.shape[0]
        # print('Batch: %03d, batch size: %d'%(batch_idx, bsize))
        # for the last batch in the epoch, the bsize:end are from last batch
        cur_batch_data[0:bsize, ...] = batch_data
        cur_batch_label[0:bsize] = batch_label

        if ROTATE_FLAG:
            batch_pred_sum = np.zeros(
                (BATCH_SIZE, NUM_CLASSES))  # score for classes
            for vote_idx in range(12):
                # Shuffle point order to achieve different farthest samplings
                shuffled_indices = np.arange(NUM_POINT)
                np.random.shuffle(shuffled_indices)
                if NORMAL_FLAG:
                    rotated_data = provider.rotate_point_cloud_by_angle_with_normal(
                        cur_batch_data[:, shuffled_indices, :],
                        vote_idx / float(12) * np.pi * 2)
                    rotated_data = provider.rotate_perturbation_point_cloud_with_normal(
                        rotated_data)
                else:
                    rotated_data = provider.rotate_point_cloud_by_angle(
                        cur_batch_data[:, shuffled_indices, :],
                        vote_idx / float(12) * np.pi * 2)
                    rotated_data = provider.rotate_perturbation_point_cloud(
                        rotated_data)

                jittered_data = provider.random_scale_point_cloud(
                    rotated_data[:, :, 0:3])

                jittered_data = provider.jitter_point_cloud(jittered_data)
                rotated_data[:, :, 0:3] = jittered_data
                # else:
                # rotated_data = provider.rotate_point_cloud_by_angle(cur_batch_data[:, shuffled_indices, :],
                # vote_idx/float(12) * np.pi * 2)
                feed_dict = {
                    ops['pointclouds_pl']: rotated_data,
                    ops['labels_pl']: cur_batch_label,
                    ops['is_training_pl']: is_training
                }
                loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                              feed_dict=feed_dict)
                batch_pred_sum += pred_val
            pred_val = np.argmax(batch_pred_sum, 1)

        else:
            feed_dict = {
                ops['pointclouds_pl']: cur_batch_data,
                ops['labels_pl']: cur_batch_label,
                ops['is_training_pl']: is_training
            }
            summary, step, loss_val, pred_val = sess.run(
                [ops['merged'], ops['step'], ops['loss'], ops['pred']],
                feed_dict=feed_dict)
            test_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)

        correct = np.sum(pred_val[0:bsize] == batch_label[0:bsize])
        total_correct += correct
        total_seen += bsize
        loss_sum += loss_val
        batch_idx += 1
        for i in range(bsize):
            l = batch_label[i]
            total_seen_class[l] += 1
            total_correct_class[l] += (pred_val[i] == l)

    current_acc = total_correct / float(total_seen)
    current_cls_acc = np.mean(
        np.array(total_correct_class) /
        np.array(total_seen_class, dtype=np.float64))

    log_string('eval mean loss: %f' % (loss_sum / float(batch_idx)))
    log_string('eval accuracy: %f' % (current_acc))
    log_string('eval avg class acc: %f' % (current_cls_acc))

    best_acc_flag, best_cls_acc_flag = False, False
    if current_acc > BEST_ACC:
        BEST_ACC = current_acc
        best_acc_flag = True
    if current_cls_acc > BEST_CLS_ACC:
        BEST_CLS_ACC = current_cls_acc
        best_cls_acc_flag = True

    log_string('eval best accuracy: %f' % (BEST_ACC))
    log_string('eval best avg class acc: %f' % (BEST_CLS_ACC))

    EPOCH_CNT += 1

    TEST_DATASET.reset()
    return (best_acc_flag, best_cls_acc_flag)
Example #10
def eval_one_epoch(sess, ops, num_votes=1, topk=1, adv=True):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]

    if adv == True:

        error_cnt_adv = 0
        total_correct_adv = 0
        total_seen_adv = 0
        loss_sum_adv = 0
        total_seen_class_adv = [0 for _ in range(NUM_CLASSES)]
        total_correct_class_adv = [0 for _ in range(NUM_CLASSES)]

    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')
    for fn in range(len(TEST_FILES)):
        log_string('----' + str(fn) + '----')
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_label = np.squeeze(current_label)
        print(current_data.shape)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        print(file_size)

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE
            cur_batch_size = end_idx - start_idx

            # Aggregating BEG
            batch_loss_sum = 0  # sum of losses for the batch
            batch_pred_sum = np.zeros(
                (cur_batch_size, NUM_CLASSES))  # score for classes
            batch_pred_classes = np.zeros(
                (cur_batch_size, NUM_CLASSES))  # 0/1 for classes

            for vote_idx in range(num_votes):
                rotated_data = provider.rotate_point_cloud_by_angle(
                    current_data[start_idx:end_idx, :, :],
                    vote_idx / float(num_votes) * np.pi * 2)
                feed_dict = {
                    ops['pointclouds_pl']: rotated_data,
                    ops['labels_pl']: current_label[start_idx:end_idx],
                    ops['is_training_pl']: is_training
                }
                loss_val, pred_val, end_points = sess.run(
                    [ops['loss'], ops['pred'], ops['end_points']],
                    feed_dict=feed_dict)
                batch_pred_sum += pred_val
                batch_pred_val = np.argmax(pred_val, 1)
                for el_idx in range(cur_batch_size):
                    batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
                batch_loss_sum += (loss_val * cur_batch_size /
                                   float(num_votes))
            # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
            # pred_val = np.argmax(batch_pred_classes, 1)
            pred_val = np.argmax(batch_pred_sum, 1)
            # Aggregating END

            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
            total_correct += correct
            total_seen += cur_batch_size
            loss_sum += batch_loss_sum

            for i in range(start_idx, end_idx):
                l = current_label[i]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_val[i - start_idx] == l)
                fout.write('%d, %d\n' % (pred_val[i - start_idx], l))

                if pred_val[
                        i -
                        start_idx] != l and FLAGS.visu:  # ERROR CASE, DUMP!
                    img_filename = '%d_label_%s_pred_%s.jpg' % (
                        error_cnt, SHAPE_NAMES[l],
                        SHAPE_NAMES[pred_val[i - start_idx]])
                    img_filename = os.path.join(DUMP_DIR, img_filename)
                    output_img = pc_util.point_cloud_three_views(
                        np.squeeze(current_data[i, :, :]))
                    scipy.misc.imsave(img_filename, output_img)
                    error_cnt += 1

            if adv == True:
                adv_data = perturb(rotated_data,
                                   current_label[start_idx:end_idx], sess, ops,
                                   EPS, ADV_STEP, EPS / 10)
                np.save(str(batch_idx), adv_data)

                feed_dict_adv = {
                    ops['pointclouds_pl']: adv_data,
                    ops['labels_pl']: current_label[start_idx:end_idx],
                    ops['is_training_pl']: is_training
                }

                loss_val_adv, pred_val_adv, end_points_adv = sess.run(
                    [ops['loss'], ops['pred'], ops['end_points']],
                    feed_dict=feed_dict_adv)

                delta = end_points_adv['global'] - end_points['global']
                bigger = delta[delta > 0.1]
                smaller = delta[delta < -0.1]
                print(bigger.size)
                print(smaller.size)

                pred_val_adv = np.argmax(pred_val_adv, 1)
                correct_adv = np.sum(
                    pred_val_adv == current_label[start_idx:end_idx])
                total_correct_adv += correct_adv
                total_seen_adv += BATCH_SIZE
                loss_sum_adv += (loss_val_adv * BATCH_SIZE)

                for i in range(start_idx, end_idx):
                    l = current_label[i]
                    total_seen_class_adv[l] += 1
                    total_correct_class_adv[l] += (
                        pred_val_adv[i - start_idx] == l)
                    #fout.write('%d, %d\n' % (pred_val_adv[i-start_idx], l))

                    if pred_val_adv[
                            i -
                            start_idx] != l and FLAGS.visu_adv:  # ERROR CASE, DUMP!
                        img_filename = '%d_label_%s_pred_%s_adv' % (
                            error_cnt_adv, SHAPE_NAMES[l],
                            SHAPE_NAMES[pred_val_adv[i - start_idx]])
                        img_filename = os.path.join(DUMP_DIR, img_filename)
                        np.save(
                            img_filename,
                            np.squeeze(adv_data[i -
                                                batch_idx * BATCH_SIZE, :, :]))
                        np.save(img_filename + '_normal',
                                np.squeeze(current_data[i, :, :]))
                        output_img = pc_util.point_cloud_three_views(
                            np.squeeze(adv_data[i -
                                                batch_idx * BATCH_SIZE, :, :]))
                        scipy.misc.imsave(img_filename + '.jpg', output_img)
                        error_cnt_adv += 1

    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(total_correct_class) /
        np.array(total_seen_class, dtype=np.float64))))

    if adv == True:
        log_string('adv eval mean loss: %f' %
                   (loss_sum_adv / float(total_seen_adv)))
        log_string('adv eval accuracy: %f' %
                   (total_correct_adv / float(total_seen_adv)))
        log_string('adv eval avg class acc: %f' % (np.mean(
            np.array(total_correct_class_adv) /
            np.array(total_seen_class_adv, dtype=np.float64))))

    class_accuracies = np.array(total_correct_class) / np.array(
        total_seen_class, dtype=np.float64)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
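
The perturb helper that Example #10 calls is not shown in the snippet. A plausible PGD-style sketch that matches its call signature perturb(data, labels, sess, ops, eps, num_steps, step_size), under the added assumption that the graph exposes the input gradient as ops['grad'] = tf.gradients(ops['loss'], ops['pointclouds_pl'])[0]:

import numpy as np

def perturb(points, labels, sess, ops, eps, num_steps, step_size):
    # Iterated gradient-sign ascent on the loss, projected back into an
    # eps-ball around the clean points (hypothetical reconstruction).
    adv = points.copy()
    for _ in range(num_steps):
        grad = sess.run(ops['grad'], feed_dict={
            ops['pointclouds_pl']: adv,
            ops['labels_pl']: labels,
            ops['is_training_pl']: False})
        adv = adv + step_size * np.sign(grad)
        adv = np.clip(adv, points - eps, points + eps)
    return adv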
Example #11
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')
    for fn in range(len(TEST_FILES)):
        log_string('----'+str(fn)+'----')
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:,0:NUM_POINT,:]
        current_label = np.squeeze(current_label)
        print(current_data.shape)
        
        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        print(file_size)
        
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE
            cur_batch_size = end_idx - start_idx
            
            # Aggregating BEG
            batch_loss_sum = 0 # sum of losses for the batch
            batch_pred_sum = np.zeros((cur_batch_size, NUM_CLASSES)) # score for classes
            batch_pred_classes = np.zeros((cur_batch_size, NUM_CLASSES)) # 0/1 for classes
            for vote_idx in range(num_votes):
                rotated_data = provider.rotate_point_cloud_by_angle(current_data[start_idx:end_idx, :, :],
                                                  vote_idx/float(num_votes) * np.pi * 2)
                feed_dict = {ops['pointclouds_pl']: rotated_data,
                             ops['labels_pl']: current_label[start_idx:end_idx],
                             ops['is_training_pl']: is_training}
                loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                          feed_dict=feed_dict)
                batch_pred_sum += pred_val
                batch_pred_val = np.argmax(pred_val, 1)
                for el_idx in range(cur_batch_size):
                    batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
                batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
            # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
            # pred_val = np.argmax(batch_pred_classes, 1)
            pred_val = np.argmax(batch_pred_sum, 1)
            # Aggregating END
            
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
            total_correct += correct
            total_seen += cur_batch_size
            loss_sum += batch_loss_sum

            for i in range(start_idx, end_idx):
                l = current_label[i]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_val[i-start_idx] == l)
                fout.write('%d, %d\n' % (pred_val[i-start_idx], l))
                
                if pred_val[i-start_idx] != l and FLAGS.visu: # ERROR CASE, DUMP!
                    img_filename = '%d_label_%s_pred_%s.jpg' % (error_cnt, SHAPE_NAMES[l],
                                                           SHAPE_NAMES[pred_val[i-start_idx]])
                    img_filename = os.path.join(DUMP_DIR, img_filename)
                    output_img = pc_util.point_cloud_three_views(np.squeeze(current_data[i, :, :]))
                    scipy.misc.imsave(img_filename, output_img)
                    error_cnt += 1
                
    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float64))))

    class_accuracies = np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float64)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
Example #12
def prediction(num_votes):
    is_training = False
    with tf.device('/gpu:' + str(GPU_INDEX)):
        # pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
        is_training_pl = tf.placeholder(tf.bool, shape=())
        # def placeholder_inputs(batch_size, num_point):
        pointclouds_pl = tf.placeholder(tf.float32,
                                        shape=(BATCH_SIZE, NUM_POINT, 3))
        labels_pl = tf.placeholder(tf.int32, shape=(BATCH_SIZE))

        # simple model
        # Return one extra variable from get_model so the value of this tensor
        # can be inspected (assumes get_model was modified to also return the
        # l3 feature tensor l3_xyz, which is used below).
        pred, end_points, l3_xyz = MODEL.get_model(pointclouds_pl, is_training_pl)

        MODEL.get_loss(pred, labels_pl, end_points)
        losses = tf.get_collection('losses')

        # add the current loss to the total loss
        total_loss = tf.add_n(losses, name='total_loss')

        # Add ops to save and restore all the variables.
        saver = tf.train.Saver()

    # Create a session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    config.log_device_placement = False
    sess = tf.Session(config=config)

    # Restore variables from disk.
    saver.restore(sess, MODEL_PATH)
    log_string("Model restored.")
    print('type pred: ', type(pred), 'type l3_xyz : ', type(l3_xyz))
    # the printed output shows that both of these are tensors
    ops = {
        'pointclouds_pl': pointclouds_pl,
        'labels_pl': labels_pl,
        'is_training_pl': is_training_pl,
        'pred': pred,
        'loss': total_loss,
        'l3_feature': l3_xyz
    }  # the last entry was added by hand
    # eval_one_epoch(sess, ops, num_votes)

    # Make sure batch data is of same size
    cur_batch_data = np.zeros(
        (BATCH_SIZE, NUM_POINT, TEST_DATASET.num_channel()))
    cur_batch_label = np.zeros((BATCH_SIZE), dtype=np.int32)

    total_correct = 0
    total_seen = 0
    loss_sum = 0
    batch_idx = 0

    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    '''
    while TEST_DATASET.has_next_batch():
        batch_data, batch_label = TEST_DATASET.next_batch(augment=False)
        bsize = batch_data.shape[0]
        print('Batch: %03d, batch size: %d'%(batch_idx, bsize))
        # for the last batch in the epoch, the bsize:end are from last batch
        cur_batch_data[0:bsize,...] = batch_data
        cur_batch_label[0:bsize] = batch_label

        batch_pred_sum = np.zeros((BATCH_SIZE, NUM_CLASSES)) # score for classes
        for vote_idx in range(num_votes):
            # Shuffle point order to achieve different farthest samplings
            shuffled_indices = np.arange(NUM_POINT)
            np.random.shuffle(shuffled_indices)
            if FLAGS.normal:
                rotated_data = provider.rotate_point_cloud_by_angle_with_normal(cur_batch_data[:, shuffled_indices, :],
                    vote_idx/float(num_votes) * np.pi * 2)
            else:
                rotated_data = provider.rotate_point_cloud_by_angle(cur_batch_data[:, shuffled_indices, :],
                    vote_idx/float(num_votes) * np.pi * 2)
            feed_dict = {ops['pointclouds_pl']: rotated_data,
                         ops['labels_pl']: cur_batch_label,
                         ops['is_training_pl']: is_training}
            
            loss_val, pred_val = sess.run([ops['loss'], ops['pred']], feed_dict=feed_dict)
            
            # try whether this tensor can actually be evaluated --- it works, it runs!!
            # both of the two forms below work
            #loss_val, pred_val,check_l3 = sess.run([ops['loss'], ops['pred'],ops['l3_feature']], feed_dict=feed_dict)
            check_l3 = sess.run(l3_xyz,feed_dict = feed_dict)

            print('check_l3 shape= ',check_l3.shape)
            print('prediction = ', pred_val.shape)
            batch_pred_sum += pred_val
            
        pred_val = np.argmax(batch_pred_sum, 1)
        correct = np.sum(pred_val[0:bsize] == batch_label[0:bsize])
        total_correct += correct
        total_seen += bsize
        loss_sum += loss_val
        batch_idx += 1
        # print('pred_val = ',pred_val.shape)
        # each pass evaluates one batch-size worth of meshes and produces predictions.
        for i in range(bsize):
            l = batch_label[i]
            total_seen_class[l] += 1
            total_correct_class[l] += (pred_val[i] == l)
    
    log_string('eval mean loss: %f' % (loss_sum / float(batch_idx)))
    log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))
     
    
    class_accuracies = np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
    '''
    # my test
    # Shuffle point order to achieve different farthest samplings
    #
    # call a custom data loader here that reads however many airplane samples are wanted
    cur_batch_data, cur_batch_label = chao.get_data()
    #

    shuffled_indices = np.arange(NUM_POINT)
    np.random.shuffle(shuffled_indices)
    rotated_data = provider.rotate_point_cloud_by_angle(
        cur_batch_data[:, shuffled_indices, :], 1 / float(1) * np.pi * 2)
    feed_dict = {
        ops['pointclouds_pl']: rotated_data,
        ops['labels_pl']: cur_batch_label,
        ops['is_training_pl']: is_training
    }
    check_l3 = sess.run(l3_xyz, feed_dict=feed_dict)
    loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                  feed_dict=feed_dict)
    print('check_l3 shape= ', check_l3.shape)
    pred_val = np.argmax(pred_val, 1)

    my_correct = 0
    for i in range(4):
        l = cur_batch_label[i]
        total_seen_class[l] += 1
        my_correct += (pred_val[i] == l)
    accuracies = my_correct / total_seen_class[0]

    print("the classification accuracy is:", accuracies)
    print("total_seen_class = ", total_seen_class[0])
    print('total_correct_class', my_correct.shape)

    chao.draw_pointcloud(cur_batch_data[0, ...])
    chao.draw_pointcloud(cur_batch_data[1, ...])
    chao.draw_pointcloud(cur_batch_data[2, ...])
    chao.draw_pointcloud(cur_batch_data[3, ...])

    # check the feature map difference
    # between bed and plane
    diff1 = sum(sum((check_l3[0, ...] - check_l3[3, ...])**2))
    print('diff1 = ', diff1)
    # between plane
    diff2 = sum(sum((check_l3[0, ...] - check_l3[1, ...])**2))
    print('diff2 = ', diff2)
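
The nested sum(sum((a - b) ** 2)) at the end of Example #12 is just the squared Euclidean distance between two feature maps; an equivalent NumPy one-liner:

import numpy as np

def feature_distance(f1, f2):
    # Squared Euclidean distance between two equally shaped feature maps.
    return np.sum((f1 - f2) ** 2)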
Example #13
def eval_one_epoch(sess, ops, num_votes=12, topk=1):
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0

    current_data_1 = np.empty([3 * len(TEST_FILES), NUM_POINT, 3], dtype=float)
    current_data_2 = np.empty([3 * len(TEST_FILES), NUM_POINT, 3], dtype=float)
    current_label = np.empty([3 * len(TEST_FILES), 1], dtype=int)

    fn = 0
    count = 0
    while fn < len(TEST_FILES) - 1:

        total_current = []
        a1, a2, _ = provider.loadDataFile_cut_2(TEST_FILES[fn])

        idx = np.random.randint(a1.shape[0], size=NUM_POINT)
        a1 = a1[idx, :]
        idx = np.random.randint(a2.shape[0], size=NUM_POINT)
        a2 = a2[idx, :]
        total_current.append(a1)
        total_current.append(a2)

        fn = fn + 1

        b1, b2, _ = provider.loadDataFile_cut_2(TEST_FILES[fn])

        idx = np.random.randint(b1.shape[0], size=NUM_POINT)
        b1 = b1[idx, :]
        idx = np.random.randint(b2.shape[0], size=NUM_POINT)
        b2 = b2[idx, :]
        total_current.append(b1)
        total_current.append(b2)

        fn = fn + 1

        pair_num = 0
        for index in range(len(total_current)):
            for index2 in range(index + 1, len(total_current)):
                current_data_1[6 * count +
                               pair_num, :, :] = total_current[index]
                current_data_2[6 * count +
                               pair_num, :, :] = total_current[index2]
                if (index < 2) and (index2 >= 2):
                    current_label[6 * count + pair_num, :] = 0
                else:
                    current_label[6 * count + pair_num, :] = 1

                pair_num = pair_num + 1
        count = count + 1

    current_label = np.squeeze(current_label)

    file_size = current_data_1.shape[0]
    num_batches = file_size // BATCH_SIZE
    log_string('file_size: %d' % (file_size))
    log_string('num_batches: %d' % (num_batches))

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        cur_batch_size = end_idx - start_idx
        log_string('batch: %d' % (batch_idx))
        # Aggregating BEG
        batch_loss_sum = 0  # sum of losses for the batch
        batch_pred_sum = np.zeros(
            (cur_batch_size, NUM_CLASSES))  # score for classes
        # batch_pred_classes = np.zeros((cur_batch_size, NUM_CLASSES)) # 0/1 for classes
        for vote_idx in range(num_votes):
            rotated_data_1 = provider.rotate_point_cloud_by_angle(
                current_data_1[start_idx:end_idx, :, :],
                vote_idx / float(num_votes) * np.pi * 2)
            rotated_data_2 = provider.rotate_point_cloud_by_angle(
                current_data_2[start_idx:end_idx, :, :],
                vote_idx / float(num_votes) * np.pi * 2)
            feed_dict = {
                ops['pointclouds_pl_1']: rotated_data_1,
                ops['pointclouds_pl_2']: rotated_data_2,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training
            }

            loss_val, pred_val, _ = sess.run(
                [ops['loss'], ops['pred'], ops['feature']],
                feed_dict=feed_dict)
            batch_pred_sum += pred_val
            # batch_pred_val = np.argmax(pred_val, 1)
            # for el_idx in range(cur_batch_size):
            #     batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
            batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
        # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
        # pred_val = np.argmax(batch_pred_classes, 1)
        pred_val = np.argmax(batch_pred_sum, 1)
        # Aggregating END

        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
        total_correct += correct
        total_seen += cur_batch_size
        loss_sum += batch_loss_sum

    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
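
Example #13 builds its pair labels implicitly: each pair of test files contributes two shapes cut into two halves each ([a1, a2, b1, b2]), every unordered pair of halves becomes a sample, and the label is 1 when both halves come from the same shape and 0 for a cross-shape pair. The same rule spelled out as an illustrative sketch:

from itertools import combinations

halves = ['a1', 'a2', 'b1', 'b2']  # first two from shape A, last two from shape B
for i, j in combinations(range(4), 2):
    label = int((i < 2) == (j < 2))  # 1 = same shape, 0 = cross-shape
    print(halves[i], halves[j], label)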
Example #14
	bsize = batch_data.shape[0]
	print('Batch: %03d, batch size: %d'%(batch_idx, bsize))
	# for the last batch in the epoch, the bsize:end are from last batch
	cur_batch_data[0:bsize,...] = batch_data
	cur_batch_label[0:bsize] = batch_label

	batch_pred_sum = np.zeros((BATCH_SIZE, NUM_CLASSES)) # score for classes
	for vote_idx in range(num_votes):
		# Shuffle point order to achieve different farthest samplings
		shuffled_indices = np.arange(NUM_POINT)
		np.random.shuffle(shuffled_indices)
		if FLAGS.normal:
			rotated_data = provider.rotate_point_cloud_by_angle_with_normal(cur_batch_data[:, shuffled_indices, :],
				vote_idx/float(num_votes) * np.pi * 2)
		else:
			rotated_data = provider.rotate_point_cloud_by_angle(cur_batch_data[:, shuffled_indices, :],
				vote_idx/float(num_votes) * np.pi * 2)
		is_training = False
		#feed_dict = {input: rotated_data,input_1: cur_batch_label,input_2: is_training}
		
		pred_val = sess.run(output, feed_dict={input:cur_batch_data[:, shuffled_indices, :],input_2:is_training})
		print(pred_val)

class_accuracies = np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float64)
for i, name in enumerate(SHAPE_NAMES):
	log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))

# output: 26
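
The fragment above appears to drive a restored (likely frozen) graph in which input, input_2, and output were fetched elsewhere. A hypothetical setup under that assumption; the tensor names below are placeholders, not taken from the snippet:

import tensorflow.compat.v1 as tf

graph = tf.get_default_graph()
input = graph.get_tensor_by_name('pointclouds_pl:0')    # assumed name
input_2 = graph.get_tensor_by_name('is_training_pl:0')  # assumed name
output = graph.get_tensor_by_name('pred/BiasAdd:0')     # assumed name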
Example #15
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_C)]
    total_correct_class = [0 for _ in range(NUM_C)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')

    if (".h5" in TEST_FILE):
        current_data, current_label = data_utils.get_current_data_h5(
            TEST_DATA, TEST_LABELS, NUM_POINT)
    else:
        current_data, current_label = data_utils.get_current_data(
            TEST_DATA, TEST_LABELS, NUM_POINT)

    current_label = np.squeeze(current_label)

    ####################################################
    print(current_data.shape)
    print(current_label.shape)

    filtered_data = []
    filtered_label = []
    for i in range(current_label.shape[0]):
        if (current_label[i] in OBJECTDATASET_TO_MODELNET.keys()):
            filtered_label.append(current_label[i])
            filtered_data.append(current_data[i, :])

    filtered_data = np.array(filtered_data)
    filtered_label = np.array(filtered_label)
    print(filtered_data.shape)
    print(filtered_label.shape)

    current_data = filtered_data
    current_label = filtered_label
    ###################################################

    num_batches = current_data.shape[0] // BATCH_SIZE

    current_pred = []

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        cur_batch_size = end_idx - start_idx

        # Aggregating BEG
        batch_loss_sum = 0  # sum of losses for the batch
        batch_pred_sum = np.zeros((cur_batch_size, 40))  # score for classes
        batch_pred_classes = np.zeros((cur_batch_size, 40))  # 0/1 for classes
        for vote_idx in range(num_votes):
            rotated_data = provider.rotate_point_cloud_by_angle(
                current_data[start_idx:end_idx, :, :],
                vote_idx / float(num_votes) * np.pi * 2)
            feed_dict = {
                ops['pointclouds_pl']: rotated_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training
            }
            loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                          feed_dict=feed_dict)

            batch_pred_sum += pred_val
            batch_pred_val = np.argmax(pred_val, 1)
            for el_idx in range(cur_batch_size):
                batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
            batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
        # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
        # pred_val = np.argmax(batch_pred_classes, 1)
        pred_val = np.argmax(batch_pred_sum, 1)
        # Aggregating END

        for i in range(start_idx, end_idx):
            total_seen += 1
            if (pred_val[i - start_idx]
                    not in MODELNET_TO_OBJECTDATASET.keys()):
                continue
            pred = MODELNET_TO_OBJECTDATASET[pred_val[i - start_idx]]
            # if (pred_val[i-start_idx] == current_label[i]):
            if (pred == current_label[i]):
                total_correct += 1

        for i in range(start_idx, end_idx):

            l = current_label[i]
            total_seen_class[l] += 1

            if pred_val[i - start_idx] not in MODELNET_TO_OBJECTDATASET:
                pred_label = "NA"
            else:
                pred = MODELNET_TO_OBJECTDATASET[pred_val[i - start_idx]]
                total_correct_class[l] += (pred == l)

                pred_label = SHAPE_NAMES[pred]

            # groundtruth_label = SHAPE_NAMES[MODELNET_TO_OBJECTDATASET[l]]
            groundtruth_label = SHAPE_NAMES[l]

            fout.write('%s, %s\n' % (pred_label, groundtruth_label))

    log_string('total seen: %d' % (total_seen))
    # log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))

    # only average over classes that actually appear in the test set
    seen_class_counts = []
    seen_correct_counts = []
    for i in range(len(total_seen_class)):
        if total_seen_class[i] != 0:
            seen_class_counts.append(total_seen_class[i])
            seen_correct_counts.append(total_correct_class[i])
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(seen_correct_counts) /
        np.array(seen_class_counts, dtype=np.float))))

    for i, name in enumerate(SHAPE_NAMES):
        if (total_seen_class[i] == 0):
            accuracy = -1
        else:
            accuracy = total_correct_class[i] / float(total_seen_class[i])
        log_string('%10s:\t%0.3f' % (name, accuracy))
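Ejemplo 15 evaluates a ModelNet-trained classifier on a different dataset by translating each prediction through MODELNET_TO_OBJECTDATASET; predictions with no mapping still count as seen (and therefore as errors). The same bookkeeping, isolated; MODEL_TO_TARGET is an illustrative stand-in, not the real mapping:

# Illustrative stand-in for MODELNET_TO_OBJECTDATASET.
MODEL_TO_TARGET = {0: 0, 2: 1, 5: 2}

def mapped_accuracy(pred, label, mapping):
    """Accuracy when predictions live in a larger label space than the
    ground truth; unmapped predictions count as seen-but-wrong."""
    correct = sum(1 for p, l in zip(pred, label)
                  if p in mapping and mapping[p] == l)
    return correct / float(len(label))

print(mapped_accuracy([0, 2, 7], [0, 1, 2], MODEL_TO_TARGET))  # 0.666...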
Ejemplo n.º 16
0
def eval_one_epoch(config, sess, ops, epoch=0):
    is_training = False
    num_votes = config.num_votes

    total_seen = 0
    loss_sum = 0
    predictions = []
    labels = []
    for fn in range(len(TEST_FILES)):
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:, 0:config.num_points, :]
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // config.batch_size + 1

        for batch_idx in range(num_batches):
            start_idx = batch_idx * config.batch_size
            end_idx = (batch_idx + 1) * config.batch_size
            cur_batch_size = min(end_idx - start_idx,
                                 config.batch_size - end_idx + file_size)

            if cur_batch_size < config.batch_size:
                placeholder_data = np.zeros(
                    ([config.batch_size] + (list(current_data.shape))[1:]))
                placeholder_data[0:cur_batch_size, :, :] = current_data[
                    start_idx:end_idx, :, :]

                placeholder_labels = np.zeros((config.batch_size))
                placeholder_labels[0:cur_batch_size] = current_label[
                    start_idx:end_idx]

                batch_labels = placeholder_labels
                batch_data = placeholder_data
            else:
                batch_data = current_data[start_idx:end_idx, :, :]
                batch_labels = current_label[start_idx:end_idx]

            # Aggregating BEG
            batch_loss_sum = 0  # sum of losses for the batch
            batch_pred_sum = np.zeros(
                (config.batch_size, config.num_classes))  # score for classes

            for vote_idx in range(num_votes):
                rotated_data = provider.rotate_point_cloud_by_angle(
                    batch_data, vote_idx / float(num_votes) * np.pi * 2)
                feed_dict = {
                    ops['pointclouds_pl']: rotated_data,
                    ops['labels_pl']: batch_labels,
                    ops['is_training_pl']: is_training
                }
                loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                              feed_dict=feed_dict)

                batch_pred_sum += pred_val
                batch_loss_sum += (loss_val * cur_batch_size /
                                   float(num_votes))

            pred_val = np.argmax(batch_pred_sum, 1)
            predictions += pred_val.tolist()[0:cur_batch_size]
            labels += current_label[start_idx:end_idx].tolist()

            total_seen += cur_batch_size
            loss_sum += batch_loss_sum

    loss = loss_sum / float(total_seen)
    acc = sum([
        1 if predictions[i] == labels[i] else 0
        for i in range(len(predictions))
    ]) / float(len(predictions))
    log(
        config.log_file,
        'EVALUATION EPOCH {} - accuracy: {}    loss: {}'.format(
            epoch, acc, loss))

    if config.test:
        import Evaluation_tools as et
        eval_file = os.path.join(config.log_dir, '{}.txt'.format(config.name))
        et.write_eval_file(config.data, eval_file, predictions, labels,
                           config.name)
        et.make_matrix(config.data, eval_file, config.log_dir)
    else:
        LOSS_LOGGER.log(loss, epoch, "eval_loss")
        ACC_LOGGER.log(acc, epoch, "eval_accuracy")
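Ejemplo 16 deals with the final short batch by zero-padding the data and labels up to config.batch_size, running the full batch through the network, and keeping only the first cur_batch_size predictions. That padding logic on its own (names are illustrative):

import numpy as np

def pad_batch(data, labels, batch_size):
    """Zero-pad a short batch to `batch_size`; also return the real row count."""
    real = data.shape[0]
    if real == batch_size:
        return data, labels, real
    pad_data = np.zeros((batch_size,) + data.shape[1:], dtype=data.dtype)
    pad_labels = np.zeros((batch_size,), dtype=labels.dtype)
    pad_data[:real] = data
    pad_labels[:real] = labels
    return pad_data, pad_labels, real

d, l, real = pad_batch(np.ones((3, 1024, 3)), np.arange(3), batch_size=8)
print(d.shape, l.shape, real)  # (8, 1024, 3) (8,) 3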
Ejemplo n.º 17
0
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')

    if (".h5" in TEST_FILE):
        current_data, current_label = data_utils.get_current_data_h5(
            TEST_DATA, TEST_LABELS, NUM_POINT)
    else:
        current_data, current_label = data_utils.get_current_data(
            TEST_DATA, TEST_LABELS, NUM_POINT)

    current_label = np.squeeze(current_label)

    num_batches = current_data.shape[0] // BATCH_SIZE

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        cur_batch_size = end_idx - start_idx

        # Aggregating BEG
        batch_loss_sum = 0  # sum of losses for the batch
        batch_pred_sum = np.zeros(
            (cur_batch_size, NUM_CLASSES))  # score for classes
        batch_pred_classes = np.zeros(
            (cur_batch_size, NUM_CLASSES))  # 0/1 for classes
        for vote_idx in range(num_votes):
            rotated_data = provider.rotate_point_cloud_by_angle(
                current_data[start_idx:end_idx, :, :],
                vote_idx / float(num_votes) * np.pi * 2)
            feed_dict = {
                ops['pointclouds_pl']: rotated_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training
            }
            loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                          feed_dict=feed_dict)

            batch_pred_sum += pred_val
            batch_pred_val = np.argmax(pred_val, 1)
            for el_idx in range(cur_batch_size):
                batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
            batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
        # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
        # pred_val = np.argmax(batch_pred_classes, 1)
        pred_val = np.argmax(batch_pred_sum, 1)
        # Aggregating END

        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
        total_correct += correct
        total_seen += cur_batch_size
        loss_sum += batch_loss_sum

        for i in range(start_idx, end_idx):
            l = current_label[i]
            total_seen_class[l] += 1
            total_correct_class[l] += (pred_val[i - start_idx] == l)
            fout.write('%s, %s\n' %
                       (SHAPE_NAMES[pred_val[i - start_idx]], SHAPE_NAMES[l]))

            if pred_val[i - start_idx] != l and FLAGS.visu:  # ERROR CASE, DUMP!
                img_filename = '%d_label_%s_pred_%s.jpg' % (
                    error_cnt, SHAPE_NAMES[l],
                    SHAPE_NAMES[pred_val[i - start_idx]])
                img_filename = os.path.join(DUMP_DIR, img_filename)
                output_img = pc_util.point_cloud_three_views(
                    np.squeeze(current_data[i, :, :]))
                scipy.misc.imsave(img_filename, output_img)
                #save ply
                ply_filename = '%d_label_%s_pred_%s.ply' % (
                    error_cnt, SHAPE_NAMES[l],
                    SHAPE_NAMES[pred_val[i - start_idx]])
                ply_filename = os.path.join(DUMP_DIR, ply_filename)
                data_utils.save_ply(np.squeeze(current_data[i, :, :]),
                                    ply_filename)
                error_cnt += 1

    log_string('total seen: %d' % (total_seen))
    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(total_correct_class) /
        np.array(total_seen_class, dtype=np.float))))

    class_accuracies = np.array(total_correct_class) / np.array(
        total_seen_class, dtype=np.float)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
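Ejemplo 17 tracks average class accuracy with two parallel counter lists indexed by label. The same computation vectorized, with NaN marking classes that never occur in the test split; a sketch, not the snippet's exact code:

import numpy as np

def per_class_accuracy(pred, label, num_classes):
    """Per-class accuracy from (pred, label) pairs; NaN for unseen classes."""
    seen = np.zeros(num_classes, dtype=np.int64)
    correct = np.zeros(num_classes, dtype=np.int64)
    for p, l in zip(pred, label):
        seen[l] += 1
        correct[l] += int(p == l)
    with np.errstate(divide='ignore', invalid='ignore'):
        return correct / seen.astype(np.float64)

print(per_class_accuracy([0, 1, 1, 2], [0, 1, 2, 2], num_classes=4))
# -> [1.  1.  0.5 nan]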
Ejemplo n.º 18
0
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')

    # data_utils.shuffle_points(TEST_DATA)

    # current_data, current_label = data_utils.get_current_data(TEST_DATA, TEST_LABELS, NUM_POINT)
    # current_data, current_label = data_utils.get_current_data_h5(TEST_DATA, TEST_LABELS, NUM_POINT)
    if (".h5" in TEST_FILE):
        current_data, current_label = data_utils.get_current_data_h5(
            TEST_DATA, TEST_LABELS, NUM_POINT)
    else:
        current_data, current_label = data_utils.get_current_data(
            TEST_DATA, TEST_LABELS, NUM_POINT)

    current_label = np.squeeze(current_label)

    ####################################################
    print(current_data.shape)
    print(current_label.shape)

    filtered_data = []
    filtered_label = []
    for i in range(current_label.shape[0]):
        if (current_label[i] in OBJECTDATASET_TO_MODELNET.keys()):
            filtered_label.append(current_label[i])
            filtered_data.append(current_data[i, :])

    filtered_data = np.array(filtered_data)
    filtered_label = np.array(filtered_label)
    print(filtered_data.shape)
    print(filtered_label.shape)

    current_data = filtered_data
    current_label = filtered_label
    ###################################################

    num_batches = current_data.shape[0] // BATCH_SIZE

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        cur_batch_size = end_idx - start_idx

        # Aggregating BEG
        batch_loss_sum = 0  # sum of losses for the batch
        batch_pred_sum = np.zeros((cur_batch_size, 40))  # score for classes
        batch_pred_classes = np.zeros((cur_batch_size, 40))  # 0/1 for classes
        for vote_idx in range(num_votes):
            rotated_data = provider.rotate_point_cloud_by_angle(
                current_data[start_idx:end_idx, :, :],
                vote_idx / float(num_votes) * np.pi * 2)

            xforms_np, rotations_np = pf.get_xforms(
                BATCH_SIZE,
                rotation_range=rotation_range_val,
                scaling_range=scaling_range_val,
                order=setting.rotation_order)

            # Augment batched point clouds by rotation and jittering
            feed_dict = {
                ops['pointclouds_pl']: rotated_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
                ops['xforms']: xforms_np,
                ops['rotations']: rotations_np,
                ops['jitter_range']: np.array([jitter_val])
            }

            loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                          feed_dict=feed_dict)

            # the network emits per-point logits of shape (B, P, C);
            # sum over the point axis to get one (B, C) score matrix per vote
            pred_val = np.sum(pred_val, axis=1)
            # pred_val = np.argmax(pred_val, 1)

            batch_pred_sum += pred_val
            batch_pred_val = np.argmax(pred_val, 1)
            for el_idx in range(cur_batch_size):
                batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
            batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
        # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
        # pred_val = np.argmax(batch_pred_classes, 1)
        pred_val = np.argmax(batch_pred_sum, 1)
        # Aggregating END

        for i in range(start_idx, end_idx):
            total_seen += 1
            if (pred_val[i - start_idx]
                    not in MODELNET_TO_OBJECTDATASET.keys()):
                continue
            pred = MODELNET_TO_OBJECTDATASET[pred_val[i - start_idx]]
            # if (pred_val[i-start_idx] == current_label[i]):
            if (pred == current_label[i]):
                total_correct += 1

        for i in range(start_idx, end_idx):

            l = current_label[i]
            total_seen_class[l] += 1

            if pred_val[i - start_idx] not in MODELNET_TO_OBJECTDATASET:
                pred_label = "NA"
            else:
                pred = MODELNET_TO_OBJECTDATASET[pred_val[i - start_idx]]
                total_correct_class[l] += (pred == l)

                pred_label = SHAPE_NAMES[pred]

            # groundtruth_label = SHAPE_NAMES[MODELNET_TO_OBJECTDATASET[l]]
            groundtruth_label = SHAPE_NAMES[l]

            fout.write('%s, %s\n' % (pred_label, groundtruth_label))

            if pred_val[i - start_idx] != l and FLAGS.visu:  # ERROR CASE, DUMP!
                img_filename = '%d_label_%s_pred_%s.jpg' % (
                    error_cnt, groundtruth_label, pred_label)
                img_filename = os.path.join(DUMP_DIR, img_filename)
                output_img = pc_util.point_cloud_three_views(
                    np.squeeze(current_data[i, :, :]))
                scipy.misc.imsave(img_filename, output_img)

                #save ply
                ply_filename = '%d_label_%s_pred_%s.ply' % (
                    error_cnt, groundtruth_label, pred_label)
                ply_filename = os.path.join(DUMP_DIR, ply_filename)
                data_utils.save_ply(np.squeeze(current_data[i, :, :]),
                                    ply_filename)
                error_cnt += 1

    log_string('total seen: %d' % (total_seen))
    # log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))

    # only average over classes that actually appear in the test set
    seen_class_counts = []
    seen_correct_counts = []
    for i in range(len(total_seen_class)):
        if total_seen_class[i] != 0:
            seen_class_counts.append(total_seen_class[i])
            seen_correct_counts.append(total_correct_class[i])
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(seen_correct_counts) /
        np.array(seen_class_counts, dtype=np.float))))

    for i, name in enumerate(SHAPE_NAMES):
        if (total_seen_class[i] == 0):
            accuracy = -1
        else:
            accuracy = total_correct_class[i] / float(total_seen_class[i])
        log_string('%10s:\t%0.3f' % (name, accuracy))
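Unlike the earlier snippets, Ejemplo 18 (and Ejemplo 19 below) get per-point logits back from the network and collapse them with pred_val = np.sum(pred_val, axis=1) before voting. A tiny sketch of that reduction, with shapes inferred from the surrounding code:

import numpy as np

def collapse_point_logits(pred):
    """Collapse per-point logits (B, P, C) to per-cloud scores (B, C)."""
    return np.sum(pred, axis=1)

print(collapse_point_logits(np.random.rand(2, 16, 40)).shape)  # (2, 40)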
Ejemplo n.º 19
0
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_correct_seg = 0
    classify_loss_sum = 0
    seg_loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')

    current_data, current_label, current_mask = data_utils.get_current_data_withmask_h5(
        TEST_DATA, TEST_LABELS, TEST_MASKS, NUM_POINT)

    current_label = np.squeeze(current_label)
    current_mask = np.squeeze(current_mask)

    num_batches = current_data.shape[0] // BATCH_SIZE

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        cur_batch_size = end_idx - start_idx

        # Aggregating BEG
        batch_loss_sum = 0  # sum of losses for the batch
        batch_pred_sum = np.zeros(
            (cur_batch_size, NUM_CLASSES))  # score for classes
        batch_seg_sum = np.zeros(
            (cur_batch_size, NUM_POINT, 2))  # score for classes
        batch_pred_classes = np.zeros(
            (cur_batch_size, NUM_CLASSES))  # 0/1 for classes
        for vote_idx in range(num_votes):
            rotated_data = provider.rotate_point_cloud_by_angle(
                current_data[start_idx:end_idx, :, :],
                vote_idx / float(num_votes) * np.pi * 2)

            xforms_np, rotations_np = pf.get_xforms(
                BATCH_SIZE,
                rotation_range=rotation_range_val,
                scaling_range=scaling_range_val,
                order=setting.rotation_order)

            # Augment batched point clouds by rotation and jittering
            feed_dict = {
                ops['pointclouds_pl']: rotated_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['masks_pl']: current_mask[start_idx:end_idx],
                ops['is_training_pl']: is_training,
                ops['xforms']: xforms_np,
                ops['rotations']: rotations_np,
                ops['jitter_range']: np.array([jitter_val])
            }

            loss_val, pred_val, seg_val, classify_loss, seg_loss = sess.run(
                [
                    ops['loss'], ops['pred'], ops['seg_pred'],
                    ops['classify_loss'], ops['seg_loss']
                ],
                feed_dict=feed_dict)

            # collapse per-point logits (B, P, C) to per-cloud scores (B, C)
            pred_val = np.sum(pred_val, axis=1)
            # pred_val = np.argmax(pred_val, 1)

            batch_pred_sum += pred_val
            batch_seg_sum += seg_val
            batch_pred_val = np.argmax(pred_val, 1)
            for el_idx in range(cur_batch_size):
                batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
            batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
        # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
        # pred_val = np.argmax(batch_pred_classes, 1)
        pred_val = np.argmax(batch_pred_sum, 1)
        # Aggregating END

        correct = np.sum(pred_val == current_label[start_idx:end_idx])

        seg_val = np.argmax(batch_seg_sum, 2)
        seg_correct = np.sum(seg_val == current_mask[start_idx:end_idx])
        total_correct_seg += seg_correct

        # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
        total_correct += correct
        total_seen += cur_batch_size
        loss_sum += batch_loss_sum

        for i in range(start_idx, end_idx):
            l = current_label[i]
            total_seen_class[l] += 1
            total_correct_class[l] += (pred_val[i - start_idx] == l)
            fout.write('%s, %s\n' %
                       (SHAPE_NAMES[pred_val[i - start_idx]], SHAPE_NAMES[l]))

            gt_mask = current_mask[i]
            pred_mask = seg_val[i - start_idx]

            pred_mask_idx = np.where(pred_mask == 1)[0]
            gt_mask_idx = np.where(gt_mask == 1)[0]
            correct_obj_mask = np.where((pred_mask == gt_mask)
                                        & (pred_mask == 1))[0]

            if (len(correct_obj_mask) == 1):
                continue

            if (i % 20 == 0 and FLAGS.visu_mask):
                ###1)
                img_filename = '%d_label_%s_pred_%s_gtmask.jpg' % (
                    i, SHAPE_NAMES[l], SHAPE_NAMES[pred_val[i - start_idx]])
                img_filename = os.path.join(DUMP_DIR, img_filename)
                output_img = pc_util.point_cloud_three_views(
                    np.squeeze(current_data[i, gt_mask_idx, :]))
                scipy.misc.imsave(img_filename, output_img)

                #save ply
                ply_filename = '%d_label_%s_pred_%s_gtmask.ply' % (
                    i, SHAPE_NAMES[l], SHAPE_NAMES[pred_val[i - start_idx]])
                ply_filename = os.path.join(DUMP_DIR, ply_filename)
                data_utils.save_ply(
                    np.squeeze(current_data[i, gt_mask_idx, :]), ply_filename)

                ###2)
                img_filename = '%d_label_%s_pred_%s_predmask.jpg' % (
                    i, SHAPE_NAMES[l], SHAPE_NAMES[pred_val[i - start_idx]])
                img_filename = os.path.join(DUMP_DIR, img_filename)
                output_img = pc_util.point_cloud_three_views(
                    np.squeeze(current_data[i, pred_mask_idx, :]))
                scipy.misc.imsave(img_filename, output_img)

                #save ply
                ply_filename = '%d_label_%s_pred_%s_predmask.ply' % (
                    i, SHAPE_NAMES[l], SHAPE_NAMES[pred_val[i - start_idx]])
                ply_filename = os.path.join(DUMP_DIR, ply_filename)
                data_utils.save_ply(
                    np.squeeze(current_data[i, pred_mask_idx, :]),
                    ply_filename)

                ###3)
                img_filename = '%d_label_%s_pred_%s_correctpredmask.jpg' % (
                    i, SHAPE_NAMES[l], SHAPE_NAMES[pred_val[i - start_idx]])
                img_filename = os.path.join(DUMP_DIR, img_filename)
                output_img = pc_util.point_cloud_three_views(
                    np.squeeze(current_data[i, correct_obj_mask, :]))
                scipy.misc.imsave(img_filename, output_img)

                #save ply
                ply_filename = '%d_label_%s_pred_%s_correctpredmask.ply' % (
                    i, SHAPE_NAMES[l], SHAPE_NAMES[pred_val[i - start_idx]])
                ply_filename = os.path.join(DUMP_DIR, ply_filename)
                data_utils.save_ply(
                    np.squeeze(current_data[i, correct_obj_mask, :]),
                    ply_filename)

            if pred_val[i - start_idx] != l and FLAGS.visu:  # ERROR CASE, DUMP!
                img_filename = '%d_label_%s_pred_%s.jpg' % (
                    error_cnt, SHAPE_NAMES[l],
                    SHAPE_NAMES[pred_val[i - start_idx]])
                img_filename = os.path.join(DUMP_DIR, img_filename)
                output_img = pc_util.point_cloud_three_views(
                    np.squeeze(current_data[i, :, :]))
                scipy.misc.imsave(img_filename, output_img)

                #save ply
                ply_filename = '%d_label_%s_pred_%s.ply' % (
                    error_cnt, SHAPE_NAMES[l],
                    SHAPE_NAMES[pred_val[i - start_idx]])
                ply_filename = os.path.join(DUMP_DIR, ply_filename)
                data_utils.save_ply(np.squeeze(current_data[i, :, :]),
                                    ply_filename)
                error_cnt += 1

    log_string('total seen: %d' % (total_seen))
    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(total_correct_class) /
        np.array(total_seen_class, dtype=np.float))))
    log_string('seg accuracy: %f' % (total_correct_seg /
                                     (float(total_seen) * NUM_POINT)))

    class_accuracies = np.array(total_correct_class) / np.array(
        total_seen_class, dtype=np.float)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
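Ejemplo 19 additionally votes over per-point segmentation scores: batch_seg_sum accumulates (B, N, 2) scores across votes, and the mask accuracy is an argmax over the last axis compared against the ground-truth mask. That step isolated, with illustrative names:

import numpy as np

def seg_accuracy(seg_scores, gt_mask):
    """Per-point mask accuracy from summed per-vote scores.

    seg_scores: (B, N, 2) summed scores; gt_mask: (B, N) 0/1 ground truth.
    """
    pred_mask = np.argmax(seg_scores, axis=2)
    return float(np.mean(pred_mask == gt_mask))

print(seg_accuracy(np.random.rand(2, 16, 2), np.random.randint(0, 2, (2, 16))))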
Ejemplo n.º 20
0
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')
    for fn in range(len(TEST_FILES)):
        log_string('----' + str(fn) + '----')
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        print(file_size)

        # set by wind:
        # my code is based on batch_size = 1
        # set batch_size = 1 for this file
        for batch_idx in range(file_size):
            start_idx = batch_idx
            end_idx = batch_idx + 1
            cur_batch_size = 1

            #-------------------------------------------------------------------
            # get critical points
            #-------------------------------------------------------------------
            no_influence_position = current_data[start_idx, 0, :].copy()

            global_feature_list = []
            orgin_data = current_data[start_idx, :, :].copy()

            # collapse every point onto one "no influence" position...
            for change_point in range(NUM_POINT):
                current_data[start_idx, change_point, :] = no_influence_position.copy()

            # ...then restore the points one at a time, recording the global
            # feature after each restoration
            for change_point in range(NUM_POINT):
                current_data[start_idx, change_point, :] = orgin_data[
                    change_point, :].copy()
                # Aggregating BEG
                for vote_idx in range(num_votes):
                    rotated_data = provider.rotate_point_cloud_by_angle(
                        current_data[start_idx:end_idx, :, :],
                        vote_idx / float(num_votes) * np.pi * 2)
                    feed_dict = {
                        ops['pointclouds_pl']: rotated_data,
                        ops['labels_pl']: current_label[start_idx:end_idx],
                        ops['is_training_pl']: is_training
                    }

                    global_feature_val = sess.run(ops['global_feature'],
                                                  feed_dict=feed_dict)

                    global_feature_list.append(global_feature_val)

            critical_points = []
            max_feature = np.zeros(global_feature_list[0].size) - 10
            feature_points = np.zeros(global_feature_list[0].size)
            for index in range(len(global_feature_list)):
                #distance = math.sqrt(((global_feature_list[index] - global_feature_list[-1]) ** 2).sum())
                #distance_list.append(distance)
                top = global_feature_list[index]
                feature_points = np.where(top > max_feature, index,
                                          feature_points)
                max_feature = np.where(top > max_feature, top, max_feature)

            for index in feature_points[0]:
                critical_points.append(orgin_data[int(index), :])
            critical_points = list(set([tuple(t) for t in critical_points]))
            print(len(critical_points))

            img_filename = './test/%d_critical_points.jpg' % (start_idx)
            output_img = pc_util.point_cloud_three_views(
                np.squeeze(critical_points))
            scipy.misc.imsave(img_filename, output_img)

            img_filename = './test/%d_orgin_points.jpg' % (start_idx)
            output_img = pc_util.point_cloud_three_views(
                np.squeeze(orgin_data))
            scipy.misc.imsave(img_filename, output_img)

            #-------------------------------------------------------------------
            # get upper-bound points
            #-------------------------------------------------------------------
            # start from a copy of the original cloud; accepted (x, y, z)
            # samples are appended below
            upper_bound_points = orgin_data.copy()
            current_data[start_idx, :, :] = orgin_data.copy()

            search_step = 0.02
            stand_feature = None  # set from the unmodified cloud in the vote loop below
            max_position = [-1, -1, -1]
            min_position = [1, 1, 1]

            for point_index in range(NUM_POINT):
                max_position = np.maximum(
                    max_position, current_data[start_idx, point_index, :])
                min_position = np.minimum(
                    min_position, current_data[start_idx, point_index, :])

            print(max_position)
            print(min_position)
            for vote_idx in range(num_votes):
                rotated_data = provider.rotate_point_cloud_by_angle(
                    current_data[start_idx:end_idx, :, :],
                    vote_idx / float(num_votes) * np.pi * 2)
                feed_dict = {
                    ops['pointclouds_pl']: rotated_data,
                    ops['labels_pl']: current_label[start_idx:end_idx],
                    ops['is_training_pl']: is_training
                }

                global_feature_val = sess.run(ops['global_feature'],
                                              feed_dict=feed_dict)
                stand_feature = global_feature_val.copy()

            change_point = 0
            current_data[start_idx, :, :] = orgin_data.copy()
            for point_index in range(NUM_POINT):
                if not (point_index in feature_points[0]):
                    change_point = point_index
                    break

            # np.linspace requires an integer sample count, hence the int() casts
            for x in np.linspace(
                    min_position[0], max_position[0],
                    int((max_position[0] - min_position[0]) // search_step) + 1):
                for y in np.linspace(
                        min_position[1], max_position[1],
                        int((max_position[1] - min_position[1]) // search_step) + 1):
                    for z in np.linspace(
                            min_position[2], max_position[2],
                            int((max_position[2] - min_position[2]) // search_step) + 1):
                        current_data[start_idx, change_point, :] = (x, y, z)  # + orgin_position

                        # Aggregating BEG
                        for vote_idx in range(num_votes):
                            rotated_data = provider.rotate_point_cloud_by_angle(
                                current_data[start_idx:end_idx, :, :],
                                vote_idx / float(num_votes) * np.pi * 2)
                            feed_dict = {
                                ops['pointclouds_pl']: rotated_data,
                                ops['labels_pl']:
                                current_label[start_idx:end_idx],
                                ops['is_training_pl']: is_training
                            }

                            global_feature_val = sess.run(
                                ops['global_feature'], feed_dict=feed_dict)

                            if (global_feature_val <= stand_feature).all():
                                upper_bound_points = np.append(
                                    upper_bound_points,
                                    np.array([[x, y, z]]),
                                    axis=0)
                print(x)

            img_filename = './test/%d_upper_bound_points.jpg' % (start_idx)
            output_img = pc_util.point_cloud_three_views(
                np.squeeze(upper_bound_points))
            scipy.misc.imsave(img_filename, output_img)

            current_data[start_idx, :, :] = orgin_data.copy()
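Ejemplo 20 recovers the critical point set by occlusion: collapse all points, restore them one at a time, and record which restoration step attains the per-dimension maximum of the global feature. If the per-point features before max pooling are available directly, the same set falls out of a single argmax; a sketch under that assumption:

import numpy as np

def critical_point_indices(point_features):
    """Indices of points that survive max pooling.

    point_features: (N, F) per-point features before the max pool; the
    critical set is the unique set of rows attaining each column maximum.
    """
    return np.unique(np.argmax(point_features, axis=0))

idx = critical_point_indices(np.random.rand(1024, 256))
print(idx.shape)  # at most 256 unique point indices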
Ejemplo n.º 21
0
def evaluate(num_votes):
    is_training = False
    num_drop, num_steps = FLAGS.num_drop, FLAGS.num_steps
    attack = RandomDrop(num_drop, num_steps)

    # Add ops to save and restore all the variables.
    saver = tf.train.Saver()

    # Create a session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    config.log_device_placement = True
    sess = tf.Session(config=config)

    # Restore variables from disk.
    saver.restore(sess, MODEL_PATH)
    log_string("Model restored.")

    ## ops built on attributes defined in attack
    ops = {
        'pointclouds_pl': attack.pointclouds_pl,
        'labels_pl': attack.labels_pl,
        'is_training_pl': attack.is_training_pl,
        'pred': attack.pred,
        'loss': attack.classify_loss
    }

    NUM_POINT = FLAGS.num_point
    NUM_POINT_ADV = NUM_POINT - num_drop * num_steps

    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')
    for fn in range(len(TEST_FILES)):
        log_string('----' + str(fn) + '----')
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_label = np.squeeze(current_label)
        print(current_data.shape)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        print(file_size)

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE
            cur_batch_size = end_idx - start_idx

            # Aggregating BEG
            batch_loss_sum = 0  # sum of losses for the batch
            batch_pred_sum = np.zeros(
                (cur_batch_size, NUM_CLASSES))  # score for classes
            batch_pred_classes = np.zeros(
                (cur_batch_size, NUM_CLASSES))  # 0/1 for classes

            ## Produce adversarial samples
            cur_batch_data_adv = attack.drop_points(
                current_data[start_idx:end_idx, :, :],
                current_label[start_idx:end_idx], sess)
            ## Natural data
            for vote_idx in range(num_votes):
                rotated_data = provider.rotate_point_cloud_by_angle(
                    current_data[start_idx:end_idx, :, :],
                    vote_idx / float(num_votes) * np.pi * 2)
                feed_dict = {
                    ops['pointclouds_pl']: rotated_data,
                    ops['labels_pl']: current_label[start_idx:end_idx],
                    ops['is_training_pl']: is_training
                }
                loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                              feed_dict=feed_dict)
                batch_pred_sum += pred_val
                batch_pred_val = np.argmax(pred_val, 1)
                for el_idx in range(cur_batch_size):
                    batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
                batch_loss_sum += (loss_val * cur_batch_size /
                                   float(num_votes))
            pred_val = np.argmax(batch_pred_sum, 1)

            ## Adversarial data

            batch_loss_sum_adv = 0  # sum of losses for the batch
            batch_pred_sum_adv = np.zeros(
                (cur_batch_size, NUM_CLASSES))  # score for classes
            batch_pred_classes_adv = np.zeros(
                (cur_batch_size, NUM_CLASSES))  # 0/1 for classes

            for vote_idx in range(num_votes):
                rotated_data = provider.rotate_point_cloud_by_angle(
                    cur_batch_data_adv,
                    vote_idx / float(num_votes) * np.pi * 2)
                feed_dict = {
                    ops['pointclouds_pl']: rotated_data,
                    ops['labels_pl']: current_label[start_idx:end_idx],
                    ops['is_training_pl']: is_training
                }
                loss_val_adv, pred_val_adv = sess.run(
                    [ops['loss'], ops['pred']], feed_dict=feed_dict)
                batch_pred_sum_adv += pred_val_adv
                batch_pred_val_adv = np.argmax(pred_val_adv, 1)
                for el_idx in range(cur_batch_size):
                    batch_pred_classes_adv[el_idx,
                                           batch_pred_val_adv[el_idx]] += 1
                batch_loss_sum_adv += (loss_val_adv * cur_batch_size /
                                       float(num_votes))
            pred_val_adv = np.argmax(batch_pred_sum_adv, 1)

            attack.plot_natural_and_advsarial_samples_all_situation(
                current_data[start_idx:end_idx, :, :], cur_batch_data_adv,
                current_label[start_idx:end_idx], pred_val, pred_val_adv)
            correct = np.sum(pred_val_adv == current_label[start_idx:end_idx])
            # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
            total_correct += correct
            total_seen += cur_batch_size
            loss_sum += batch_loss_sum_adv

            for i in range(start_idx, end_idx):
                l = current_label[i]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_val_adv[i - start_idx] == l)
                fout.write('%d, %d\n' % (pred_val_adv[i - start_idx], l))

                # if pred_val[i-start_idx] != l and FLAGS.visu: # ERROR CASE, DUMP!
                # img_filename = '%d_label_%s_pred_%s.jpg' % (error_cnt, SHAPE_NAMES[l],
                # SHAPE_NAMES[pred_val[i-start_idx]])
                # img_filename = os.path.join(DUMP_DIR, img_filename)
                # output_img = pc_util.point_cloud_three_views(np.squeeze(current_data[i, :, :]))
                # scipy.misc.imsave(img_filename, output_img)
                # error_cnt += 1

    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(total_correct_class) /
        np.array(total_seen_class, dtype=np.float))))

    class_accuracies = np.array(total_correct_class) / np.array(
        total_seen_class, dtype=np.float)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
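RandomDrop in Ejemplo 21 comes from the surrounding project; judging by NUM_POINT_ADV = NUM_POINT - num_drop * num_steps it removes num_drop points per step over num_steps rounds, presumably guided by some saliency criterion. As a shape-level stand-in only, a naive random version (drop_points_randomly is hypothetical, not the project's API):

import numpy as np

def drop_points_randomly(batch, num_drop, num_steps):
    """Repeatedly remove `num_drop` random points from a (B, N, 3) batch."""
    for _ in range(num_steps):
        n = batch.shape[1]
        keep = np.random.choice(n, n - num_drop, replace=False)
        batch = batch[:, keep, :]
    return batch

adv = drop_points_randomly(np.random.rand(4, 1024, 3), num_drop=10, num_steps=5)
print(adv.shape)  # (4, 974, 3)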
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')
    for fn in range(len(TEST_FILES)):
        log_string('----' + str(fn) + '----')
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_label = np.squeeze(current_label)  # labels must stay 1-D for the slicing below
        print(current_data.shape)  # prints (420, 1024, 3)
        # print(current_label.shape)
        # print(current_data)   # raw point coordinates
        # print(current_label)  # ground-truth labels

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        # print(file_size)   # 420
        # print(BATCH_SIZE)  # 4

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE
            cur_batch_size = end_idx - start_idx
            # print(start_idx)        # batch start index
            # print(end_idx)          # batch end index
            # print(cur_batch_size)   # 4

            # Aggregating BEG
            batch_loss_sum = 0  # sum of losses for the batch
            batch_pred_sum = np.zeros(
                (cur_batch_size,
                 NUM_CLASSES))  # summed class scores per sample
            batch_pred_classes = np.zeros(
                (cur_batch_size,
                 NUM_CLASSES))  # per-class 0/1 votes
            # print(batch_loss_sum)
            # print(batch_pred_sum)      # all zeros at this point
            # print(batch_pred_classes)  # all zeros at this point

            for vote_idx in range(num_votes):
                rotated_data = provider.rotate_point_cloud_by_angle(
                    current_data[start_idx:end_idx, :, :],
                    vote_idx / float(num_votes) * np.pi * 2)
                print(rotated_data)
                feed_dict = {
                    ops['pointclouds_pl']: rotated_data,
                    ops['labels_pl']: current_label[start_idx:end_idx],
                    ops['is_training_pl']: is_training
                }
                loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                              feed_dict=feed_dict)
                batch_pred_sum += pred_val               # accumulate class scores
                batch_pred_val = np.argmax(pred_val, 1)  # this vote's predicted class
                # print(loss_val)
                # print(pred_val)        # per-class scores from the network
                # print(batch_pred_sum)
                # print(batch_pred_val)  # matches the pred_val computed below from the summed scores
                for el_idx in range(cur_batch_size):
                    batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
                    # print(batch_pred_classes)  # running 0/1 votes per class
                batch_loss_sum += (loss_val * cur_batch_size /
                                   float(num_votes))

            # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
            # pred_val = np.argmax(batch_pred_classes, 1)
            pred_val = np.argmax(batch_pred_sum, 1)

            # print(pred_val)  # predicted class for each sample in the batch
            # Aggregating END

            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            # print(correct)
            # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
            total_correct += correct
            total_seen += cur_batch_size
            loss_sum += batch_loss_sum
            # print(total_correct)  # running number of correct predictions
            # print(total_seen)     # running number of samples seen

            for i in range(start_idx, end_idx):

                l = current_label[i]
                # print(l)  # ground-truth label

                total_seen_class[l] += 1
                # print(total_seen_class[l])     # running seen count for class l

                total_correct_class[l] += (pred_val[i - start_idx] == l)
                # print(total_correct_class[l])  # running correct count for class l

                # fout.write('%d, %d\n' % (pred_val[i - start_idx], l))  # would write predicted and ground-truth labels
                print(pred_val[i - start_idx])  # predicted label

                fout.write('%d\n' %
                           (pred_val[i - start_idx]))  # write only the predicted label to the dump file

                if pred_val[i - start_idx] != l and FLAGS.visu:  # ERROR CASE, DUMP!
                    img_filename = '%d_label_%s_pred_%s.jpg' % (
                        error_cnt, SHAPE_NAMES[l],
                        SHAPE_NAMES[pred_val[i - start_idx]])
                    img_filename = os.path.join(DUMP_DIR, img_filename)
                    output_img = pc_util.point_cloud_three_views(
                        np.squeeze(current_data[i, :, :]))
                    scipy.misc.imsave(img_filename, output_img)
                    error_cnt += 1
                    # print(output_img)

    log_string('eval mean loss: %f' %
               (loss_sum / float(total_seen)))  # total loss / number of samples
    log_string('eval accuracy: %f' %
               (total_correct / float(total_seen)))  # correct predictions / samples seen
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(total_correct_class) /
        np.array(total_seen_class, dtype=np.float))))  # mean of per-class accuracies

    class_accuracies = np.array(total_correct_class) / np.array(
        total_seen_class, dtype=np.float)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')

    for fn in range(len(TEST_FILES)): 
        log_string('----'+str(fn)+'----')
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:,0:NUM_POINT,:]
        current_label = np.squeeze(current_label)
        print(current_data.shape)
        
        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        print(file_size)
       
        print("batch number:", num_batches)
        for batch_idx in range(num_batches):
            print("batch index: ", batch_idx)
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE
            cur_batch_size = end_idx - start_idx
            
            # Aggregating BEG
            batch_loss_sum = 0 # sum of losses for the batch
            batch_pred_sum = np.zeros((cur_batch_size, NUM_CLASSES)) # score for classes
            batch_pred_classes = np.zeros((cur_batch_size, NUM_CLASSES)) # 0/1 for classes
            for vote_idx in range(num_votes):
                # print("batch, vote index", batch_idx, vote_idx)
                rotated_data = provider.rotate_point_cloud_by_angle(current_data[start_idx:end_idx, :, :],
                                                  vote_idx/float(num_votes) * np.pi * 2)
                feed_dict = {ops['pointclouds_pl']: rotated_data,
                             ops['labels_pl']: current_label[start_idx:end_idx],
                             ops['is_training_pl']: is_training}
                
                # print("sess.run()")

                writer = tf.compat.v1.summary.FileWriter('./tb_log/', sess.graph)
                writer.add_graph(sess.graph)

                loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                          feed_dict=feed_dict) 
                                          #options=tf.compat.v1.RunOptions(
                                          #trace_level=tf.compat.v1.RunOptions.FULL_TRACE),
                                          #run_metadata=run_metadata)
               
                writer.close()

                batch_pred_sum += pred_val
                batch_pred_val = np.argmax(pred_val, 1)
                for el_idx in range(cur_batch_size):
                    batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
                batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
            # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
            # pred_val = np.argmax(batch_pred_classes, 1)
            pred_val = np.argmax(batch_pred_sum, 1)
            # Aggregating END
            
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
            total_correct += correct
            total_seen += cur_batch_size
            loss_sum += batch_loss_sum

            for i in range(start_idx, end_idx):
                l = current_label[i]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_val[i-start_idx] == l)
                fout.write('%d, %d\n' % (pred_val[i-start_idx], l))
                
                if pred_val[i-start_idx] != l and FLAGS.visu: # ERROR CASE, DUMP!
                    img_filename = '%d_label_%s_pred_%s.jpg' % (error_cnt, SHAPE_NAMES[l],
                                                           SHAPE_NAMES[pred_val[i-start_idx]])
                    img_filename = os.path.join(DUMP_DIR, img_filename)
                    output_img = pc_util.point_cloud_three_views(np.squeeze(current_data[i, :, :]))
                    scipy.misc.imsave(img_filename, output_img)
                    error_cnt += 1
        
    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))
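One caveat on the last snippet: as noted in the inline comment, it builds a tf.compat.v1.summary.FileWriter inside the innermost vote loop, re-serializing the graph on every sess.run. Creating the writer once before evaluation is enough; a minimal TF1-compat sketch:

import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # TF1-style graph mode, as in these snippets

sess = tf.compat.v1.Session()
# Create the writer and log the graph once, not once per sess.run:
writer = tf.compat.v1.summary.FileWriter('./tb_log/', sess.graph)
# ... run all evaluation batches here; the writer does not need to be touched ...
writer.close()
sess.close()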