Example no. 1
        root=DATA_PATH,
        npoints=NUM_POINT,
        split='train',
        normal_channel=FLAGS.normal,
        batch_size=BATCH_SIZE)
    TEST_DATASET = modelnet_dataset.ModelNetDataset(
        root=DATA_PATH,
        npoints=NUM_POINT,
        split='test',
        normal_channel=FLAGS.normal,
        batch_size=BATCH_SIZE)
else:
    assert (NUM_POINT <= 2048)
    TRAIN_DATASET = modelnet_h5_dataset.ModelNetH5Dataset(
        os.path.join(BASE_DIR,
                     'data/modelnet40_ply_hdf5_2048/train_files.txt'),
        batch_size=BATCH_SIZE,
        npoints=NUM_POINT,
        shuffle=True)
    TEST_DATASET = modelnet_h5_dataset.ModelNetH5Dataset(
        os.path.join(BASE_DIR,
                     'data/modelnet40_ply_hdf5_2048/test_files.txt'),
        batch_size=BATCH_SIZE,
        npoints=NUM_POINT,
        shuffle=False)
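

# Hedged sketch (not part of the original excerpt): how the training split is
# typically consumed each epoch, following the has_next_batch/next_batch/reset
# interface of modelnet_h5_dataset in the pointnet2 repository. `sess` and an
# `ops` dict containing a 'train_op' are assumed to come from the full train
# script, which also pads the final partial batch to BATCH_SIZE (as
# eval_one_epoch does below).
def train_one_epoch_sketch(sess, ops):
    while TRAIN_DATASET.has_next_batch():
        # augment=True lets the dataset apply its built-in rotation/jitter augmentation
        batch_data, batch_label = TRAIN_DATASET.next_batch(augment=True)
        feed_dict = {ops['pointclouds_pl']: batch_data,
                     ops['labels_pl']: batch_label,
                     ops['is_training_pl']: True}
        sess.run(ops['train_op'], feed_dict=feed_dict)
    TRAIN_DATASET.reset()  # rewind the file list for the next epoch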


def log_string(out_str):
    LOG_FOUT.write(out_str + '\n')
    LOG_FOUT.flush()
    print(out_str)


def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    is_training = False

    TEST_DATASET = modelnet_h5_dataset.ModelNetH5Dataset(
        os.path.join(BASE_DIR, DATASET_DIR, 'modelnet40_ply_hdf5_2048/test_files.txt'),
        batch_size=BATCH_SIZE, npoints=NUM_POINT, shuffle=False)

    # Make sure batch data is of same size
    cur_batch_data = np.zeros((BATCH_SIZE, NUM_POINT, TEST_DATASET.num_channel()))
    cur_batch_label = np.zeros((BATCH_SIZE,), dtype=np.int32)

    total_correct = 0
    total_seen = 0
    loss_sum = 0
    batch_idx = 0
    shape_ious = []
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]

    while TEST_DATASET.has_next_batch():
        batch_data, batch_label = TEST_DATASET.next_batch(augment=False)
        bsize = batch_data.shape[0]
        # print('Batch: %03d, batch size: %d'%(batch_idx, bsize))
        # for the last batch in the epoch, the bsize:end are from last batch
        cur_batch_data[0:bsize,...] = batch_data
        cur_batch_label[0:bsize] = batch_label

        batch_pred_sum = np.zeros((BATCH_SIZE, NUM_CLASSES)) # score for classes
        for vote_idx in range(num_votes):
            # Optionally shuffle point order to achieve different farthest samplings
            # (shuffling is disabled here, so every vote sees the same point order)
            shuffled_indices = np.arange(NUM_POINT)
            # np.random.shuffle(shuffled_indices)
            if FLAGS.normal:
                rotated_data = provider.rotate_point_cloud_by_angle_with_normal(
                    cur_batch_data[:, shuffled_indices, :],
                    vote_idx / float(num_votes) * np.pi * 2)
            else:
                rotated_data = provider.rotate_point_cloud_by_angle(
                    cur_batch_data[:, shuffled_indices, :],
                    vote_idx / float(num_votes) * np.pi * 2)
            feed_dict = {ops['pointclouds_pl']: rotated_data,
                         ops['labels_pl']: cur_batch_label,
                         ops['is_training_pl']: is_training}
            loss_val, pred_val = sess.run([ops['loss'], ops['pred']], feed_dict=feed_dict)
            batch_pred_sum += pred_val
        pred_val = np.argmax(batch_pred_sum, 1)
        correct = np.sum(pred_val[0:bsize] == batch_label[0:bsize])
        total_correct += correct
        total_seen += bsize
        loss_sum += loss_val  # note: only the last vote's loss is accumulated per batch
        batch_idx += 1
        for i in range(bsize):
            l = batch_label[i]
            total_seen_class[l] += 1
            total_correct_class[l] += (pred_val[i] == l)
    
    log_string('eval mean loss: %f' % (loss_sum / float(batch_idx)))
    log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class) / np.array(total_seen_class, dtype=np.float64))))

    '''
    class_accuracies = np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
    '''

    return total_correct / float(total_seen), np.mean(np.array(total_correct_class) / np.array(total_seen_class, dtype=np.float64))
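

# Hedged sketch (not part of the original excerpt): how eval_one_epoch is
# typically driven. The MODEL module (exposing placeholder_inputs/get_model/
# get_loss) and MODEL_PATH are illustrative assumptions.
def evaluate_sketch(num_votes=12):
    with tf.Graph().as_default(), tf.device('/gpu:0'):
        pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
        is_training_pl = tf.placeholder(tf.bool, shape=())
        pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl)
        loss = MODEL.get_loss(pred, labels_pl, end_points)
        saver = tf.train.Saver()
        with tf.Session() as sess:
            saver.restore(sess, MODEL_PATH)  # load trained weights
            ops = {'pointclouds_pl': pointclouds_pl,
                   'labels_pl': labels_pl,
                   'is_training_pl': is_training_pl,
                   'pred': pred,
                   'loss': loss}
            acc, avg_class_acc = eval_one_epoch(sess, ops, num_votes=num_votes)
            log_string('overall accuracy: %f, avg class accuracy: %f' % (acc, avg_class_acc))
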
Example no. 3
        root=DATA_PATH,
        npoints=NUM_POINT,
        split='train',
        normal_channel=FLAGS.normal,
        batch_size=BATCH_SIZE)
    TEST_DATASET = modelnet_dataset.ModelNetDataset(
        root=DATA_PATH,
        npoints=NUM_POINT,
        split='test',
        normal_channel=FLAGS.normal,
        batch_size=BATCH_SIZE)
else:
    assert (NUM_POINT <= 2048)
    TRAIN_DATASET = modelnet_h5_dataset.ModelNetH5Dataset(
        os.path.join('/volume/USERSTORE/jude_ra/master_thesis/pointnet2',
                     'data/modelnet40_ply_hdf5_2048/train_files.txt'),
        batch_size=BATCH_SIZE,
        npoints=NUM_POINT,
        shuffle=True)
    TEST_DATASET = modelnet_h5_dataset.ModelNetH5Dataset(
        os.path.join('/volume/USERSTORE/jude_ra/master_thesis/pointnet2',
                     'data/modelnet40_ply_hdf5_2048/test_files.txt'),
        batch_size=BATCH_SIZE,
        npoints=NUM_POINT,
        shuffle=False)


def log_string(out_str):
    LOG_FOUT.write(out_str + '\n')
    LOG_FOUT.flush()
    print(out_str)
Example no. 4
        normal_channel=config.normal,
        batch_size=config.batch_size)
    TEST_DATASET = modelnet_dataset.ModelNetDataset(
        root=DATA_PATH,
        npoints=config.num_points,
        split='test',
        normal_channel=config.normal,
        batch_size=config.batch_size)
else:
    assert (config.num_points <= 2048)
    CHANNELS = 3
    TRAIN_FILES = os.path.join(config.data, 'train_files.txt')
    TEST_FILES = os.path.join(config.data, 'test_files.txt')
    TRAIN_DATASET = modelnet_h5_dataset.ModelNetH5Dataset(
        TRAIN_FILES,
        batch_size=config.batch_size,
        npoints=config.num_points,
        shuffle=True)
    TEST_DATASET = modelnet_h5_dataset.ModelNetH5Dataset(
        TEST_FILES,
        batch_size=config.batch_size,
        npoints=config.num_points,
        shuffle=False)


def get_learning_rate(batch):
    learning_rate = tf.train.exponential_decay(
        config.learning_rate,  # Base learning rate.
        batch * config.batch_size,  # Current index into the dataset.
        config.decay_step,  # Decay step.
        config.decay_rate,  # Decay rate.
Example no. 5
from model_cls import pointnet2
from util.provider import rotate_point_cloud_by_angle
import modelnet_h5_dataset
from tf_cls import plot_cm

num_classes = 40
num_point = 1024
batch_size = 16
with open('data/modelnet40_ply_hdf5_2048/shape_names.txt') as f:
    shape_names = [line.rstrip() for line in f]

test_dataset = modelnet_h5_dataset.ModelNetH5Dataset(
    'data/modelnet40_ply_hdf5_2048/test_files.txt',
    batch_size=batch_size,
    npoints=num_point,
    shuffle=False)


def evaluate(num_votes=1):
    with tf.device('/gpu:0'):
        point_cloud = tf.placeholder(tf.float32,
                                     shape=(batch_size, num_point, 3))
        labels = tf.placeholder(tf.int32, shape=(batch_size,))
        is_training_pl = tf.placeholder(tf.bool, shape=())

        # simple model
        logits = pointnet2(point_cloud, num_classes, is_training_pl)
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=labels)
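        # Hedged continuation sketch (not part of the original excerpt): the
        # excerpt cuts off here; an evaluation script would typically reduce the
        # per-example cross-entropy to a scalar and restore a checkpoint before
        # running the voting loop ('log/model.ckpt' is an assumed path).
        loss = tf.reduce_mean(cross_entropy)

    saver = tf.train.Saver()
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        saver.restore(sess, 'log/model.ckpt')
        # voting loop: rotate each test batch by vote_idx / num_votes * 2*pi with
        # rotate_point_cloud_by_angle, sum the logits over votes, then take the
        # argmax per example (mirroring eval_one_epoch in Example no. 1)
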
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99

HOSTNAME = socket.gethostname()

NUM_CLASSES = 2

# Shapenet official train/test split
if FLAGS.normal:
    assert (NUM_POINT <= 10000)
    DATA_PATH = os.path.join(ROOT_DIR, 'data/modelnet40_normal_resampled')
    TRAIN_DATASET = modelnet_dataset.ModelNetDataset(
        root=DATA_PATH, npoints=NUM_POINT, split='train',
        normal_channel=FLAGS.normal, batch_size=BATCH_SIZE)
    TEST_DATASET = modelnet_dataset.ModelNetDataset(
        root=DATA_PATH, npoints=NUM_POINT, split='test',
        normal_channel=FLAGS.normal, batch_size=BATCH_SIZE)
else:
    assert (NUM_POINT <= 2048)
    # Note: because the second argument is an absolute path, os.path.join ignores
    # BASE_DIR and these resolve to the absolute dataset file lists.
    TRAIN_DATASET = modelnet_h5_dataset.ModelNetH5Dataset(
        os.path.join(BASE_DIR, "/home/gstavrinos/catkin_ws/src/new_hpr/pointcloud2_segments_tools/dataset0_training_75.txt"),
        batch_size=BATCH_SIZE, npoints=NUM_POINT, shuffle=True)
    TEST_DATASET = modelnet_h5_dataset.ModelNetH5Dataset(
        os.path.join(BASE_DIR, "/home/gstavrinos/catkin_ws/src/new_hpr/pointcloud2_segments_tools/dataset0_testing_75.txt"),
        batch_size=BATCH_SIZE, npoints=NUM_POINT, shuffle=False)

def log_string(out_str):
    LOG_FOUT.write(out_str+'\n')
    LOG_FOUT.flush()
    print(out_str)

def get_learning_rate(batch):
    learning_rate = tf.train.exponential_decay(
                        BASE_LEARNING_RATE,  # Base learning rate.
                        batch * BATCH_SIZE,  # Current index into the dataset.
                        DECAY_STEP,          # Decay step.
                        DECAY_RATE,          # Decay rate.
                        staircase=True)
    learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!
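

# Hedged sketch (not part of the original excerpt): the excerpt ends above, and
# in the pointnet2 train script the helper goes on to return the clipped rate.
# Assuming get_learning_rate ends with `return learning_rate`, this is roughly
# how the schedule feeds the optimizer; `total_loss` and the `batch` global-step
# variable are assumed inputs.
def get_training_operation_sketch(total_loss, batch):
    learning_rate = get_learning_rate(batch)
    tf.summary.scalar('learning_rate', learning_rate)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    return optimizer.minimize(total_loss, global_step=batch)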