Example #1
def train_one_epoch(sess, ops, gmm, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    if (".h5" in TRAIN_FILE):
        current_data, current_label = data_utils.get_current_data_h5(TRAIN_DATA, TRAIN_LABELS, NUM_POINT)
    else:
        current_data, current_label = data_utils.get_current_data(TRAIN_DATA, TRAIN_LABELS, NUM_POINT)


    current_label = np.squeeze(current_label)

    num_batches = current_data.shape[0]//BATCH_SIZE

    total_correct = 0
    total_seen = 0
    loss_sum = 0

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        # Augment batched point clouds by rotation and jittering

        augmented_data = current_data[start_idx:end_idx, :, :]
        if augment_scale:
            augmented_data = provider.scale_point_cloud(augmented_data, smin=0.66, smax=1.5)
        if augment_rotation:
            augmented_data = provider.rotate_point_cloud(augmented_data)
        if augment_translation:
            augmented_data = provider.translate_point_cloud(augmented_data, tval=0.2)
        if augment_jitter:
            augmented_data = provider.jitter_point_cloud(augmented_data, sigma=0.01,
                                                         clip=0.05)  # default sigma=0.01, clip=0.05
        if augment_outlier:
            augmented_data = provider.insert_outliers_to_point_cloud(augmented_data, outlier_ratio=0.02)

        feed_dict = {ops['points_pl']: augmented_data,
                     ops['labels_pl']: current_label[start_idx:end_idx],
                     ops['w_pl']: gmm.weights_,
                     ops['mu_pl']: gmm.means_,
                     ops['sigma_pl']: np.sqrt(gmm.covariances_),
                     ops['is_training_pl']: is_training}
        summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
                                                         ops['train_op'], ops['loss'], ops['pred']],
                                                        feed_dict=feed_dict)

        train_writer.add_summary(summary, step)
        pred_val = np.argmax(pred_val, 1)
        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        total_correct += correct
        total_seen += BATCH_SIZE
        loss_sum += loss_val

    log_string('mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('accuracy: %f' % (total_correct / float(total_seen)))
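
The w_pl/mu_pl/sigma_pl placeholders above take the parameters of a pre-fitted Gaussian mixture (the element-wise np.sqrt(gmm.covariances_) only yields a valid per-axis sigma for diagonal covariances). A minimal sketch of producing such a gmm with scikit-learn, assuming it is fitted on xyz points pooled from the training set:

import numpy as np
from sklearn.mixture import GaussianMixture

def fit_gmm(points, n_components=8):
    # points: (N, 3) array of xyz coordinates pooled from the training set
    gmm = GaussianMixture(n_components=n_components,
                          covariance_type='diag',  # keeps np.sqrt(covariances_) a valid per-axis sigma
                          max_iter=100)
    gmm.fit(points)
    return gmm  # exposes weights_ (K,), means_ (K, 3), covariances_ (K, 3)
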
Example #2
def eval_one_epoch(sess, ops, test_writer):
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]

    # current_data, current_label = data_utils.get_current_data(TEST_DATA, TEST_LABELS, sample_num)
    # current_data, current_label = data_utils.get_current_data_h5(TEST_DATA, TEST_LABELS, sample_num)
    if (".h5" in TEST_FILE):
        current_data, current_label = data_utils.get_current_data_h5(TEST_DATA, TEST_LABELS, sample_num)
    else:
        current_data, current_label = data_utils.get_current_data(TEST_DATA, TEST_LABELS, sample_num)

    current_label = np.squeeze(current_label)

    num_batches = current_data.shape[0]//batch_size
        
    for batch_idx in range(num_batches):
        start_idx = batch_idx * batch_size
        end_idx = (batch_idx+1) * batch_size

        xforms_np, rotations_np = pf.get_xforms(batch_size,
                                                rotation_range=rotation_range_val,
                                                scaling_range=scaling_range_val,
                                                order=setting.rotation_order)

        # Augment batched point clouds by rotation and jittering
        feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
                     ops['labels_pl']: current_label[start_idx:end_idx],
                     ops['is_training_pl']: is_training,
                     ops['xforms']: xforms_np,
                     ops['rotations']: rotations_np,
                     ops['jitter_range']: np.array([jitter_val])}

        summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
            ops['loss'], ops['pred']], feed_dict=feed_dict)

        # pred holds per-point scores of shape (batch, sample_num, NUM_CLASSES):
        # sum the point scores, then take the argmax over classes
        pred_val = np.sum(pred_val, axis=1)
        pred_val = np.argmax(pred_val, 1)
        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        total_correct += correct
        total_seen += batch_size
        loss_sum += (loss_val*batch_size)
        for i in range(start_idx, end_idx):
            l = current_label[i]
            total_seen_class[l] += 1
            total_correct_class[l] += (pred_val[i-start_idx] == l)
            
    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(total_correct_class) /
        np.array(total_seen_class, dtype=np.float64))))
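
The per-class bookkeeping above (two Python lists indexed by label) can be written more compactly with np.bincount; a small equivalent sketch, assuming integer labels in [0, num_classes):

import numpy as np

def class_accuracies(labels, preds, num_classes):
    # ground-truth count and correct-prediction count per class
    seen = np.bincount(labels, minlength=num_classes)
    correct = np.bincount(labels[preds == labels], minlength=num_classes)
    # avoid 0/0 for classes absent from this split
    return np.divide(correct, seen, out=np.zeros(num_classes), where=seen > 0)

Averaging the entries with seen > 0 reproduces the 'eval avg class acc' figure.
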
Example #3
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    log_string(str(datetime.now()))

    # Shuffle data
    # data_utils.shuffle_points(TRAIN_DATA)

    # get current data, shuffle and set to numpy array with desired num_point
    # current_data, current_label = data_utils.get_current_data(TRAIN_DATA, TRAIN_LABELS, NUM_POINT)
    if (".h5" in TRAIN_FILE):
        current_data, current_label = data_utils.get_current_data_h5(
            TRAIN_DATA, TRAIN_LABELS, NUM_POINT)
    else:
        current_data, current_label = data_utils.get_current_data(
            TRAIN_DATA, TRAIN_LABELS, NUM_POINT)

    current_label = np.squeeze(current_label)

    num_batches = current_data.shape[0] // BATCH_SIZE

    total_correct = 0
    total_seen = 0
    loss_sum = 0
    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        # Augment batched point clouds by rotation and jittering
        rotated_data = provider.rotate_point_cloud(
            current_data[start_idx:end_idx, :, :])
        jittered_data = provider.jitter_point_cloud(rotated_data)
        feed_dict = {
            ops['pointclouds_pl']: jittered_data,
            ops['labels_pl']: current_label[start_idx:end_idx],
            ops['is_training_pl']: is_training,
        }
        summary, step, _, loss_val, pred_val = sess.run(
            [ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred']],
            feed_dict=feed_dict)
        train_writer.add_summary(summary, step)
        pred_val = np.argmax(pred_val, 1)
        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        total_correct += correct
        total_seen += BATCH_SIZE
        loss_sum += loss_val

    log_string('mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('accuracy: %f' % (total_correct / float(total_seen)))
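
provider.rotate_point_cloud and provider.jitter_point_cloud are not shown here; a sketch of what these helpers typically do in PointNet-style pipelines (rotation by a random angle about the up axis, clipped Gaussian jitter), under the assumption that the actual provider module behaves the same:

import numpy as np

def rotate_point_cloud(batch, angle=None):
    # batch: (B, N, 3); rotate each cloud about the up (Y) axis
    rotated = np.zeros_like(batch)
    for k in range(batch.shape[0]):
        a = np.random.uniform() * 2 * np.pi if angle is None else angle
        c, s = np.cos(a), np.sin(a)
        rotation = np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
        rotated[k] = batch[k] @ rotation
    return rotated

def jitter_point_cloud(batch, sigma=0.01, clip=0.05):
    # add clipped Gaussian noise independently to every coordinate
    noise = np.clip(sigma * np.random.randn(*batch.shape), -clip, clip)
    return batch + noise
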
Example #4
def train_one_epoch(sess, ops, train_writer):
    is_training = True

    # get current data, shuffle and set to numpy array with desired num_points
    # current_data, current_label = data_utils.get_current_data(TRAIN_DATA, TRAIN_LABELS, sample_num)
    # current_data, current_label = data_utils.get_current_data_h5(TRAIN_DATA, TRAIN_LABELS, sample_num)
    if (".h5" in TRAIN_FILE):
        current_data, current_label = data_utils.get_current_data_h5(TRAIN_DATA, TRAIN_LABELS, sample_num)
    else:
        current_data, current_label = data_utils.get_current_data(TRAIN_DATA, TRAIN_LABELS, sample_num)

    current_label = np.squeeze(current_label)

    num_batches = current_data.shape[0]//batch_size
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    for batch_idx in range(num_batches):
        start_idx = batch_idx * batch_size
        end_idx = (batch_idx+1) * batch_size

        xforms_np, rotations_np = pf.get_xforms(batch_size,
                                                rotation_range=rotation_range,
                                                scaling_range=scaling_range,
                                                order=setting.rotation_order)

        # Augment batched point clouds by rotation and jittering
        feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
                     ops['labels_pl']: current_label[start_idx:end_idx],
                     ops['is_training_pl']: is_training,
                     ops['xforms']: xforms_np,
                     ops['rotations']: rotations_np,
                     ops['jitter_range']: np.array([jitter])}

        summary, step, _, loss_val, pred_val = sess.run(
            [ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred']],
            feed_dict=feed_dict)

        train_writer.add_summary(summary, step)
        # pred holds per-point scores of shape (batch, sample_num, NUM_CLASSES):
        # sum the point scores, then take the argmax over classes
        pred_val = np.sum(pred_val, axis=1)
        pred_val = np.argmax(pred_val, 1)
        # print(pred_val)
        # print(current_label[start_idx:end_idx])

        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        total_correct += correct
        total_seen += batch_size
        loss_sum += loss_val

    log_string('mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('accuracy: %f' % (total_correct / float(total_seen)))
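
Note that num_batches = current_data.shape[0]//batch_size silently drops up to batch_size - 1 trailing samples every epoch. Where that matters, a common workaround is to pad the final batch and mask the padding out of the statistics; a minimal sketch (iter_padded_batches is hypothetical, not part of data_utils):

import numpy as np

def iter_padded_batches(data, labels, batch_size):
    # yields (batch_data, batch_labels, valid); `valid` marks the real samples
    n = data.shape[0]
    for start in range(0, n, batch_size):
        end = min(start + batch_size, n)
        pad = batch_size - (end - start)
        valid = np.arange(batch_size) < (end - start)
        batch_data = np.concatenate([data[start:end], data[:pad]]) if pad else data[start:end]
        batch_labels = np.concatenate([labels[start:end], labels[:pad]]) if pad else labels[start:end]
        yield batch_data, batch_labels, valid
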
Example #5
def eval_one_epoch(sess, ops, test_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]

    # data_utils.shuffle_points(TEST_DATA)
    # current_data, current_label = data_utils.get_current_data(TEST_DATA, TEST_LABELS, NUM_POINT)
    # current_data, current_label = data_utils.get_current_data_h5(TEST_DATA, TEST_LABELS, NUM_POINT)
    if (".h5" in TEST_FILE):
        current_data, current_label = data_utils.get_current_data_h5(
            TEST_DATA, TEST_LABELS, NUM_POINT)
    else:
        current_data, current_label = data_utils.get_current_data(
            TEST_DATA, TEST_LABELS, NUM_POINT)

    current_label = np.squeeze(current_label)

    num_batches = current_data.shape[0] // BATCH_SIZE

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        feed_dict = {
            ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
            ops['labels_pl']: current_label[start_idx:end_idx],
            ops['is_training_pl']: is_training
        }
        summary, step, loss_val, pred_val = sess.run(
            [ops['merged'], ops['step'], ops['loss'], ops['pred']],
            feed_dict=feed_dict)
        pred_val = np.argmax(pred_val, 1)
        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        total_correct += correct
        total_seen += BATCH_SIZE
        loss_sum += (loss_val * BATCH_SIZE)
        for i in range(start_idx, end_idx):
            l = current_label[i]
            total_seen_class[l] += 1
            total_correct_class[l] += (pred_val[i - start_idx] == l)

    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(total_correct_class) /
        np.array(total_seen_class, dtype=np.float64))))
Example #6
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')

    # data_utils.shuffle_points(TEST_DATA)

    # current_data, current_label = data_utils.get_current_data(TEST_DATA, TEST_LABELS, NUM_POINT)
    # current_data, current_label = data_utils.get_current_data_h5(TEST_DATA, TEST_LABELS, NUM_POINT)
    if (".h5" in TEST_FILE):
        current_data, current_label = data_utils.get_current_data_h5(
            TEST_DATA, TEST_LABELS, NUM_POINT)
    else:
        current_data, current_label = data_utils.get_current_data(
            TEST_DATA, TEST_LABELS, NUM_POINT)

    current_label = np.squeeze(current_label)

    num_batches = current_data.shape[0] // BATCH_SIZE

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        cur_batch_size = end_idx - start_idx

        # Aggregating BEG
        batch_loss_sum = 0  # sum of losses for the batch
        batch_pred_sum = np.zeros(
            (cur_batch_size, NUM_CLASSES))  # score for classes
        batch_pred_classes = np.zeros(
            (cur_batch_size, NUM_CLASSES))  # 0/1 for classes
        for vote_idx in range(num_votes):
            rotated_data = provider.rotate_point_cloud_by_angle(
                current_data[start_idx:end_idx, :, :],
                vote_idx / float(num_votes) * np.pi * 2)

            xforms_np, rotations_np = pf.get_xforms(
                BATCH_SIZE,
                rotation_range=rotation_range_val,
                scaling_range=scaling_range_val,
                order=setting.rotation_order)

            # Augment batched point clouds by rotation and jittering
            feed_dict = {
                ops['pointclouds_pl']: rotated_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
                ops['xforms']: xforms_np,
                ops['rotations']: rotations_np,
                ops['jitter_range']: np.array([jitter_val])
            }

            loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                          feed_dict=feed_dict)

            pred_val = np.sum(pred_val, axis=1)
            # pred_val = np.argmax(pred_val, 1)

            batch_pred_sum += pred_val
            batch_pred_val = np.argmax(pred_val, 1)
            for el_idx in range(cur_batch_size):
                batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
            batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
        # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
        # pred_val = np.argmax(batch_pred_classes, 1)
        pred_val = np.argmax(batch_pred_sum, 1)
        # Aggregating END

        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
        total_correct += correct
        total_seen += cur_batch_size
        loss_sum += batch_loss_sum

        for i in range(start_idx, end_idx):
            l = current_label[i]
            total_seen_class[l] += 1
            total_correct_class[l] += (pred_val[i - start_idx] == l)
            fout.write('%s, %s\n' %
                       (SHAPE_NAMES[pred_val[i - start_idx]], SHAPE_NAMES[l]))

            if pred_val[i - start_idx] != l and FLAGS.visu:  # ERROR CASE, DUMP!
                img_filename = '%d_label_%s_pred_%s.jpg' % (
                    error_cnt, SHAPE_NAMES[l],
                    SHAPE_NAMES[pred_val[i - start_idx]])
                img_filename = os.path.join(DUMP_DIR, img_filename)
                output_img = pc_util.point_cloud_three_views(
                    np.squeeze(current_data[i, :, :]))
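                # scipy.misc.imsave is deprecated and removed in newer SciPy
                # releases; imageio.imwrite is a common drop-in replacement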
                scipy.misc.imsave(img_filename, output_img)
                #save ply
                ply_filename = '%d_label_%s_pred_%s.ply' % (
                    error_cnt, SHAPE_NAMES[l],
                    SHAPE_NAMES[pred_val[i - start_idx]])
                ply_filename = os.path.join(DUMP_DIR, ply_filename)
                data_utils.save_ply(np.squeeze(current_data[i, :, :]),
                                    ply_filename)
                error_cnt += 1

    log_string('total seen: %d' % (total_seen))
    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(total_correct_class) /
        np.array(total_seen_class, dtype=np.float64))))

    class_accuracies = np.array(total_correct_class) / np.array(
        total_seen_class, dtype=np.float64)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
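
The voting block above evaluates each batch at num_votes evenly spaced headings and classifies with the summed scores. Stripped of the TensorFlow plumbing, the aggregation reduces to the following sketch (score_fn is a hypothetical stand-in for the network forward pass; rotate_point_cloud as sketched under Example #3):

import numpy as np

def predict_with_votes(batch, score_fn, num_votes=12):
    # batch: (B, N, 3); score_fn maps a batch to (B, NUM_CLASSES) scores
    scores = 0
    for vote_idx in range(num_votes):
        angle = vote_idx / float(num_votes) * 2 * np.pi
        scores = scores + score_fn(rotate_point_cloud(batch, angle=angle))
    return np.argmax(scores, axis=1)
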
Example #7
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_C)]
    total_correct_class = [0 for _ in range(NUM_C)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')

    # data_utils.shuffle_points(TEST_DATA)

    # current_data, current_label = data_utils.get_current_data(TEST_DATA, TEST_LABELS, NUM_POINT)
    # current_data, current_label = data_utils.get_current_data_h5(TEST_DATA, TEST_LABELS, NUM_POINT)
    if (".h5" in TEST_FILE):
        current_data, current_label = data_utils.get_current_data_h5(
            TEST_DATA, TEST_LABELS, NUM_POINT)
    else:
        current_data, current_label = data_utils.get_current_data(
            TEST_DATA, TEST_LABELS, NUM_POINT)

    current_label = np.squeeze(current_label)

    ####################################################
    print(current_data.shape)
    print(current_label.shape)

    filtered_data = []
    filtered_label = []
    for i in range(current_label.shape[0]):
        if (current_label[i] in OBJECTDATASET_TO_MODELNET.keys()):
            filtered_label.append(current_label[i])
            filtered_data.append(current_data[i, :])

    filtered_data = np.array(filtered_data)
    filtered_label = np.array(filtered_label)
    print(filtered_data.shape)
    print(filtered_label.shape)

    current_data = filtered_data
    current_label = filtered_label
    ###################################################

    num_batches = current_data.shape[0] // BATCH_SIZE

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        cur_batch_size = end_idx - start_idx

        # Aggregating BEG
        batch_loss_sum = 0  # sum of losses for the batch
        batch_pred_sum = np.zeros((cur_batch_size, 40))  # scores over the 40 ModelNet classes
        batch_pred_classes = np.zeros((cur_batch_size, 40))  # 0/1 votes per class
        for vote_idx in range(num_votes):
            rotated_data = provider.rotate_point_cloud_by_angle(
                current_data[start_idx:end_idx, :, :],
                vote_idx / float(num_votes) * np.pi * 2)
            feed_dict = {
                ops['pointclouds_pl']: rotated_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training
            }
            loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                          feed_dict=feed_dict)

            batch_pred_sum += pred_val
            batch_pred_val = np.argmax(pred_val, 1)
            for el_idx in range(cur_batch_size):
                batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
            batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
        # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
        # pred_val = np.argmax(batch_pred_classes, 1)
        pred_val = np.argmax(batch_pred_sum, 1)
        # Aggregating END

        for i in range(start_idx, end_idx):
            total_seen += 1
            if (pred_val[i - start_idx]
                    not in MODELNET_TO_OBJECTDATASET.keys()):
                continue
            pred = MODELNET_TO_OBJECTDATASET[pred_val[i - start_idx]]
            # if (pred_val[i-start_idx] == current_label[i]):
            if (pred == current_label[i]):
                total_correct += 1

        for i in range(start_idx, end_idx):

            l = current_label[i]
            total_seen_class[l] += 1

            if pred_val[i - start_idx] not in MODELNET_TO_OBJECTDATASET:
                pred_label = "NA"
            else:
                pred = MODELNET_TO_OBJECTDATASET[pred_val[i - start_idx]]
                total_correct_class[l] += (pred == l)

                pred_label = SHAPE_NAMES[pred]

            # groundtruth_label = SHAPE_NAMES[MODELNET_TO_OBJECTDATASET[l]]
            groundtruth_label = SHAPE_NAMES[l]

            fout.write('%s, %s\n' % (pred_label, groundtruth_label))

            if pred_val[i - start_idx] != l and FLAGS.visu:  # ERROR CASE, DUMP!
                img_filename = '%d_label_%s_pred_%s.jpg' % (
                    error_cnt, groundtruth_label, pred_label)
                img_filename = os.path.join(DUMP_DIR, img_filename)
                output_img = pc_util.point_cloud_three_views(
                    np.squeeze(current_data[i, :, :]))
                scipy.misc.imsave(img_filename, output_img)

                #save ply
                ply_filename = '%d_label_%s_pred_%s.ply' % (
                    error_cnt, groundtruth_label, pred_label)
                ply_filename = os.path.join(DUMP_DIR, ply_filename)
                data_utils.save_ply(np.squeeze(current_data[i, :, :]),
                                    ply_filename)
                error_cnt += 1

    log_string('total seen: %d' % (total_seen))
    # log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))

    seen_class_counts = []
    seen_correct_class = []
    for i in range(len(total_seen_class)):
        if total_seen_class[i] != 0:
            seen_class_counts.append(total_seen_class[i])
            seen_correct_class.append(total_correct_class[i])
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(seen_correct_class) /
        np.array(seen_class_counts, dtype=np.float64))))

    for i, name in enumerate(SHAPE_NAMES):
        if (total_seen_class[i] == 0):
            accuracy = -1
        else:
            accuracy = total_correct_class[i] / float(total_seen_class[i])
        log_string('%10s:\t%0.3f' % (name, accuracy))
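
Examples #7 and #8 evaluate a classifier trained on one label set against another, so every prediction is translated through MODELNET_TO_OBJECTDATASET (and the ground truth is pre-filtered through OBJECTDATASET_TO_MODELNET). The dictionaries are partial index maps between the two class lists; an illustrative, purely hypothetical fragment:

# dataset class index -> ModelNet40 class index; only classes present in
# both label sets appear, which is why the loops above skip predictions
# that have no reverse mapping
OBJECTDATASET_TO_MODELNET = {0: 2, 1: 8, 3: 12}
MODELNET_TO_OBJECTDATASET = {m: o for o, m in OBJECTDATASET_TO_MODELNET.items()}
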
Example #8
def eval_one_epoch(sess, ops, gmm, num_votes):
    """ ops: dict mapping from string to tf ops """
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(15)]  # hard-coded: 15 classes in the target label set
    total_correct_class = [0 for _ in range(15)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')

    if (".h5" in TEST_FILE):
        current_data, current_label = data_utils.get_current_data_h5(
            TEST_DATA, TEST_LABELS, NUM_POINT)
    else:
        current_data, current_label = data_utils.get_current_data(
            TEST_DATA, TEST_LABELS, NUM_POINT)

    current_label = np.squeeze(current_label)

    ####################################################
    print(current_data.shape)
    print(current_label.shape)

    filtered_data = []
    filtered_label = []
    for i in range(current_label.shape[0]):
        if (current_label[i] in OBJECTDATASET_TO_MODELNET.keys()):
            filtered_label.append(current_label[i])
            filtered_data.append(current_data[i, :])

    filtered_data = np.array(filtered_data)
    filtered_label = np.array(filtered_label)
    print(filtered_data.shape)
    print(filtered_label.shape)

    current_data = filtered_data
    current_label = filtered_label
    ###################################################

    num_batches = current_data.shape[0] // BATCH_SIZE

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        cur_batch_size = end_idx - start_idx

        # Aggregating BEG
        batch_loss_sum = 0  # sum of losses for the batch
        batch_pred_sum = np.zeros((cur_batch_size, 40))  # scores over the 40 ModelNet classes
        batch_pred_classes = np.zeros((cur_batch_size, 40))  # 0/1 votes per class
        for vote_idx in range(num_votes):
            rotated_data = provider.rotate_point_cloud_by_angle(
                current_data[start_idx:end_idx, :, :],
                vote_idx / float(num_votes) * np.pi * 2)
            feed_dict = {
                ops['pointclouds_pl']: rotated_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['w_pl']: gmm.weights_,
                ops['mu_pl']: gmm.means_,
                ops['sigma_pl']: np.sqrt(gmm.covariances_),
                ops['is_training_pl']: is_training
            }
            loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                          feed_dict=feed_dict)

            batch_pred_sum += pred_val
            batch_pred_val = np.argmax(pred_val, 1)
            for el_idx in range(cur_batch_size):
                batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
            batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
        pred_val = np.argmax(batch_pred_sum, 1)
        # Aggregating END

        for i in range(start_idx, end_idx):
            total_seen += 1
            if (pred_val[i - start_idx]
                    not in MODELNET_TO_OBJECTDATASET.keys()):
                continue
            pred = MODELNET_TO_OBJECTDATASET[pred_val[i - start_idx]]
            # if (pred_val[i-start_idx] == current_label[i]):
            if (pred == current_label[i]):
                total_correct += 1

        for i in range(start_idx, end_idx):

            l = current_label[i]
            total_seen_class[l] += 1

            if pred_val[i - start_idx] not in MODELNET_TO_OBJECTDATASET:
                pred_label = "NA"
            else:
                pred = MODELNET_TO_OBJECTDATASET[pred_val[i - start_idx]]
                total_correct_class[l] += (pred == l)

                pred_label = SHAPE_NAMES[pred]

            # groundtruth_label = SHAPE_NAMES[MODELNET_TO_OBJECTDATASET[l]]
            groundtruth_label = SHAPE_NAMES[l]

            fout.write('%s, %s\n' % (pred_label, groundtruth_label))

    log_string('total seen: %d' % (total_seen))
    # log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))

    seen_class_counts = []
    seen_correct_class = []
    for i in range(len(total_seen_class)):
        if total_seen_class[i] != 0:
            seen_class_counts.append(total_seen_class[i])
            seen_correct_class.append(total_correct_class[i])
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(seen_correct_class) /
        np.array(seen_class_counts, dtype=np.float64))))

    for i, name in enumerate(SHAPE_NAMES):
        if (total_seen_class[i] == 0):
            accuracy = -1
        else:
            accuracy = total_correct_class[i] / float(total_seen_class[i])
        log_string('%10s:\t%0.3f' % (name, accuracy))
Example #9
def eval_one_epoch(sess, ops, gmm, num_votes):
    """ ops: dict mapping from string to tf ops """
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')

    if (".h5" in TEST_FILE):
        current_data, current_label = data_utils.get_current_data_h5(
            TEST_DATA, TEST_LABELS, NUM_POINT)
    else:
        current_data, current_label = data_utils.get_current_data(
            TEST_DATA, TEST_LABELS, NUM_POINT)

    current_label = np.squeeze(current_label)

    num_batches = current_data.shape[0] // BATCH_SIZE

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        cur_batch_size = end_idx - start_idx

        # Aggregating BEG
        batch_loss_sum = 0  # sum of losses for the batch
        batch_pred_sum = np.zeros(
            (cur_batch_size, NUM_CLASSES))  # score for classes
        batch_pred_classes = np.zeros(
            (cur_batch_size, NUM_CLASSES))  # 0/1 for classes
        for vote_idx in range(num_votes):
            rotated_data = provider.rotate_point_cloud_by_angle(
                current_data[start_idx:end_idx, :, :],
                vote_idx / float(num_votes) * np.pi * 2)
            feed_dict = {
                ops['pointclouds_pl']: rotated_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['w_pl']: gmm.weights_,
                ops['mu_pl']: gmm.means_,
                ops['sigma_pl']: np.sqrt(gmm.covariances_),
                ops['is_training_pl']: is_training
            }
            loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                          feed_dict=feed_dict)

            batch_pred_sum += pred_val
            batch_pred_val = np.argmax(pred_val, 1)
            for el_idx in range(cur_batch_size):
                batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
            batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
        pred_val = np.argmax(batch_pred_sum, 1)
        # Aggregating END

        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
        total_correct += correct
        total_seen += cur_batch_size
        loss_sum += batch_loss_sum

        for i in range(start_idx, end_idx):
            l = current_label[i]
            total_seen_class[l] += 1
            total_correct_class[l] += (pred_val[i - start_idx] == l)
            fout.write('%s, %s\n' %
                       (SHAPE_NAMES[pred_val[i - start_idx]], SHAPE_NAMES[l]))

            if pred_val[i - start_idx] != l and FLAGS.visu:  # ERROR CASE, DUMP!
                img_filename = '%d_label_%s_pred_%s.jpg' % (
                    error_cnt, SHAPE_NAMES[l],
                    SHAPE_NAMES[pred_val[i - start_idx]])
                img_filename = os.path.join(DUMP_DIR, img_filename)
                output_img = pc_util.point_cloud_three_views(
                    np.squeeze(current_data[i, :, :]))
                scipy.misc.imsave(img_filename, output_img)

                #save ply
                ply_filename = '%d_label_%s_pred_%s.ply' % (
                    error_cnt, SHAPE_NAMES[l],
                    SHAPE_NAMES[pred_val[i - start_idx]])
                ply_filename = os.path.join(DUMP_DIR, ply_filename)
                data_utils.save_ply(np.squeeze(current_data[i, :, :]),
                                    ply_filename)
                error_cnt += 1

    log_string('total seen: %d' % (total_seen))
    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(total_correct_class) /
        np.array(total_seen_class, dtype=np.float64))))

    class_accuracies = np.array(total_correct_class) / np.array(
        total_seen_class, dtype=np.float64)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
Example #10
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')

    if (".h5" in TEST_FILE):
        current_data, current_label = data_utils.get_current_data_h5(
            TEST_DATA, TEST_LABELS, NUM_POINT)
    else:
        current_data, current_label = data_utils.get_current_data(
            TEST_DATA, TEST_LABELS, NUM_POINT)

    current_label = np.squeeze(current_label)

    num_batches = current_data.shape[0] // BATCH_SIZE

    current_pred = []

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        cur_batch_size = end_idx - start_idx

        # Aggregating BEG
        batch_loss_sum = 0  # sum of losses for the batch
        batch_pred_sum = np.zeros(
            (cur_batch_size, NUM_CLASSES))  # score for classes
        batch_pred_classes = np.zeros(
            (cur_batch_size, NUM_CLASSES))  # 0/1 for classes
        for vote_idx in range(num_votes):
            rotated_data = provider.rotate_point_cloud_by_angle(
                current_data[start_idx:end_idx, :, :],
                vote_idx / float(num_votes) * np.pi * 2)

            xforms_np, rotations_np = pf.get_xforms(
                BATCH_SIZE,
                rotation_range=rotation_range_val,
                scaling_range=scaling_range_val,
                order=setting.rotation_order)

            # Augment batched point clouds by rotation and jittering
            feed_dict = {
                ops['pointclouds_pl']: rotated_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
                ops['xforms']: xforms_np,
                ops['rotations']: rotations_np,
                ops['jitter_range']: np.array([jitter_val])
            }

            loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                          feed_dict=feed_dict)

            pred_val = np.sum(pred_val, axis=1)
            # pred_val = np.argmax(pred_val, 1)

            batch_pred_sum += pred_val
            batch_pred_val = np.argmax(pred_val, 1)
            for el_idx in range(cur_batch_size):
                batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
            batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
        # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
        # pred_val = np.argmax(batch_pred_classes, 1)
        pred_val = np.argmax(batch_pred_sum, 1)
        # Aggregating END

        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
        total_correct += correct
        total_seen += cur_batch_size
        loss_sum += batch_loss_sum

        for i in range(start_idx, end_idx):
            l = current_label[i]
            total_seen_class[l] += 1
            total_correct_class[l] += (pred_val[i - start_idx] == l)
            fout.write('%s, %s\n' %
                       (SHAPE_NAMES[pred_val[i - start_idx]], SHAPE_NAMES[l]))

            current_pred.append(pred_val[i - start_idx])

    log_string('total seen: %d' % (total_seen))
    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(total_correct_class) /
        np.array(total_seen_class, dtype=np.float64))))

    class_accuracies = np.array(total_correct_class) / np.array(
        total_seen_class, dtype=np.float64)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))

    # Plot confusion matrix
    current_pred = np.array(current_pred)
    groundtruth = current_label.flatten()
    predictions = current_pred.flatten()

    mat = confusion_matrix(groundtruth, predictions)

    plt.style.use('seaborn-paper')  # on Matplotlib >= 3.6 this style is named 'seaborn-v0_8-paper'
    plt.rcParams["figure.figsize"] = (10, 10)
    ax = plt.subplot(111)
    cmap = plt.cm.Reds
    # row-normalize so each row sums to 1; nan_to_num zeroes rows for
    # classes that never occur in the ground truth
    mat = mat.astype('float') / mat.sum(axis=1)[:, np.newaxis]
    mat = np.nan_to_num(mat, copy=True)

    plt.imshow(mat, interpolation='nearest', cmap=cmap)
    # cbar = plt.colorbar(fraction=0.03, pad=0.05, aspect=30)
    # cbar.ax.tick_params(labelsize=10)
    tick_marks = np.arange(len(SHAPE_NAMES))
    plt.xticks(tick_marks, SHAPE_NAMES, rotation=90)
    plt.yticks(tick_marks, SHAPE_NAMES)

    plt.ylabel('Ground truth')
    plt.xlabel('Prediction')

    for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
                 ax.get_xticklabels() + ax.get_yticklabels()):
        item.set_fontsize(36)

    plt.tight_layout()
    plt.savefig(os.path.join(DUMP_DIR, 'matrix.pdf'))
    plt.show()
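
For reference, scikit-learn >= 0.22 can produce the row-normalized matrix directly, replacing the manual division and nan_to_num above:

from sklearn.metrics import confusion_matrix

# normalize='true' divides each row by its ground-truth count
mat = confusion_matrix(groundtruth, predictions, normalize='true')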