def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')

    # data_utils.shuffle_points(TEST_DATA)

    # current_data, current_label = data_utils.get_current_data(TEST_DATA, TEST_LABELS, NUM_POINT)
    # current_data, current_label = data_utils.get_current_data_h5(TEST_DATA, TEST_LABELS, NUM_POINT)
    if (".h5" in TEST_FILE):
        current_data, current_label = data_utils.get_current_data_h5(
            TEST_DATA, TEST_LABELS, NUM_POINT)
    else:
        current_data, current_label = data_utils.get_current_data(
            TEST_DATA, TEST_LABELS, NUM_POINT)

    current_label = np.squeeze(current_label)

    num_batches = current_data.shape[0] // BATCH_SIZE

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        cur_batch_size = end_idx - start_idx

        # Aggregating BEG
        batch_loss_sum = 0  # sum of losses for the batch
        batch_pred_sum = np.zeros(
            (cur_batch_size, NUM_CLASSES))  # score for classes
        batch_pred_classes = np.zeros(
            (cur_batch_size, NUM_CLASSES))  # 0/1 for classes
        for vote_idx in range(num_votes):
            rotated_data = provider.rotate_point_cloud_by_angle(
                current_data[start_idx:end_idx, :, :],
                vote_idx / float(num_votes) * np.pi * 2)

            xforms_np, rotations_np = pf.get_xforms(
                BATCH_SIZE,
                rotation_range=rotation_range_val,
                scaling_range=scaling_range_val,
                order=setting.rotation_order)

            # Augment batched point clouds by rotation and jittering
            feed_dict = {
                ops['pointclouds_pl']: rotated_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
                ops['xforms']: xforms_np,
                ops['rotations']: rotations_np,
                ops['jitter_range']: np.array([jitter_val])
            }

            loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                          feed_dict=feed_dict)

            # the network returns per-point class scores here; summing over the
            # point axis pools them into one (B, NUM_CLASSES) score per cloud
            pred_val = np.sum(pred_val, axis=1)
            # pred_val = np.argmax(pred_val, 1)

            batch_pred_sum += pred_val
            batch_pred_val = np.argmax(pred_val, 1)
            for el_idx in range(cur_batch_size):
                batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
            batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
        # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
        # pred_val = np.argmax(batch_pred_classes, 1)
        pred_val = np.argmax(batch_pred_sum, 1)
        # Aggregating END

        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
        total_correct += correct
        total_seen += cur_batch_size
        loss_sum += batch_loss_sum

        for i in range(start_idx, end_idx):
            l = current_label[i]
            total_seen_class[l] += 1
            total_correct_class[l] += (pred_val[i - start_idx] == l)
            fout.write('%s, %s\n' %
                       (SHAPE_NAMES[pred_val[i - start_idx]], SHAPE_NAMES[l]))

            if pred_val[i -
                        start_idx] != l and FLAGS.visu:  # ERROR CASE, DUMP!
                img_filename = '%d_label_%s_pred_%s.jpg' % (
                    error_cnt, SHAPE_NAMES[l],
                    SHAPE_NAMES[pred_val[i - start_idx]])
                img_filename = os.path.join(DUMP_DIR, img_filename)
                output_img = pc_util.point_cloud_three_views(
                    np.squeeze(current_data[i, :, :]))
                scipy.misc.imsave(img_filename, output_img)
                #save ply
                ply_filename = '%d_label_%s_pred_%s.ply' % (
                    error_cnt, SHAPE_NAMES[l],
                    SHAPE_NAMES[pred_val[i - start_idx]])
                ply_filename = os.path.join(DUMP_DIR, ply_filename)
                data_utils.save_ply(np.squeeze(current_data[i, :, :]),
                                    ply_filename)
                error_cnt += 1

    log_string('total seen: %d' % (total_seen))
    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(total_correct_class) /
        np.array(total_seen_class, dtype=np.float))))

    class_accuracies = np.array(total_correct_class) / np.array(
        total_seen_class, dtype=np.float)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
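The voting loop above is the pattern every variant below repeats: run the same batch through the network at several rotations about the up axis, sum the per-class scores, and only then take the argmax. A minimal, self-contained sketch of that aggregation, assuming a score_fn that maps a (B, N, 3) batch to (B, C) class scores (names here are illustrative, not from the original repo):

import numpy as np

def aggregate_votes(score_fn, batch, num_votes):
    """Sum class scores over `num_votes` rotations about the Y (up) axis."""
    scores = None
    for vote_idx in range(num_votes):
        angle = vote_idx / float(num_votes) * 2 * np.pi
        c, s = np.cos(angle), np.sin(angle)
        rot = np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])  # rotation about Y
        rotated = batch @ rot  # x' = xR, the row-vector convention provider uses
        vote_scores = score_fn(rotated)
        scores = vote_scores if scores is None else scores + vote_scores
    return np.argmax(scores, axis=1)  # (B,) aggregated predictions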
Example 2
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_C)]
    total_correct_class = [0 for _ in range(NUM_C)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')

    # data_utils.shuffle_points(TEST_DATA)

    # current_data, current_label = data_utils.get_current_data(TEST_DATA, TEST_LABELS, NUM_POINT)
    # current_data, current_label = data_utils.get_current_data_h5(TEST_DATA, TEST_LABELS, NUM_POINT)
    if (".h5" in TEST_FILE):
        current_data, current_label = data_utils.get_current_data_h5(
            TEST_DATA, TEST_LABELS, NUM_POINT)
    else:
        current_data, current_label = data_utils.get_current_data(
            TEST_DATA, TEST_LABELS, NUM_POINT)

    current_label = np.squeeze(current_label)

    ####################################################
    print(current_data.shape)
    print(current_label.shape)

    filtered_data = []
    filtered_label = []
    for i in range(current_label.shape[0]):
        if (current_label[i] in OBJECTDATASET_TO_MODELNET.keys()):
            filtered_label.append(current_label[i])
            filtered_data.append(current_data[i, :])

    filtered_data = np.array(filtered_data)
    filtered_label = np.array(filtered_label)
    print(filtered_data.shape)
    print(filtered_label.shape)

    current_data = filtered_data
    current_label = filtered_label
    ###################################################

    num_batches = current_data.shape[0] // BATCH_SIZE

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        cur_batch_size = end_idx - start_idx

        # Aggregating BEG
        batch_loss_sum = 0  # sum of losses for the batch
        batch_pred_sum = np.zeros((cur_batch_size, 40))  # scores over the 40 ModelNet classes
        batch_pred_classes = np.zeros((cur_batch_size, 40))  # 0/1 votes over the 40 ModelNet classes
        for vote_idx in range(num_votes):
            rotated_data = provider.rotate_point_cloud_by_angle(
                current_data[start_idx:end_idx, :, :],
                vote_idx / float(num_votes) * np.pi * 2)
            feed_dict = {
                ops['pointclouds_pl']: rotated_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training
            }
            loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                          feed_dict=feed_dict)

            batch_pred_sum += pred_val
            batch_pred_val = np.argmax(pred_val, 1)
            for el_idx in range(cur_batch_size):
                batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
            batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
        # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
        # pred_val = np.argmax(batch_pred_classes, 1)
        pred_val = np.argmax(batch_pred_sum, 1)
        # Aggregating END

        for i in range(start_idx, end_idx):
            total_seen += 1
            if (pred_val[i - start_idx]
                    not in MODELNET_TO_OBJECTDATASET.keys()):
                continue
            pred = MODELNET_TO_OBJECTDATASET[pred_val[i - start_idx]]
            # if (pred_val[i-start_idx] == current_label[i]):
            if (pred == current_label[i]):
                total_correct += 1

        for i in range(start_idx, end_idx):

            l = current_label[i]
            total_seen_class[l] += 1

            if pred_val[i - start_idx] not in MODELNET_TO_OBJECTDATASET:
                pred_label = "NA"
            else:
                pred = MODELNET_TO_OBJECTDATASET[pred_val[i - start_idx]]
                total_correct_class[l] += (pred == l)

                pred_label = SHAPE_NAMES[pred]

            # groundtruth_label = SHAPE_NAMES[MODELNET_TO_OBJECTDATASET[l]]
            groundtruth_label = SHAPE_NAMES[l]

            fout.write('%s, %s\n' % (pred_label, groundtruth_label))

            if pred_label != groundtruth_label and FLAGS.visu:  # ERROR CASE, DUMP!
                img_filename = '%d_label_%s_pred_%s.jpg' % (
                    error_cnt, groundtruth_label, pred_label)
                img_filename = os.path.join(DUMP_DIR, img_filename)
                output_img = pc_util.point_cloud_three_views(
                    np.squeeze(current_data[i, :, :]))
                scipy.misc.imsave(img_filename, output_img)

                #save ply
                ply_filename = '%d_label_%s_pred_%s.ply' % (
                    error_cnt, groundtruth_label, pred_label)
                ply_filename = os.path.join(DUMP_DIR, ply_filename)
                data_utils.save_ply(np.squeeze(current_data[i, :, :]),
                                    ply_filename)
                error_cnt += 1

    log_string('total seen: %d' % (total_seen))
    # log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))

    seen_class_counts = []
    seen_correct_class = []
    for i in range(len(total_seen_class)):
        if total_seen_class[i] != 0:
            seen_class_counts.append(total_seen_class[i])
            seen_correct_class.append(total_correct_class[i])
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(seen_correct_class) /
        np.array(seen_class_counts, dtype=np.float))))

    for i, name in enumerate(SHAPE_NAMES):
        if (total_seen_class[i] == 0):
            accuracy = -1
        else:
            accuracy = total_correct_class[i] / float(total_seen_class[i])
        log_string('%10s:\t%0.3f' % (name, accuracy))
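This variant evaluates a ModelNet-trained classifier on a different dataset, so everything hinges on the two id-mapping dicts. A toy sketch of the remapping step, with made-up ids standing in for MODELNET_TO_OBJECTDATASET:

import numpy as np

MODELNET_TO_TARGET_DEMO = {7: 0, 2: 1}  # hypothetical id pairs

def remap_and_count(pred_modelnet, labels_target):
    """Count correct predictions after mapping ModelNet ids to target ids.

    Predictions with no mapping count as seen but never correct, mirroring
    the `continue` in the accuracy loop above.
    """
    correct = 0
    for p, l in zip(pred_modelnet, labels_target):
        mapped = MODELNET_TO_TARGET_DEMO.get(int(p))
        if mapped is not None and mapped == l:
            correct += 1
    return correct, len(labels_target)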
Example 3
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')
    for fn in range(len(TEST_FILES)):
        log_string('----'+str(fn)+'----')
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:,0:NUM_POINT,:]
        current_label = np.squeeze(current_label)
        print(current_data.shape)
        
        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        print(file_size)
        
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE
            cur_batch_size = end_idx - start_idx
            
            # Aggregating BEG
            batch_loss_sum = 0 # sum of losses for the batch
            batch_pred_sum = np.zeros((cur_batch_size, NUM_CLASSES)) # score for classes
            batch_pred_classes = np.zeros((cur_batch_size, NUM_CLASSES)) # 0/1 for classes
            for vote_idx in range(num_votes):
                rotated_data = provider.rotate_point_cloud_by_angle(current_data[start_idx:end_idx, :, :],
                                                  vote_idx/float(num_votes) * np.pi * 2)
                feed_dict = {ops['pointclouds_pl']: rotated_data,
                             ops['labels_pl']: current_label[start_idx:end_idx],
                             ops['is_training_pl']: is_training}
                loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                          feed_dict=feed_dict)
                batch_pred_sum += pred_val
                batch_pred_val = np.argmax(pred_val, 1)
                for el_idx in range(cur_batch_size):
                    batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
                batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
            # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
            # pred_val = np.argmax(batch_pred_classes, 1)
            pred_val = np.argmax(batch_pred_sum, 1)
            # Aggregating END
            
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
            total_correct += correct
            total_seen += cur_batch_size
            loss_sum += batch_loss_sum

            for i in range(start_idx, end_idx):
                l = current_label[i]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_val[i-start_idx] == l)
                fout.write('%d, %d\n' % (pred_val[i-start_idx], l))
                
                if pred_val[i-start_idx] != l and FLAGS.visu: # ERROR CASE, DUMP!
                    img_filename = '%d_label_%s_pred_%s.jpg' % (error_cnt, SHAPE_NAMES[l],
                                                           SHAPE_NAMES[pred_val[i-start_idx]])
                    img_filename = os.path.join(DUMP_DIR, img_filename)
                    output_img = pc_util.point_cloud_three_views(np.squeeze(current_data[i, :, :]))
                    scipy.misc.imsave(img_filename, output_img)
                    error_cnt += 1
                if pred_val[i-start_idx] == l and FLAGS.visu: # CORRECT CASE, DUMP!
                    img_filename = '%d_label_%s_pred_%s.jpg' % (total_correct, SHAPE_NAMES[l],
                                                           SHAPE_NAMES[pred_val[i-start_idx]])
                    img_filename = os.path.join(SUCC_DIR, img_filename)
                    output_img = pc_util.point_cloud_three_views(np.squeeze(current_data[i, :, :]))
                    scipy.misc.imsave(img_filename, output_img)

    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))
    
    class_accuracies = np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
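One caveat with the class-accuracy line these examples share: the elementwise division emits a divide-by-zero warning (and NaN/inf entries) whenever a class never appears in the test split. A guarded variant, as a sketch:

import numpy as np

def per_class_accuracy(correct, seen):
    """Elementwise correct/seen; classes never seen come back as NaN."""
    correct = np.asarray(correct, dtype=np.float64)
    seen = np.asarray(seen, dtype=np.float64)
    acc = np.full_like(seen, np.nan)
    np.divide(correct, seen, out=acc, where=seen > 0)
    return acc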
Example 4
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')
    for fn in range(len(TEST_FILES)):
        log_string('----' + str(fn) + '----')
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:, 0:NUM_POINT, :]
        # print(current_data)
        current_label = np.squeeze(current_label)
        print(current_data.shape)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        print(file_size)

        for batch_idx in range(num_batches):

            point_cloud_transformed_val = None
            TNet1 = None

            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE
            cur_batch_size = end_idx - start_idx

            # Aggregating BEG
            batch_loss_sum = 0  # sum of losses for the batch
            batch_pred_sum = np.zeros(
                (cur_batch_size, NUM_CLASSES))  # score for classes
            batch_pred_classes = np.zeros(
                (cur_batch_size, NUM_CLASSES))  # 0/1 for classes
            for vote_idx in range(num_votes):
                rotated_data = provider.rotate_point_cloud_by_angle(
                    current_data[start_idx:end_idx, :, :],
                    vote_idx / float(num_votes) * np.pi * 2)
                feed_dict = {
                    ops['pointclouds_pl']: rotated_data,
                    ops['labels_pl']: current_label[start_idx:end_idx],
                    ops['is_training_pl']: is_training
                }
                loss_val, pred_val, point_cloud_transformed_val, TNet1 = sess.run(
                    [
                        ops['loss'], ops['pred'],
                        ops['point_cloud_transformed'], ops['TNet1']
                    ],
                    feed_dict=feed_dict)

                for i in range(start_idx, end_idx):
                    l = current_label[i]
                    img_filename = os.path.join(
                        DUMP_DIR,
                        "{label}.{fn}.{batch_idx}.{i}.{vote_idx}.rotated_data.jpg"
                        .format(fn=fn,
                                batch_idx=batch_idx,
                                i=i,
                                label=SHAPE_NAMES[l],
                                vote_idx=vote_idx))
                    # input: test point cloud
                    output_img = pc_util.point_cloud_three_views(
                        np.squeeze(rotated_data[i - start_idx, :, :]))
                    # using utilities included in this repo
                    # to draw the point cloud into a jpg
                    scipy.misc.imsave(img_filename, output_img)

                    img_filename = os.path.join(
                        DUMP_DIR,
                        "{label}.{fn}.{batch_idx}.{i}.{vote_idx}.point_cloud_transformed.jpg"
                        .format(fn=fn,
                                batch_idx=batch_idx,
                                i=i,
                                label=SHAPE_NAMES[l],
                                vote_idx=vote_idx))
                    # output: transformed point cloud
                    output_img = pc_util.point_cloud_three_views(
                        np.squeeze(
                            point_cloud_transformed_val[i - start_idx, :, :]))
                    # using utilities included in this repo
                    # to draw the point cloud into a jpg
                    scipy.misc.imsave(img_filename, output_img)

                    # j = i - start_idx
                    # f = open(
                    #     "dump/{label}.{fn}.{batch_idx}.{i}.{vote_idx}.txt".
                    #     format(
                    #         fn=fn,
                    #         batch_idx=batch_idx,
                    #         i=i,
                    #         label=SHAPE_NAMES[l],
                    #         vote_idx=vote_idx), "w")
                    # f.write(str(TNet1[j]))  # T-Net1: 3x3 matrix
                    # f.write("\r\n")
                    # # ||1-AA'||
                    # det = np.linalg.det(
                    #     np.eye(3) - np.dot(TNet1[j], TNet1[j].T))
                    # f.write(str(det))
                    # f.write("\r\n")
                    # z, y, x = mat2euler(TNet1[j])
                    # print(z, y, x)
                    # theta, vec = euler2angle_axis(z, y, x)
                    # print(theta, vec)
                    # f.write(str(theta*180/np.pi) + "\r\n" + str(vec))
                    # f.close()

                # print(point_cloud_transformed_val.shape)  # (4, 1024, 3)
                # print(rotated_data.shape)  # (4, 1024, 3)
                # print(current_data[0, :, :].shape)  # (1024, 3)
                # print(np.squeeze(current_data[0, :, :]).shape)  # (1024, 3)

                batch_pred_sum += pred_val
                batch_pred_val = np.argmax(pred_val, 1)
                for el_idx in range(cur_batch_size):
                    batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
                batch_loss_sum += (loss_val * cur_batch_size /
                                   float(num_votes))
            # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
            # pred_val = np.argmax(batch_pred_classes, 1)
            pred_val = np.argmax(batch_pred_sum, 1)
            # Aggregating END

            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
            total_correct += correct
            total_seen += cur_batch_size
            loss_sum += batch_loss_sum

            for i in range(start_idx, end_idx):
                l = current_label[i]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_val[i - start_idx] == l)
                fout.write('%d, %d\n' % (pred_val[i - start_idx], l))

                if pred_val[
                        i -
                        start_idx] != l and FLAGS.visu:  # ERROR CASE, DUMP!
                    img_filename = '%d_label_%s_pred_%s.jpg' % (
                        error_cnt, SHAPE_NAMES[l],
                        SHAPE_NAMES[pred_val[i - start_idx]])
                    img_filename = os.path.join(DUMP_DIR, img_filename)
                    output_img = pc_util.point_cloud_three_views(
                        np.squeeze(current_data[i, :, :]))
                    scipy.misc.imsave(img_filename, output_img)
                    error_cnt += 1

    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(total_correct_class) /
        np.array(total_seen_class, dtype=np.float))))

    class_accuracies = np.array(total_correct_class) / np.array(
        total_seen_class, dtype=np.float)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
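The commented-out block in this variant probes how close the learned T-Net matrix is to a true rotation (it computes det(I - A A^T)). A related check, using the Frobenius-norm form that PointNet's orthogonality regularizer is built on, as a small sketch:

import numpy as np

def rotation_deviation(mat3):
    """||I - M M^T||_F; zero exactly when M is orthogonal."""
    mat3 = np.asarray(mat3, dtype=np.float64)
    return float(np.linalg.norm(np.eye(3) - mat3 @ mat3.T))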
Example 5
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')
    for fn in range(len(TEST_FILES)):
        log_string('----' + str(fn) + '----')
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_label = np.squeeze(current_label)
        print(current_data.shape)  # prints (420, 1024, 3)
        # print(current_data)  # prints the raw point coordinates
        print(current_label)  # prints the ground-truth labels

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        print(file_size)  # 420
        print(BATCH_SIZE)  # 4

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE
            cur_batch_size = end_idx - start_idx
            print(start_idx)  # batch start index
            print(end_idx)  # batch end index
            print(cur_batch_size)  # 4

            # Aggregating BEG
            batch_loss_sum = 0  # sum of losses for the batch
            batch_pred_sum = np.zeros(
                (cur_batch_size,
                 NUM_CLASSES))  # score for classes (accumulated prediction scores)
            batch_pred_classes = np.zeros(
                (cur_batch_size,
                 NUM_CLASSES))  # 0/1 for classes (per-class vote tally)
            # print(batch_loss_sum)
            print(batch_pred_sum)  # prints one row of NUM_CLASSES zeros per sample
            print(batch_pred_classes)  # prints zeros as well

            for vote_idx in range(num_votes):
                rotated_data = provider.rotate_point_cloud_by_angle(
                    current_data[start_idx:end_idx, :, :],
                    vote_idx / float(num_votes) * np.pi * 2)
                feed_dict = {
                    ops['pointclouds_pl']: rotated_data,
                    ops['labels_pl']: current_label[start_idx:end_idx],
                    ops['is_training_pl']: is_training
                }
                loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                              feed_dict=feed_dict)
                batch_pred_sum += pred_val  # accumulate the scores
                batch_pred_val = np.argmax(pred_val, 1)  # predicted class per sample
                # print(loss_val)
                print(pred_val)  # per-class scores the network assigns to this batch
                print(batch_pred_sum)
                # print(batch_pred_val)  # class derived from the scores; same as pred_val below
                for el_idx in range(cur_batch_size):
                    batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
                    print(batch_pred_classes)  # vote tally after this sample
                batch_loss_sum += (loss_val * cur_batch_size /
                                   float(num_votes))

            # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
            # pred_val = np.argmax(batch_pred_classes, 1)
            pred_val = np.argmax(batch_pred_sum, 1)

            print(pred_val)  # predicted class for each sample in the batch
            # Aggregating END

            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            print(correct)
            # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
            total_correct += correct
            total_seen += cur_batch_size
            loss_sum += batch_loss_sum
            print(total_correct)  # running count of correct predictions
            print(total_seen)  # running count of samples seen

            for i in range(start_idx, end_idx):

                l = current_label[i]
                print(l)  # ground-truth label

                total_seen_class[l] += 1
                # print(total_seen_class[l])  # samples of this class seen so far

                total_correct_class[l] += (pred_val[i - start_idx] == l)
                # print(total_correct_class[l])  # correct predictions for this class so far

                fout.write('%d, %d\n' %
                           (pred_val[i - start_idx], l))  # write predicted and ground-truth labels
                print(pred_val[i - start_idx])  # predicted label

                # fout.write('%d\n'%(pred_val[i - start_idx]))  # write only the predicted label

                if pred_val[
                        i -
                        start_idx] != l and FLAGS.visu:  # ERROR CASE, DUMP!
                    img_filename = '%d_label_%s_pred_%s.jpg' % (
                        error_cnt, SHAPE_NAMES[l],
                        SHAPE_NAMES[pred_val[i - start_idx]])
                    img_filename = os.path.join(DUMP_DIR, img_filename)
                    output_img = pc_util.point_cloud_three_views(
                        np.squeeze(current_data[i, :, :]))
                    scipy.misc.imsave(img_filename, output_img)
                    error_cnt += 1
                    # print(output_img)

    log_string('eval mean loss: %f' %
               (loss_sum / float(total_seen)))  # total loss over number of samples
    log_string('eval accuracy: %f' %
               (total_correct / float(total_seen)))  # correct predictions over number of samples
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(total_correct_class) /
        np.array(total_seen_class, dtype=np.float))))  # mean of the per-class accuracies

    class_accuracies = np.array(total_correct_class) / np.array(
        total_seen_class, dtype=np.float)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
Example 6
def eval_one_epoch(sess, ops, num_votes=1, topk=1, run_metadata=None):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    total_time = 0.
    total_batches = 0.
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')

    for fn in range(len(TEST_FILES)):
        log_string('----' + str(fn) + '----')
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_label = np.squeeze(current_label)
        print(current_data.shape)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        print(file_size)

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE
            cur_batch_size = end_idx - start_idx

            # Aggregating BEG
            batch_loss_sum = 0  # sum of losses for the batch
            batch_pred_sum = np.zeros(
                (cur_batch_size, NUM_CLASSES))  # score for classes
            batch_pred_classes = np.zeros(
                (cur_batch_size, NUM_CLASSES))  # 0/1 for classes
            for vote_idx in range(num_votes):
                rotated_data = provider.rotate_point_cloud_by_angle(
                    current_data[start_idx:end_idx, :, :],
                    vote_idx / float(num_votes) * np.pi * 2)
                feed_dict = {
                    ops['pointclouds_pl']: rotated_data,
                    ops['labels_pl']: current_label[start_idx:end_idx],
                    ops['is_training_pl']: is_training
                }
                # Time measurement
                start = timer()
                loss_val, pred_val = sess.run(
                    [ops['loss'], ops['pred']], feed_dict=feed_dict
                )  #,  options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE), run_metadata=run_metadata)
                end = timer()
                if batch_idx != 0:
                    total_time += (end - start)
                    total_batches += 1
                batch_pred_sum += pred_val
                batch_pred_val = np.argmax(pred_val, 1)
                for el_idx in range(cur_batch_size):
                    batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
                batch_loss_sum += (loss_val * cur_batch_size /
                                   float(num_votes))
            # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
            # pred_val = np.argmax(batch_pred_classes, 1)
            pred_val = np.argmax(batch_pred_sum, 1)
            # Aggregating END

            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
            total_correct += correct
            total_seen += cur_batch_size
            loss_sum += batch_loss_sum

            for i in range(start_idx, end_idx):
                l = current_label[i]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_val[i - start_idx] == l)
                fout.write('%d, %d\n' % (pred_val[i - start_idx], l))

                if pred_val[
                        i -
                        start_idx] != l and FLAGS.visu:  # ERROR CASE, DUMP!
                    img_filename = '%d_label_%s_pred_%s.jpg' % (
                        error_cnt, SHAPE_NAMES[l],
                        SHAPE_NAMES[pred_val[i - start_idx]])
                    img_filename = os.path.join(DUMP_DIR, img_filename)
                    output_img = pc_util.point_cloud_three_views(
                        np.squeeze(current_data[i, :, :]))
                    scipy.misc.imsave(img_filename, output_img)
                    error_cnt += 1

    if VERBOSE:
        flops = tf.profiler.profile(
            sess.graph,
            options=tf.profiler.ProfileOptionBuilder.float_operation(),
            run_meta=run_metadata)
        log_string('flops: %f' % (flops.total_float_ops))

    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(total_correct_class) /
        np.array(total_seen_class, dtype=np.float))))
    log_string('mean evaluation time of one batch: %f' %
               (total_time / total_batches))
    parameter_num = np.sum(
        [np.prod(v.shape.as_list()) for v in tf.trainable_variables()])
    log_string('number of trainable parameters: %d' % (parameter_num))
    class_accuracies = np.array(total_correct_class) / np.array(
        total_seen_class, dtype=np.float)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
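This variant times inference but skips batch 0, since the first run pays one-off graph construction and warm-up costs. That measurement pattern, as a framework-agnostic sketch:

from timeit import default_timer as timer

def mean_batch_time(run_batch, batches, warmup=1):
    """Average wall-clock seconds per batch, ignoring the first `warmup` runs."""
    total, counted = 0.0, 0
    for idx, batch in enumerate(batches):
        start = timer()
        run_batch(batch)
        if idx >= warmup:
            total += timer() - start
            counted += 1
    return total / max(counted, 1)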
Example 7 (fragment; header missing in the source)
    else:
        x_original, target, x_adv, faces = res

    for eps_idx in range(len(args.eps)):
        class_idx = np.random.choice(len(class_names),
                                     size=len(class_names),
                                     replace=False)
        for i in class_idx:
            idx = np.random.choice(len(x_original[eps_idx][i]),
                                   size=min(3, len(x_original[eps_idx][i])),
                                   replace=False)
            for j in idx:
                img_file = "%d_%s_original.jpg" % (
                    j, class_names[target[eps_idx][i][j]])
                img_file = os.path.join(args.output, img_file)
                img = pc_util.point_cloud_three_views(
                    x_original[eps_idx][i][j])
                scipy.misc.imsave(img_file, img)

                eps_str = str(args.eps[eps_idx]).replace(".", "_")
                img_file = "%d_%s_adv_target_%s_eps_%s.jpg" % (j, class_names[
                    target[eps_idx][i][j]], class_names[i], eps_str)
                img_file = os.path.join(args.output, img_file)
                img = pc_util.point_cloud_three_views(x_adv[eps_idx][i][j])
                scipy.misc.imsave(img_file, img)
else:
    res = adversarial_utils.untargeted_attack(
        args.checkpoint,
        args.output,
        x_pl,
        t_pl,
        model_loss_fn,
Example 8
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    want_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')

    for fn in range(len(TEST_FILES)):
        log_string('----'+str(fn)+'----')
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:,0:NUM_POINT,:]
        current_label = np.squeeze(current_label)

        log_string(current_data.shape.__str__())
        
        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        log_string(file_size.__str__())

        
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE
            cur_batch_size = end_idx - start_idx
            
            # Aggregating BEG
            batch_loss_sum = 0 # sum of losses for the batch
            batch_pred_sum = np.zeros((cur_batch_size, NUM_CLASSES)) # score for classes
            batch_pred_classes = np.zeros((cur_batch_size, NUM_CLASSES)) # 0/1 for classes
            for vote_idx in range(num_votes):
                nnnnn = 1  # batch index whose intermediate tensors are dumped below

                rotated_data = provider.rotate_point_cloud_by_angle(current_data[start_idx:end_idx, :, :],
                                                  vote_idx/float(num_votes) * np.pi * 2)

                feed_dict = {ops['pointclouds_pl']: rotated_data,
                             ops['labels_pl']: current_label[start_idx:end_idx],
                             ops['is_training_pl']: is_training}


                # if (fn==0 and batch_idx == nnnnn):
                loss_val, pred_val, point_cloud_transformed,PC_after_transformed1,PC_after_transformed2,after_maxpool,rotateTransform \
                    = sess.run([ops['loss'], ops['pred'], ops['point_cloud_transformed'],ops['PC_after_transformed1'],ops['PC_after_transformed2'],ops['after_maxpool'],ops['rotateTransform']],
                                                      feed_dict=feed_dict)
                # else:
                #     loss_val, pred_val = sess.run([ops['loss'], ops['pred']], feed_dict=feed_dict)
                if (fn==0 and batch_idx == nnnnn):
                    log_string ("---point_cloud_transformed---")

                    log_string (point_cloud_transformed.shape.__str__())
                    log_string(point_cloud_transformed[0, :, :][0].__str__())
                    log_string ("---PC_after_transformed1---")
                    log_string (PC_after_transformed1.shape.__str__())
                    log_string ("---PC_after_transformed2---")
                    log_string (PC_after_transformed2.shape.__str__())
                    log_string ("---after_maxpool---")
                    log_string (after_maxpool.shape.__str__())
                    log_string("---rotateTransform---")
                    log_string(rotateTransform.shape.__str__())
                    log_string(rotateTransform[0].__str__())
                    log_string ("---pred_val---")
                    log_string (pred_val.shape.__str__())


                batch_pred_sum += pred_val
                batch_pred_val = np.argmax(pred_val, 1)
                for el_idx in range(cur_batch_size):
                    batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
                batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
            # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
            # pred_val = np.argmax(batch_pred_classes, 1)
            pred_val = np.argmax(batch_pred_sum, 1)
            # Aggregating END
            
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
            total_correct += correct
            total_seen += cur_batch_size
            loss_sum += batch_loss_sum

            for i in range(start_idx, end_idx):
                l = current_label[i]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_val[i-start_idx] == l)
                fout.write('%d, %d\n' % (pred_val[i-start_idx], l))

                THS_DIR = os.path.join(FIN_DIR, SHAPE_NAMES[l])
                if not os.path.exists(THS_DIR): os.mkdir(THS_DIR)

                ## log the rotation transform for this sample
                THS_FOUT = open(os.path.join(ROTMAT_DIR, '%s_evaluate.txt'%(SHAPE_NAMES[l])), 'a')
                THS_FOUT.write('%d_TnetR_label_%s_pred_%s\n' % (want_cnt, SHAPE_NAMES[l],SHAPE_NAMES[pred_val[i-start_idx]]))
                THS_FOUT.write(rotateTransform[i-start_idx].__str__()+'\n\n')
                THS_FOUT.flush()

                Nx3Tnet[l]['%d' % (want_cnt)] = rotateTransform[i - start_idx]

                if FLAGS.visu:
                    # if(fn==0 and batch_idx== nnnnn):

                    RAimg_filename = '%d_beforeR_label_%s_pred_%s.jpg' % (want_cnt, SHAPE_NAMES[l],SHAPE_NAMES[pred_val[i-start_idx]])
                    RAimg_filename = os.path.join(THS_DIR, RAimg_filename)
                    RAoutput_img = pc_util.point_cloud_three_views(np.squeeze(rotated_data[i-start_idx, :, :]))
                    # RAoutput_img = pc_util.draw_point_cloud(np.squeeze(rotated_data[0, :, :]))
                    # print(rotated_data[0, :, :][0])
                    scipy.misc.imsave(RAimg_filename, RAoutput_img)

                    # print("---")
                    # print(point_cloud_transformed[0, :, :][0])
                    Nimg_filename = '%d_TnetR_label_%s_pred_%s.jpg' % (want_cnt, SHAPE_NAMES[l],SHAPE_NAMES[pred_val[i-start_idx]])
                    Nimg_filename = os.path.join(THS_DIR, Nimg_filename)
                    Noutput_img = pc_util.point_cloud_three_views(np.squeeze(point_cloud_transformed[i-start_idx, :, :]))
                    # Noutput_img = pc_util.draw_point_cloud(np.squeeze(point_cloud_transformed[0, :, :]))
                    scipy.misc.imsave(Nimg_filename, Noutput_img)

                    if pred_val[i-start_idx] != l: # ERROR CASE, DUMP!
                            img_filename = '%d_label_%s_pred_%s.jpg' % (error_cnt, SHAPE_NAMES[l],
                                                                   SHAPE_NAMES[pred_val[i-start_idx]])
                            img_filename = os.path.join(DUMP_DIR, img_filename)
                            output_img = pc_util.point_cloud_three_views(np.squeeze(current_data[i, :, :]))
                            scipy.misc.imsave(img_filename, output_img)
                            error_cnt += 1
                    # else:
                    #     	img_filename = '%d_label_%s_pred_%s.jpg' % (want_cnt, SHAPE_NAMES[l],
                    #                                                SHAPE_NAMES[pred_val[i-start_idx]])
                    #         	img_filename = os.path.join(WANT_DIR, img_filename)
                    #         	output_img = pc_util.point_cloud_three_views(np.squeeze(current_data[i, :, :]))
                    #        	scipy.misc.imsave(img_filename, output_img)
                    #             	want_cnt +=
                want_cnt += 1

    #save to npz
    print (len(SHAPE_NAMES))
    for i in range(0, len(SHAPE_NAMES)):
        npzFile = os.path.join(ROTMAT_DIR, '%s.npz'%(SHAPE_NAMES[i]))
        np.savez(npzFile, **(Nx3Tnet[i]))


    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))
    
    class_accuracies = np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
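Example 8 accumulates one dict of rotation matrices per shape and writes each dict out with np.savez(file, **dict). A minimal round-trip sketch of that storage pattern (file name and keys hypothetical):

import numpy as np

mats = {'0': np.eye(3), '1': 2.0 * np.eye(3)}  # npz keys must be strings
np.savez('chair.npz', **mats)
loaded = np.load('chair.npz')
assert np.allclose(loaded['1'], 2.0 * np.eye(3))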
Example 9
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')
    for fn in range(len(TEST_FILES)):
        log_string('----' + str(fn) + '----')
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        print(file_size)

        # set by wind:
        # my code is based on batch_size = 1
        # set batch_size = 1 for this file
        for batch_idx in range(file_size):
            start_idx = batch_idx
            end_idx = batch_idx + 1
            cur_batch_size = 1

            #-------------------------------------------------------------------
            # get critical points
            #-------------------------------------------------------------------
            no_influence_position = current_data[start_idx, 0, :].copy()

            global_feature_list = []
            orgin_data = current_data[start_idx, :, :].copy()

            # collapse every point onto one harmless location first
            for change_point in range(NUM_POINT):
                current_data[start_idx,
                             change_point, :] = no_influence_position.copy()

            # then restore the points one at a time, recording the global
            # feature after each restoration
            for change_point in range(NUM_POINT):
                current_data[start_idx, change_point, :] = orgin_data[
                    change_point, :].copy()
                # Aggregating BEG
                for vote_idx in range(num_votes):
                    rotated_data = provider.rotate_point_cloud_by_angle(
                        current_data[start_idx:end_idx, :, :],
                        vote_idx / float(num_votes) * np.pi * 2)
                    feed_dict = {
                        ops['pointclouds_pl']: rotated_data,
                        ops['labels_pl']: current_label[start_idx:end_idx],
                        ops['is_training_pl']: is_training
                    }

                    global_feature_val = sess.run(ops['global_feature'],
                                                  feed_dict=feed_dict)

                    global_feature_list.append(global_feature_val)

            critical_points = []
            max_feature = np.zeros(global_feature_list[0].size) - 10
            feature_points = np.zeros(global_feature_list[0].size)
            for index in range(len(global_feature_list)):
                #distance = math.sqrt(((global_feature_list[index] - global_feature_list[-1]) ** 2).sum())
                #distance_list.append(distance)
                top = global_feature_list[index]
                feature_points = np.where(top > max_feature, index,
                                          feature_points)
                max_feature = np.where(top > max_feature, top, max_feature)

            for index in feature_points[0]:
                critical_points.append(orgin_data[int(index), :])
            critical_points = list(set([tuple(t) for t in critical_points]))
            print(len(critical_points))

            img_filename = './test/%d_critical_points.jpg' % (start_idx)
            output_img = pc_util.point_cloud_three_views(
                np.squeeze(critical_points))
            scipy.misc.imsave(img_filename, output_img)

            img_filename = './test/%d_orgin_points.jpg' % (start_idx)
            output_img = pc_util.point_cloud_three_views(
                np.squeeze(orgin_data))
            scipy.misc.imsave(img_filename, output_img)

            #-------------------------------------------------------------------
            # get upper-bound points
            #-------------------------------------------------------------------
            upper_bound_points = orgin_data.copy()
            current_data[start_idx, :, :] = orgin_data.copy()

            search_step = 0.02
            stand_feature = None  # global feature of the unmodified cloud, set below
            max_position = [-1, -1, -1]
            min_position = [1, 1, 1]

            for point_index in range(NUM_POINT):
                max_position = np.maximum(
                    max_position, current_data[start_idx, point_index, :])
                min_position = np.minimum(
                    min_position, current_data[start_idx, point_index, :])

            print(max_position)
            print(min_position)
            for vote_idx in range(num_votes):
                rotated_data = provider.rotate_point_cloud_by_angle(
                    current_data[start_idx:end_idx, :, :],
                    vote_idx / float(num_votes) * np.pi * 2)
                feed_dict = {
                    ops['pointclouds_pl']: rotated_data,
                    ops['labels_pl']: current_label[start_idx:end_idx],
                    ops['is_training_pl']: is_training
                }

                global_feature_val = sess.run(ops['global_feature'],
                                              feed_dict=feed_dict)
                stand_feature = global_feature_val.copy()

            change_point = 0
            current_data[start_idx, :, :] = orgin_data.copy()
            for point_index in range(NUM_POINT):
                if not (point_index in feature_points[0]):
                    change_point = point_index
                    break

            for x in np.linspace(
                    min_position[0], max_position[0],
                (max_position[0] - min_position[0]) // search_step + 1):
                for y in np.linspace(
                        min_position[1], max_position[1],
                    (max_position[1] - min_position[1]) // search_step + 1):
                    for z in np.linspace(
                            min_position[2], max_position[2],
                        (max_position[2] - min_position[2]) // search_step +
                            1):
                        current_data[start_idx,
                                     change_point, :] = (x, y, z
                                                         )  #+ orgin_position

                        # Aggregating BEG
                        for vote_idx in range(num_votes):
                            rotated_data = provider.rotate_point_cloud_by_angle(
                                current_data[start_idx:end_idx, :, :],
                                vote_idx / float(num_votes) * np.pi * 2)
                            feed_dict = {
                                ops['pointclouds_pl']: rotated_data,
                                ops['labels_pl']:
                                current_label[start_idx:end_idx],
                                ops['is_training_pl']: is_training
                            }

                            global_feature_val = sess.run(
                                ops['global_feature'], feed_dict=feed_dict)

                            if (global_feature_val <= stand_feature).all():
                                upper_bound_points = np.append(
                                    upper_bound_points,
                                    np.array([[x, y, z]]),
                                    axis=0)
                print(x)

            img_filename = './test/%d_upper_bound_points.jpg' % (start_idx)
            output_img = pc_util.point_cloud_three_views(
                np.squeeze(upper_bound_points))
            scipy.misc.imsave(img_filename, output_img)

            current_data[start_idx, :, :] = orgin_data.copy()
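The search above probes the network once per restored point; if the per-point features feeding the global max-pool are available directly, the critical set reduces to a single argmax over the pooling axis. A NumPy sketch under that assumption:

import numpy as np

def critical_point_indices(point_features):
    """point_features: (N, F) per-point features before global max-pooling.

    Returns indices of points attaining the max in at least one channel,
    i.e. the points that alone determine the pooled global descriptor."""
    winners = np.argmax(point_features, axis=0)  # (F,) winning point per channel
    return np.unique(winners)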
Example 10 (fragment; header missing in the source)
for fn in range(len(TEST_FILES)):
    print('----' + str(fn) + '----')
    current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
    current_data = current_data[:, 0:NUM_POINT, :]
    current_label = np.squeeze(current_label)

    file_size = current_data.shape[0]

    for file_idx in range(0, file_size, BATCH_SIZE):
        if file_idx + BATCH_SIZE > file_size:
            break
        data = current_data[file_idx:file_idx + BATCH_SIZE, :, :]

        for idx in range(BATCH_SIZE):
            img_filename = f"images/test_{count}_idx_{idx}_front.jpg"
            output_img = pc_util.point_cloud_three_views(np.squeeze(data[idx]))
            skimage.io.imsave(img_filename, output_img)

        if ROTATE_POINT_CLOUD:
            for k in range(6):
                rotated_data = provider.rotate_point_by_label(
                    data, [k] * BATCH_SIZE)
                for idx in range(BATCH_SIZE):
                    img_filename = f"images/test_{count}_idx_{idx}_rot_{k}.jpg"
                    output_img = pc_util.point_cloud_three_views(
                        np.squeeze(rotated_data[idx]))
                    skimage.io.imsave(img_filename, output_img)

        if ROTATE_TENSOR:
            w = tf.constant(data)
            for k in range(6):
Example 11
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')

    total_correct_seg = 0

    # data_utils.shuffle_points(TEST_DATA)

    # current_data, current_label = data_utils.get_current_data(TEST_DATA, TEST_LABELS, NUM_POINT)
    # current_data, current_label = data_utils.get_current_data_h5(TEST_DATA, TEST_LABELS, NUM_POINT)
    current_data, current_label, current_mask = data_utils.get_current_data_withmask_h5(TEST_DATA, TEST_LABELS, TEST_MASKS, NUM_POINT, shuffle=False)

    current_label = np.squeeze(current_label)
    current_mask = np.squeeze(current_mask)

    num_batches = current_data.shape[0]//BATCH_SIZE

    current_pred = []
    
    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx+1) * BATCH_SIZE
        cur_batch_size = end_idx - start_idx
        
        # Aggregating BEG
        batch_loss_sum = 0 # sum of losses for the batch
        batch_pred_sum = np.zeros((cur_batch_size, NUM_CLASSES)) # score for classes
        batch_seg_sum = np.zeros((cur_batch_size, NUM_POINT, 2)) # per-point scores for the 2 segmentation classes
        batch_pred_classes = np.zeros((cur_batch_size, NUM_CLASSES)) # 0/1 for classes
        for vote_idx in range(num_votes):
            rotated_data = provider.rotate_point_cloud_by_angle(current_data[start_idx:end_idx, :, :],
                                              vote_idx/float(num_votes) * np.pi * 2)
            feed_dict = {ops['pointclouds_pl']: rotated_data,
                         ops['masks_pl']: current_mask[start_idx:end_idx],
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['is_training_pl']: is_training}
            loss_val, pred_val, seg_val = sess.run([ops['loss'], ops['pred'],ops['seg_pred']],
                                      feed_dict=feed_dict)

            batch_pred_sum += pred_val
            batch_seg_sum += seg_val
            batch_pred_val = np.argmax(pred_val, 1)
            for el_idx in range(cur_batch_size):
                batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
            batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
        # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
        # pred_val = np.argmax(batch_pred_classes, 1)
        pred_val = np.argmax(batch_pred_sum, 1)
        # Aggregating END
        seg_val = np.argmax(batch_seg_sum, 2)
        seg_correct = np.sum(seg_val == current_mask[start_idx:end_idx])
        total_correct_seg += seg_correct

        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
        total_correct += correct
        total_seen += cur_batch_size
        loss_sum += batch_loss_sum

        for i in range(start_idx, end_idx):
            l = current_label[i]
            total_seen_class[l] += 1
            total_correct_class[l] += (pred_val[i-start_idx] == l)

            current_pred.append(pred_val[i-start_idx])

            fout.write('%s, %s\n' % (SHAPE_NAMES[pred_val[i-start_idx]], SHAPE_NAMES[l]))

            gt_mask = current_mask[i]
            pred_mask = seg_val[i-start_idx]

            pred_mask_idx = np.where(pred_mask==1)[0]
            gt_mask_idx = np.where(gt_mask==1)[0]
            correct_obj_mask = np.where((pred_mask==gt_mask) & (pred_mask==1))[0]

            if (len(correct_obj_mask)==1):
                continue

            if (FLAGS.visu_mask and pred_val[i-start_idx] == l):
                fname = str(start_idx)+'_gt'
                fname = os.path.join(DUMP_DIR, fname)
                save_binfiles(current_data[start_idx,:,:], gt_mask,fname)

                fname = str(start_idx)+'_pred'
                fname = os.path.join(DUMP_DIR, fname)
                save_binfiles(current_data[start_idx,:,:], pred_mask,fname)

                fname = str(start_idx)+'_pred.obj'
                fname = os.path.join(DUMP_DIR, fname)
                output_color_point_cloud(current_data[start_idx,:,:], pred_mask,fname)

                fname = str(start_idx)+'_gt.obj'
                fname = os.path.join(DUMP_DIR, fname)
                output_color_point_cloud(current_data[start_idx,:,:], gt_mask,fname)

                ###1)
                img_filename = '%d_label_%s_pred_%s_gtmask.jpg' % (i, SHAPE_NAMES[l],
                                                       SHAPE_NAMES[pred_val[i-start_idx]])
                img_filename = os.path.join(DUMP_DIR, img_filename)
                output_img = pc_util.point_cloud_three_views(np.squeeze(current_data[i, gt_mask_idx, :]))
                scipy.misc.imsave(img_filename, output_img)

            #     #save ply
            #     ply_filename = '%d_label_%s_pred_%s_gtmask.ply' % (i, SHAPE_NAMES[l],
            #                                            SHAPE_NAMES[pred_val[i-start_idx]])
            #     ply_filename = os.path.join(DUMP_DIR, ply_filename)
            #     data_utils.save_ply(np.squeeze(current_data[i, gt_mask_idx, :]),ply_filename)                   

                ###2)
                img_filename = '%d_label_%s_pred_%s_predmask.jpg' % (i, SHAPE_NAMES[l],
                                                       SHAPE_NAMES[pred_val[i-start_idx]])
                img_filename = os.path.join(DUMP_DIR, img_filename)
                output_img = pc_util.point_cloud_three_views(np.squeeze(current_data[i, pred_mask_idx, :]))
                scipy.misc.imsave(img_filename, output_img)

            #     #save ply
            #     ply_filename = '%d_label_%s_pred_%s_predmask.ply' % (i, SHAPE_NAMES[l],
            #                                            SHAPE_NAMES[pred_val[i-start_idx]])
            #     ply_filename = os.path.join(DUMP_DIR, ply_filename)
            #     data_utils.save_ply(np.squeeze(current_data[i, pred_mask_idx, :]),ply_filename) 

                ###3)
                img_filename = '%d_label_%s_pred_%s_correctpredmask.jpg' % (i, SHAPE_NAMES[l],
                                                       SHAPE_NAMES[pred_val[i-start_idx]])
                img_filename = os.path.join(DUMP_DIR, img_filename)
                output_img = pc_util.point_cloud_three_views(np.squeeze(current_data[i, correct_obj_mask, :]))
                scipy.misc.imsave(img_filename, output_img)

            #     #save ply
            #     ply_filename = '%d_label_%s_pred_%s_correctpredmask.ply' % (i, SHAPE_NAMES[l],
            #                                            SHAPE_NAMES[pred_val[i-start_idx]])
            #     ply_filename = os.path.join(DUMP_DIR, ply_filename)
            #     data_utils.save_ply(np.squeeze(current_data[i, correct_obj_mask, :]),ply_filename)                 
            
            # if pred_val[i-start_idx] != l and FLAGS.visu: # ERROR CASE, DUMP!
            #     img_filename = '%d_label_%s_pred_%s.jpg' % (error_cnt, SHAPE_NAMES[l],
            #                                            SHAPE_NAMES[pred_val[i-start_idx]])
            #     img_filename = os.path.join(DUMP_DIR, img_filename)
            #     output_img = pc_util.point_cloud_three_views(np.squeeze(current_data[i, :, :]))
            #     scipy.misc.imsave(img_filename, output_img)
            #     #save ply
            #     ply_filename = '%d_label_%s_pred_%s.ply' % (error_cnt, SHAPE_NAMES[l],
            #                                            SHAPE_NAMES[pred_val[i-start_idx]])
            #     ply_filename = os.path.join(DUMP_DIR, ply_filename)
            #     data_utils.save_ply(np.squeeze(current_data[i, :, :]),ply_filename)                
            #     error_cnt += 1
    
    log_string('total seen: %d' % (total_seen)) 
    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))
    log_string('seg accuracy: %f' % (total_correct_seg / (float(total_seen)*NUM_POINT)))
    
    class_accuracies = np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
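The segmentation accuracy logged in this variant is plain per-point agreement averaged over every point of every cloud seen. As a compact sketch:

import numpy as np

def segmentation_accuracy(pred_masks, gt_masks):
    """pred_masks, gt_masks: integer arrays of shape (B, N)."""
    return float(np.mean(np.asarray(pred_masks) == np.asarray(gt_masks)))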
Example 12
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')
    for fn in range(len(TEST_FILES)):
        log_string('----'+str(fn)+'----')
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:,0:NUM_POINT,:]
        current_label = np.squeeze(current_label)
        print(current_data.shape)
        
        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        print(file_size)
        
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE
            cur_batch_size = end_idx - start_idx
            
            # Aggregating BEG
            batch_loss_sum = 0 # sum of losses for the batch
            batch_pred_sum = np.zeros((cur_batch_size, NUM_CLASSES)) # score for classes
            batch_pred_classes = np.zeros((cur_batch_size, NUM_CLASSES)) # 0/1 for classes
            for vote_idx in range(num_votes):
                rotated_data = provider.rotate_point_cloud_by_angle(current_data[start_idx:end_idx, :, :],
                                                  vote_idx/float(num_votes) * np.pi * 2)
                feed_dict = {ops['pointclouds_pl']: rotated_data,
                             ops['labels_pl']: current_label[start_idx:end_idx],
                             ops['is_training_pl']: is_training}
                loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                          feed_dict=feed_dict)
                batch_pred_sum += pred_val
                batch_pred_val = np.argmax(pred_val, 1)
                for el_idx in range(cur_batch_size):
                    batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
                batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
            # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
            # pred_val = np.argmax(batch_pred_classes, 1)
            pred_val = np.argmax(batch_pred_sum, 1)
            # Aggregating END
            
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
            total_correct += correct
            total_seen += cur_batch_size
            loss_sum += batch_loss_sum

            for i in range(start_idx, end_idx):
                l = current_label[i]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_val[i-start_idx] == l)
                fout.write('%d, %d\n' % (pred_val[i-start_idx], l))
                
                if pred_val[i-start_idx] != l and FLAGS.visu: # ERROR CASE, DUMP!
                    img_filename = '%d_label_%s_pred_%s.jpg' % (error_cnt, SHAPE_NAMES[l],
                                                           SHAPE_NAMES[pred_val[i-start_idx]])
                    img_filename = os.path.join(DUMP_DIR, img_filename)
                    output_img = pc_util.point_cloud_three_views(np.squeeze(current_data[i, :, :]))
                    scipy.misc.imsave(img_filename, output_img)
                    error_cnt += 1
                
    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))
    
    class_accuracies = np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))