Example #1
def eval(config, solver, epoch=0):
    acc = 0
    loss = 0
    labels = []
    predictions = []

    batch_size = solver.test_nets[0].blobs['data'].num
    test_iters = int(
        get_dataset_size(config, 'testrotnet') / batch_size)

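    # Run the test net batch by batch, accumulating accuracy and loss
    # directly from the net's output blobs.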
    for i in range(test_iters):
        solver.test_nets[0].forward()
        acc += solver.test_nets[0].blobs['my_accuracy'].data
        loss += solver.test_nets[0].blobs['(automatic)'].data
        probs = solver.test_nets[0].blobs['prob'].data
        predictions += classify(probs)
        labels.append(int(solver.test_nets[0].blobs['label'].data[0]))

    acc /= test_iters
    loss /= test_iters
    if not config.test:
        print("Accuracy: {:.3f}".format(acc))
        print("Loss: {:.3f}".format(loss))
        LOSS_LOGGER.log(loss, epoch, "eval_loss")
        ACC_LOGGER.log(acc, epoch, "eval_accuracy")
    else:
        import Evaluation_tools as et
        eval_file = os.path.join(config.log_dir, '{}.txt'.format(config.name))
        et.write_eval_file(config.data, eval_file, predictions, labels,
                           config.name)
        et.make_matrix(config.data, eval_file, config.log_dir)
Example #2
def eval_alone(config):
    data = model_data.read_data(config.data, config, read_train=False)
    data = data.test
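    # Build the sequence model with batch size equal to the whole test set,
    # so a single forward pass scores every example.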
    seq_rnn_model = SequenceRNNModel(config.n_input_fc, config.num_views,
                                     config.n_hidden,
                                     config.decoder_embedding_size,
                                     config.num_classes + 1, config.n_hidden,
                                     batch_size=data.size(),
                                     is_training=False,
                                     use_lstm=config.use_lstm,
                                     use_attention=config.use_attention,
                                     use_embedding=config.use_embedding,
                                     num_heads=config.num_heads)
    seq_rnn_model.build_model("train")
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    # config.gpu_options.per_process_gpu_memory_fraction = 0.3
    with tf.Session(config=tf_config) as sess:
        saver = tf.train.Saver()
        saver.restore(sess, get_modelpath(config.weights))
        acc, loss, predictions, labels = _test(data, seq_rnn_model, sess)
    log(config.log_file, "TESTING ACCURACY {}".format(acc))
    
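    # Shift the 1-based class ids back to 0-based for the evaluation tools.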
    predictions = [x - 1 for x in predictions]
    labels = [x - 1 for x in labels]
    
    import Evaluation_tools as et
    eval_file = os.path.join(config.log_dir, '{}.txt'.format(config.name))
    et.write_eval_file(config.data, eval_file, predictions, labels, config.name)
    et.make_matrix(config.data, eval_file, config.log_dir)    
Example #3
def eval(config, solver, epoch=0):
    acc = 0
    loss = 0
    all_labels = []
    predictions = []
    test_count = get_dataset_size(config, 'test')
    batch_size = (solver.test_nets[0].blobs['label_octreedatabase_1_split_0'].
                  data.shape[0])
    test_iters = test_count // batch_size

    logits = np.zeros((test_count, config.num_classes))
    print(logits.shape)
    for i in range(test_iters):
        solver.test_nets[0].forward()
        loss += solver.test_nets[0].blobs['loss'].data
        probs = solver.test_nets[0].blobs['ip2'].data

        logits[i * batch_size:(i + 1) * batch_size] = probs
        all_labels += list(solver.test_nets[0].blobs['label'].data)

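    # One extra forward pass covers the remainder batch when test_count is
    # not a multiple of batch_size.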
    solver.test_nets[0].forward()
    loss += solver.test_nets[0].blobs['loss'].data
    probs = solver.test_nets[0].blobs['ip2'].data
    logits[batch_size * test_iters:] = probs[0:test_count % batch_size]
    all_labels += list(solver.test_nets[0].blobs['label'].data)[0:test_count %
                                                                batch_size]

    loss /= test_iters + 1

    predictions = []
    labels = []

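    # Pool the num_rotations copies of each object: sum their logits and
    # take the argmax as the object's prediction.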
    for i in range(len(all_labels) // config.num_rotations):
        predictions.append(
            np.argmax(
                np.sum(logits[i * config.num_rotations:(i + 1) *
                              config.num_rotations],
                       axis=0)))
        labels.append(all_labels[i * config.num_rotations])

    acc = sum([1 for i in range(len(labels))
               if predictions[i] == labels[i]]) / float(len(labels))

    if not config.test:
        log(config.log_file, "EPOCH: {} Test loss: {}".format(epoch, loss))
        log(config.log_file, "EPOCH: {} Test accuracy: {}".format(epoch, acc))
        LOSS_LOGGER.log(loss, epoch, "eval_loss")
        ACC_LOGGER.log(acc, epoch, "eval_accuracy")
    else:
        log(config.log_file, "----------------------")
        import Evaluation_tools as et
        labels = [int(l) for l in labels]
        eval_file = os.path.join(config.log_dir, '{}.txt'.format(config.name))
        et.write_eval_file(config.data, eval_file, predictions, labels,
                           config.name)
        et.make_matrix(config.data, eval_file, config.log_dir)
Example #4
def test(config, test_vertices, test_faces, test_nFaces, test_labels):
    log(config['log_file'], "Start testing")
    config['mode'] = 'test'
    _, predictions = acc_fun(net, test_vertices, test_faces, test_nFaces,
                             test_labels, config)
    acc = 100. * (predictions == test_labels).sum() / len(test_labels)

    log(config['log_file'], 'Eval accuracy: {}'.format(acc))
    import Evaluation_tools as et
    eval_file = os.path.join(config['log_dir'], '{}.txt'.format(config['name']))
    print(eval_file)
    et.write_eval_file(config['data'], eval_file, predictions, test_labels,
                       config['name'])
    et.make_matrix(config['data'], eval_file, config['log_dir'])
Example #5
def test(model, config, best_accuracy=0, epoch=None):
    batch_amount = 0
    model.test_loss.data.zero_()
    model.test_accuracy.data.zero_()

    predictions = []
    labels = []

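    # Accumulate loss and accuracy over the test set, weighting each batch
    # by its size so the final figures are per-sample averages.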
    for i, data in enumerate(testloader):
        input_pc, input_sn, input_label, input_node, input_node_knn_I = data
        model.set_input(input_pc, input_sn, input_label, input_node,
                        input_node_knn_I)
        model.test_model()

        batch_amount += input_label.size()[0]
        model.test_loss += model.loss.detach() * input_label.size()[0]

        # accumulate accuracy
        _, predicted_idx = torch.max(model.score.data, dim=1, keepdim=False)
        predictions += list(predicted_idx)
        labels += list(input_label)

        correct_mask = torch.eq(predicted_idx, model.input_label).float()
        test_accuracy = torch.mean(correct_mask).cpu()

        model.test_accuracy += test_accuracy * input_label.size()[0]

    model.test_loss /= batch_amount
    model.test_accuracy /= batch_amount

    if config.test:
        predictions = [x.item() for x in predictions]
        labels = [x.item() for x in labels]
        import Evaluation_tools as et
        eval_file = os.path.join(config.log_dir, '{}.txt'.format(config.name))
        et.write_eval_file(config.data, eval_file, predictions, labels,
                           config.name)
        et.make_matrix(config.data, eval_file, config.log_dir)
    else:
        if model.test_accuracy.item() > best_accuracy:
            best_accuracy = model.test_accuracy.item()
        loss = model.test_loss.item()
        acc = model.test_accuracy.item()
        log(config.log_file,
            'Tested network. So far best: {}'.format(best_accuracy))
        log(config.log_file,
            "TESTING EPOCH {} acc: {} loss: {}".format(epoch, acc, loss))
        LOSS_LOGGER.log(loss, epoch, "eval_loss")
        ACC_LOGGER.log(acc, epoch, "eval_accuracy")
        return best_accuracy
Example #6
def test(config):
    log_dir = os.path.join(config.log_dir, config.name + '_stage_2')

    val_path = os.path.join(config.data, "*/test")

    val_dataset = MultiviewImgDataset(val_path,
                                      scale_aug=False,
                                      rot_aug=False,
                                      num_views=config.num_views)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=config.stage2_batch_size,
        shuffle=False,
        num_workers=0)

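    # Stage 2: wrap the single-view backbone (SVCNN) in the multi-view
    # network (MVCNN) and restore the stage-2 snapshot.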
    pretraining = not config.no_pretraining
    cnet = SVCNN(config.name,
                 nclasses=config.num_classes,
                 cnn_name=config.cnn_name,
                 pretraining=pretraining)

    cnet_2 = MVCNN(config.name,
                   cnet,
                   nclasses=config.num_classes,
                   cnn_name=config.cnn_name,
                   num_views=config.num_views)
    cnet_2.load(
        os.path.join(log_dir, config.snapshot_prefix + str(config.weights)))
    optimizer = optim.Adam(cnet_2.parameters(),
                           lr=config.learning_rate,
                           weight_decay=config.weight_decay,
                           betas=(0.9, 0.999))

    trainer = ModelNetTrainer(cnet_2,
                              None,
                              val_loader,
                              optimizer,
                              nn.CrossEntropyLoss(),
                              config,
                              log_dir,
                              num_views=config.num_views)

    labels, predictions = trainer.update_validation_accuracy(config.weights,
                                                             test=True)
    import Evaluation_tools as et
    eval_file = os.path.join(config.log_dir, '{}.txt'.format(config.name))
    et.write_eval_file(config.data, eval_file, predictions, labels,
                       config.name)
    et.make_matrix(config.data, eval_file, config.log_dir)
Example #7
def eval(config, solver, epoch=0):
    acc = 0
    loss = 0
    labels = []
    predictions = []
    test_count = get_dataset_size(config, 'test')

    batch_size = (
        solver.test_nets[0].blobs['label_data_1_split_0'].data.shape[0])
    test_iters = test_count // batch_size

    for i in range(test_iters):
        solver.test_nets[0].forward()

        acc += solver.test_nets[0].blobs['accuracy'].data
        loss += solver.test_nets[0].blobs['loss'].data

        probs = solver.test_nets[0].blobs['ip2'].data
        predictions += list(np.argmax(np.array(probs), axis=1))
        labels += list(solver.test_nets[0].blobs['label'].data)

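    # A final forward pass picks up the leftover samples when test_count is
    # not divisible by batch_size.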
    solver.test_nets[0].forward()
    acc += solver.test_nets[0].blobs['accuracy'].data
    loss += solver.test_nets[0].blobs['loss'].data
    probs = solver.test_nets[0].blobs['ip2'].data
    predictions += list(np.argmax(np.array(probs),
                                  axis=1))[0:test_count % batch_size]
    labels += list(solver.test_nets[0].blobs['label'].data)[0:test_count %
                                                            batch_size]

    acc /= test_iters + 1
    loss /= test_iters + 1

    if not config.test:
        print("Accuracy: {:.3f}".format(acc))
        print("Loss: {:.3f}".format(loss))
        LOSS_LOGGER.log(loss, epoch, "eval_loss")
        ACC_LOGGER.log(acc, epoch, "eval_accuracy")
    else:
        print("----------------------")
        import Evaluation_tools as et
        labels = [int(l) for l in labels]
        eval_file = os.path.join(config.log_dir, '{}.txt'.format(config.name))
        et.write_eval_file(config.data, eval_file, predictions, labels,
                           config.name)
        et.make_matrix(config.data, eval_file, config.log_dir)
Example #8
def evaluate(x_test, y_test, cfg, tfuncs, tvars, config, epoch=0):
    log(config.log_file, "testing")
    n_rotations = cfg['n_rotations']
    chunk_size = n_rotations * cfg['batches_per_chunk']
    num_chunks = int(math.ceil(float(len(x_test)) / chunk_size))

    labels = []
    test_class_error = []
    pred_array = []
    losses = []
    accs = []
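    # Evaluate in chunks: copy each chunk into the shared variables, then
    # run the compiled test function batch by batch.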
    for chunk_index in xrange(num_chunks):
        upper_range = min(len(y_test), (chunk_index + 1) * chunk_size)
        x_shared = np.asarray(x_test[chunk_index *
                                     chunk_size:upper_range, :, :, :, :],
                              dtype=np.float32)
        y_shared = np.asarray(y_test[chunk_index * chunk_size:upper_range],
                              dtype=np.float32)

        num_batches = int(math.ceil(float(len(x_shared)) / n_rotations))

        tvars['X_shared_'].set_value(4.0 * x_shared - 1.0, borrow=True)
        tvars['y_shared_'].set_value(y_shared, borrow=True)
        for bi in xrange(num_batches):
            [batch_loss, batch_test_class_error, pred, raw_pred,
             y] = tfuncs['test_function'](bi)
            losses.append(batch_loss)
            accs.append(batch_test_class_error)
            pred_array.append(np.array(pred))
            labels.append(y[0])

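    # accs holds per-batch class errors, so accuracy is 1 minus their mean.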
    loss, acc = [float(np.mean(losses)), 1.0 - float(np.mean(accs))]
    predictions = list(pred_array)
    log(
        config.log_file,
        'EVALUATION: epoch: {0:^3d}, loss: {1:.6f}, acc: {2:.5f}'.format(
            epoch, loss, acc))
    if not config.test:
        LOSS_LOGGER.log(loss, epoch, "eval_loss")
        ACC_LOGGER.log(acc, epoch, "eval_accuracy")
    else:
        import Evaluation_tools as et
        eval_file = os.path.join(config.log_dir, '{}.txt'.format(config.name))
        et.write_eval_file(config.data, eval_file, predictions, labels,
                           config.name)
        et.make_matrix(config.data, eval_file, config.log_dir)
Example #9
def test(dataset, config):
    print('test() called')
    weights = config.weights
    V = config.num_views
    batch_size = config.batch_size
    ckptfile = os.path.join(config.log_dir,
                            config.snapshot_prefix + str(weights))
    data_size = dataset.size()
    print('dataset size:', data_size)

    with tf.Graph().as_default():

        global_step = tf.Variable(0, trainable=False)

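        # Placeholders for a batch of V-view image stacks and their labels.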
        view_ = tf.placeholder('float32',
                               shape=(None, V, 227, 227, 3),
                               name='im0')
        y_ = tf.placeholder('int64', shape=(None), name='y')
        keep_prob_ = tf.placeholder('float32')

        fc8 = model.inference_multiview(view_, config.num_classes, keep_prob_)
        loss = model.loss(fc8, y_)
        #train_op = model.train(loss, global_step, data_size)
        prediction = model.classify(fc8)
        placeholders = [view_, y_, keep_prob_, prediction, loss]
        saver = tf.train.Saver(tf.global_variables())

        sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))

        saver.restore(sess, ckptfile)
        print('restore variables done')
        print("Start testing")
        print("Size:", data_size)
        print("It'll take", int(math.ceil(data_size / batch_size)),
              "iterations.")

        acc, _, predictions, labels = _test(dataset, config, sess,
                                            placeholders)
        print('acc:', acc * 100)

    import Evaluation_tools as et
    eval_file = os.path.join(config.log_dir, '{}.txt'.format(config.name))
    et.write_eval_file(config.data, eval_file, predictions, labels,
                       config.name)
    et.make_matrix(config.data, eval_file, config.log_dir)
Example #10
def _evaluate(net, sess, test_data, config, epoch=0):

    labels = []
    predictions = []
    losses = []
    acc = 0
    count = 0
    while True:
        batch_images, batch_labels, reset = test_data.next_batch(
            config.batch_size)
        if reset:
            break
        logits, loss = net.test(batch_images, batch_labels, sess)
        losses.append(loss)
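        # Each object contributes num_views consecutive rows of logits; sum
        # them and take the argmax as the object's prediction.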
        for i in range(len(batch_labels) // config.num_views):
            endindex = i * config.num_views + config.num_views
            prediction = np.argmax(
                np.sum(logits[i * config.num_views:endindex], axis=0))
            predictions.append(prediction)
            label = batch_labels[i * config.num_views]
            labels.append(label)
            if label == prediction:
                acc += 1
            count += 1
    acc = acc / float(count)
    loss = np.mean(losses)
    log(config.log_file,
        "EVALUATING epoch {} - acc: {} loss: {}".format(epoch, acc, loss))
    if not config.test:
        LOSS_LOGGER.log(loss, epoch, "eval_loss")
        ACC_LOGGER.log(acc, epoch, "eval_accuracy")
    else:
        import Evaluation_tools as et
        eval_file = os.path.join(config.log_dir, '{}.txt'.format(config.name))
        et.write_eval_file(config.data, eval_file, predictions, labels,
                           config.name)
        et.make_matrix(config.data, eval_file, config.log_dir)
Example #11
def evaluate_ensemble(all_preds, labels):
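    # Sum each member's class scores, then take the per-sample argmax.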
    all_preds = np.array(all_preds)
    summed_preds = np.sum(all_preds, axis=0)
    final_predictions = np.argmax(summed_preds, 1)
    return final_predictions


### TODO: Clean this up and add the necessary arguments to enable all of the options we want.
if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    parser.add_argument('model',
                        help='path to file containing model definition')
    parser.add_argument('data',
                        default="/data/converted",
                        help='path to data folder')
    parser.add_argument('--log_dir',
                        default="logs",
                        help='path to log folder')
    parser.add_argument('--weights', type=int, help='number of model to test')
    args = parser.parse_args()
    file = args.model

    predictions, labels = test(args)

    import Evaluation_tools as et
    eval_file = os.path.join(args.log_dir, 'vrnens.txt')
    et.write_eval_file(args.data, eval_file, predictions, labels, 'VRNENS')
    et.make_matrix(args.data, eval_file, args.log_dir)
Example #12
def main(argv):
    pycaffe_dir = caffe_root + 'python/'

    parser = argparse.ArgumentParser()
    # Required arguments: input and output files.
    parser.add_argument("--input_file",
                        default="/data/converted/testrotnet.txt",
                        help="text file containg the image paths")

    # Optional arguments.
    parser.add_argument(
        "--model_def",
        default="./Training/rotationnet_modelnet40_case1_solver.prototxt",
        help="Model definition file.")
    parser.add_argument('--weights', type=int, default=-1)
    parser.add_argument('--views', type=int, default=12)
    parser.add_argument('--log_dir', default='logs', type=str)

    parser.add_argument(
        "--center_only",
        action='store_true',
        default=False,
        help="Switch for prediction from center crop alone instead of " +
        "averaging predictions across crops (default).")
    parser.add_argument(
        "--images_dim",
        default='227,227',
        help="Canonical 'height,width' dimensions of input images.")
    parser.add_argument(
        "--mean_file",
        default=os.path.join(caffe_root,
                             'data/ilsvrc12/imagenet_mean.binaryproto'),
        help="Data set image mean of H x W x K dimensions (np array). " +
        "Set to '' for no mean subtraction.")
    parser.add_argument(
        "--input_scale",
        type=float,
        default=255,
        help="Multiply input features by this scale before input to net")
    parser.add_argument(
        "--channel_swap",
        default='2,1,0',
        help="Order to permute input channels. The default converts " +
        "RGB -> BGR since BGR is the Caffe default by way of OpenCV.")

    args = parser.parse_args()

    args.pretrained_model = os.path.join(
        args.log_dir, 'case1_iter_' + str(args.weights) + '.caffemodel')

    image_dims = [int(s) for s in args.images_dim.split(',')]
    channel_swap = [int(s) for s in args.channel_swap.split(',')]

    if args.mean_file:
        mean = get_mean(args.mean_file)
    else:
        mean = None

    caffe.set_mode_gpu()

    # Make classifier.
    classifier = caffe.Classifier(args.model_def,
                                  args.pretrained_model,
                                  image_dims=image_dims,
                                  mean=mean,
                                  input_scale=1.0,
                                  raw_scale=255.0,
                                  channel_swap=channel_swap)

    listfiles, labels = read_lists(args.input_file)

    #dataset = Dataset(listfiles, labels, subtract_mean=False, V=20)
    # Load image file.
    args.input_file = os.path.expanduser(args.input_file)

    preds = []
    labels = [int(label) for label in labels]

    total = len(listfiles)

    views = args.views
    batch = 8 * views
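    # Walk the list of view images in groups of batch * views paths and
    # classify each group in one call.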
    for i in range(len(listfiles) // (batch * views)):

        #im_files = [line.rstrip('\n') for line in open(listfiles[views*i+j])]
        im_files = listfiles[i * batch * views:(i + 1) * batch * views]

        #labels.append(int(im_files[0]))
        #im_files = im_files[2:]
        inputs = [caffe.io.load_image(im_f) for im_f in im_files]

        predictions = classifier.predict(inputs, not args.center_only)
        classified = classify(predictions)
        preds.append(classified)
        print(classified)

    import Evaluation_tools as et
    data = '/data'
    logs = '/logs'
    eval_file = os.path.join(logs, 'rotnet.txt')
    et.write_eval_file(data, eval_file, preds, labels, 'ROTNET')
    et.make_matrix(data, eval_file, logs)
Example #13
def eval_one_epoch(config, sess, ops, epoch=0):
    is_training = False
    num_votes = config.num_votes

    total_seen = 0
    loss_sum = 0
    predictions = []
    labels = []
    for fn in range(len(TEST_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:, 0:config.num_points, :]
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // config.batch_size + 1

        for batch_idx in range(num_batches):
            start_idx = batch_idx * config.batch_size
            end_idx = (batch_idx + 1) * config.batch_size
            cur_batch_size = min(end_idx - start_idx,
                                 config.batch_size - end_idx + file_size)

            if cur_batch_size < config.batch_size:
                placeholder_data = np.zeros(
                    ([config.batch_size] + (list(current_data.shape))[1:]))
                placeholder_data[0:cur_batch_size, :, :] = current_data[
                    start_idx:end_idx, :, :]

                placeholder_labels = np.zeros((config.batch_size))
                placeholder_labels[0:cur_batch_size] = current_label[
                    start_idx:end_idx]

                batch_labels = placeholder_labels
                batch_data = placeholder_data
            else:
                batch_data = current_data[start_idx:end_idx, :, :]
                batch_labels = current_label[start_idx:end_idx]

            # Aggregating BEG
            batch_loss_sum = 0  # sum of losses for the batch
            batch_pred_sum = np.zeros(
                (config.batch_size, config.num_classes))  # score for classes

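            # Vote over num_votes rotations of the batch, summing the class
            # scores; the argmax of the sum is the final prediction.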
            for vote_idx in range(num_votes):
                rotated_data = provider.rotate_point_cloud_by_angle(
                    batch_data, vote_idx / float(num_votes) * np.pi * 2)
                feed_dict = {
                    ops['pointclouds_pl']: rotated_data,
                    ops['labels_pl']: batch_labels,
                    ops['is_training_pl']: is_training
                }
                loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                              feed_dict=feed_dict)

                batch_pred_sum += pred_val
                batch_loss_sum += (loss_val * cur_batch_size /
                                   float(num_votes))

            pred_val = np.argmax(batch_pred_sum, 1)
            predictions += pred_val.tolist()[0:cur_batch_size]
            labels += current_label[start_idx:end_idx].tolist()

            total_seen += cur_batch_size
            loss_sum += batch_loss_sum

    loss = loss_sum / float(total_seen)
    acc = sum([
        1 if predictions[i] == labels[i] else 0
        for i in range(len(predictions))
    ]) / float(len(predictions))
    print(loss)
    print(acc)

    if config.test:
        import Evaluation_tools as et
        eval_file = os.path.join(config.log_dir, '{}.txt'.format(config.name))
        et.write_eval_file(config.data, eval_file, predictions, labels,
                           config.name)
        et.make_matrix(config.data, eval_file, config.log_dir)
    else:
        log_string('eval mean loss: %f' % loss)
        LOSS_LOGGER.log(loss, epoch, "eval_loss")
        log_string('eval accuracy: %f' % acc)
        ACC_LOGGER.log(acc, epoch, "eval_accuracy")
Example #14
def eval_one_epoch(config, sess, ops, topk=1, epoch=0):
    is_training = False

    # Make sure batch data is of same size
    cur_batch_data = np.zeros(
        (config.batch_size, config.num_points, TEST_DATASET.num_channel()))
    cur_batch_label = np.zeros((config.batch_size), dtype=np.int32)

    total_correct = 0
    total_seen = 0
    loss_sum = 0
    batch_idx = 0
    shape_ious = []

    predictions = []
    labels = []

    while TEST_DATASET.has_next_batch():
        batch_data, batch_label = TEST_DATASET.next_batch(augment=False)
        bsize = batch_data.shape[0]
        # for the last batch in the epoch, the bsize:end are from last batch
        cur_batch_data[0:bsize, ...] = batch_data
        cur_batch_label[0:bsize] = batch_label

        batch_pred_sum = np.zeros(
            (config.batch_size, config.num_classes))  # score for classes
        for vote_idx in range(config.num_votes):
            # Shuffle point order to achieve different farthest samplings
            shuffled_indices = np.arange(config.num_points)
            np.random.shuffle(shuffled_indices)
            if config.normal:
                rotated_data = provider.rotate_point_cloud_by_angle_with_normal(
                    cur_batch_data[:, shuffled_indices, :],
                    vote_idx / float(config.num_votes) * np.pi * 2)
            else:
                rotated_data = provider.rotate_point_cloud_by_angle(
                    cur_batch_data[:, shuffled_indices, :],
                    vote_idx / float(config.num_votes) * np.pi * 2)
            feed_dict = {
                ops['pointclouds_pl']: rotated_data,
                ops['labels_pl']: cur_batch_label,
                ops['is_training_pl']: is_training
            }
            loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                          feed_dict=feed_dict)
            batch_pred_sum += pred_val
        pred_val = np.argmax(batch_pred_sum, 1)
        correct = np.sum(pred_val[0:bsize] == batch_label[0:bsize])

        predictions += pred_val[0:bsize].tolist()
        labels += batch_label[0:bsize].tolist()

        total_correct += correct
        total_seen += bsize
        loss_sum += loss_val
        batch_idx += 1

    loss = (loss_sum / float(batch_idx))
    acc = (total_correct / float(total_seen))
    log(config.log_file,
        "EVALUATING epoch {} - loss: {} acc: {} ".format(epoch, loss, acc))
    if config.test:
        import Evaluation_tools as et
        eval_file = os.path.join(config.log_dir, '{}.txt'.format(config.name))
        et.write_eval_file(config.data, eval_file, predictions, labels,
                           config.name)
        et.make_matrix(config.data, eval_file, config.log_dir)
    else:
        LOSS_LOGGER.log(loss, epoch, "eval_loss")
        ACC_LOGGER.log(acc, epoch, "eval_accuracy")
        TEST_DATASET.reset()
        return total_correct / float(total_seen)
Example #15
if config.weights != -1:
    weights = config.weights
    load_weights(os.path.join(config.log_dir,
                              config.snapshot_prefix + str(weights)),
                 net.KDNet['output'])
    print("Loaded weights")

if config.test:
    print("Start testing")
    _, predictions = acc_fun(net, test_vertices, test_faces, test_nFaces,
                             test_labels, mode='test', config=config)
    acc = 100. * (predictions == test_labels).sum() / len(test_labels)

    print('Eval accuracy: {}'.format(acc))
    import Evaluation_tools as et
    eval_file = os.path.join(config.log_dir, '{}.txt'.format(config.name))
    et.write_eval_file(config.data, eval_file, predictions, test_labels,
                       config.name)
    et.make_matrix(config.data, eval_file, config.log_dir)
else:
    print("Start training")
    LOSS_LOGGER = Logger("{}_loss".format(config.name))
    ACC_LOGGER = Logger("{}_acc".format(config.name))
    start_epoch = 0
    if config.weights != -1:
        ld = config.log_dir
        WEIGHTS = config.weights
        ckptfile = os.path.join(ld, config.snapshot_prefix + str(WEIGHTS))
        start_epoch = WEIGHTS + 1
        ACC_LOGGER.load(
            (os.path.join(ld, "{}_acc_train_accuracy.csv".format(config.name)),
             os.path.join(ld, "{}_acc_eval_accuracy.csv".format(config.name))),
            epoch=WEIGHTS)
        LOSS_LOGGER.load(
            (os.path.join(ld, "{}_loss_train_loss.csv".format(config.name)),
             os.path.join(ld, '{}_loss_eval_loss.csv'.format(config.name))),
            epoch=WEIGHTS)

    begin = start_epoch
    end = config.max_epoch + start_epoch