예제 #1
0
def predict_views(batch_size, batch_threads, dataset_factory, estimator,
                  image_size, output_directory, dataset_part):
    """Classify views for *dataset_part* and copy each source image into a
    sub-directory of *output_directory* named after its predicted view class.

    The per-split output directory is wiped and recreated on every run.
    """
    print('Starting views evaluation...')

    dataset = dataset_factory.get_dataset(dataset_part)

    # Start from an empty <output_directory>/<dataset_part> tree.
    output_directory = os.path.join(output_directory, dataset_part)
    if os.path.exists(output_directory):
        shutil.rmtree(output_directory)
    os.makedirs(output_directory)

    print('\n\nRunning Prediction for %s' % dataset_part)
    predictions = estimator.predict(
        input_fn=get_input_function(dataset, batch_size, batch_threads,
                                    False, image_size))
    num_samples = dataset.get_number_of_samples()

    for index, result in enumerate(predictions):
        view_label = str(result['views_classifications'])
        file_name = result['file_names'].decode('UTF-8')
        destination = os.path.join(output_directory, view_label, file_name)
        source_path = result['paths'].decode('UTF-8')

        # File names may contain sub-directories; create them on demand.
        parent = os.path.dirname(destination)
        if not os.path.exists(parent):
            os.makedirs(parent)

        shutil.copy(source_path, destination)

        if (index + 1) % batch_size == 0:
            sys.stdout.write('\r>> Processed %d samples of %d' %
                             (index + 1, num_samples))
        sys.stdout.flush()

    print('Finished views prediction.')
def predict_views(batch_size, batch_threads, dataset_factory, estimator, image_size, dataset_part):
    """Measure per-view classification accuracy on *dataset_part*.

    Iterates the estimator's predictions, tallying, for each of the three
    view classes, how many samples carried that ground-truth view
    (``prediction['views']``) and how many of those were classified
    correctly (``prediction['views_classifications']``).  Prints the raw
    counts and the resulting per-view accuracies.
    """
    print('Starting views evaluation...')

    dataset = dataset_factory.get_dataset(dataset_part)

    print('\n\nRunning Prediction for %s' % dataset_part)
    input_function = get_input_function(dataset, batch_size, batch_threads, False, image_size)
    predicted = estimator.predict(input_fn=input_function)
    num_samples = dataset.get_number_of_samples()

    counts = [0, 0, 0]   # samples whose ground-truth view is i
    correct = [0, 0, 0]  # of those, how many were classified as i

    for sample, prediction in enumerate(predicted):
        expected = prediction['views']
        actual = prediction['views_classifications']

        counts[expected] += 1
        if expected == actual:
            correct[expected] += 1

        # Progress line is refreshed once per batch; flush only when written.
        if (sample + 1) % batch_size == 0:
            sys.stdout.write('\r>> Processed %d samples of %d' % (sample + 1, num_samples))
            sys.stdout.flush()

    print('Finished views prediction.')

    print('counts')
    print(counts)
    print('correct')
    print(correct)

    # Guard empty view classes: report 0.0 rather than raising
    # ZeroDivisionError when a view never occurs in the split.
    accuracies = [hits / total if total else 0.0
                  for hits, total in zip(correct, counts)]
    print('accuracies')
    print(accuracies)
예제 #3
0
파일: trainer.py 프로젝트: splinter21/SPL
def run_validation(dataset_factory, batch_size, batch_threads, estimator,
                   image_size):
    """Evaluate the estimator on the 'val' split and print the metrics."""
    validation_dataset = dataset_factory.get_dataset('val')
    result = estimator.evaluate(
        input_fn=get_input_function(validation_dataset, batch_size,
                                    batch_threads, False, image_size))
    print('Finished Validation: ')
    print(result)
예제 #4
0
def run_training(dataset, batch_size, batch_threads, epoch, estimator,
                 num_epochs, image_size):
    """Train the estimator for one epoch (``epoch`` is zero-based)."""
    epoch_number = epoch + 1
    print('\n\nRunning training of epoch %d of %d:\n' %
          (epoch_number, num_epochs))
    estimator.train(input_fn=get_input_function(dataset, batch_size,
                                                batch_threads, True,
                                                image_size))
    print('\nFinished Training epoch %d' % epoch_number)
def run_evaluation(batch_size, batch_threads, dataset_factory, estimator,
                   image_size):
    """Evaluate the estimator on the 'test' split and print the metrics."""
    print('\n\nRunning test set evaluation:')
    test_dataset = dataset_factory.get_dataset('test')
    test_input_function = get_input_function(test_dataset, batch_size,
                                             batch_threads, False, image_size)
    result = estimator.evaluate(input_fn=test_input_function)
    print('Finished test evaluation: ')
    print(result)
def run_prediction_and_store_features(dataset_factory, batch_size,
                                      batch_threads, estimator,
                                      output_directory, dataset_part,
                                      image_size):
    """Predict on *dataset_part* and dump one CSV row per sample to four
    files (features, labels, cameras, names) under the per-split output
    directory, which is wiped and recreated first."""
    dataset = dataset_factory.get_dataset(dataset_part)

    # Start from an empty <output_directory>/<dataset_part> tree.
    output_directory = os.path.join(output_directory, dataset_part)
    if os.path.exists(output_directory):
        shutil.rmtree(output_directory)
    os.makedirs(output_directory)

    print('\n\nRunning Prediction for %s' % dataset_part)
    predictions = estimator.predict(
        input_fn=get_input_function(dataset, batch_size, batch_threads,
                                    False, image_size))

    num_samples = dataset.get_number_of_samples()

    with open(output_directory + '/features.csv', 'w', newline='') as features_file, open(output_directory + '/labels.csv', 'w', newline='') as labels_file, \
      open(output_directory + '/cameras.csv', 'w', newline='') as cameras_file, open(output_directory + '/names.csv', 'w', newline='') as file_names_file:

        # Bind the row writers once; one row per sample goes to each file.
        write_features = csv.writer(features_file, delimiter=',').writerow
        write_labels = csv.writer(labels_file, delimiter=',').writerow
        write_cameras = csv.writer(cameras_file, delimiter=',').writerow
        write_names = csv.writer(file_names_file, delimiter=',').writerow

        for index, prediction in enumerate(predictions):
            if (index + 1) % batch_size == 0:
                sys.stdout.write('\r>> Processed %d samples of %d' %
                                 (index + 1, num_samples))
                sys.stdout.flush()

            write_features(np.squeeze(prediction['pre_logits']))
            write_labels([prediction['actual_labels']])
            write_cameras([prediction['cameras']])
            write_names([prediction['file_names'].decode('UTF-8')])

    print('\nFinished Prediction %s' % dataset_part)
예제 #7
0
def run_prediction_and_store_images(dataset_factory, batch_size, batch_threads,
                                    estimator, output_directory, dataset_part,
                                    image_size, result_dir):
    """Predict on *dataset_part* and write each sample's input, prediction,
    and target image under the matching sub-folder of *result_dir*.

    Note: ``output_directory`` is accepted for signature compatibility but
    is not used here; everything is written below ``result_dir``.
    """
    dataset = dataset_factory.get_dataset(dataset_part)

    print('\n\nRunning Prediction for %s' % dataset_part)
    predictions = estimator.predict(
        input_fn=get_input_function(dataset, batch_size, batch_threads,
                                    False, image_size))

    num_samples = dataset.get_number_of_samples()

    # Ensure the result directory and its three sub-folders exist.
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    for subfolder in ('input', 'prediction', 'target'):
        subfolder_path = os.path.join(result_dir, subfolder)
        if not os.path.exists(subfolder_path):
            os.makedirs(subfolder_path)

    for index, prediction in enumerate(predictions):
        if (index + 1) % batch_size == 0:
            sys.stdout.write('\r>> Processed %d samples of %d' %
                             (index + 1, num_samples))
            sys.stdout.flush()

        file_name = prediction['file_names'].decode('UTF-8')

        # `1 *` makes a copy without changing values; the uint8 cast was
        # deliberately left disabled in the original (.astype(np.uint8)).
        for subfolder, key in (('input', 'input'),
                               ('prediction', 'output'),
                               ('target', 'target')):
            imageio.imwrite(os.path.join(result_dir, subfolder, file_name),
                            1 * prediction[key])

    print('\nFinished Prediction %s' % dataset_part)
예제 #8
0
def predict_views(batch_size, batch_threads, dataset_factory, estimator,
                  image_size, output_directory, dataset_part):
    """Compute the mean image for each predicted view class.

    Runs prediction over *dataset_part*, groups the source images (resized
    to 64x128) by their predicted view, and writes one mean image per view
    as ``<view>.png`` into the per-split output directory, which is wiped
    and recreated first.
    """
    print('Starting views evaluation...')

    dataset = dataset_factory.get_dataset(dataset_part)

    # Start from an empty <output_directory>/<dataset_part> tree.
    output_directory = os.path.join(output_directory, dataset_part)
    if os.path.exists(output_directory):
        shutil.rmtree(output_directory)
    os.makedirs(output_directory)

    print('\n\nRunning Prediction for %s' % dataset_part)
    input_function = get_input_function(dataset, batch_size, batch_threads,
                                        False, image_size)
    predicted = estimator.predict(input_fn=input_function)
    num_samples = dataset.get_number_of_samples()

    # Per-view pixel sums; longlong keeps the accumulator from overflowing.
    sum_images = np.zeros([3, 128, 64, 3], dtype=np.longlong)
    counters = np.zeros([3])

    for sample, prediction in enumerate(predicted):
        original_path = prediction['paths'].decode('UTF-8')

        image = cv2.imread(original_path)
        image = cv2.resize(image, (64, 128))
        predicted_view = prediction['views_classifications']

        sum_images[predicted_view, :, :, :] += image
        counters[predicted_view] += 1

        # Progress line is refreshed once per batch; flush only when written.
        if (sample + 1) % batch_size == 0:
            sys.stdout.write('\r>> Processed %d samples of %d' %
                             (sample + 1, num_samples))
            sys.stdout.flush()

    for view in range(3):
        # Skip views that received no samples: dividing by a zero counter
        # would produce a NaN image (and a numpy divide warning).
        if counters[view] == 0:
            continue
        mean_image = sum_images[view] / counters[view]
        cv2.imwrite(os.path.join(output_directory,
                                 str(view) + '.png'), mean_image)

    print('\n\nFinished views prediction.')