Example #1
from datetime import datetime

# lines_from_file, about, choose, wpm, and accuracy are helper functions
# defined elsewhere in the same project.
def run_typing_test(topics):
    """Measure typing speed and accuracy on the command line."""
    paragraphs = lines_from_file('data/sample_paragraphs.txt')
    select = lambda p: True
    if topics:
        select = about(topics)
    i = 0
    while True:
        reference = choose(paragraphs, select, i)
        if not reference:
            print('No more paragraphs about', topics, 'are available.')
            return
        print('Type the following paragraph and then press enter/return.')
        print(
            'If you only type part of it, you will be scored only on that part.\n'
        )
        print(reference)
        print()

        start = datetime.now()
        typed = input()
        if not typed:
            print('Goodbye.')
            return
        print()

        elapsed = (datetime.now() - start).total_seconds()
        print("Nice work!")
        print('Words per minute:', wpm(typed, elapsed))
        print('Accuracy:        ', accuracy(typed, reference))

        print('\nPress enter/return for the next paragraph or type q to quit.')
        if input().strip() == 'q':
            return
        i += 1
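
All of the examples on this page lean on a lines_from_file helper that is not shown. A minimal sketch of what such a reader might look like, assuming it strips newlines and that repeat=True (used in Examples #2 and #3) means the file is cycled indefinitely; the real implementations in these projects may differ:

def lines_from_file(path, repeat=False):
    """Return the non-blank lines of a text file, without trailing newlines.

    Hypothetical implementation: Example #1 indexes into the result like a
    list, while Examples #2 and #3 pass repeat=True and read one line at a
    time, so an optional endless cycle is one plausible shape.
    """
    with open(path) as f:
        lines = [line.rstrip('\n') for line in f if line.strip()]
    if not repeat:
        return lines

    def cycle():
        while True:
            for line in lines:
                yield line
    return cycle()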
Example #2
import tensorflow as tf
from tensorflow.python.ops import data_flow_ops

# Training
image_size = 112
batch_size = 32
num_epochs = 20
epoch_size = 28747

train_enqueue_steps = 50

save_steps = 200  # interval (in steps) between checkpoint saves
test_steps = 20  # interval (in steps) between test-accuracy evaluations
start_test_step = 50  # step at which test evaluation begins

max_checkpoints_to_keep = 2
save_dir = "/home/aiteam/quan/checkpoints/ucf101"

train_data_reader = lines_from_file(train_txt, repeat=True)

image_paths_placeholder = tf.placeholder(tf.string,
                                         shape=(None, num_frames),
                                         name='image_paths')
labels_placeholder = tf.placeholder(tf.int64, shape=(None, ), name='labels')

train_input_queue = data_flow_ops.FIFOQueue(capacity=10000,
                                            dtypes=[tf.string, tf.int64],
                                            shapes=[(num_frames, ), ()])

train_enqueue_op = train_input_queue.enqueue_many(
    [image_paths_placeholder, labels_placeholder])

frames_batch, labels_batch = input_pipeline(train_input_queue,
                                            batch_size=batch_size,
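
The graph above only defines how examples enter the queue; at run time the two placeholders still have to be fed with sampled frame paths and labels so the enqueue op has something to push. A rough sketch of that feeding step, assuming a sample_videos helper with the signature referenced in Example #3 and a dataset root folder defined elsewhere in the original script; the session handling here is illustrative, not the original training loop:

root_folder = "/path/to/ucf101/train"  # assumed; the original defines its own dataset root

with tf.Session() as sess:
    # Refill the input queue several times so the training pipeline always
    # has (frame paths, label) examples to dequeue.
    for _ in range(train_enqueue_steps):
        image_paths, labels = sample_videos(train_data_reader,
                                            root_folder=root_folder,
                                            num_samples=batch_size,
                                            num_frames=num_frames)
        sess.run(train_enqueue_op,
                 feed_dict={image_paths_placeholder: image_paths,
                            labels_placeholder: labels})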
Example #3
    return resized_image

def visualize_frame_batches(frame_batches, num_videos, num_frames):
    """Display each video in the batch frame by frame with OpenCV."""
    for i in range(num_videos):
        print("Visualizing video", i)
        video = frame_batches[i, :, :, :, :]
        for j in range(num_frames):
            frame = video[j, :, :, :]
            # The pipeline produces RGB frames; OpenCV expects BGR for display.
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            cv2.imshow("frame", frame)
            cv2.waitKey(100)  # show each frame for 100 ms

if __name__ == "__main__":
    num_frames = 10
    root_folder = "/home/ubuntu/datasets/ucf101/train/"
    data_reader = lines_from_file("/home/ubuntu/datasets/ucf101/train.txt", repeat=True)
    # image_paths, labels = sample_videos(data_reader, root_folder=root_folder,
    #                                     num_samples=3, num_frames=num_frames)
    #
    # for i in range(3):
    #     print(image_paths[i], labels[i])

    image_paths_placeholder = tf.placeholder(tf.string, shape=(None, num_frames), name='image_paths')
    labels_placeholder = tf.placeholder(tf.int64, shape=(None, ), name='labels')

    input_queue = data_flow_ops.FIFOQueue(capacity=10000,
                                          dtypes=[tf.string, tf.int64],
                                          shapes=[(num_frames,), ()])

    enqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder])
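
    # A sketch of how the pieces above could be wired together to eyeball a
    # decoded batch. input_pipeline's exact signature and the queue-runner
    # handling are assumptions based on Example #2, not the original script.
    frames_batch, labels_batch = input_pipeline(input_queue, batch_size=4)

    with tf.Session() as sess:
        # Feed a handful of sampled videos into the queue, then pull one
        # batch through the decoding pipeline and display it with OpenCV.
        image_paths, labels = sample_videos(data_reader, root_folder=root_folder,
                                            num_samples=8, num_frames=num_frames)
        sess.run(enqueue_op, feed_dict={image_paths_placeholder: image_paths,
                                        labels_placeholder: labels})

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        frames_np, labels_np = sess.run([frames_batch, labels_batch])
        visualize_frame_batches(frames_np, num_videos=4, num_frames=num_frames)
        coord.request_stop()
        coord.join(threads)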