Example #1
        # Tail of the per-episode training loop; the enclosing training function is cut off in this snippet.
        # Validate using IoU
        if episode_num % 100 == 0:
            policy_estimator.validate_iou(train_env, max_timesteps_per_episode=max_timesteps_per_episode,
                                          summary_prefix='train')
            policy_estimator.validate_iou(valid_env, max_timesteps_per_episode=max_timesteps_per_episode,
                                          summary_prefix='valid')


# The imports below are omitted from the original snippet and would normally sit at the top of the file;
# PolygonEnv, reset_dir, and ExperimentPolicyEstimator come from other project modules not listed here.
import os

import tensorflow as tf

from Dataset import get_train_and_valid_datasets

max_timesteps = 10

training_env, validation_env = [PolygonEnv(d) for d in
                                get_train_and_valid_datasets('/home/wesley/data/polygons_dataset_2',
                                                             max_timesteps=10,
                                                             image_size=28,
                                                             prediction_size=28,
                                                             history_length=2,
                                                             is_local=True,
                                                             load_max_images=2,
                                                             validation_set_percentage=0.5)]

logdir = '/home/wesley/data/{}/'.format(os.path.splitext(os.path.basename(__file__))[0])
reset_dir(logdir)

with tf.Session() as sess:
    tf.Variable(0, name="global_step", trainable=False)
    summary_writer = tf.summary.FileWriter(logdir)
    policy_estimator = ExperimentPolicyEstimator(image_size=28, action_size=28,
                                                 tf_session=sess, summary_writer=summary_writer,
                                                 max_timesteps=max_timesteps)
    summary_writer.add_graph(sess.graph)
    sess.run(tf.global_variables_initializer())
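Example #1's validation step reports an intersection-over-union (IoU) score on the training and validation environments every 100 episodes. The body of validate_iou is not shown anywhere in these snippets; the following is only a minimal sketch of the underlying metric on rasterized binary masks (the helper name mask_iou and the NumPy-only implementation are assumptions, not the project's code).

import numpy as np


def mask_iou(pred_mask, true_mask):
    # IoU between two same-shaped binary masks: |intersection| / |union|.
    pred = np.asarray(pred_mask, dtype=bool)
    true = np.asarray(true_mask, dtype=bool)
    union = np.logical_or(pred, true).sum()
    if union == 0:
        return 0.0
    return float(np.logical_and(pred, true).sum()) / union


# e.g. score = mask_iou(predicted_polygon_mask, ground_truth_mask) for two 28x28 masks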
Example #2
from collections import defaultdict

import numpy as np

from Dataset import get_train_and_valid_datasets

image_size = 28
prediction_size = 28
max_timesteps = 10
history_length = 2

print('Loading dataset from numpy archive...')
training_set, validation_set = get_train_and_valid_datasets(
    '/home/wesley/docker_data/polygons_dataset_3',
    max_timesteps=5,
    image_size=image_size,
    prediction_size=prediction_size,
    history_length=history_length,
    is_local=True,
    validation_set_percentage=0)
print('Done!')

print('Training set')
batch_images = training_set._images
batch_verts = training_set._vertices
vertex_counts = defaultdict(int)
max_count = 0  # largest vertex count seen
min_count = 1000  # smallest vertex count seen

for idx, (image, vertices) in enumerate(zip(batch_images, batch_verts)):
    l = len(vertices)
    vertex_counts[l] += 1
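
# Example #2 is cut off before the tallies are used; a minimal sketch of how the vertex-count
# histogram might be reported afterwards (the reporting format is assumed, not the original's):
for count, n_shapes in sorted(vertex_counts.items()):
    print('{} vertices: {} shapes'.format(count, n_shapes))
if vertex_counts:
    print('min vertices: {}, max vertices: {}'.format(min(vertex_counts), max(vertex_counts)))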
Example #3

        # Tail of a model method/property (the enclosing class definition is truncated in this snippet).
        return self._prediction_logits


if __name__ == '__main__':
    import os
    import shutil

    import tensorflow as tf

    # Dataset is the project module used in Example #2; ExperimentModel is the class truncated above.
    from Dataset import get_train_and_valid_datasets

    image_size = 28
    prediction_size = 28
    max_timesteps = 5
    history_length = 1

    global_step = tf.Variable(0, name='global_step', trainable=False)
    train_data, valid_data = get_train_and_valid_datasets(
        '/data/polygons_dataset_3',
        max_timesteps=max_timesteps,
        image_size=image_size,
        prediction_size=prediction_size,
        history_length=history_length,
        is_local=True,
        load_max_images=100000)

    with tf.Session() as sess:
        model_dir = '/data/{}/'.format(
            os.path.splitext(os.path.basename(__file__))[0])
        if os.path.exists(model_dir):
            shutil.rmtree(model_dir)
        model = ExperimentModel(sess, max_timesteps, image_size,
                                prediction_size, history_length, model_dir)
        sess.run(tf.global_variables_initializer())

        total_steps = 100000
        # Expect to wait until at least step 20000 before results start to look decent
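        # The snippet ends just before the optimization itself. A rough sketch of what such a
        # loop could look like, assuming a hypothetical model.train(batch) method (the real
        # training call is not shown) and the get_batch_for_rnn sampler seen in Example #4:
        for step in range(total_steps):
            batch = train_data.get_batch_for_rnn(batch_size=32)  # batch size chosen arbitrarily
            loss = model.train(batch)  # hypothetical method name, not part of the shown code
            if step % 1000 == 0:
                print('step {}: loss = {}'.format(step, loss))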
Example #4

import matplotlib.lines  # needed for matplotlib.lines.Line2D below
import matplotlib.pyplot as plt
# scipy.misc.imresize was deprecated and removed in SciPy 1.3; this snippet assumes an older SciPy.
from scipy.misc import imresize

from Dataset import get_train_and_valid_datasets
from util import *

image_size = 224
prediction_size = 28

max_timesteps = 5
history_length = 2

print('Loading dataset from numpy archive...')
training_set, validation_set = get_train_and_valid_datasets('/home/wesley/data/tiny-polygons',
                                                            max_timesteps=5,
                                                            image_size=image_size,
                                                            prediction_size=prediction_size,
                                                            history_length=history_length,
                                                            is_local=True)
print('Done!')

batch = training_set.get_batch_for_rnn(batch_size=len(training_set))
for i, (d, image, histories, targets, vertices) in enumerate(zip(*batch)):
    print('Shape {} (duration={})...'.format(i, d), end='')
    fig, ax = plt.subplots()
    plt.imshow(imresize(image, prediction_size / image_size, interp='nearest'))
    # Ground truth
    for e, v in enumerate(vertices):
        ax.add_artist(plt.Circle(v, 0.5, color='lightgreen'))
        # plt.text(v[0], v[1], e, color='forestgreen')
    for a, b in iterate_in_ntuples(vertices, n=2):
        ax.add_line(matplotlib.lines.Line2D([a[0], b[0]], [a[1], b[1]], color='forestgreen'))
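
scipy.misc.imresize, used above for the nearest-neighbour downscale, was removed in SciPy 1.3, so the plotting snippet only runs on older SciPy versions. A small stand-in using Pillow, assuming image is a uint8 NumPy array (the helper name resize_nearest is illustrative):

import numpy as np
from PIL import Image


def resize_nearest(image, out_size):
    # Nearest-neighbour resize to (out_size, out_size), standing in for scipy.misc.imresize.
    return np.array(Image.fromarray(image).resize((out_size, out_size), resample=Image.NEAREST))


# Usage in the plot above: plt.imshow(resize_nearest(image, prediction_size))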