Exemplo n.º 1
0
    def __init__(self):
        # Scalar int32 switch, fed at run time, that selects which data
        # split the pipeline reads from.
        self.q_ph = tf.compat.v1.placeholder(dtype=tf.int32, shape=())

        from gqn_inputs import DataReader

        # Both readers share every setting except the split they serve.
        reader_kwargs = dict(
            dataset=const.GQN_DATA_NAME,
            context_size=const.NUM_VIEWS,
            root='gqn-dataset',
        )
        self.train_data_reader = DataReader(mode='train', **reader_kwargs)
        self.test_data_reader = DataReader(mode='test', **reader_kwargs)

        # Downstream code only supports a single prediction view.
        assert const.NUM_PREDS == 1
Exemplo n.º 2
0
#!/usr/bin/env python3
"""Sanity-check script: read one GQN test batch and inspect camera geometry."""

import sys
sys.path.append('..')
from gqn_inputs import DataReader
import tensorflow as tf
import numpy as np
import ipdb

data_reader = DataReader(dataset='shepard_metzler_5_parts',
                         context_size=10,
                         root='../gqn-dataset',
                         mode='test')

data = data_reader.read(batch_size=10)

# BUGFIX: tf.train.SingularMonitoredSession does not exist under TF2;
# this project already uses the tf.compat.v1 namespace elsewhere, so use
# the compat path for consistency.
with tf.compat.v1.train.SingularMonitoredSession() as sess:
    task = sess.run(data)

# Camera xyz positions of the first example's context views.
pos = task.query.context.cameras[0, :, :3]
# Distance of each context camera from the origin.
dist = np.linalg.norm(pos, axis=1)

print(dist)

# let's do some geometry here
# NOTE(review): 10/3 looks like the assumed fixed camera radius — confirm
# against the printed distances above before relying on this normalization.
pos /= 10.0 / 3.0

# Columns 3 and 4 of the camera vector hold yaw and pitch respectively.
yaws = task.query.context.cameras[0, :, 3]
pitches = task.query.context.cameras[0, :, 4]

# ys = np.sin(pitches)
Exemplo n.º 3
0
class GQNInput(Input):
    """Input pipeline over GQN DataReaders, switchable between data splits.

    A scalar int32 placeholder (``q_ph``) selects which reader feeds the
    graph at run time: 0 -> train, 1 or 2 -> test.
    """

    def __init__(self):
        # Scalar switch fed at session.run time; consumed in data().
        self.q_ph = tf.compat.v1.placeholder(dtype=tf.int32, shape=())

        from gqn_inputs import DataReader
        kwargs = {
            'dataset': const.GQN_DATA_NAME,
            'context_size': const.NUM_VIEWS,
            'root': 'gqn-dataset'
        }

        self.train_data_reader = DataReader(mode='train', **kwargs)
        self.test_data_reader = DataReader(mode='test', **kwargs)

        # Downstream code only supports a single prediction view.
        assert const.NUM_PREDS == 1

    def data(self):
        """Return a Munch-wrapped task batch from the split selected by q_ph.

        When ``const.generate_views`` is set, the query camera is replaced
        by a deterministic (elevation, azimuth) grid sweep driven by a
        counter variable — one grid view per call (requires BS == 1).
        """
        rval = tf.case(
            {
                tf.equal(self.q_ph, 0):
                lambda: self.train_data_reader.read(batch_size=const.BS),
                tf.equal(self.q_ph, 1):
                lambda: self.test_data_reader.read(batch_size=const.BS),
                tf.equal(self.q_ph, 2):
                lambda: self.test_data_reader.read(batch_size=const.BS)
            },
            exclusive=True)

        rval = self.munch(rval)

        if const.generate_views:
            assert const.BS == 1

            counter = tf.Variable(0, dtype=tf.int32)
            increment_op = tf.compat.v1.assign_add(counter, 1)
            # BUGFIX: tf.mod was removed in TF2; use tf.math.mod (this file
            # already targets a TF2 runtime via tf.compat.v1).
            counter_mod = tf.math.mod(counter, const.GEN_NUM_VIEWS)

            # printing is a bit weird due to the async ??
            counter_mod = utils.tfpy.print_val(counter_mod, 'counter is')

            # BUGFIX: the elevation row index must be an integer count of
            # completed azimuth sweeps; true division on the int32 tensor
            # produced fractional indices. Use floor division instead.
            elev_index = (counter_mod - 1) // const.AZIMUTH_GRANULARITY
            azimuth_index = tf.math.mod((counter_mod - 1),
                                        const.AZIMUTH_GRANULARITY)

            elev_index = tf.cast(elev_index, tf.float32)
            azimuth_index = tf.cast(azimuth_index, tf.float32)

            # Evenly spaced azimuths, wrapped into (-180, 180], then radians.
            azimuth = azimuth_index * (360 / const.AZIMUTH_GRANULARITY)
            azimuth = azimuth + tf.cast(azimuth > 180, tf.float32) * (-360)
            azimuth /= 180 / np.pi

            if const.ELEV_GRANULARITY == 1:
                # NOTE(review): unlike the multi-step branch below, this
                # value is not converted to radians — confirm MIN_ELEV's
                # units before relying on the single-elevation path.
                elev = const.MIN_ELEV
            else:
                elev_step = (const.MAX_ELEV -
                             const.MIN_ELEV) / (const.ELEV_GRANULARITY - 1)
                elev = (const.MIN_ELEV + elev_index * elev_step) / (180 /
                                                                    np.pi)

            # Force the counter to advance every time the camera is built.
            with tf.control_dependencies([increment_op]):
                #phi, theta is the correct order here
                query_cam = tf.expand_dims(tf.stack([elev, azimuth], axis=0),
                                           axis=0)

            rval.query.query_camera = query_cam

        return rval

    def munch(self, task):
        """Wrap a reader task into an attribute-accessible Munch."""
        return Munch(query=self.munchq(task.query), target=task.target)

    def munchq(self, query):
        """Munch-ify a query, recursing into its context."""
        return Munch(context=self.munchc(query.context),
                     query_camera=query.query_camera)

    def munchc(self, context):
        """Unstack per-view frames/cameras along the view axis into lists."""
        return Munch(frames=tf.unstack(context.frames, axis=1),
                     cameras=tf.unstack(context.cameras, axis=1))