Example #1
def make_ro(r, raster_space, width, height):
    """Symbolically render rays starting with raster_space according to geometry
      e  defined by """
    nmatrices = r.shape[0]
    resolution = np.array([width, height], dtype=floatX())
    # Normalise to lie between 0 and 1
    norm_raster_space = raster_space / resolution
    # Put it in NDC space, -1, 1
    screen_space = -1.0 + 2.0 * norm_raster_space
    # Make pixels square by multiplying by the aspect ratio
    aspect_ratio = resolution[0] / resolution[1]
    ndc_space = screen_space * np.array([aspect_ratio, 1.0], dtype=floatX())
    # Ray Direction

    # Position on z-plane
    ndc_xyz = stack(ndc_space, width, height, 1.0) * 0.5  # Change focal length

    # Put the origin farther along z-axis
    ro = np.array([0, 0, 1.5], dtype=floatX())

    # Rotate both by same rotation matrix
    ro_t = np.dot(np.reshape(ro, (1, 3)), r)
    ndc_t = np.dot(np.reshape(ndc_xyz, (1, width, height, 3)), r)
    print(ndc_t.shape, width, height, nmatrices)
    ndc_t = np.reshape(ndc_t, (width, height, nmatrices, 3))
    ndc_t = np.transpose(ndc_t, (2, 0, 1, 3))

    # Increment by 0.5 since voxels are in [0, 1]
    ro_t = ro_t + 0.5
    ndc_t = ndc_t + 0.5
    # Find normalised ray directions from origin to image plane
    unnorm_rd = ndc_t - np.reshape(ro_t, (nmatrices, 1, 1, 3))
    rd = unnorm_rd / np.reshape(norm(unnorm_rd), (nmatrices, width, height, 1))
    return rd, ro_t
Example #2
def gen_fragcoords(width: int, height: int):
    """Create a (width * height * 2) matrix, where element i,j is [i,j]
       This is used to generate ray directions based on an increment"""
    raster_space = np.zeros([width, height, 2], dtype=floatX())
    for i in range(width):
        for j in range(height):
            raster_space[i, j] = np.array([i, j], dtype=floatX()) + 0.5
    return raster_space
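A minimal usage sketch tying Examples #1 and #2 together (assumptions: import numpy as np, a floatX() that returns 'float32', and the project's stack and norm helpers used inside make_ro):

    width, height = 64, 64
    raster_space = gen_fragcoords(width, height)        # (64, 64, 2) grid of pixel centres
    r = np.eye(3, dtype='float32')[np.newaxis]          # (1, 3, 3) identity rotation
    rd, ro_t = make_ro(r, raster_space, width, height)  # per-pixel ray directions and rotated origin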
Example #3
def tf_inputs(n_sources, batch_size, source_len, ndim):
    """Create source and position placeholders for each of the n_sources inputs."""
    sources = []
    positions = []
    for _ in range(n_sources):
        source = tf.placeholder(name="source", shape=(batch_size, source_len), dtype=floatX())
        sources.append(source)
        position = tf.placeholder(name="pos", shape=(batch_size, ndim), dtype=floatX())
        positions.append(position)
    return sources, positions
Example #4
def tf_dist_inputs(n_sources, batch_size, source_len):
    """Create source and distance placeholders for each of the n_sources inputs."""
    sources = []
    distances = []
    for _ in range(n_sources):
        source = tf.placeholder(name="source", shape=(batch_size, source_len), dtype=floatX())
        sources.append(source)
        distance = tf.placeholder(name="dist", shape=(batch_size, 1), dtype=floatX())
        distances.append(distance)
    return sources, distances
Example #5
    def __init__(self, n_inputs: int) -> None:
        """Wire a composite arrow computing the variance of n_inputs values about a given mean."""
        super().__init__(name="VarFromMean")
        comp_arrow = self
        in_ports = [comp_arrow.add_port() for _ in range(n_inputs + 1)]
        for in_port in in_ports:
            make_in_port(in_port)
        out_port = comp_arrow.add_port()
        make_out_port(out_port)

        sub_arrows = [SubArrow() for _ in range(n_inputs)]
        squares = [SquareArrow() for _ in range(n_inputs)]
        addn = AddNArrow(n_inputs)
        for i in range(n_inputs):
            comp_arrow.add_edge(in_ports[0], sub_arrows[i].in_ports()[1])
            comp_arrow.add_edge(in_ports[i + 1], sub_arrows[i].in_ports()[0])
            comp_arrow.add_edge(sub_arrows[i].out_ports()[0],
                                squares[i].in_ports()[0])
            comp_arrow.add_edge(squares[i].out_ports()[0], addn.in_ports()[i])

        nn = SourceArrow(n_inputs)
        cast = CastArrow(floatX())
        variance = DivArrow()
        comp_arrow.add_edge(nn.out_ports()[0], cast.in_ports()[0])
        comp_arrow.add_edge(addn.out_ports()[0], variance.in_ports()[0])
        comp_arrow.add_edge(cast.out_ports()[0], variance.in_ports()[1])

        comp_arrow.add_edge(variance.out_ports()[0], out_port)
        assert comp_arrow.is_wired_correctly()
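In plain numpy terms, the VarFromMean composite above corresponds to the reference below (a sketch only, not the arrows API; in_ports[0] carries the mean, the remaining ports carry the samples):

    import numpy as np

    def var_from_mean_reference(mean, xs):
        # Sub then Square per sample, AddN across samples, Div by n (n cast to float)
        n = np.asarray(len(xs), dtype='float32')
        return np.add.reduce([(x - mean) ** 2 for x in xs]) / n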
Example #6
def test_inv_twoxyplusx() -> CompositeArrow:
    """approximate parametric inverse of twoxyplusx"""
    inv_add = InvAddArrow()
    inv_mul = InvMulArrow()
    two_int = SourceArrow(2)
    two = CastArrow(floatX())
    div = DivArrow()
    c = ApproxIdentityArrow(2)
    inv_dupl = InvDuplArrow()
    edges = Bimap()  # type: EdgeMap
    edges.add(two_int.out_ports()[0], two.in_ports()[0])
    edges.add(inv_add.out_ports()[0], c.in_ports()[0])
    edges.add(inv_add.out_ports()[1], inv_mul.in_ports()[0])
    edges.add(inv_mul.out_ports()[0], div.in_ports()[0])
    edges.add(two.out_ports()[0], div.in_ports()[1])
    edges.add(div.out_ports()[0], c.in_ports()[1])
    edges.add(c.out_ports()[0], inv_dupl.in_ports()[0])
    edges.add(c.out_ports()[1], inv_dupl.in_ports()[1])

    param_inports = [inv_add.in_ports()[1], inv_mul.in_ports()[1]]
    op = CompositeArrow(in_ports=[inv_add.in_ports()[0]] + param_inports,
                        out_ports=[
                            inv_dupl.out_ports()[0],
                            inv_mul.out_ports()[1],
                            c.out_ports()[2]
                        ],
                        edges=edges,
                        name="InvTwoXYPlusY")

    make_param_port(op.in_ports()[1])
    make_param_port(op.in_ports()[2])
    make_error_port(op.out_ports()[2])
    return op
Example #7
def get_port_dtype(port: Port, default_to_floatX=True):
    """Set the dtype of a `port` to dtype"""
    port_attr = port.arrow.port_attr[port.index]
    if "dtype" in port_attr:
        return port_attr["dtype"]
    else:
        assert default_to_floatX, "no dtype and no default dtype"
        return floatX()
Example #8
def robo_tensorflow(batch_size, n_links, **options):
    """Build a TensorFlow graph for the forward kinematics of an n_links arm with unit link lengths."""
    lengths = [1] * n_links
    with tf.name_scope("fwd_kinematics"):
        angles = []
        for _ in range(n_links):
            angles.append(
                tf.placeholder(floatX(), name="theta", shape=(batch_size, 1)))
        x, y = gen_robot(lengths, angles)
    return {'inputs': angles, 'outputs': [x, y]}
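A hedged usage sketch for Example #8 (assumptions: TF1-style sessions, import numpy as np, import tensorflow as tf, and a floatX() returning 'float32'):

    graph = robo_tensorflow(batch_size=8, n_links=3)
    # one angle placeholder per link, each fed a (8, 1) float32 array
    feed = {ph: np.random.rand(8, 1).astype('float32') for ph in graph['inputs']}
    with tf.Session() as sess:
        x_val, y_val = sess.run(graph['outputs'], feed_dict=feed)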
Example #9
def render_gen_graph(options):
    """Generate a graph for the rendering function"""
    res = options.get('res')
    batch_size = options.get('batch_size')
    phong = options.get('phong')
    nviews = options.get('nviews')
    nvoxels = res * res * res

    with tf.name_scope("fwd_g"):
        voxels = tf.placeholder(floatX(),
                                name="voxels",
                                shape=(batch_size, nvoxels))

        if phong:
            gdotl_cube = tf.placeholder(floatX(),
                                        name="gdotl",
                                        shape=(batch_size, nvoxels))
        else:
            gdotl_cube = None

        rotation_matrices = STD_ROTATION_MATRIX
        out_img = gen_img(voxels, gdotl_cube, rotation_matrices, options)
        return {'voxels': voxels, 'gdotl_cube': gdotl_cube, 'out_img': out_img}
Example #10
    def __init__(self, n_inputs: int) -> None:
        name = 'Mean'
        edges = Bimap()  # type: EdgeMap
        addn_arrow = AddNArrow(n_inputs)
        nsource = SourceArrow(n_inputs)
        cast_arrow = CastArrow(floatX())
        broadcast_arrow = BroadcastArrow()
        div_arrow = DivArrow()
        edges.add(nsource.out_ports()[0], cast_arrow.in_ports()[0])
        edges.add(cast_arrow.out_ports()[0], broadcast_arrow.in_ports()[0])
        edges.add(addn_arrow.out_ports()[0], div_arrow.in_ports()[0])
        edges.add(broadcast_arrow.out_ports()[0], div_arrow.in_ports()[1])
        super().__init__(edges=edges,
                         in_ports=addn_arrow.in_ports(),
                         out_ports=div_arrow.out_ports(),
                         name=name)
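Numerically, the Mean composite reduces to the following plain-numpy reference (a sketch only, not the arrows API):

    import numpy as np

    def mean_reference(xs):
        # AddN across the inputs, then Div by n, with n cast to the float dtype
        n = np.asarray(len(xs), dtype='float32')
        return np.add.reduce(xs) / n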
Example #11
def stack(intensor, width: int, height: int, scalar):
    """Append a constant channel of value `scalar` to a (width, height, C) array."""
    scalars = np.ones([width, height, 1], dtype=floatX()) * scalar
    return np.concatenate([intensor, scalars], axis=2)
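A quick check of what stack produces (assuming import numpy as np and a floatX() that returns 'float32'):

    ndc = np.zeros([4, 4, 2], dtype='float32')
    ndc_xyz = stack(ndc, 4, 4, 1.0)   # shape (4, 4, 3); the appended channel is all 1.0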
Example #12
def run(options):
    """Simple Example"""
    # x drawn from a constant prior of 0.5 (an exponential prior is left commented out below)
    batch_size = options['batch_size']
    x_len = 1
    # x_prior_gen = infinite_samples(lambda *shape: np.random.exponential(size=shape),
    #                                shape=(x_len,),
    #                                batch_size=batch_size,
    #                                add_batch=True)
    x_prior_gen = infinite_samples(lambda *shape: np.ones(shape=shape) * 0.5,
                                   shape=(x_len, ),
                                   batch_size=batch_size,
                                   add_batch=True)

    x_prior = tf.placeholder(dtype=floatX(), shape=(batch_size, x_len))

    def f(x):
        """The model"""
        # return tf.reduce_sum(x, axis=1)
        return x

    z_len = 1
    z = tf.placeholder(dtype=floatX(), shape=(batch_size, z_len))
    # z_gen = infinite_samples(np.random.rand,
    #                          shape=(z_len,),
    #                          batch_size=batch_size,
    #                          add_batch=True)
    z_gen = infinite_samples(lambda *shape: np.ones(shape=shape) * 0.5,
                             shape=(z_len, ),
                             batch_size=batch_size,
                             add_batch=True)

    def g(y, z):
        """Generator"""
        with tf.name_scope("generator"):
            with tf.variable_scope("generator"):
                # y = tf.expand_dims(y, 1)
                # inp = tf.concat([y, z], axis=1)
                inp = y
                inp = fully_connected(inp, 10, activation='elu')
                # inp = batch_normalization(inp)
                inp = fully_connected(inp, 10, activation='elu')
                # inp = batch_normalization(inp)
                inp = fully_connected(inp, x_len, activation='elu')
                # inp = batch_normalization(inp)
                return inp

    def g_pi(y, z):
        """Parametric Inverse Generator"""
        with tf.name_scope("generator"):
            with tf.variable_scope("generator"):
                theta_len = 1
                # the neural network takes z as input and outputs
                # the parameter theta for the parametric inverse
                inp = z
                inp = fully_connected(inp, 20, activation='elu')
                inp = batch_normalization(inp)
                inp = fully_connected(inp, 20, activation='elu')
                inp = batch_normalization(inp)
                theta = fully_connected(inp, theta_len, activation='elu')
                theta = batch_normalization(theta)
                x_1 = tf.expand_dims(y, 1) - theta
                x_2 = theta
                x = tf.concat([x_1, x_2], 1)
                return x

    def disc(x, y, reuse, use_y=False):
        """Discriminator"""
        with tf.name_scope("discriminator"):
            with tf.variable_scope("discriminator", reuse=reuse):
                if use_y:
                    inp = tf.concat([x, tf.expand_dims(y, 1)], 1)
                else:
                    inp = x
                # import pdb; pdb.set_trace()
                # inp = fully_connected(inp, 3, activation='elu')
                out = fully_connected(inp, 1, activation='sigmoid')
                return out

    tf_cgan(x_prior, x_prior_gen, z, z_gen, f, g, disc, options)