Code Example #1
File: to_graph.py  Project: llwu/reverseflow
def conv(a: TfLambdaArrow, args: TensorVarList, state) -> Sequence[Tensor]:
    with tf.name_scope(a.name):
        if 'seen_tf' in state and a.name in state['seen_tf']:
            return a.func(args, reuse=True)
        else:
            # Record this arrow so later conversions reuse its variables
            # (add to the set instead of overwriting it)
            state.setdefault('seen_tf', set()).add(a.name)
            return a.func(args, reuse=False)
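The reuse flag matters because a.func typically builds TensorFlow variables inside a tf.variable_scope: converting the same arrow a second time must share those variables rather than create duplicates. A minimal TF1-style sketch of that sharing pattern (the scope and layer names here are illustrative, not taken from reverseflow):

import tensorflow as tf

def shared_layer(x, reuse=False):
    # Variables are created on the first call and shared on later calls
    with tf.variable_scope("shared_layer", reuse=reuse):
        w = tf.get_variable("w", shape=[1], initializer=tf.ones_initializer())
        return x * w

a = shared_layer(tf.constant([2.0]))              # creates shared_layer/w
b = shared_layer(tf.constant([3.0]), reuse=True)  # reuses the same w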
Code Example #2
def gan_shopping_arrow_pi(nitems: int, options) -> CompositeArrow:
    """Gan on shopping basket"""
    n_fake_samples = options['n_fake_samples']
    n_samples = n_fake_samples + 1
    batch_size = options['batch_size']
    # nitems = options['nitems']
    fwd = SumNArrow(nitems)
    inv = invert(fwd)
    info = propagate(inv)

    def gen_func(args, reuse=False):
        # import pdb; pdb.set_trace()
        """Generator function"""
        with tf.variable_scope("generator", reuse=reuse):
            inp = tf.concat(args, axis=1)
            # inp = fully_connected(inp, 10, activation='elu')
            inp = fully_connected(inp, inv.num_param_ports(), activation='elu')
            inps = tf.split(inp,
                            axis=1,
                            num_or_size_splits=inv.num_param_ports())
            # inps = [tf.Print(inp, [inp[0]], message="Generated!", summarize=100) for inp in inps]
            return inps

    def disc_func(args, reuse=False):
        # import pdb; pdb.set_trace()
        """Discriminator function """
        with tf.variable_scope("discriminator", reuse=reuse):
            inp = tf.concat(args, axis=2)
            # inp = tf.Print(inp, [inp[0]], message="inp to disc", summarize=100)
            # inp = fully_connected(inp, 20, activation='elu')
            inp = fully_connected(inp, 10, activation='elu')
            inp = fully_connected(inp, n_samples, activation='sigmoid')
            return [inp]

    # Make a conditional generator from the inverse
    num_non_param_in_ports = inv.num_in_ports() - inv.num_param_ports()
    g_theta = TfLambdaArrow(num_non_param_in_ports + 1,
                            inv.num_param_ports(),
                            func=gen_func,
                            name="g_theta")
    cond_gen = g_from_g_theta(inv, g_theta)

    disc = TfLambdaArrow(nitems, 1, func=disc_func, name="disc")
    gan_arr = set_gan_arrow(fwd,
                            cond_gen,
                            disc,
                            n_fake_samples,
                            2,
                            x_shapes=[(batch_size, 1) for i in range(nitems)],
                            z_shape=(batch_size, 1))

    return gan_arr
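The arrow returned here could then be compiled and trained the same way as in Code Example #8 below; a rough sketch under that assumption, with placeholder option values:

# Sketch only: mirrors the compile step shown in Code Example #8.
options = {'n_fake_samples': 1, 'batch_size': 64}
gan_arr = gan_shopping_arrow_pi(3, options)
with tf.name_scope(gan_arr.name):
    input_tensors = gen_input_tensors(gan_arr, param_port_as_var=False)
    output_tensors = arrow_to_graph(gan_arr, input_tensors)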
Code Example #3
File: gan.py  Project: llwu/reverseflow
def set_gan_nn_arrow(options):
    """Test Gan on fwd function f(x) = x"""
    fwd_arr = IdentityArrow()
    n_fake_samples = options['n_fake_samples']
    n_samples = n_fake_samples + 1
    batch_size = options['batch_size']

    def gen_func(args):
        """Generator function"""
        with tf.variable_scope("generator", reuse=False):
            # inp = tf.concat(args, axis=1)
            inp = args[0]
            inp = fully_connected(inp, 10, activation='elu')
            # inp = batch_normalization(inp)
            # inp = fully_connected(inp, 10, activation='elu')
            # inp = batch_normalization(inp)
            inp = fully_connected(inp, 1, activation='sigmoid')
            # inp = batch_normalization(inp)
            return [inp]

    # def gen_func(args):
    #   """Generator function"""
    #   with tf.variable_scope("generator", reuse=False):
    #     return [args[0]+0.2]

    def disc_func(args):
        """Discriminator function"""
        with tf.variable_scope("discriminator", reuse=False):
            assert len(args) == 1
            inp = args[0]
            inp = fully_connected(inp, 5, activation='elu')
            # inp = batch_normalization(inp)
            inp = fully_connected(inp, 5, activation='elu')
            # inp = batch_normalization(inp)
            inp = fully_connected(inp, n_samples, activation='sigmoid')
            return [inp]

    cond_gen = TfLambdaArrow(2, 1, func=gen_func)
    disc = TfLambdaArrow(1, 1, func=disc_func)
    gan_arr = set_gan_arrow(fwd_arr,
                            cond_gen,
                            disc,
                            n_fake_samples,
                            2,
                            x_shape=(batch_size, 1),
                            z_shape=(batch_size, 1))

    return gan_arr
Code Example #4
File: voxel_render.py  Project: llwu/reverseflow
def tensor_to_sup_right_inv(output_tensors: Sequence[Tensor], options):
    """Convert outputs from a tensorflow graph into a supervised loss arrow"""
    fwd = graph_to_arrow(output_tensors, name="render")
    inv = invert(fwd)
    inv_arrow = inv_fwd_loss_arrow(fwd, inv)
    info = propagate(inv_arrow)

    def g_tf(args, reuse=False):
        """Tensorflow map from image to pi parameters"""
        eps = 1e-3
        shapes = [info[port]['shape'] for port in inv_arrow.param_ports()]
        inp = args[0]
        width, height = getn(options, 'width', 'height')
        inp = tf.reshape(inp, (-1, width, height))
        inp = tf.expand_dims(inp, axis=3)
        from tflearn.layers import conv_2d, fully_connected

        tf.summary.image("g_tf_output", inp)
        # Do convolutional layers
        nlayers = 2
        for i in range(nlayers):
            inp = conv_2d(inp, nb_filter=4, filter_size=1, activation="elu")

        inp = conv_2d(inp,
                      nb_filter=options['nsteps'],
                      filter_size=1,
                      activation="sigmoid")
        out = []
        for i, shape in enumerate(shapes):
            this_inp = inp[:, :, :, i:i + 1]
            if shape[1] == width * height:
                this_inp = tf.reshape(this_inp,
                                      (options['batch_size'], -1)) + eps
                out.append(this_inp)
            else:
                r_length = int(np.ceil(np.sqrt(shape[1])))
                rs = tf.image.resize_images(this_inp, (r_length, r_length))
                rs = tf.reshape(rs, (options['batch_size'], -1)) + eps
                out.append(rs[:, 0:shape[1]])
        return out

    g_arrow = TfLambdaArrow(1,
                            inv_arrow.num_param_ports(),
                            func=g_tf,
                            name="img_to_theta")
    right_inv = unparam(inv_arrow, nnet=g_arrow)

    # 1. Attach voxel input to right_inverse to feed in x data
    fwd2 = deepcopy(fwd)
    data_right_inv = comp(fwd2, right_inv)
    # sup_right_inv = supervised_loss_arrow(right_inv)
    return data_right_inv
Code Example #5
def gan_shopping_arrow_compare(nitems: int, options) -> CompositeArrow:
    """Comparison for Gan, g is straight neural network"""
    n_fake_samples = options['n_fake_samples']
    n_samples = n_fake_samples + 1
    batch_size = options['batch_size']
    # nitems = options['nitems']
    fwd = SumNArrow(nitems)

    def gen_func(args, reuse=False):
        # import pdb; pdb.set_trace()
        """Generator function"""
        with tf.variable_scope("generator", reuse=reuse):
            inp = tf.concat(args, axis=1)
            inp = fully_connected(inp, nitems, activation='elu')
            inps = tf.split(inp, axis=1, num_or_size_splits=nitems)
            return inps

    def disc_func(args, reuse=False):
        # import pdb; pdb.set_trace()
        """Discriminator function """
        with tf.variable_scope("discriminator", reuse=reuse):
            inp = tf.concat(args, axis=2)
            inp = fully_connected(inp, 10, activation='elu')
            inp = fully_connected(inp, n_samples, activation='sigmoid')
            return [inp]

    cond_gen = TfLambdaArrow(2, nitems, func=gen_func, name="cond_gen")
    disc = TfLambdaArrow(nitems, 1, func=disc_func, name="disc")
    gan_arr = set_gan_arrow(fwd,
                            cond_gen,
                            disc,
                            n_fake_samples,
                            2,
                            x_shapes=[(batch_size, 1) for i in range(nitems)],
                            z_shape=(batch_size, 1))

    return gan_arr
Code Example #6
File: gan.py  Project: llwu/reverseflow
def GanLossArrow(nsamples):
    def func(args, eps=1e-6, reuse=False):
        """Compute the GAN loss in TensorFlow.

        Args:
          xs: (batch_size, nsamples) discriminator outputs
          perm: (nsamples,) permutation; the column where perm == nsamples - 1
            holds the authentic sample
        Returns:
          (batch_size,) discriminator loss (summed over samples)
          (batch_size,) generator loss (summed over samples)
        """
        with tf.name_scope("ganloss"):
            assert len(args) == 2
            xs = args[0]
            perm = args[1]

            # Only the tensor at position nsamples - 1 is authentic
            is_auth = tf.equal(perm, nsamples - 1)

            # xs = tf.Print(xs, [xs], message="xs", summarize=1000)
            is_auth = tf.Print(is_auth, [is_auth, perm] +
                               [xs[0, i] for i in range(nsamples)])
            xs = tf.split(xs, axis=1, num_or_size_splits=nsamples)
            assert len(xs) == nsamples
            losses_d = []
            losses_g = []
            for i in range(nsamples):
                x = xs[i]

                def lambda_log(v):
                    return lambda: tf.log(v)

                # FIXME: Redundant computation
                losses_d.append(
                    tf.cond(is_auth[i], lambda_log(x + eps),
                            lambda_log(1 - x + eps)))
                losses_g.append(
                    tf.cond(is_auth[i], lambda: tf.zeros_like(x),
                            lambda_log(x + eps)))

            loss_d = tf.concat(losses_d, axis=1)
            loss_g = tf.concat(losses_g, axis=1)
            loss_d = tf.Print(loss_d, [loss_d[0], loss_g[0]], message="losses")
            sum_loss_d = tf.reduce_sum(loss_d, axis=1)
            sum_loss_g = tf.reduce_sum(loss_g, axis=1)
            return [sum_loss_d, sum_loss_g]

    return TfLambdaArrow(2, 2, func=func, name="ganloss")
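Per batch element, func computes log(D_i + eps) for the authentic column (the i where perm[i] == nsamples - 1) and log(1 - D_i + eps) for the fake columns as the discriminator loss, while the generator loss sums log(D_i + eps) over the fake columns only. A small NumPy check of that arithmetic (illustrative only, not part of reverseflow):

import numpy as np

eps = 1e-6
nsamples = 2
xs = np.array([[0.9, 0.3]])        # discriminator outputs for one batch element
perm = np.array([1, 0])            # column 0 holds the authentic sample

is_auth = (perm == nsamples - 1)   # [True, False]
loss_d = np.where(is_auth, np.log(xs + eps), np.log(1 - xs + eps)).sum(axis=1)
loss_g = np.where(is_auth, 0.0, np.log(xs + eps)).sum(axis=1)
print(loss_d, loss_g)              # loss_d ~ log(0.9) + log(0.7), loss_g ~ log(0.3)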
Code Example #7
File: voxel_render.py  Project: llwu/reverseflow
def right_inv_nnet(output_tensors: Sequence[Tensor], options):
    """Convert outputs from a tensorflow graph into a supervised loss arrow"""
    fwd = graph_to_arrow(output_tensors, name="render")
    info = propagate(fwd)

    def g_tf(args, reuse=False):
        """Tensorflow map from image to pi parameters"""
        eps = 1e-3
        shapes = [info[port]['shape'] for port in fwd.in_ports()]
        inp = args[0]
        width, height = getn(options, 'width', 'height')
        inp = tf.reshape(inp, (-1, width, height))
        inp = tf.expand_dims(inp, axis=3)
        from tflearn.layers import conv_2d, fully_connected
        tf.summary.image("g_tf_output", inp)

        # Do convolutional layers
        nlayers = 2
        for i in range(nlayers):
            inp = conv_2d(inp, nb_filter=4, filter_size=1, activation="elu")

        ratio = width / options['res']
        inp = conv_2d(inp,
                      nb_filter=options['res'],
                      filter_size=1,
                      strides=int(ratio),
                      activation="sigmoid")
        return [tf.reshape(inp, (options['batch_size'], -1))]

    from arrows import CompositeArrow, Arrow

    def wrap(a: Arrow):
        """Wrap an arrow in a composite arrow"""
        c = CompositeArrow(name=a.name)
        for port in a.ports():
            c_port = c.add_port()
            if is_in_port(port):
                make_in_port(c_port)
                c.add_edge(c_port, port)
            if is_param_port(port):
                make_param_port(c_port)
            if is_out_port(port):
                make_out_port(c_port)
                c.add_edge(port, c_port)
            if is_error_port(port):
                make_error_port(c_port)
            transfer_labels(port, c_port)

        assert c.is_wired_correctly()
        return c

    right_inv = TfLambdaArrow(fwd.num_out_ports(),
                              fwd.num_in_ports(),
                              func=g_tf,
                              name="img_to_voxels")
    right_inv = wrap(right_inv)
    for i, in_port in enumerate(fwd.in_ports()):
        set_port_shape(right_inv.out_port(i), get_port_shape(in_port, info))

    for i, out_port in enumerate(fwd.out_ports()):
        set_port_shape(right_inv.in_port(i), get_port_shape(out_port, info))

    inv_arrow = inv_fwd_loss_arrow(fwd, right_inv)
    # 1. Attach voxel input to right_inverse to feed in x data
    fwd2 = deepcopy(fwd)
    data_right_inv = comp(fwd2, inv_arrow)
    return data_right_inv
Code Example #8
def test_set_gan_nn_arrow(options):
    fwd = IdentityArrow()
    n_fake_samples = 1
    n_samples = n_fake_samples + 1

    def gen_func(args):
        """Generator function"""
        with tf.variable_scope("generator", reuse=False):
            inp = tf.concat(args, axis=1)
            inp = fully_connected(inp, 1, activation='elu')
            inp = batch_normalization(inp)
            inp = fully_connected(inp, 1, activation='elu')
            inp = batch_normalization(inp)
            return [inp]

    # def gen_func(args):
    #   """Generator function"""
    #   with tf.variable_scope("generator", reuse=False):
    #     return [args[0]]

    def disc_func(args):
        """Discriminator function"""
        with tf.variable_scope("discriminator", reuse=False):
            assert len(args) == 1
            inp = args[0]
            l1 = fully_connected(inp, n_samples, activation='sigmoid')
            return [l1]

    cond_gen = TfLambdaArrow(2, 1, func=gen_func)
    disc = TfLambdaArrow(1, 1, func=disc_func)
    gan_arr = set_gan_arrow(fwd, cond_gen, disc, n_fake_samples, 2)
    x = np.array([1.0])
    z = np.array([0.5])
    perm = np.array([1, 0])

    batch_size = 64
    set_port_shape(gan_arr.in_port(0), (batch_size, 1))
    set_port_shape(gan_arr.in_port(1), (batch_size, 1))
    set_port_shape(gan_arr.in_port(2), (n_samples, ))
    set_port_dtype(gan_arr.in_port(2), 'int32')

    with tf.name_scope(gan_arr.name):
        input_tensors = gen_input_tensors(gan_arr, param_port_as_var=False)
        output_tensors = arrow_to_graph(gan_arr, input_tensors)

    x_ten, z_ten, perm_ten = input_tensors
    d_loss, g_loss, fake_x_1 = output_tensors
    d_loss = tf.reduce_mean(-d_loss)
    g_loss = tf.reduce_mean(-g_loss)
    # fetch = {'d_loss': d_loss, 'g_loss': g_loss, 'x_ten': x_ten, 'fake': fake_x_1}
    fetch = {'d_loss': d_loss, 'g_loss': g_loss}
    sess = tf.Session()
    x = np.random.rand(16, 1)
    z = np.random.rand(16, 1)
    # output_data = sess.run(fetch,
    #                        feed_dict={x_ten: x, z_ten: z, perm_ten: perm})

    losses = {'d_loss': d_loss, 'g_loss': g_loss}
    options = {'learning_rate': 0.01, 'update': 'adam'}
    d_vars = get_variables('discriminator')
    g_vars = get_variables('generator')
    loss_updates = [
        updates(d_loss, d_vars, options=options)[1],
        updates(g_loss, g_vars, options=options)[1]
    ]

    fetch['check'] = tf.add_check_numerics_ops()
    loss_ratios = [1, 1]

    def train_gen():
        while True:
            x = np.random.rand(batch_size, 1)
            z = np.random.rand(batch_size, 1)
            perm = np.arange(n_samples)
            np.random.shuffle(perm)
            yield {x_ten: x, z_ten: z, perm_ten: perm}

    # Summaries
    summaries = variable_summaries(losses)
    writers = setup_file_writers('summaries', sess)
    options['writers'] = writers
    callbacks = [every_n(summary_writes, 25)]

    sess.run(tf.initialize_all_variables())
    train_loop(sess,
               loss_updates,
               fetch,
               train_generators=[train_gen()],
               test_generators=None,
               num_iterations=100000,
               loss_ratios=loss_ratios,
               callbacks=callbacks)
Code Example #9
File: gan.py  Project: llwu/reverseflow
def gan_renderer_arrow(options):
    """Gan on renderer"""
    n_fake_samples = options['n_fake_samples']
    n_samples = n_fake_samples + 1
    batch_size = options['batch_size']
    res = options['res']
    width = options['width']
    height = options['height']
    nvoxels = res * res * res
    npixels = width * height

    from voxel_render import test_invert_render_graph
    fwd, inv = test_invert_render_graph(options)
    from arrows.apply.propagate import propagate
    info = propagate(inv)

    def gen_func(args):
        """Generator function"""
        with tf.variable_scope("generator", reuse=False):
            # inp = tf.concat(args, axis=1)
            shapes = [info[port]['shape'] for port in inv.param_ports()]
            inp = args[0]
            inp = fully_connected(inp, 2, activation='elu')
            return [
                fully_connected(inp, shape[1], activation='elu')
                for shape in shapes
            ]

    def disc_func(args):
        """Discriminator function"""
        with tf.variable_scope("discriminator", reuse=False):
            assert len(args) == 1
            inp = args[0]
            inp = fully_connected(inp, n_samples, activation='sigmoid')
            return [inp]

    # Make a conditional generator from the inverse
    g_theta = TfLambdaArrow(inv.num_in_ports() - inv.num_param_ports() + 1,
                            inv.num_param_ports(),
                            func=gen_func)
    cond_gen = g_from_g_theta(inv, g_theta)

    disc = TfLambdaArrow(1, 1, func=disc_func)

    def train_gen():
        """Generator for x, z and permutation"""
        from wacacore.util.generators import infinite_batches
        from voxel_helpers import model_net_40
        voxel_data = model_net_40()
        x_gen = infinite_batches(voxel_data, batch_size=batch_size)
        while True:
            x = next(x_gen)
            x = x.reshape(batch_size, -1)
            z = np.random.rand(batch_size, 1)
            perm = np.arange(n_samples)
            np.random.shuffle(perm)
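            # Note: x_ten, z_ten and perm_ten are not defined in this function;
            # they would be the graph input tensors produced when the arrow is
            # compiled (compare Code Example #8), so this generator only works
            # in that context.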
            yield {x_ten: x, z_ten: z, perm_ten: perm}

    gan_arr = set_gan_arrow(fwd,
                            cond_gen,
                            disc,
                            n_fake_samples,
                            2,
                            x_shape=(batch_size, nvoxels),
                            z_shape=(batch_size, 1))

    return gan_arr