Exemplo n.º 1
0
def pi_reparam(options):
    """Neural-network-enhanced parametric inverse for supervised learning.

    Builds the forward arrow from ``options['model']``, inverts it, wraps
    the inverse with a forward-consistency loss, reparameterizes it over
    ``phi_shape``, and trains on randomly generated inputs where a quarter
    of each batch is repeated samples.

    Args:
      options: dict with at least 'batch_size', 'model', 'phi_shape' and
        'n_links' keys; also forwarded wholesale to gen_arrow and
        reparam_train.
    """
    tf.reset_default_graph()
    batch_size = options['batch_size']
    model_tensorflow = options['model']
    phi_shape = options['phi_shape']
    n_links = options['n_links']

    # Forward model -> parametric inverse -> inverse + fwd-consistency loss.
    arrow = gen_arrow(batch_size, model_tensorflow, options)
    inv_arrow = invert(arrow)
    inv_arrow = inv_fwd_loss_arrow(arrow, inv_arrow)
    rep_arrow = reparam(inv_arrow, (batch_size, ) + phi_shape)

    def sampler(*shape):
        """Uniform samples in [0, n_links)."""
        return np.random.rand(*shape) * n_links

    # Repeat a fixed fraction of each batch so the trainer sees duplicates.
    frac_repeat = 0.25
    nrepeats = int(np.ceil(batch_size * frac_repeat))
    train_input1 = repeated_random(sampler, batch_size, nrepeats, shape=(1, ))
    train_input2 = repeated_random(sampler, batch_size, nrepeats, shape=(1, ))
    test_input1 = repeated_random(sampler, batch_size, nrepeats, shape=(1, ))
    test_input2 = repeated_random(sampler, batch_size, nrepeats, shape=(1, ))
    # Train against every non-error output port of the inverse.
    d = [p for p in inv_arrow.out_ports() if not is_error_port(p)]
    reparam_train(rep_arrow,
                  d, [train_input1, train_input2], [test_input1, test_input2],
                  options=options)
Exemplo n.º 2
0
def parametric_plot():
    """Plot a heatmap of the inverse-forward loss over a 2D parameter grid."""
    n_param = 1000
    z_value = 5
    n_points = n_param**2

    # Forward function f(x, y) = x * y + x, its parametric inverse, and
    # the inverse augmented with the forward-consistency loss.
    x = tf.placeholder(tf.float32, shape=(n_points, ))
    y = tf.placeholder(tf.float32, shape=(n_points, ))
    out = x * y + x
    arrow = graph_to_arrow(output_tensors=[out], input_tensors=[x, y])
    inv_arrow = invert(arrow)
    loss_arrow = inv_fwd_loss_arrow(arrow, inv_arrow)

    # Evaluate the losses on a dense (theta1, theta2) grid for fixed z.
    z = z_value * np.ones(n_points)
    theta1 = np.repeat(np.linspace(0.5, 2.5, n_param), n_param)
    theta2 = np.tile(np.linspace(1, 3, n_param), n_param)
    outputs = apply(loss_arrow, inputs=[z, theta1, theta2])
    # outputs[2]/outputs[3] are assumed to be the sub-arrow and
    # inverse-forward losses respectively — matches the original indexing.
    sub_arrow_loss = outputs[2].reshape((n_param, n_param))
    inv_fwd_loss = outputs[3].reshape((n_param, n_param))

    # Grid axes: parameter 1 spans [1, 3], parameter 2 spans [0.5, 2.5].
    x = np.linspace(1, 3, n_param)
    y = np.linspace(0.5, 2.5, n_param)
    plt.xlabel('Parameter 1')
    plt.ylabel('Parameter 2')
    plt.title('Heatmap for inv_fwd_loss')
    plt.pcolormesh(x, y, inv_fwd_loss, cmap='hot')
    plt.colorbar()
    plt.show(block=True)
Exemplo n.º 3
0
def test_apply_backwards():
    """Invert the two-x-y-plus-x test arrow and run it backwards on noise."""
    orig = test_twoxyplusx()
    arrow = invert(orig)
    outputs = []
    for port in arrow.out_ports():
        if is_error_port(port):
            continue
        outputs.append(np.random.randn(2, 2))
    return orig, arrow, outputs, apply_backwards(arrow, outputs)
Exemplo n.º 4
0
def gan_shopping_arrow_pi(nitems: int, options) -> CompositeArrow:
    """Gan on shopping basket.

    Builds a conditional generator from the parametric inverse of an
    n-item sum, a discriminator over the items, and composes them into a
    GAN arrow.

    Args:
      nitems: number of items in the basket (inputs to SumNArrow).
      options: dict with 'n_fake_samples' and 'batch_size'.

    Returns:
      The composed GAN arrow.
    """
    n_fake_samples = options['n_fake_samples']
    n_samples = n_fake_samples + 1  # fake samples plus one real sample
    batch_size = options['batch_size']
    fwd = SumNArrow(nitems)
    inv = invert(fwd)
    # NOTE(review): `info` is unused below, but propagate may have side
    # effects on the arrow — kept deliberately; confirm before removing.
    info = propagate(inv)

    def gen_func(args, reuse=False):
        """Generator: map conditioning inputs to one tensor per param port."""
        with tf.variable_scope("generator", reuse=reuse):
            inp = tf.concat(args, axis=1)
            inp = fully_connected(inp, inv.num_param_ports(), activation='elu')
            inps = tf.split(inp,
                            axis=1,
                            num_or_size_splits=inv.num_param_ports())
            return inps

    def disc_func(args, reuse=False):
        """Discriminator function """
        with tf.variable_scope("discriminator", reuse=reuse):
            inp = tf.concat(args, axis=2)
            inp = fully_connected(inp, 10, activation='elu')
            inp = fully_connected(inp, n_samples, activation='sigmoid')
            return [inp]

    # Make a conditional generator from the inverse: it consumes the
    # non-parametric inputs plus one noise input and emits all parameters.
    num_non_param_in_ports = inv.num_in_ports() - inv.num_param_ports()
    g_theta = TfLambdaArrow(num_non_param_in_ports + 1,
                            inv.num_param_ports(),
                            func=gen_func,
                            name="g_theta")
    cond_gen = g_from_g_theta(inv, g_theta)

    disc = TfLambdaArrow(nitems, 1, func=disc_func, name="disc")
    gan_arr = set_gan_arrow(fwd,
                            cond_gen,
                            disc,
                            n_fake_samples,
                            2,
                            x_shapes=[(batch_size, 1) for i in range(nitems)],
                            z_shape=(batch_size, 1))

    return gan_arr
Exemplo n.º 5
0
def tensor_to_sup_right_inv(output_tensors: Sequence[Tensor], options):
    """Convert outputs from tensorflow graph into supervised loss arrow.

    Inverts the forward render graph, attaches a conv-net that maps images
    to the inverse's parameters, and composes a copy of the forward model
    in front so x-data can be fed directly.

    Args:
      output_tensors: output tensors of the forward tensorflow graph.
      options: dict with 'width', 'height', 'nsteps', 'batch_size'.

    Returns:
      The composed right-inverse arrow fed by the forward model.
    """
    fwd = graph_to_arrow(output_tensors, name="render")
    inv = invert(fwd)
    inv_arrow = inv_fwd_loss_arrow(fwd, inv)
    info = propagate(inv_arrow)

    def g_tf(args, reuse=False):
        """Tensorflow map from image to pi parameters"""
        # BUGFIX: the docstring must be the first statement; previously
        # `eps = 1e-3` preceded it, demoting it to a no-op expression.
        eps = 1e-3  # keeps emitted parameters strictly positive
        shapes = [info[port]['shape'] for port in inv_arrow.param_ports()]
        inp = args[0]
        width, height = getn(options, 'width', 'height')
        inp = tf.reshape(inp, (-1, width, height))
        inp = tf.expand_dims(inp, axis=3)
        from tflearn.layers import conv_2d, fully_connected

        tf.summary.image("g_tf_output", inp)
        # Do convolutional layers
        nlayers = 2
        for i in range(nlayers):
            inp = conv_2d(inp, nb_filter=4, filter_size=1, activation="elu")

        # One sigmoid channel per parameter port.
        inp = conv_2d(inp,
                      nb_filter=options['nsteps'],
                      filter_size=1,
                      activation="sigmoid")
        out = []
        for i, shape in enumerate(shapes):
            this_inp = inp[:, :, :, i:i + 1]
            if shape[1] == width * height:
                # Parameter is image-sized: flatten directly.
                this_inp = tf.reshape(this_inp,
                                      (options['batch_size'], -1)) + eps
                out.append(this_inp)
            else:
                # Otherwise resize to the smallest square that covers the
                # parameter length, flatten, and truncate to exact size.
                r_length = int(np.ceil(np.sqrt(shape[1])))
                rs = tf.image.resize_images(this_inp, (r_length, r_length))
                rs = tf.reshape(rs, (options['batch_size'], -1)) + eps
                out.append(rs[:, 0:shape[1]])
        return out

    g_arrow = TfLambdaArrow(1,
                            inv_arrow.num_param_ports(),
                            func=g_tf,
                            name="img_to_theta")
    right_inv = unparam(inv_arrow, nnet=g_arrow)

    # 1. Attach voxel input to right_inverse to feed in x data
    fwd2 = deepcopy(fwd)
    data_right_inv = comp(fwd2, right_inv)
    return data_right_inv
Exemplo n.º 6
0
def execute_pi(x, y, lr=0.01, n_steps=200, n_execs=1):
    """Build and invert the robot-arm graph n_execs times.

    NOTE(review): this looks unfinished — `lr` and `n_steps` are never
    used, `xtf`/`ytf`/`inv_tf` are constructed but never executed, and
    both loss lists are returned empty. Presumably an optimization loop
    over the inverted graph was meant to follow; confirm against callers.
    """
    initial_losses = []
    optimal_losses = []
    for _ in range(n_execs):
        # Target position as constants; arm angles as scalar placeholders.
        xtf = tf.constant(x)
        ytf = tf.constant(y)
        alpha = tf.placeholder(tf.float64, shape=())
        beta = tf.placeholder(tf.float64, shape=())
        arrow = graph_to_arrow(robot_arm(alpha, beta), [alpha, beta])
        print(arrow.in_ports(), arrow.out_ports())
        print('got the arrow from tensorflow graph...')
        inv_arrow = invert(arrow)
        print(inv_arrow.in_ports(), inv_arrow.out_ports())
        print('inverted the arrow...')
        in_tensors = gen_input_tensors(inv_arrow)
        print('generated the input tensors...')
        # Lower the inverted arrow back to a tensorflow graph.
        inv_tf = arrow_to_graph(inv_arrow, in_tensors)
        print('got the tensorflow graph of the inverted arrow...')

    return initial_losses, optimal_losses
Exemplo n.º 7
0
def pi_supervised(options):
    """Neural network enhanced Parametric inverse! to do supervised learning
    Args:
      batch_size: the batch_size
      model_tensorflow: f: options -> {'inputs': inp_tensors, 'outputs': out_tensors}
      gen_data: f: (batch_size, model, options) -> {'inputs': ..., 'outputs': ...}
    """
    tf.reset_default_graph()
    batch_size = options['batch_size']
    model_tensorflow = options['model']
    gen_data = options['gen_data']

    # Forward model -> inverse with fwd-consistency loss -> unparameterized
    # right inverse -> supervised loss arrow.
    arrow = gen_arrow(batch_size, model_tensorflow, options)
    inv_arrow = invert(arrow)
    inv_arrow = inv_fwd_loss_arrow(arrow, inv_arrow)
    right_inv = unparam(inv_arrow)
    sup_right_inv = supervised_loss_arrow(right_inv)

    # Get training and test_data
    train_data = gen_data(batch_size, model_tensorflow, options)
    test_data = gen_data(batch_size, model_tensorflow, options)

    # Have to switch input from output because data is from fwd model
    train_input_data = train_data['outputs']
    train_output_data = train_data['inputs']
    test_input_data = test_data['outputs']
    test_output_data = test_data['inputs']
    num_params = get_tf_num_params(right_inv)
    # BUGFIX: removed leftover `import pdb; pdb.set_trace()` — it halted
    # every invocation at an interactive breakpoint.
    print("Number of params", num_params)
    supervised_train(
        sup_right_inv,
        train_input_data,
        train_output_data,
        test_input_data,
        test_output_data,
        callbacks=[save_every_n, save_everything_last, save_options],
        options=options)
Exemplo n.º 8
0
def test_batch_apply_backwards():
    """Batch-apply the inverted test arrow backwards over 10 random batches."""
    orig = test_twoxyplusx()
    inv = invert(orig)
    batches = []
    for _ in range(10):
        batches.append([np.random.randn(2, 2) for _ in orig.in_ports()])
    return orig, inv, from_input_list(orig, inv, batches)
Exemplo n.º 9
0
from reverseflow.invert import invert
import tensorflow as tf
from tensorflow import float32

# Example 1: invert the single-output function f(x, y) = x * y + x.
# f(x,y) = x * y + x
tf.reset_default_graph()
g = tf.get_default_graph()

with g.name_scope("fwd_g"):
    x = tf.placeholder(float32, name="x", shape=())
    y = tf.placeholder(float32, name="y", shape=())

# NOTE(review): z is built outside the "fwd_g" scope, unlike the second
# example below — presumably unintentional but harmless for inversion.
z = x * y + x
g = tf.get_default_graph()
inv_g, inputs, out_map = invert(({'z': z}))

# Example 2: invert a two-output graph sharing subexpressions e and f.
tf.reset_default_graph()
g = tf.get_default_graph()

with g.name_scope("fwd_g"):
    x = tf.placeholder(float32, name="x", shape=())
    y = tf.placeholder(float32, name="y", shape=())
    z = tf.placeholder(float32, name="z", shape=())

    e = x * y
    f = x - y
    o1 = (e + 2 * f) + (3 * z)
    o2 = e + f

inv_g, inputs, out_map, = invert({'o1': o1, "o2": o2})
# print(out_map)
Exemplo n.º 10
0
def test_invert_render_graph(options):
    """Build the renderer arrow from the render graph and invert it."""
    rendered = render_gen_graph(options)
    arrow_renderer = graph_to_arrow([rendered['out_img']], name="renderer")
    return arrow_renderer, invert(arrow_renderer)
Exemplo n.º 11
0
def test_closure():
    """Return the inverse of the two-x-y-plus-x test arrow."""
    return invert(test_twoxyplusx())