# Fit a mean-field variational approximation (Edward MFVI) to a toy
# regression dataset, printing the loss every `n_print` iterations and
# sampling functions from the variational posterior for plotting.
#
# NOTE(review): this script was whitespace-mangled onto one line; the
# indentation below is reconstructed. The sampling block is assumed to
# live inside the `t % n_print == 0` branch (so the figure can be
# refreshed every few iterations) — confirm against the original.
data = build_toy_dataset()

# Set up figure (interactive mode so the plot can update while training).
fig = plt.figure(figsize=(8, 8), facecolor='white')
ax = fig.add_subplot(111, frameon=False)
plt.ion()
plt.show(block=False)

inference = ed.MFVI(model, variational, data)
inference.initialize(n_print=5)
sess = ed.get_session()
for t in range(600):
    loss = inference.update()
    if t % inference.n_print == 0:
        print("iter {:d} loss {:.2f}".format(t, loss))
        variational.print_params()

        # Sample functions from variational model: draw 10 latent samples
        # z ~ q(z) via the reparameterization z = mean + std * eps.
        mean, std = sess.run(
            [variational.layers[0].m, variational.layers[0].s])
        rs = np.random.RandomState(0)  # fixed seed: same draws every frame
        zs = rs.randn(10, variational.num_vars) * std + mean
        zs = tf.constant(zs, dtype=tf.float32)
        inputs = np.linspace(-3, 3, num=400, dtype=np.float32)
        x = tf.expand_dims(tf.constant(inputs), 1)  # column vector of inputs
        # One mean function per sampled z (tf.pack/unpack: pre-TF-1.0 names
        # for stack/unstack).
        mus = tf.pack([model.mapping(x, z) for z in tf.unpack(zs)])
        outputs = mus.eval()

        # Get data (column 0 = targets y, column 1 = inputs x).
        y, x = sess.run([data.data[:, 0], data.data[:, 1]])
# Fit a mean-field variational approximation (Edward MFVI) to a toy
# regression dataset; older API variant where `initialize` returns the
# session and `update`/`print_params` take it explicitly.
#
# NOTE(review): this script was whitespace-mangled onto one line; the
# indentation below is reconstructed. The sampling block is assumed to
# live inside the `t % n_print == 0` branch (so the figure can be
# refreshed every few iterations) — confirm against the original.
data = build_toy_dataset()

# Set up figure (interactive mode so the plot can update while training).
fig = plt.figure(figsize=(8, 8), facecolor='white')
ax = fig.add_subplot(111, frameon=False)
plt.ion()
plt.show(block=False)

inference = ed.MFVI(model, variational, data)
sess = inference.initialize(n_print=5)
# TODO it gets NaN's at iteration 608 and beyond
for t in range(600):
    loss = inference.update(sess)
    if t % inference.n_print == 0:
        print("iter {:d} loss {:.2f}".format(t, loss))
        variational.print_params(sess)

        # Sample functions from variational model: draw 10 latent samples
        # z ~ q(z) via the reparameterization z = mean + std * eps.
        mean, std = sess.run(
            [variational.layers[0].m, variational.layers[0].s])
        rs = np.random.RandomState(0)  # fixed seed: same draws every frame
        zs = rs.randn(10, variational.num_vars) * std + mean
        zs = tf.constant(zs, dtype=tf.float32)
        inputs = np.linspace(-3, 3, num=400, dtype=np.float32)
        x = tf.expand_dims(tf.constant(inputs), 1)  # column vector of inputs
        # One mean function per sampled z (tf.pack/unpack: pre-TF-1.0 names
        # for stack/unstack).
        mus = tf.pack([model.mapping(x, z) for z in tf.unpack(zs)])
        outputs = sess.run(mus)

        # Get data (column 0 = targets y, column 1 = inputs x).
        y, x = sess.run([data.data[:, 0], data.data[:, 1]])
# Fit a mean-field variational approximation (Edward MFVI) to a toy
# regression dataset, printing the loss every `n_print` iterations and
# sampling functions from the variational posterior for plotting.
# (Near-duplicate of the first script variant in this file.)
#
# NOTE(review): this script was whitespace-mangled onto one line; the
# indentation below is reconstructed. The sampling block is assumed to
# live inside the `t % n_print == 0` branch (so the figure can be
# refreshed every few iterations) — confirm against the original.
data = build_toy_dataset()

# Set up figure (interactive mode so the plot can update while training).
fig = plt.figure(figsize=(8, 8), facecolor='white')
ax = fig.add_subplot(111, frameon=False)
plt.ion()
plt.show(block=False)

inference = ed.MFVI(model, variational, data)
inference.initialize(n_print=5)
sess = ed.get_session()
for t in range(600):
    loss = inference.update()
    if t % inference.n_print == 0:
        print("iter {:d} loss {:.2f}".format(t, loss))
        variational.print_params()

        # Sample functions from variational model: draw 10 latent samples
        # z ~ q(z) via the reparameterization z = mean + std * eps.
        mean, std = sess.run([variational.layers[0].m, variational.layers[0].s])
        rs = np.random.RandomState(0)  # fixed seed: same draws every frame
        zs = rs.randn(10, variational.num_vars) * std + mean
        zs = tf.constant(zs, dtype=tf.float32)
        inputs = np.linspace(-3, 3, num=400, dtype=np.float32)
        x = tf.expand_dims(tf.constant(inputs), 1)  # column vector of inputs
        # One mean function per sampled z (tf.pack/unpack: pre-TF-1.0 names
        # for stack/unstack).
        mus = tf.pack([model.mapping(x, z) for z in tf.unpack(zs)])
        outputs = mus.eval()

        # Get data (column 0 = targets y, column 1 = inputs x).
        y, x = sess.run([data.data[:, 0], data.data[:, 1]])
# Fit a mean-field variational approximation (Edward MFVI) to a toy
# regression dataset; older API variant where `initialize` returns the
# session and `update`/`print_params` take it explicitly.
# (Near-duplicate of the second script variant in this file.)
#
# NOTE(review): this script was whitespace-mangled onto one line; the
# indentation below is reconstructed. The sampling block is assumed to
# live inside the `t % n_print == 0` branch (so the figure can be
# refreshed every few iterations) — confirm against the original.
data = build_toy_dataset()

# Set up figure (interactive mode so the plot can update while training).
fig = plt.figure(figsize=(8, 8), facecolor='white')
ax = fig.add_subplot(111, frameon=False)
plt.ion()
plt.show(block=False)

inference = ed.MFVI(model, variational, data)
sess = inference.initialize(n_print=5)
# TODO it gets NaN's at iteration 608 and beyond
for t in range(600):
    loss = inference.update(sess)
    if t % inference.n_print == 0:
        print("iter {:d} loss {:.2f}".format(t, loss))
        variational.print_params(sess)

        # Sample functions from variational model: draw 10 latent samples
        # z ~ q(z) via the reparameterization z = mean + std * eps.
        mean, std = sess.run([variational.layers[0].m, variational.layers[0].s])
        rs = np.random.RandomState(0)  # fixed seed: same draws every frame
        zs = rs.randn(10, variational.num_vars) * std + mean
        zs = tf.constant(zs, dtype=tf.float32)
        inputs = np.linspace(-3, 3, num=400, dtype=np.float32)
        x = tf.expand_dims(tf.constant(inputs), 1)  # column vector of inputs
        # One mean function per sampled z (tf.pack/unpack: pre-TF-1.0 names
        # for stack/unstack).
        mus = tf.pack([model.mapping(x, z) for z in tf.unpack(zs)])
        outputs = sess.run(mus)

        # Get data (column 0 = targets y, column 1 = inputs x).
        y, x = sess.run([data.data[:, 0], data.data[:, 1]])