Example #1
0
def make_video(output_video_name):
    """Render a 100-frame latent-space interpolation video.

    Blends the module-level latents fixed_z and fixed_zprime with a
    triangle-wave weight (1 -> 0 -> 1 over the clip), decoding each
    blend through the module-level generator.
    """
    video = imutil.VideoMaker(output_video_name)
    for frame_idx in range(100):
        # Triangle wave: 1.0 at both ends of the clip, 0.0 at the midpoint.
        theta = abs(frame_idx - 50) / 50.
        latent = theta * fixed_z + (1 - theta) * fixed_zprime
        decoded = generator(latent).cpu().data.numpy()
        # NCHW -> NHWC, then rescale from [-1, 1] into [0, 1].
        frame = decoded.transpose((0, 2, 3, 1)) * 0.5 + 0.5
        video.write_frame(frame)
    video.finish()
Example #2
0
 def __init__(self, render=False, video_filename=None, verbose=False, num_players=2):
     """Set up an SC2 environment, optionally recording a video.

     Supplying a video filename implies rendering, since frames must
     be drawn before they can be captured.
     """
     if video_filename:
         render = True
     self.render = render
     self.num_players = num_players
     self.sc2env = make_sc2env(num_players, render=render)
     # Only build a VideoMaker when a filename was actually supplied.
     if video_filename:
         self.video = imutil.VideoMaker(filename=video_filename)
     else:
         self.video = None
     self.verbose = verbose
Example #3
0
def make_video(output_video_name, trajectory, whatif=""):
    """Write a side-by-side comparison video along a latent trajectory.

    The left half is fixed: the decoded first latent point ("Reality").
    The right half replays every point of the trajectory, labeled with
    the given counterfactual description.
    """
    print('Generating video from trajectory shape {}'.format(trajectory.shape))
    generator.eval()
    video = imutil.VideoMaker(output_video_name)

    # Decode the starting latent once; it anchors the left half of every frame.
    z_start = torch.Tensor(trajectory[0]).to(device)
    start_frame = generator(z_start)[0]
    start_qvals = value_estimator(z_start)[0]
    left_pixels = format_demo_img(to_np(start_frame),
                                  to_np(start_qvals), 'Reality')

    for latent in torch.Tensor(trajectory):
        latent = latent.to(device)
        decoded = generator(latent)[0]
        qvals = value_estimator(latent)[0]
        right_pixels = format_demo_img(to_np(decoded), to_np(qvals),
                                       'What If: {}'.format(whatif))
        video.write_frame(np.concatenate([left_pixels, right_pixels], axis=1))
    video.finish()
Example #4
0
def main():
    """Build a generator, load trained weights, and render a 400-frame
    interpolation video between two random latent points.
    """
    print('building model...')
    Z_dim = args.latent_dim

    generator = model.Generator(Z_dim).cuda()

    print('Loading model...')
    generator.load_state_dict(torch.load(args.generator_filename))
    print('Loaded model')

    output_video_name = args.video_name
    # torch.autograd.Variable has been a no-op wrapper since PyTorch 0.4
    # (this file already uses the 0.4+ tensor API elsewhere), so plain
    # tensors are used for the two interpolation endpoints.
    fixed_z = torch.randn(1, Z_dim).cuda()
    fixed_zprime = torch.randn(1, Z_dim).cuda()

    v = imutil.VideoMaker(output_video_name)
    for i in range(400):
        # Triangle wave: theta goes 1 -> 0 -> 1 across the 400 frames.
        theta = abs(i - 200) / 200.
        z = theta * fixed_z + (1 - theta) * fixed_zprime
        # NOTE(review): z[0] drops the batch dimension before calling the
        # generator, unlike the sibling interpolation loop which passes z
        # directly -- confirm the generator accepts an unbatched latent.
        samples = generator(z[0]).cpu().data.numpy()
        samples = samples.transpose((0,2,3,1))
        v.write_frame(samples)
    v.finish()
Example #5
0
import imutil
from atari_dataloader import AtariDataloader

# Exercise VideoMaker with progressively larger Atari batch sizes,
# writing every clip into one 'pongtest' output video.
loader1 = AtariDataloader(name='Pong-v0', batch_size=1)
loader2 = AtariDataloader(name='Pong-v0', batch_size=4)
loader3 = AtariDataloader(name='Pong-v0', batch_size=9)
loader4 = AtariDataloader(name='Pong-v0', batch_size=16)
vid = imutil.VideoMaker('pongtest')

# Single-sample segment: the caption shows the lone label, not a slice.
for _ in range(400):
    x, y = next(loader1)
    vid.write_frame(x, caption=str(y[0]))

# Batched segments: (frame count, loader, labels shown in the caption).
segments = ((300, loader2, 2), (200, loader3, 4), (100, loader4, 8))
for frame_count, loader, n_labels in segments:
    for _ in range(frame_count):
        x, y = next(loader)
        vid.write_frame(x, caption=str(y[:n_labels]))

vid.finish()
Example #6
0
# Sanity check: can the network actually learn the task?
latent_size = 4
num_actions = 4
data = build_dataset(num_actions)

# Encoder/decoder pair plus an action-conditioned latent transition model.
encoder = Encoder(latent_size)
decoder = Decoder(latent_size)
transition = Transition(latent_size, num_actions)

# One Adam optimizer per module, all sharing the same learning rate.
learning_rate = 0.0001
opt_encoder = optim.Adam(encoder.parameters(), lr=learning_rate)
opt_decoder = optim.Adam(decoder.parameters(), lr=learning_rate)
opt_transition = optim.Adam(transition.parameters(), lr=learning_rate)

iters = 100 * 1000
ts = TimeSeries('Training', iters)

vid = imutil.VideoMaker('causal_model.mp4')
for i in range(iters):
    opt_encoder.zero_grad()
    opt_decoder.zero_grad()
    opt_transition.zero_grad()

    before, actions, target = get_batch(data)

    # Just try to autoencode
    z = encoder(before)
    z_prime = transition(z, actions)
    predicted = decoder(z_prime)

    pred_loss = F.binary_cross_entropy(predicted, target)
    #pred_loss = torch.mean((predicted - target) ** 2)
    ts.collect('Reconstruction loss', pred_loss)