def draw_pred_gif_old(self, full_getter, p=1.0, use_pf=False, sim_config=None, use_stepper=False, folder_plots=FOLDER_PLOTS, tag=0, normalize=False, nice_start=True):
    """Render a side-by-side comparison GIF of masked observations, ground
    truth, and the predictions of up to three models (autoencoder, stepper,
    particle filter) for a single episode.

    Args:
        full_getter: callable returning (ep_images, poses) for a batch of
            episodes; only the first episode is used.
        p: fraction of percepts to mask out (passed to self.mask_percepts).
        use_pf: if True, also run a ParticleFilter and include its belief.
        sim_config: simulator configuration forwarded to ParticleFilter
            (required when use_pf is True).
        use_stepper: if True, also include self.stepper's predictions.
        folder_plots: output directory for the GIF.
        tag: suffix used in the output filename.
        normalize: if True, rescale each prediction frame so its max is 1.
            NOTE(review): divides in place by np.max of the frame — an
            all-zero frame would divide by zero; confirm inputs are non-empty.
        nice_start: forwarded to ParticleFilter's initializer.

    Side effects: writes '{folder_plots}/predictions-{tag}.gif'.
    """
    ep_images, poses = full_getter()
    # Keep only the first episode, preserving the leading batch axis of 1.
    ep_images = ep_images[0, ...].reshape((1,) + ep_images.shape[1:])
    # removed_percepts[t] is True where the percept at step t was masked out.
    ep_images_masked, removed_percepts = self.mask_percepts(ep_images, p, return_indices=True)
    # net_preds = self.pred_ae.predict(ep_images_masked[:, 0:-SERIES_SHIFT, ...])
    net_preds = self.pred_ae.predict(ep_images_masked[:, :, ...])

    # stepper predictions: feed masked frames one timestep at a time through
    # the stateful stepper model (state is reset before the episode).
    stepper_pred = []
    if use_stepper:
        self.stepper.reset_states()
        for t in range(EP_LEN-SERIES_SHIFT):
            im = ep_images_masked[:, t, ...]
            stepper_pred.append(self.stepper.predict(im))

    # particle-filter predictions: update/resample only when the percept at
    # step t is available, then always predict one step ahead and render.
    pf_pred = []
    if use_pf:
        pf = ParticleFilter(sim_config, n_particles=4000, nice_start=nice_start)
        for t in range(EP_LEN-SERIES_SHIFT):
            if not removed_percepts[t]:
                # pose of the first (only tracked) body at step t
                pose = poses[0, t, 0, :]
                pf.update(pose)
                pf.resample()
            # add noise only if next percept is available
            # if t+1 < EP_LEN-SERIES_SHIFT and not removed_percepts[t+1]:
            #     pf.resample()
            #     pf.add_noise()
            pf.predict()
            pf_pred.append(pf.draw())

    # create header with labels — one label per displayed column, separated
    # by 1-pixel-wide black separator columns.
    col = np.zeros((HEADER_HEIGHT, 1))
    labels = []
    labels.append(create_im_label('Ob'))
    labels.append(create_im_label('GT'))
    labels.append(create_im_label('AE'))
    if use_stepper:
        labels.append(create_im_label('ST'))
    if use_pf:
        labels.append(create_im_label('PF'))
    header = [col]
    for label in labels:
        header.append(label)
        header.append(col)
    header = np.concatenate(header, axis=1)

    # combine predictions: for each timestep build one frame containing all
    # requested image columns (observation, truth, AE, stepper, PF) side by
    # side, with white 1-pixel separators, topped by the label header.
    col = np.ones((IM_HEIGHT, 1))
    frames = []
    for t in range(EP_LEN - SERIES_SHIFT):
        images = []
        # Observation and ground truth are offset by SERIES_SHIFT so they
        # line up with the models' one-step-ahead predictions.
        images.append(ep_images_masked[0, t+SERIES_SHIFT, :, :, 0])
        images.append(ep_images[0, t+SERIES_SHIFT, :, :, 0])
        if normalize:
            # In-place normalization mutates net_preds; fine here since the
            # array is consumed only for rendering.
            net_preds[0, t, :, :, 0] /= np.max(net_preds[0, t, :, :, 0])
        images.append(net_preds[0, t, :, :, 0])
        if use_stepper:
            if normalize:
                stepper_pred[t][0, :, :, 0] /= np.max(stepper_pred[t][0, :, :, 0])
            images.append(stepper_pred[t][0, :, :, 0])
        if use_pf:
            if normalize:
                pf_pred[t][:, :, 0] /= np.max(pf_pred[t][:, :, 0])
            images.append(pf_pred[t][:, :, 0])
        table = [col]
        for image in images:
            table.append(image)
            table.append(col)
        frame = np.concatenate(table, axis=1)
        # print(frame.shape)
        width = frame.shape[1]
        # White bottom border row under the image strip.
        row = np.ones((1, width))
        frame = np.concatenate([header, frame, row], axis=0)
        frames.append(frame)
    fpath = '{}/predictions-{}.gif'.format(folder_plots, tag)
    imageio.mimsave(fpath, frames)
def pf_multi_run_plot(net, sim_conf, fpath='ims/last_test.csv', cuda=True, runs=10, p_mask=1.0, n_particles=100, gif_no=0):
    """Compare particle-filter and predictive-autoencoder reconstruction loss
    over multiple simulated runs; save per-timestep losses to CSV, a loss
    plot, five GIFs, and an HTML page that embeds them.

    Args:
        net: trained network exposing bs_prop, decoder, and generator G.
        sim_conf: simulator configuration dict; must contain
            'measurement_noise' and be accepted by World/ParticleFilter.
        fpath: CSV output path for the per-timestep loss table.
        cuda: if True, move the network and tensors to GPU.
        runs: number of independent simulation runs to average over.
        p_mask: probability of masking each percept after the first 8 steps.
        n_particles: particle count for the ParticleFilter.
        gif_no: integer used in all output filenames under ims/.

    Side effects: writes fpath, ims/{gif_no}-plot.png, five GIFs, and
    ims/page-{gif_no}.html.
    """
    CONSISTENT_NOISE = False
    RUN_LENGTH = 160
    DURATION = 0.4   # seconds per GIF frame
    N_SIZE = 256     # dimensionality of the generator's noise vector
    # Accumulated per-timestep losses, averaged over `runs` at the end.
    pf_loss_ar = np.zeros(RUN_LENGTH)
    pae_loss_ar = np.zeros(RUN_LENGTH)
    for _ in range(runs):
        w = World(**sim_conf)
        pf = ParticleFilter(sim_conf, n_particles=n_particles)
        # Initialize the filter at the true body states.
        pos = [body.pos for body in w.bodies]
        vel = [body.vel for body in w.bodies]
        pf.warm_start(pos, vel=vel)
        ims_percept = []
        ims_pf_belief = []
        ims_pf_sample = []
        loss_pf = []
        loss_sample_mse = []
        # Start as all-True; entries are set False where a percept is given.
        masked_percepts = np.zeros(RUN_LENGTH) < 1
        for i in range(RUN_LENGTH):
            # Always observe the first 8 steps, then mask with prob. p_mask.
            if i < 8 or np.random.rand() > p_mask:
                measures = [
                    body.pos + sim_conf['measurement_noise'] * np.random.randn(2)
                    for body in w.bodies
                ]
                pf.update(measures)
                masked_percepts[i] = False
                pf.resample()
            else:
                masked_percepts[i] = True
            w.run()
            pf.predict()
            percept = w.draw()
            belief = pf.draw()[:, :, 0]
            # Draw a single random particle as a "sample" reconstruction.
            sample = pf.parts[np.random.randint(pf.n)].draw()
            loss_pf.append(np.mean((percept - belief)**2))
            loss_sample_mse.append(np.mean((percept - sample)**2))
            ims_percept.append(percept)
            ims_pf_belief.append(belief)
            ims_pf_sample.append(sample)
        # run predictions with the network: zero out masked frames and feed
        # the whole sequence as one batch of length RUN_LENGTH.
        x = np.array(ims_percept)
        x = x.reshape((1, RUN_LENGTH, 28, 28, 1))
        x[:, masked_percepts, ...] = 0
        # -> (time, batch, channels, height, width)
        x = x.transpose((1, 0, 4, 2, 3))
        x = Variable(torch.FloatTensor(x))
        if cuda:
            net = net.cuda()
            x = x.cuda()
        states = net.bs_prop(x)
        # create expected observations
        obs_expectation = net.decoder(states)
        obs_expectation = obs_expectation.view(x.size())
        obs_expectation = obs_expectation.data.cpu().numpy()
        obs_expectation = obs_expectation.reshape((RUN_LENGTH, 28, 28))
        # create observation samples (constant or varying noise accross time)
        if CONSISTENT_NOISE is True:
            noise = Variable(torch.FloatTensor(1, N_SIZE))
            noise.data.normal_(0, 1)
            noise = noise.expand(RUN_LENGTH, N_SIZE)
        else:
            noise = Variable(torch.FloatTensor(RUN_LENGTH, N_SIZE))
            noise.data.normal_(0, 1)
        if cuda:
            noise = noise.cuda()
        # states_non_ep = states.unfold(0, 1, (EP_LEN*BATCH_SIZE)//GAN_BATCH_SIZE).squeeze(-1)
        # NOTE(review): squeeze_ mutates `states` in place.
        pae_samples = net.G(noise, states.squeeze_(1))
        pae_samples = net.decoder(pae_samples)
        pae_samples = pae_samples.view(x.size())
        pae_samples = pae_samples.data.cpu().numpy()
        pae_samples = pae_samples.reshape((RUN_LENGTH, 28, 28))
        pae_ims = []
        pae_samples_ims = []
        loss_pae = []
        for i in range(RUN_LENGTH):
            pae_ims.append(obs_expectation[i, ...])
            pae_samples_ims.append(pae_samples[i, ...])
            loss_pae.append(
                np.mean((ims_percept[i] - obs_expectation[i, ...])**2))
        pf_loss_ar += np.array(loss_pf)
        pae_loss_ar += np.array(loss_pae)
    # Uninformative baseline: MSE of predicting the mean pixel intensity.
    # NOTE(review): computed from the last run's images only — presumably an
    # acceptable approximation since all runs share a simulator config.
    ims_ar = np.array(ims_percept)
    av_pixel_intensity = np.mean(ims_ar)
    baseline_level = np.mean((ims_ar - av_pixel_intensity)**2)
    baseline = np.ones(len(loss_pf)) * baseline_level
    # print("Uninformative baseline level at {}".format(baseline_level))
    pf_loss_ar /= runs
    pae_loss_ar /= runs
    baseline_ar = baseline
    df = pd.DataFrame({
        'pf loss': pf_loss_ar,
        'pae loss': pae_loss_ar,
        'baseline': baseline_ar
    })
    df.to_csv(fpath)
    plt.plot(pf_loss_ar)
    plt.plot(pae_loss_ar)
    plt.plot(baseline, 'g--')
    plt.title("Image reconstruction loss vs timestep")
    plt.ylabel("loss (MSE)")
    plt.xlabel("timestep")
    plt.legend(["PF", "PAE", "baseline"], loc=4)
    plt.savefig("ims/{}-plot.png".format(gif_no))
    # plt.show()
    plt.clf()
    imageio.mimsave("ims/{}-percept.gif".format(gif_no), ims_percept, duration=DURATION)
    imageio.mimsave("ims/{}-pf_belief.gif".format(gif_no), ims_pf_belief, duration=DURATION)
    imageio.mimsave("ims/{}-pf_sample.gif".format(gif_no), ims_pf_sample, duration=DURATION)
    imageio.mimsave("ims/{}-pae_belief.gif".format(gif_no), pae_ims, duration=DURATION)
    imageio.mimsave("ims/{}-pae_sample.gif".format(gif_no), pae_samples_ims, duration=DURATION)
    # HTML summary page embedding the plot and all five GIFs.
    page = """
<html>
<body>
Configuration: {1} <br>
<img src="{0}-plot.png" align="center">
<table>
<tr>
<th>Ground truth</th>
<th>Particle Filter</th>
<th>PF Sample</th>
<th>Predictive AE</th>
<th>PAE Sample</th>
</tr>
<tr>
<td><img src="{0}-percept.gif" width="140"></td>
<td><img src="{0}-pf_belief.gif" width="140"></td>
<td><img src="{0}-pf_sample.gif" width="140"></td>
<td><img src="{0}-pae_belief.gif" width="140"></td>
<td><img src="{0}-pae_sample.gif" width="140"></td>
</tr>
</table>
</body>
</html>
""".format(gif_no, sim_conf)
    with open("ims/page-{}.html".format(gif_no), 'w') as f:
        f.write(page)
def draw_pred_gif(self, full_getter, p=1.0, use_pf=False, sim_config=None, use_stepper=False, folder_plots=FOLDER_PLOTS, tag=0, normalize=False):
    """Render separate GIFs (percepts, ground truth, AE predictions, and
    optionally particle-filter beliefs) for one episode and return the
    per-timestep MSE losses.

    Args:
        full_getter: callable returning (ep_images, poses, eps_vels); only
            the first episode is used.
        p: fraction of percepts to mask out (passed to self.mask_percepts).
        use_pf: if True, also run a ParticleFilter, save its belief GIF, and
            compute losses.
        sim_config: simulator configuration forwarded to ParticleFilter
            (required when use_pf is True).
        use_stepper: unused — the stepper branch is commented out below.
        folder_plots: output directory for the GIFs.
        tag: suffix used in the output filenames.
        normalize: if True, rescale each prediction frame so its max is 1
            (in place, after losses are computed).

    Returns:
        dict with 'pae_losses' and 'pf_losses' lists.
        NOTE(review): both loss lists are populated inside the `use_pf`
        branch, so with use_pf=False both come back empty — confirm that
        pae_losses is not expected regardless of use_pf.

    Side effects: writes percepts/truths/pae_preds (and optionally pf_preds)
    GIFs under folder_plots.
    """
    ep_images, poses, eps_vels = full_getter()
    # Keep only the first episode, preserving the leading batch axis of 1.
    ep_images = ep_images[0, ...].reshape((1,) + ep_images.shape[1:])
    # removed_percepts[t] is True where the percept at step t was masked out.
    ep_images_masked, removed_percepts = self.mask_percepts(ep_images, p, return_indices=True)
    # net_preds = self.pred_ae.predict(ep_images_masked[:, 0:-SERIES_SHIFT, ...])
    net_preds = self.pred_ae.predict(ep_images_masked[:, :, ...])

    # stepper predictions
    # stepper_pred = []
    # if use_stepper:
    #     self.stepper.reset_states()
    #     for t in range(EP_LEN-SERIES_SHIFT):
    #         im = ep_images_masked[:, t, ...]
    #         stepper_pred.append(self.stepper.predict(im))

    # Particle-filter pass: warm-start from the true initial poses and
    # velocities, update only on unmasked steps, render belief, then predict.
    pf_pred = []
    if use_pf:
        pf = ParticleFilter(sim_config, n_particles=3000)
        init_poses = poses[0][0]
        init_vels = eps_vels[0][0]
        pf.warm_start(init_poses, init_vels)
        for t in range(EP_LEN-SERIES_SHIFT):
            if not removed_percepts[t]:
                measurements = poses[0][t]
                # print(measurements)
                pf.update(measurements)
                pf.resample()
            pf_pred.append(pf.draw())
            pf.predict()

    # combine predictions: collect per-timestep frames for each output GIF
    # and accumulate MSE losses (computed before any normalization).
    percepts = []
    truths = []
    pae_preds = []
    pf_preds = []
    pae_losses = []
    pf_losses = []
    for t in range(EP_LEN - SERIES_SHIFT):
        # Observation/truth are offset by SERIES_SHIFT to line up with the
        # one-step-ahead predictions at index t.
        percepts.append(ep_images_masked[0, t+SERIES_SHIFT, :, :, 0])
        truths.append(ep_images[0, t+SERIES_SHIFT, :, :, 0])
        pae_preds.append(net_preds[0, t, :, :, 0])
        if use_pf:
            pf_preds.append(pf_pred[t][:, :, 0])
            pae_losses.append(np.mean((truths[-1] - pae_preds[-1])**2))
            pf_losses.append(np.mean((truths[-1] - pf_preds[-1])**2))
        if normalize:
            # In-place rescaling for display; happens after loss computation.
            pae_preds[-1] /= np.max(pae_preds[-1])
            if use_pf:
                pf_preds[-1] /= np.max(pf_preds[-1])
        # if use_stepper:
        #     if normalize:
        #         stepper_pred[t][0, :, :, 0] /= np.max(stepper_pred[t][0, :, :, 0])
        #     images.append(stepper_pred[t][0, :, :, 0])
    imageio.mimsave('{}/percepts-{}.gif'.format(folder_plots, tag), percepts)
    imageio.mimsave('{}/truths-{}.gif'.format(folder_plots, tag), truths)
    imageio.mimsave('{}/pae_preds-{}.gif'.format(folder_plots, tag), pae_preds)
    if use_pf:
        imageio.mimsave('{}/pf_preds-{}.gif'.format(folder_plots, tag), pf_preds)
    return {'pae_losses': pae_losses, 'pf_losses': pf_losses}
class Run:
    """Interactive iRobot Create controller: drives the robot via virtual
    buttons while maintaining odometry and a particle-filter pose estimate.
    """

    def __init__(self, factory):
        """Constructor.

        Args:
            factory (factory.FactoryCreate)
        """
        self.create = factory.create_create()
        self.time = factory.create_time_helper()
        self.servo = factory.create_servo()
        self.sonar = factory.create_sonar()
        # Add the IP-address of your computer here if you run on the robot
        self.virtual_create = factory.create_virtual_create()
        self.odometry = odometry.Odometry()
        self.particle_filter = ParticleFilter()

    def sense(self):
        """Take one sonar reading, feed it to the particle filter, and
        return the measured distance."""
        dist = self.sonar.get_distance()
        self.particle_filter.sense(dist)
        return dist

    def turn(self, deltaTheta):
        """Turn in place by deltaTheta radians (sign selects direction),
        then inform the particle filter of the actual rotation achieved.

        The loop compares the odometry heading against a goal window of
        [deltaTheta, deltaTheta + pi/12] past the start heading, unwrapping
        theta by multiples of 2*pi so the comparison is monotone in the
        turn direction.
        """
        start = self.odometry.theta
        s = np.sign(deltaTheta)
        # Opposite wheel speeds -> rotate in place.
        self.create.drive_direct(s * 100, s * -100)
        goalStart = deltaTheta + self.odometry.theta
        goalEnd = deltaTheta + self.odometry.theta + s * np.pi / 12
        while True:
            theta = self.odometry.theta
            # Unwrap so theta is at/past the start heading in turn direction.
            while s * theta < s * start:
                theta += s * 2 * np.pi
            if s * goalStart <= s * theta <= s * goalEnd:
                break
            self.update_odom()
        self.create.drive_direct(0, 0)
        # Report the actually-achieved rotation, not the requested one.
        self.particle_filter.turn(self.odometry.theta - start)

    def update_odom(self):
        """Poll the robot state once, fold encoder counts into odometry,
        and sleep briefly to avoid busy-waiting."""
        state = self.create.update()
        if state is not None:
            self.odometry.update(state.leftEncoderCounts, state.rightEncoderCounts)
        self.time.sleep(.01)

    def forward(self, distance):
        """Drive straight until odometry reports |distance| meters traveled
        (sign selects direction), then inform the particle filter of the
        actual distance achieved."""
        start_x = self.odometry.x
        start_y = self.odometry.y
        # Euclidean distance from the starting odometry position.
        dist_now = lambda: np.sqrt((start_x - self.odometry.x)**2 + (start_y - self.odometry.y)**2)
        s = np.sign(distance)
        self.create.drive_direct(s * 100, s * 100)
        while dist_now() < np.abs(distance):
            self.update_odom()
        self.create.drive_direct(0, 0)
        self.particle_filter.forward(dist_now())

    def run(self):
        """Main loop: start the robot, stream encoder counts, and dispatch
        virtual-create button presses (never returns)."""
        self.create.start()
        self.create.safe()
        self.create.start_stream([
            create2.Sensor.LeftEncoderCounts,
            create2.Sensor.RightEncoderCounts,
        ])
        self.update_odom()
        self.particle_filter.draw(self.virtual_create)
        while True:
            b = self.virtual_create.get_last_button()
            if b == self.virtual_create.Button.MoveForward:
                print("Forward pressed!")
                self.forward(0.25)
            elif b == self.virtual_create.Button.TurnLeft:
                print("Turn Left pressed!")
                self.turn(np.pi / 6.)
            elif b == self.virtual_create.Button.TurnRight:
                print("Turn Right pressed!")
                self.turn(-np.pi / 6.)
            elif b == self.virtual_create.Button.Sense:
                self.sense()
                print("Sense pressed!")
            # Redraw the particle cloud after any handled button press.
            if b is not None:
                self.particle_filter.draw(self.virtual_create)
            self.time.sleep(0.01)