def main():
    """Interactive command loop for trajectory generation and execution.

    Loads the model, optionally connects to the Baxter robot, starts the
    camera worker process, then repeatedly asks the user for a task id,
    computes a trajectory for it, previews it, and (if confirmed and a
    robot is attached) executes it.

    Always terminates the process via sys.exit(); never returns normally.
    """
    alg = CMP(cfg)
    print('1. Model [{}]'.format('READY'))
    try:
        if args.robot:
            baxter = RemoteBaxter(args.ip, args.port)
            res = baxter.connect()
            if not res:
                print('2. Robot [{}]'.format('FAILED'))
                raise RuntimeError('robot connection failed')
            print('2. Robot [{}]'.format('READY'))
            print('Back to init pose..')
            baxter.gotoPose(init_position, init_orintation)
        else:
            print('2. Bypass Robot [{}]'.format('READY'))
        # cam_p = mp.Process(target=camera_process_opencv, args=(args.camera,))
        cam_p = mp.Process(target=camera_process_imglist, args=(imgList, ))
        cam_p.start()
        img = cam_queue.get()
        # BUG FIX: the queue yields either an image array or the sentinel
        # False.  `if not img:` raises ValueError on a numpy array, so test
        # against the sentinel explicitly (same idiom as inside the loop).
        if img is False:
            print('3. Camera [{}]'.format('FAILED'))
            raise RuntimeError('camera start-up failed')
        else:
            print('3. Camera [{}]'.format('READY'))
        print('4. Commander [{}]'.format('READY'))
        print('=======Command Mode=======')
        i = -1
        while True:
            i += 1
            print('------ Round {} ------'.format(i))
            try:
                cmd = int(input('Cmd:'))
            except ValueError:
                # Non-numeric input: re-prompt instead of aborting the session.
                print('Please input an valid task number!')
                print('------ Round end ------\n')
                continue
            if cmd == -1:
                print('User specified exit..')
                print('------ Round end ------\n')
                break
            elif 0 <= cmd < cfg.number_of_tasks:
                print('Start to compute traj for task [{}:{}]'.format(
                    cmd, tasks[cmd]))
                cam_retrieve.value = 1
                img = cam_queue.get()
                if img is False:
                    print('Camera shut down, stop')
                    print('------ Round end ------\n')
                    break
                # TODO: pre-process image
                img = cv2.resize(img, cfg.image_size) / 255.
                img = img[..., [2, 1, 0]]  # BGR -> RGB channel reorder
                position_traj = RBF.generate(
                    alg.eval(img, cmd)[0], cfg.number_time_samples)
                # Append a constant z coordinate taken from the init pose.
                position_traj = np.hstack([
                    position_traj,
                    init_position[-1] * np.ones((len(position_traj), 1))
                ])
                # Keep the orientation fixed along the whole trajectory.
                orintation_traj = init_orintation[np.newaxis, ...].repeat(
                    len(position_traj), 0)
                print('Finish traj computation for task [{}]'.format(cmd))
                # plot the traj and img
                display(cfg, [position_traj], [img], [0])
                choice = input('Confirmed?(y/n):')
                if choice != 'y':
                    print('Traj canceled')
                    print('------ Round end ------\n')
                    continue
                print('Traj confirmed')
                # confirmed the result
                if args.robot:
                    start_t = time.time()
                    res = baxter.followTraj(position_traj, orintation_traj,
                                            continuous=True)
                    if res:
                        print('Task [{}] exec success!'.format(cmd))
                    else:
                        print('Task [{}] exec failed!'.format(cmd))
                    print('Time cost:{:.4f}s'.format(time.time() - start_t))
                    print('Back to init pose..')
                    baxter.gotoPose(init_position, init_orintation)
                else:
                    print('Bypass exec..')
                print('------ Round end ------\n')
            else:
                print('Please input an valid task number!')
                print('------ Round end ------\n')
        if args.robot:
            print('5. Disconnecting with robot..')
            baxter.close()
        else:
            print('5. Bypass disconnecting with robot..')
    except Exception as err:
        # Was a bare `except:` that hid the cause; keep the catch-all
        # clean-up behavior but report what actually failed.
        print('Something going wrong, exit. ({!r})'.format(err))
        cam_exit.value = 1
        sys.exit(1)
    print('6. Finish and clean-up [{}]'.format('DONE'))
    cam_exit.value = 1
    sys.exit(0)
def test(self):
    """Run one test batch through the model and render the trajectories.

    Draws a single batch from `generator_test`, samples latent codes from
    the prior, decodes them into movement-primitive weights, converts both
    the predicted and the ground-truth weights into trajectories (DMP or
    RBF depending on cfg.use_DMP), and renders both sets with `display`.

    Returns:
        (img, img_gt, feature, c) when cfg.img_as_task is set, otherwise
        (img, img_gt, feature) — rendered prediction image, rendered
        ground-truth image, condition-net feature map (numpy array), and
        (in the first case) the task-condition variable.
    """

    def batchToVariable(traj_batch):
        # Assemble fixed-size input tensors for one test batch.
        batch_im = torch.zeros(self.cfg.batch_size_test,
                               self.cfg.image_channels,
                               self.cfg.image_size[0],
                               self.cfg.image_size[1])
        # Latent codes drawn from a standard normal prior.
        batch_z = torch.normal(
            torch.zeros(self.cfg.batch_size_test,
                        self.cfg.number_of_hidden),
            torch.ones(self.cfg.batch_size_test,
                       self.cfg.number_of_hidden))
        batch_w = torch.zeros(self.cfg.batch_size_test,
                              self.cfg.number_of_MP_kernels,
                              self.cfg.trajectory_dimension)
        batch_target = torch.zeros(self.cfg.batch_size_test, 2)
        if self.cfg.img_as_task:
            # Task condition is itself an image crop of the object.
            batch_c = torch.zeros(self.cfg.batch_size_test,
                                  self.cfg.image_channels,
                                  self.cfg.object_size[0],
                                  self.cfg.object_size[1])
        else:
            # Task condition is a one-hot vector over the task set.
            batch_c = torch.zeros(self.cfg.batch_size_test,
                                  self.cfg.number_of_tasks)
        for i, b in enumerate(traj_batch):
            batch_w[i] = torch.from_numpy(b[0])
            batch_target[i] = torch.from_numpy(b[-1])
            if self.cfg.img_as_task:
                # HWC -> CHW for both the condition crop and the scene image.
                batch_c[i] = torch.from_numpy(b[2].transpose(2, 0, 1))
                batch_im[i] = torch.from_numpy(b[3].transpose(2, 0, 1))
            else:
                batch_c[i, b[1]] = 1.  # one-hot task id
                batch_im[i] = torch.from_numpy(b[2].transpose(2, 0, 1))
        # `volatile=True` is the legacy (pre-0.4 PyTorch) way to disable
        # autograd for inference; kept for compatibility with the rest of
        # the file's Variable-based API usage.
        if self.use_gpu:
            return torch.autograd.Variable(batch_z.cuda(), volatile=True),\
                torch.autograd.Variable(batch_c.cuda(), volatile=True),\
                torch.autograd.Variable(batch_im.cuda(), volatile=True),\
                batch_target,\
                batch_w
        else:
            return torch.autograd.Variable(batch_z, volatile=True),\
                torch.autograd.Variable(batch_c, volatile=True),\
                torch.autograd.Variable(batch_im, volatile=True),\
                batch_target,\
                batch_w

    # Grab a single batch from the test generator.
    for batch in generator_test:
        break
    _, c, im, target, wgt = batchToVariable(batch)
    im_c = self.condition_net(im, c)
    # Sample latents from the prior (no reparameterization at test time).
    z = self.encoder.sample(None, im_c, reparameterization=False, prior=True)
    if self.cfg.use_DMP:
        # Every DMP starts from the same initial point at the bottom of
        # the image's y-range.
        p0 = np.tile(
            np.asarray((0., self.cfg.image_y_range[0]), dtype=np.float32),
            (self.cfg.batch_size_test, 1))
        w = self.decoder.sample(z, im_c).cpu().data.numpy()
        tauo = tuple(
            dmp.generate(w, target.cpu().numpy(),
                         self.cfg.number_time_samples, p0=p0, init=True))
        tau = tuple(
            dmp.generate(wgt.cpu().numpy(), target.cpu().numpy(),
                         self.cfg.number_time_samples, p0=p0, init=True))
    else:
        tauo = tuple(
            RBF.generate(wo, self.cfg.number_time_samples)
            for wo in self.decoder.sample(z, im_c).cpu().data.numpy())
        # BUG FIX: the ground-truth trajectories were generated with
        # cfg.number_of_MP_kernels as the sample count, which disagrees
        # with tauo above and with the DMP branch; use the same
        # number_time_samples so both renderings are comparable.
        tau = tuple(
            RBF.generate(wo, self.cfg.number_time_samples) for wo in wgt)
    if self.cfg.img_as_task:
        _, cls, _, imo, _ = tuple(zip(*batch))
    else:
        _, cls, imo, _ = tuple(zip(*batch))
    # NOTE(review): `env` is never used below; kept in case the
    # constructor has side effects — confirm and drop if not.
    env = self.cfg.env(self.cfg)
    img = display(self.cfg, tauo, imo, cls, interactive=True)
    img_gt = display(self.cfg, tau, imo, cls, interactive=True)
    feature = self.condition_net.feature_map(im).data.cpu().numpy()
    if self.cfg.img_as_task:
        return img, img_gt, feature, c
    else:
        return img, img_gt, feature