def grasp(rgb_path='rgb.png', depth_path='depth.png'):
    """Run grasp prediction on an RGB-D image pair and save or plot the result.

    Args:
        rgb_path: Path to the RGB image. Defaults to ``'rgb.png'`` (the
            previously hard-coded value, so existing callers are unaffected).
        depth_path: Path to the depth image. Defaults to ``'depth.png'``.

    Reads the module-level ``args`` namespace for the network checkpoint path
    (``args.network``), the output mode (``args.save``) and the number of
    grasps to draw (``args.n_grasps``). Writes either the ``save_results``
    outputs or ``img_result.png``.
    """
    # Load image
    logging.info('Loading image...')
    pic = Image.open(rgb_path, 'r')
    rgb = np.array(pic)
    pic = Image.open(depth_path, 'r')
    # Depth arrives as (H, W); add a trailing channel axis so CameraData
    # receives the (H, W, 1) layout it expects.
    depth = np.expand_dims(np.array(pic), axis=2)

    # Load Network
    logging.info('Loading model...')
    # NOTE(review): torch.load unpickles arbitrary code — only load trusted
    # checkpoint files.
    net = torch.load(args.network, map_location=torch.device('cpu'))
    logging.info('Done')

    # Get the compute device
    device = get_device(False)

    img_data = CameraData(include_depth=1, include_rgb=1)
    x, depth_img, rgb_img = img_data.get_data(rgb=rgb, depth=depth)

    with torch.no_grad():
        xc = x.to(device)
        pred = net.predict(xc)

        q_img, ang_img, width_img = post_process_output(
            pred['pos'], pred['cos'], pred['sin'], pred['width'])

        if args.save:
            save_results(
                rgb_img=img_data.get_rgb(rgb, False),
                depth_img=np.squeeze(img_data.get_depth(depth)),
                grasp_q_img=q_img,
                grasp_angle_img=ang_img,
                no_grasps=args.n_grasps,
                grasp_width_img=width_img)
        else:
            fig = plt.figure(figsize=(12, 3))
            gs = plot_results(
                fig=fig,
                rgb_img=img_data.get_rgb(rgb, False),
                grasp_q_img=q_img,
                grasp_angle_img=ang_img,
                no_grasps=args.n_grasps,
                grasp_width_img=width_img)
            fig.savefig('img_result.png')
def predict_grasp_angle(network, rgb_path, depth_path):
    """Predict grasp poses for an RGB-D image pair with a pre-trained network.

    Args:
        network: Path to the saved (pickled) grasp-prediction network.
        rgb_path: Path to the RGB image file.
        depth_path: Path to the depth image file.

    Returns:
        The grasps detected by ``plot_results`` — objects exposing
        ``center``, ``angle``, ``length`` and ``width`` — after printing
        those fields and saving the plot to ``img_result.pdf``.
    """
    # Fixed configuration (formerly command-line arguments).
    use_depth = 1
    use_rgb = 1
    n_grasps = 1
    save = 0
    force_cpu = False

    # Load image
    logging.info('Loading image...')
    pic = Image.open(rgb_path, 'r')
    rgb = np.array(pic)
    pic = Image.open(depth_path, 'r')
    # Depth arrives as (H, W); add a channel axis for CameraData.
    depth = np.expand_dims(np.array(pic), axis=2)

    # Load Network
    logging.info('Loading model...')
    # NOTE(review): torch.load unpickles arbitrary code — only load trusted
    # checkpoint files.
    net = torch.load(network, map_location=torch.device('cpu'))
    logging.info('Done')

    # Get the compute device
    device = get_device(force_cpu)

    img_data = CameraData(include_depth=use_depth, include_rgb=use_rgb)
    x, depth_img, rgb_img = img_data.get_data(rgb=rgb, depth=depth)

    with torch.no_grad():
        xc = x.to(device)
        pred = net.predict(xc)

        q_img, ang_img, width_img = post_process_output(
            pred['pos'], pred['cos'], pred['sin'], pred['width'])

        if save:
            save_results(
                rgb_img=img_data.get_rgb(rgb, False),
                depth_img=np.squeeze(img_data.get_depth(depth)),
                grasp_q_img=q_img,
                grasp_angle_img=ang_img,
                no_grasps=n_grasps,
                grasp_width_img=width_img
            )
        else:
            fig = plt.figure(figsize=(10, 10))
            gs = plot_results(
                fig=fig,
                rgb_img=img_data.get_rgb(rgb, False),
                grasp_q_img=q_img,
                grasp_angle_img=ang_img,
                no_grasps=n_grasps,
                grasp_width_img=width_img)
            fig.savefig('img_result.pdf')

    # Report each detected grasp on stdout for the caller's convenience.
    for g in gs:
        print(g.center)
        print(g.angle)
        print(g.length)
        print(g.width)
    return gs
if args.jacquard_output and args.dataset != 'jacquard': raise ValueError( '--jacquard-output can only be used with the --dataset jacquard option.' ) if args.jacquard_output and args.augment: raise ValueError( '--jacquard-output can not be used with data augmentation.') return args if __name__ == '__main__': args = parse_args() # Get the compute device device = get_device(args.force_cpu) # Load Dataset logging.info('Loading {} Dataset...'.format(args.dataset.title())) Dataset = get_dataset(args.dataset) test_dataset = Dataset(args.dataset_path, ds_rotate=args.ds_rotate, random_rotate=args.augment, random_zoom=args.augment, include_depth=args.use_depth, include_rgb=args.use_rgb) indices = list(range(test_dataset.length)) split = int(np.floor(args.split * test_dataset.length)) if args.ds_shuffle: np.random.seed(args.random_seed)
def load_model(self):
    """Load the saved grasp model and select the compute device.

    Sets ``self.device`` (via ``get_device``) and ``self.model`` (loaded
    from ``self.saved_model_path``).

    The device is resolved *before* loading so the checkpoint can be
    mapped onto it — without ``map_location`` a GPU-saved checkpoint
    fails to load on a CPU-only machine, and the sibling loaders in this
    project already pass ``map_location``.
    """
    print('Loading model... ')
    # Get the compute device first so the checkpoint is mapped onto it.
    self.device = get_device(force_cpu=False)
    # NOTE(review): torch.load unpickles arbitrary code — only load
    # trusted checkpoint files.
    self.model = torch.load(self.saved_model_path, map_location=self.device)
def run():
    """Train a grasp network end to end.

    Parses command-line arguments, prepares the output/log directory,
    loads and splits the dataset, builds the network and optimizer, then
    trains and validates per epoch, logging to tensorboard and saving
    checkpoints on improvement, at epoch 0, and every 10 epochs.
    """
    args = parse_args()

    # Set-up output directories
    dt = datetime.datetime.now().strftime('%y%m%d_%H%M')
    net_desc = '{}_{}'.format(dt, '_'.join(args.description.split()))

    save_folder = os.path.join(args.logdir, net_desc)
    if not os.path.exists(save_folder):
        os.makedirs(save_folder)
    tb = tensorboardX.SummaryWriter(save_folder)

    # Save commandline args alongside the run for reproducibility.
    if args is not None:
        params_path = os.path.join(save_folder, 'commandline_args.json')
        with open(params_path, 'w') as f:
            json.dump(vars(args), f)

    # Initialize logging: file handler via basicConfig, plus a console handler.
    logging.root.handlers = []
    logging.basicConfig(
        level=logging.INFO,
        filename="{0}/{1}.log".format(save_folder, 'log'),
        format='[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s',
        datefmt='%H:%M:%S')
    # set up logging to console
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    # set a format which is simpler for console use
    formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
    console.setFormatter(formatter)
    # add the handler to the root logger
    logging.getLogger('').addHandler(console)

    # Get the compute device
    device = get_device(args.force_cpu)

    # Load Dataset
    logging.info('Loading {} Dataset...'.format(args.dataset.title()))
    Dataset = get_dataset(args.dataset)
    dataset = Dataset(args.dataset_path,
                      ds_rotate=args.ds_rotate,
                      random_rotate=True,
                      random_zoom=True,
                      include_depth=args.use_depth,
                      include_rgb=args.use_rgb)
    logging.info('Dataset size is {}'.format(dataset.length))

    # Creating data indices for training and validation splits
    indices = list(range(dataset.length))
    split = int(np.floor(args.split * dataset.length))
    if args.ds_shuffle:
        np.random.seed(args.random_seed)
        np.random.shuffle(indices)
    train_indices, val_indices = indices[:split], indices[split:]
    logging.info('Training size: {}'.format(len(train_indices)))
    logging.info('Validation size: {}'.format(len(val_indices)))

    # Creating data samplers and loaders
    train_sampler = torch.utils.data.sampler.SubsetRandomSampler(train_indices)
    val_sampler = torch.utils.data.sampler.SubsetRandomSampler(val_indices)
    train_data = torch.utils.data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        sampler=train_sampler)
    val_data = torch.utils.data.DataLoader(
        dataset,
        batch_size=1,
        num_workers=args.num_workers,
        sampler=val_sampler)
    logging.info('Done')

    # Load the network
    logging.info('Loading Network...')
    input_channels = 1 * args.use_depth + 3 * args.use_rgb
    network = get_network(args.network)
    net = network(input_channels=input_channels,
                  dropout=args.use_dropout,
                  prob=args.dropout_prob,
                  channel_size=args.channel_size)
    net = net.to(device)
    logging.info('Done')

    if args.optim.lower() == 'adam':
        optimizer = optim.Adam(net.parameters())
    elif args.optim.lower() == 'sgd':
        optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
    else:
        raise NotImplementedError('Optimizer {} is not implemented'.format(
            args.optim))

    # Print model architecture.
    summary(net, (input_channels, 224, 224))
    # Also dump the architecture to a file. The original left the file handle
    # open and, if summary() raised, left stdout redirected forever; the
    # with/try-finally guarantees both are restored.
    with open(os.path.join(save_folder, 'arch.txt'), 'w') as f:
        sys.stdout = f
        try:
            summary(net, (input_channels, 224, 224))
        finally:
            sys.stdout = sys.__stdout__

    best_iou = 0.0
    for epoch in range(args.epochs):
        logging.info('Beginning Epoch {:02d}'.format(epoch))
        train_results = train(epoch, net, device, train_data, optimizer,
                              args.batches_per_epoch, vis=args.vis)

        # Log training losses to tensorboard
        tb.add_scalar('loss/train_loss', train_results['loss'], epoch)
        for n, l in train_results['losses'].items():
            tb.add_scalar('train_loss/' + n, l, epoch)

        # Run Validation
        logging.info('Validating...')
        test_results = validate(net, device, val_data)
        iou = test_results['correct'] / (test_results['correct'] +
                                         test_results['failed'])
        logging.info('%d/%d = %f' % (test_results['correct'],
                                     test_results['correct'] +
                                     test_results['failed'],
                                     iou))

        # Log validation results to tensorboard
        tb.add_scalar('loss/IOU', iou, epoch)
        tb.add_scalar('loss/val_loss', test_results['loss'], epoch)
        for n, l in test_results['losses'].items():
            tb.add_scalar('val_loss/' + n, l, epoch)

        # Save best-performing network, plus epoch-0 and periodic checkpoints.
        if iou > best_iou or epoch == 0 or (epoch % 10) == 0:
            torch.save(
                net,
                os.path.join(save_folder,
                             'epoch_%02d_iou_%0.2f' % (epoch, iou)))
            # BUG FIX: the original did `best_iou = iou` unconditionally
            # here, so a periodic (epoch % 10) save could *lower* the
            # recorded best IoU. Only advance it on genuine improvement.
            best_iou = max(best_iou, iou)