def infer_at_pose(self, images: List[OrthographicImage], pose: Affine):
    input_images = []
    for image in images:
        draw_around_box(image, box=self.box)

        # Crop the area of interest around the given pose and normalize it to [0, 1]
        area_mat = get_area_of_interest_new(
            image,
            pose,
            size_cropped=(200, 200),
            size_result=(32, 32),
            border_color=image.value_from_depth(get_distance_to_box(image, self.box)),
        ).mat
        area_mat = np.array(area_mat, dtype=np.float32) / np.iinfo(area_mat.dtype).max  # 2 * x - 1.0
        area_mat_exp = np.expand_dims(area_mat, axis=-1)
        input_images.append(area_mat_exp)

    if self.monte_carlo:
        # Repeat each input along the batch axis and average the stochastic forward passes
        input_images_mc = [np.array([image for i in range(self.monte_carlo)]) for image in input_images]

        estimated_rewards_sampling = self.model.predict(input_images_mc)
        estimated_reward = np.mean(estimated_rewards_sampling, axis=0)
        estimated_reward_std = self.mutual_information(estimated_rewards_sampling)
        return estimated_reward, estimated_reward_std

    return self.model.predict([input_images])[0]

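# Minimal, self-contained sketch of the Monte-Carlo sampling pattern above, using only
# NumPy. Everything here is an illustrative assumption, not project code: the random
# crop stands in for the area of interest, the random rewards stand in for the
# stochastic model.predict (e.g. with Monte-Carlo dropout), and np.std is used as a
# simple spread measure in place of self.mutual_information.
import numpy as np

monte_carlo = 8
area_mat = (np.random.rand(32, 32) * 255).astype(np.uint8)  # stand-in for a depth crop

# Normalize to [0, 1] exactly as in infer_at_pose
area_mat_norm = np.array(area_mat, dtype=np.float32) / np.iinfo(area_mat.dtype).max
area_mat_exp = np.expand_dims(area_mat_norm, axis=-1)  # shape (32, 32, 1)

# Tile along the batch axis; with a stochastic model, each forward pass differs
input_images_mc = np.array([area_mat_exp for _ in range(monte_carlo)])  # (8, 32, 32, 1)

# Stand-in for model.predict: one scalar reward per Monte-Carlo sample
estimated_rewards_sampling = np.random.rand(monte_carlo, 1)

estimated_reward = np.mean(estimated_rewards_sampling, axis=0)
estimated_reward_std = np.std(estimated_rewards_sampling, axis=0)
print(estimated_reward, estimated_reward_std)
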
def get_images(self, orig_image: OrthographicImage):
    image = clone(orig_image)
    draw_around_box(image, box=self.box)
    background_color = image.value_from_depth(get_distance_to_box(image, self.box))

    mat_image_resized = cv2.resize(image.mat, self.size_resized)

    mat_images = []
    for a in self.a_space:
        rot_mat = cv2.getRotationMatrix2D(
            (self.size_resized[0] / 2, self.size_resized[1] / 2),
            a * 180.0 / np.pi,
            1.0,
        )
        rot_mat[:, 2] += [
            (self.size_rotated[0] - self.size_resized[0]) / 2,
            (self.size_rotated[1] - self.size_resized[1]) / 2,
        ]
        dst_depth = cv2.warpAffine(mat_image_resized, rot_mat, self.size_rotated, borderValue=background_color)
        mat_images.append(crop(dst_depth, self.size_cropped))

    mat_images = np.array(mat_images) / np.iinfo(image.mat.dtype).max
    if len(mat_images.shape) == 3:
        mat_images = np.expand_dims(mat_images, axis=-1)

    # mat_images = 2 * mat_images - 1.0
    return mat_images

def get_images(self, orig_image: OrthographicImage, draw_box=True, scale_around_zero=False):
    image = clone(orig_image)
    if draw_box:
        draw_around_box(image, box=self.box)

    mat_images = []
    for a in self.a_space:
        rot_mat = cv2.getRotationMatrix2D(
            (self.size_input[0] / 2, self.size_input[1] / 2),
            a * 180.0 / np.pi,
            scale=self.size_output[0] / self.size_original_cropped[0],
        )
        rot_mat[:, 2] += [
            (self.size_cropped[0] - self.size_input[0]) / 2,
            (self.size_cropped[1] - self.size_input[1]) / 2,
        ]
        dst_depth = cv2.warpAffine(
            image.mat,
            rot_mat,
            self.size_cropped,
            borderMode=cv2.BORDER_REPLICATE,
            flags=cv2.INTER_AREA,
        )
        mat_images.append(dst_depth)

    mat_images = np.array(mat_images) / np.iinfo(orig_image.mat.dtype).max
    if len(mat_images.shape) == 3:
        mat_images = np.expand_dims(mat_images, axis=-1)

    if scale_around_zero:
        return 2 * mat_images - 1.0
    return mat_images

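# Self-contained sketch (synthetic data, no project classes) of the rotation/cropping
# trick shared by both get_images variants: build a rotation about the input center
# with cv2.getRotationMatrix2D, then shift the translation column of the 2x3 matrix so
# the rotated content lands centered in an output canvas of a different size.
# The sizes and the angle below are arbitrary example values.
import cv2
import numpy as np

size_input = (752, 480)    # (width, height) of the source image
size_cropped = (200, 200)  # output canvas

mat = (np.random.rand(size_input[1], size_input[0]) * 65535).astype(np.uint16)

a = np.pi / 6  # example rotation angle [rad]
rot_mat = cv2.getRotationMatrix2D((size_input[0] / 2, size_input[1] / 2), a * 180.0 / np.pi, 1.0)
rot_mat[:, 2] += [(size_cropped[0] - size_input[0]) / 2, (size_cropped[1] - size_input[1]) / 2]

rotated_crop = cv2.warpAffine(mat, rot_mat, size_cropped, borderMode=cv2.BORDER_REPLICATE)
print(rotated_crop.shape)  # (200, 200), centered on the original image center
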
def load_image(self, collection, episode_id, action_id, suffix):
    image = Loader.get_image(collection, episode_id, action_id, suffix, as_float=True)
    draw_around_box(image, box=Config.box)

    # Downscale to save memory and scale pixel_size by the same factor
    image.mat = cv2.resize(image.mat, (
        self.size_input[0] // self.size_memory_scale,
        self.size_input[1] // self.size_memory_scale,
    ))
    image.pixel_size /= self.size_memory_scale
    return image

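# Small illustrative sketch of the resolution bookkeeping above, assuming pixel_size is
# a pixel-per-meter resolution (as the division by the resize factor suggests). The
# DepthImage dataclass is a hypothetical stand-in for OrthographicImage; the concrete
# numbers are examples only.
from dataclasses import dataclass

import cv2
import numpy as np


@dataclass
class DepthImage:
    mat: np.ndarray
    pixel_size: float  # assumed [px / m]


scale = 4
image = DepthImage(mat=np.zeros((480, 752), dtype=np.uint16), pixel_size=2000.0)

image.mat = cv2.resize(image.mat, (752 // scale, 480 // scale))
image.pixel_size /= scale

# An object 0.1 m wide now spans pixel_size * 0.1 = 50 px instead of 200 px
print(image.mat.shape, image.pixel_size)
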
def api_image(collection_name: str, episode_id: str, action_id: str, suffix: str):
    def send_image(image):
        _, image_encoded = cv2.imencode('.jpg', image)
        return flask.send_file(io.BytesIO(image_encoded), mimetype='image/jpeg')

    def send_empty_image():
        empty = np.zeros((480, 752, 1))
        cv2.putText(empty, '?', (310, 300), cv2.FONT_HERSHEY_SIMPLEX, 6, 100, thickness=6)
        return send_image(empty)

    if flask.request.values.get('pose'):
        action = Action(data=json.loads(flask.request.values.get('pose')))
        image = Loader.get_image(collection_name, episode_id, int(action_id), suffix, images=action.images)
    else:
        try:
            action, image = Loader.get_action(collection_name, episode_id, int(action_id), suffix)
        except Exception:
            app.logger.warning(f'Could not find image: {collection_name}-{episode_id}-{action_id}-{suffix}')
            return send_empty_image()

        if suffix not in action.images.keys():
            app.logger.warning(f'Could not find suffix {collection_name}-{episode_id}-{action_id}-{suffix}')
            return send_empty_image()

    draw_pose(image, action.pose, convert_to_rgb=True)
    # draw_pose(image, action.pose, convert_to_rgb=True, reference_pose=action.images[suffix]['pose'])

    if flask.request.values.get('box', default=0, type=int):
        draw_around_box(image, box=Config.box, draw_lines=True)

    return send_image(image.mat / 255)

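# Standalone sketch of the in-memory JPEG streaming pattern used by api_image:
# cv2.imencode encodes the OpenCV matrix into a buffer, io.BytesIO wraps it, and
# flask.send_file returns it without touching the filesystem. The app instance and the
# route name here are illustrative and not part of the project.
import io

import cv2
import flask
import numpy as np

app = flask.Flask(__name__)


@app.route('/api/test-image')
def api_test_image():
    image = np.zeros((480, 752, 1), dtype=np.uint8)
    cv2.putText(image, '?', (310, 300), cv2.FONT_HERSHEY_SIMPLEX, 6, 100, thickness=6)
    _, image_encoded = cv2.imencode('.jpg', image)
    return flask.send_file(io.BytesIO(image_encoded), mimetype='image/jpeg')


if __name__ == '__main__':
    app.run()
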
import time

import cv2

from config import Config
from data.loader import Loader
from utils.image import draw_around_box, draw_pose, get_area_of_interest_new


if __name__ == '__main__':
    lateral = False
    suffix = 'ed-lateral_b-0_400' if lateral else 'ed-v'

    action, image = Loader.get_action('placing-3', '2019-12-12-16-07-12-857', 0, 'ed-v')

    # image = image.translate((0.0, 0.0, 0.05))
    # image = image.rotate_x(-0.3, (0.0, 0.25))

    draw_around_box(image, box=Config.box)
    # draw_pose(image, action.pose, convert_to_rgb=True)

    size_input = image.mat.shape[::-1]
    size_cropped = (200, 200)
    size_result = (32, 32)

    scale = 4
    image.mat = cv2.resize(image.mat, (size_input[0] // scale, size_input[1] // scale))
    image.pixel_size /= scale

    s = time.time()
    area_image = get_area_of_interest_new(
        image,
        action.pose,
        size_cropped=size_cropped,
        size_result=size_result,
    ).mat
    print(f'Cropping took {time.time() - s:.4f} s')

from pathlib import Path

from config import Config
from data.loader import Loader
from utils.image import draw_around_box, get_area_of_interest_new


if __name__ == '__main__':
    save_path = Path(__file__).parent.parent.parent / 'test' / 'generated'

    collection = 'placing-3'
    episode_id = '2020-01-30-11-30-51-981'

    combined_model = Loader.get_model('placing-3-21-part-type-2')

    action_grasp, image_grasp = Loader.get_action(collection, episode_id, 0, 'ed-v')
    action_place, image_place, image_goal = Loader.get_action(collection, episode_id, 1, ['ed-v', 'ed-goal'])

    draw_around_box(image_grasp, box=Config.box)
    draw_around_box(image_place, box=Config.box)
    draw_around_box(image_goal, box=Config.box)

    pose_grasp = action_grasp.pose
    # pose_grasp = RobotPose(Affine(x=-0.0053, y=0.0414, a=1.4708))
    pose_place = action_place.pose
    # pose_place = RobotPose(Affine(x=-0.0025, y=0.0563, a=-1.4708))

    image_grasp_area = get_area_of_interest_new(image_grasp, pose_grasp, size_cropped=(200, 200), size_result=(32, 32)).mat
    image_place_area = get_area_of_interest_new(image_place, pose_place, size_cropped=(200, 200), size_result=(32, 32)).mat