def on_received_observations(self, context: Context, data: DB20Observations):
    """Handle one incoming DB20 observation.

    Logs the observation, reads odometry, and decodes the camera JPEG.

    :param context: protocol context (currently unused beyond the handler signature).
    :param data: DB20 observations carrying a camera JPEG and odometry.
    """
    logger.info("received", data=data)
    camera: JPGImage = data.camera
    odometry = data.odometry
    # Was a bare print(odometry) — a debug leftover. Route it through the
    # structured logger so it lands in the same stream as every other message.
    logger.info("odometry", odometry=odometry)
    # NOTE(review): the decoded image is bound to a throwaway local and never
    # stored — confirm whether a subclass/override is expected to consume it.
    _rgb = jpg2rgb(camera.jpg_data)
def check_tensorflow_gpu(self):
    """Verify that TensorFlow can see a GPU device.

    If the AIDO_REQUIRE_GPU environment variable is set and no GPU is
    found, raise RuntimeError. Previously this only logged the error and
    silently continued on CPU, defeating the purpose of the requirement;
    the sibling variant of this check in this file raises, so this one
    now does too for consistency.

    :raises RuntimeError: AIDO_REQUIRE_GPU is set but no GPU is visible.
    """
    import tensorflow as tf

    req = os.environ.get('AIDO_REQUIRE_GPU', None)
    name = tf.test.gpu_device_name()
    logger.info(f'gpu_device_name: {name!r} AIDO_REQUIRE_GPU = {req!r}')
    if req is not None:
        if not name:  # None or ''
            msg = 'Could not find gpu device.'
            logger.error(msg)
            raise RuntimeError(msg)
def check_tensorflow_gpu(self):
    """Verify GPU availability, honoring FORCE_CPU_INFERENCE and AIDO_REQUIRE_GPU.

    :raises RuntimeError: AIDO_REQUIRE_GPU is set but no GPU is visible.
    """
    import tensorflow as tf

    # Explicit CPU override: skip the GPU check entirely.
    if os.environ.get('FORCE_CPU_INFERENCE', None):
        return
    req = os.environ.get('AIDO_REQUIRE_GPU', None)
    name = tf.test.gpu_device_name()
    logger.info(f'gpu_device_name: {name!r} AIDO_REQUIRE_GPU = {req!r}')
    # An empty name (None or '') means TensorFlow found no GPU.
    if req is not None and not name:
        msg = 'Could not find gpu device.'
        logger.error(msg)
        raise RuntimeError(msg)
def limit_gpu_memory(memory_limit=1024):
    """Restrict TensorFlow to at most ``memory_limit`` MB on the first GPU.

    Creates a single virtual device on the first physical GPU with the
    given memory cap (default 1024 MB, i.e. 1 GB). No-op when no GPU is
    visible. Uses the ``tf.config.experimental`` API.

    :param memory_limit: memory cap in megabytes for the virtual device.
    """
    import tensorflow as tf
    physical_gpus = tf.config.experimental.list_physical_devices('GPU')
    if physical_gpus:
        try:
            c = [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=memory_limit)]
            tf.config.experimental.set_virtual_device_configuration(physical_gpus[0], c)
            logical_gpus = tf.config.experimental.list_logical_devices('GPU')
            logger.info(num_physical_gpus=len(physical_gpus), num_logical_gpus=len(logical_gpus))
        except RuntimeError as e:
            # Virtual devices must be set before GPUs have been initialized
            logger.error(e)
def __init__(self, load_model=False, model_path=None):
    """Set up the agent's perception, control, and bookkeeping state.

    NOTE(review): ``load_model`` and ``model_path`` are accepted but not
    used here — confirm whether weight loading happens elsewhere.
    """
    logger.info('PytorchAgent init')
    # Perception: preprocessing wrapper, model, and the image buffer.
    self.preprocessor = DTPytorchWrapper()
    self.model = Model()
    self.current_image = np.zeros((640, 480, 3))
    # Actuation: steering-to-wheel conversion and the low-level controller.
    self.steering_to_wheel_wrapper = SteeringToWheelVelWrapper()
    self.controller = Controller()
    # Timing / last-observation bookkeeping, populated while stepping.
    self.dt = self.last_t = self.old_obs = None
    logger.info('PytorchAgent init complete')
def __init__(self, load_model=False, model_path=None):
    """Initialize the Dronet-based RL agent, optionally restoring weights.

    :param load_model: when truthy, load weights from ``model_path``.
    :param model_path: checkpoint file; falls back to "model.pt" when falsy.
    """
    logger.info('PytorchRLTemplateAgent init')
    self.preprocessor = DTPytorchWrapper()
    self.image_size = (120, 160, 3)
    self.wrapper = SteeringToWheelVelWrapper()
    self.model = Dronet()
    # Keep the agent pinned to whatever device the model selected.
    self._device = self.model._device
    self.model.to(self._device)
    # Channels-first (C, H, W) image buffer.
    height, width = self.image_size[0], self.image_size[1]
    self.current_image = np.zeros((3, height, width))
    if load_model:
        logger.info('PytorchRLTemplateAgent loading models')
        fp = model_path or "model.pt"
        self.model.load(fp, "models", for_inference=True)
        logger.info('PytorchRLTemplateAgent model loaded')
    logger.info('PytorchRLTemplateAgent init complete')
def __init__(self, load_model=False, model_path=None):
    """Create the DDPG-based RL agent, optionally restoring weights.

    :param load_model: when truthy, load weights from ``model_path``.
    :param model_path: checkpoint name; falls back to "model" when falsy.
    """
    logger.info('PytorchRLTemplateAgent init')
    self.preprocessor = DTPytorchWrapper()
    self.model = DDPG(
        state_dim=self.preprocessor.shape,
        action_dim=2,
        max_action=1,
        net_type="cnn",
    )
    self.current_image = np.zeros((640, 480, 3))
    if load_model:
        logger.info('PytorchRLTemplateAgent loading models')
        fp = model_path or "model"
        self.model.load(fp, "models", for_inference=True)
    logger.info('PytorchRLTemplateAgent init complete')
def init(self, context: Context):
    """Protocol init hook: check the GPU, build the DDPG model, maybe load weights.

    Reads ``self.load_model`` / ``self.model_path`` set elsewhere on the agent.
    """
    self.check_gpu_available(context)
    logger.info("PytorchRLTemplateAgent init")
    # Imported lazily so module import does not pull in the model stack.
    from model import DDPG

    self.preprocessor = DTPytorchWrapper()
    self.model = DDPG(
        state_dim=self.preprocessor.shape,
        action_dim=2,
        max_action=1,
        net_type="cnn",
    )
    self.current_image = np.zeros((640, 480, 3))
    if self.load_model:
        logger.info("Pytorch Template Agent loading models")
        fp = self.model_path or "model"
        self.model.load(fp, "models", for_inference=True)
    logger.info("PytorchRLTemplateAgent init complete")
def on_received_episode_start(self, context: Context, data: EpisodeStart):
    """Announce a new episode on both the protocol context and the logger."""
    episode_name = data.episode_name
    context.info(f'Starting episode "{episode_name}"')
    logger.info(data=data)
def on_received_observations(self, context: Context, data: DB20Observations):
    """Decode the incoming camera JPEG and cache it on ``self.rgb``.

    NOTE(review): ``bgr_from_jpg`` presumably yields BGR and the outer
    ``bgr_from_rgb`` swaps the channel order, so the cached array should
    end up RGB (matching the attribute name) — confirm against dcu docs.
    """
    logger.info("received", data=data)
    camera: JPGImage = data.camera
    decoded = dcu.bgr_from_jpg(camera.jpg_data)
    self.rgb = dcu.bgr_from_rgb(decoded)