def get_observation_space(sensor):
    '''Creates the observation space for the given sensor

    sensor - String with the desired sensor to add to the observation space
    '''
    # NOTE(review): a later definition of get_observation_space in this file
    # (the model_metadata-aware variant) shadows this one at import time.
    obs = StateSpace({})
    if not isinstance(sensor, str):
        raise GenericError("None string type for sensor type: {}".format(
            type(sensor)))
    # All single-camera style inputs share the same RGB image space.
    single_camera_inputs = (Input.CAMERA.value,
                            Input.OBSERVATION.value,
                            Input.LEFT_CAMERA.value)
    if sensor in single_camera_inputs:
        obs[sensor] = ImageObservationSpace(
            shape=np.array((TRAINING_IMAGE_SIZE[1],
                            TRAINING_IMAGE_SIZE[0],
                            3)),
            high=255,
            channels_axis=-1)
    elif sensor == Input.STEREO.value:
        # Two grayscale planes (left/right) rather than RGB channels.
        obs[sensor] = PlanarMapsObservationSpace(
            shape=np.array((TRAINING_IMAGE_SIZE[1],
                            TRAINING_IMAGE_SIZE[0],
                            2)),
            low=0,
            high=255,
            channels_axis=-1)
    elif sensor == Input.LIDAR.value:
        obs[sensor] = VectorObservationSpace(shape=TRAINING_LIDAR_SIZE,
                                             low=0.15,
                                             high=1.0)
    elif sensor == Input.SECTOR_LIDAR.value:
        obs[sensor] = VectorObservationSpace(shape=TRAINING_LIDAR_SIZE,
                                             low=0.15,
                                             high=SECTOR_LIDAR_CLIPPING_DIST)
    else:
        raise Exception(
            "Unable to set observation space for sensor {}".format(sensor))
    return obs
def get_left_camera_embedders(network_type):
    '''Utility method for retrieving the input embedder for the left camera
    sensor, this needs to be in the util module due to the sagemaker/robomaker
    incompatibility

    network_type - The type of network for which to return the embedder for
    '''
    if not isinstance(network_type, str):
        raise GenericError("None string type for network type: {}".format(
            type(network_type)))
    # Static per-topology embedder schemes: conv layer specs, dense layer
    # sizes, batch-norm/activation/dropout for conv and dense stages, and
    # whether the first layer is batch-normalized.
    schemes = {
        NeuralNetwork.DEEP_CONVOLUTIONAL_NETWORK_SHALLOW.value: {
            SchemeInfo.CONV_INFO_LIST.value:
                [[32, 8, 4], [64, 4, 2], [64, 3, 1]],
            SchemeInfo.DENSE_LAYER_INFO_LIST.value: [],
            SchemeInfo.BN_INFO_CONV.value:
                [False, ActivationFunctions.RELU.value, 0.0],
            SchemeInfo.BN_INFO_DENSE.value:
                [False, ActivationFunctions.RELU.value, 0.0],
            SchemeInfo.IS_FIRST_LAYER_BN.value: False,
        },
        NeuralNetwork.DEEP_CONVOLUTIONAL_NETWORK.value: {
            SchemeInfo.CONV_INFO_LIST.value:
                [[32, 5, 2], [32, 3, 1], [64, 3, 2], [64, 3, 1]],
            SchemeInfo.DENSE_LAYER_INFO_LIST.value: [64],
            SchemeInfo.BN_INFO_CONV.value:
                [False, ActivationFunctions.TANH.value, 0.0],
            SchemeInfo.BN_INFO_DENSE.value:
                [False, ActivationFunctions.TANH.value, 0.3],
            SchemeInfo.IS_FIRST_LAYER_BN.value: False,
        },
        NeuralNetwork.DEEP_CONVOLUTIONAL_NETWORK_DEEP.value: {
            SchemeInfo.CONV_INFO_LIST.value:
                [[32, 8, 4], [32, 4, 2], [64, 4, 2], [64, 3, 1]],
            SchemeInfo.DENSE_LAYER_INFO_LIST.value: [512, 512],
            SchemeInfo.BN_INFO_CONV.value:
                [True, ActivationFunctions.RELU.value, 0.0],
            SchemeInfo.BN_INFO_DENSE.value:
                [False, ActivationFunctions.RELU.value, 0.0],
            SchemeInfo.IS_FIRST_LAYER_BN.value: False,
        },
    }
    if network_type not in schemes:
        raise Exception(
            "Left camera sensor has no embedder for topology {}".format(
                network_type))
    return {Input.LEFT_CAMERA.value: schemes[network_type]}
def get_stereo_camera_embedders(network_type):
    """Utility method for retrieving the input embedder for the stereo camera
    sensor, this needs to be in the util module due to the sagemaker/robomaker
    incompatibility

    network_type - The type of network for which to return the embedder for
    """
    if not isinstance(network_type, str):
        raise GenericError("None string type for network type: {}".format(
            type(network_type)))
    shallow_scheme = {
        SchemeInfo.CONV_INFO_LIST.value:
            [[32, 8, 4], [64, 4, 2], [64, 3, 1]],
        SchemeInfo.DENSE_LAYER_INFO_LIST.value: [],
        SchemeInfo.BN_INFO_CONV.value:
            [False, ActivationFunctions.RELU.value, 0.0],
        SchemeInfo.BN_INFO_DENSE.value:
            [False, ActivationFunctions.RELU.value, 0.0],
        SchemeInfo.IS_FIRST_LAYER_BN.value: False,
    }
    #! TODO decide if we want to have a deep-deep topology that differs
    # from deep; for now both deep topologies share one scheme.
    deep_scheme = {
        SchemeInfo.CONV_INFO_LIST.value: [
            [32, 3, 1],
            [64, 3, 2],
            [64, 3, 1],
            [128, 3, 2],
            [128, 3, 1],
        ],
        SchemeInfo.DENSE_LAYER_INFO_LIST.value: [],
        SchemeInfo.BN_INFO_CONV.value:
            [False, ActivationFunctions.RELU.value, 0.0],
        SchemeInfo.BN_INFO_DENSE.value:
            [False, ActivationFunctions.RELU.value, 0.0],
        SchemeInfo.IS_FIRST_LAYER_BN.value: False,
    }
    schemes = {
        NeuralNetwork.DEEP_CONVOLUTIONAL_NETWORK_SHALLOW.value: shallow_scheme,
        NeuralNetwork.DEEP_CONVOLUTIONAL_NETWORK.value: deep_scheme,
        NeuralNetwork.DEEP_CONVOLUTIONAL_NETWORK_DEEP.value: deep_scheme,
    }
    if network_type not in schemes:
        raise Exception(
            "Stereo camera sensor has no embedder for topology {}".format(
                network_type))
    return {Input.STEREO.value: schemes[network_type]}
def get_observation_space(sensor, model_metadata=None):
    '''Creates the observation space for the given sensor

    sensor - String with the desired sensor to add to the observation space
    model_metadata - model metadata information; required only for the
                     discretized sector lidar sensor, which reads its lidar
                     configuration (number of sectors and values per sector)
                     from it
    '''
    obs = StateSpace({})
    if not isinstance(sensor, str):
        raise GenericError("None string type for sensor type: {}".format(
            type(sensor)))
    if sensor == Input.CAMERA.value or sensor == Input.OBSERVATION.value or \
            sensor == Input.LEFT_CAMERA.value:
        # Single RGB camera image.
        obs[sensor] = ImageObservationSpace(
            shape=np.array((TRAINING_IMAGE_SIZE[1],
                            TRAINING_IMAGE_SIZE[0],
                            3)),
            high=255,
            channels_axis=-1)
    elif sensor == Input.STEREO.value:
        # Two grayscale planes (left/right) rather than RGB channels.
        obs[sensor] = PlanarMapsObservationSpace(
            shape=np.array((TRAINING_IMAGE_SIZE[1],
                            TRAINING_IMAGE_SIZE[0],
                            2)),
            low=0,
            high=255,
            channels_axis=-1)
    elif sensor == Input.LIDAR.value:
        obs[sensor] = VectorObservationSpace(shape=TRAINING_LIDAR_SIZE,
                                             low=0.15,
                                             high=1.0)
    elif sensor == Input.SECTOR_LIDAR.value:
        obs[sensor] = VectorObservationSpace(shape=NUMBER_OF_LIDAR_SECTORS,
                                             low=0.0,
                                             high=1.0)
    elif sensor == Input.DISCRETIZED_SECTOR_LIDAR.value:
        # Fail with a clear, project-style error instead of an opaque
        # TypeError when the caller omits the required metadata.
        if model_metadata is None:
            raise GenericError(
                "model_metadata is required to set observation space "
                "for sensor {}".format(sensor))
        lidar_config = model_metadata[ModelMetadataKeys.LIDAR_CONFIG.value]
        # Flattened vector: one value per (sector, discretization bin).
        shape = lidar_config[ModelMetadataKeys.NUM_SECTORS.value] * \
            lidar_config[ModelMetadataKeys.NUM_VALUES_PER_SECTOR.value]
        obs[sensor] = VectorObservationSpace(shape=shape,
                                             low=0.0,
                                             high=1.0)
    else:
        raise Exception(
            "Unable to set observation space for sensor {}".format(sensor))
    return obs
def test_deepracer_exceptions():
    """The function tests whether the user defined exceptions in
    deepracer_exceptions.py are getting raised properly when we call them from
    any part of SIMAPP code. The test function also checks whether the
    superclass Exception manages to provide the necessary error message passed
    along as well.

    Raises:
        RewardFunctionError
        GenericTrainerException
        GenericTrainerError
        GenericRolloutException
        GenericRolloutError
        GenericValidatorException
        GenericValidatorError
        GenericException
        GenericError
    """
    # (exception class, message) pairs: each exception is raised with its own
    # name as the message, and must surface that message via Exception.
    cases = (
        (RewardFunctionError, "RewardFunctionError"),
        (GenericTrainerException, "GenericTrainerException"),
        (GenericTrainerError, "GenericTrainerError"),
        (GenericRolloutException, "GenericRolloutException"),
        (GenericRolloutError, "GenericRolloutError"),
        (GenericValidatorException, "GenericValidatorException"),
        (GenericValidatorError, "GenericValidatorError"),
        (GenericException, "GenericException"),
        (GenericError, "GenericError"),
    )
    for exc_cls, message in cases:
        with pytest.raises(exc_cls, match=r".*" + message + r".*"):
            raise exc_cls(message)