def from_tensor(self, *, tensor, batched):
    """Convert an eager TensorFlow tensor into a validated Numpy/Python value.

    Validates the tensor against this spec (dtype, shape, finite entries,
    num_values respectively min/max bounds) and returns its Numpy
    representation, unwrapped to a plain Python object for a non-batched
    singleton shape.

    Args:
        tensor: Eager tf.Tensor to convert; must match this spec.
        batched: Whether the tensor carries a leading batch dimension.

    Returns:
        Numpy array, or a Python scalar when ``self.shape == ()`` and not batched.

    Raises:
        TensorforceError: If the spec is underspecified or any validation fails.
    """
    # An underspecified spec cannot validate a concrete value.
    if self.is_underspecified():
        raise TensorforceError.unexpected()

    # Must be an actual TensorFlow tensor.
    if not isinstance(tensor, tf.Tensor):
        raise TensorforceError.type(
            name='TensorSpec.from_tensor', argument='tensor', dtype=type(tensor)
        )

    # Dtype must agree with the spec type.
    if tf_util.dtype(x=tensor) != self.type:
        raise TensorforceError.value(
            name='TensorSpec.from_tensor', argument='tensor.dtype', value=tensor
        )

    # Shape must agree with the spec shape, skipping the batch dimension if present.
    if tf_util.shape(x=tensor)[int(batched):] != self.shape:
        raise TensorforceError.value(
            name='TensorSpec.from_tensor', argument='tensor.shape', value=tensor
        )

    # Materialize as a Numpy array (requires an eager tensor).
    array = tensor.numpy()

    # Reject nan/inf entries.
    if np.isnan(array).any() or np.isinf(array).any():
        raise TensorforceError.value(
            name='TensorSpec.from_tensor', argument='tensor', value=array
        )

    if self.type == 'int' and self.num_values is not None:
        # Categorical int spec: entries must lie in [0, num_values).
        if (array < 0).any() or (array >= self.num_values).any():
            raise TensorforceError.value(
                name='TensorSpec.from_tensor', argument='tensor', value=array
            )
    elif self.type in ('int', 'float'):
        # Numeric spec: enforce the optional min/max bounds.
        if self.min_value is not None and (array < self.min_value).any():
            raise TensorforceError.value(
                name='TensorSpec.from_tensor', argument='tensor', value=array
            )
        if self.max_value is not None and (array > self.max_value).any():
            raise TensorforceError.value(
                name='TensorSpec.from_tensor', argument='tensor', value=array
            )

    # Non-batched singleton shape: unwrap to a plain Python object.
    if self.shape == () and not batched:
        array = array.item()
    return array
def tf_assert(self, *, x, batch_size=None, include_type_shape=False, message=None):
    """Build TensorFlow runtime assertions checking that ``x`` matches this spec.

    Args:
        x: tf.Tensor or tf.Variable to check.
        batch_size: Optional scalar int tensor; when given, ``x`` is expected
            to carry a leading batch dimension of that size.
        include_type_shape: Not referenced in this body; kept for interface
            compatibility.
        message: Optional message template, may contain '{name}' and '{issue}'
            placeholders.

    Returns:
        List of TensorFlow assertion operations.

    Raises:
        TensorforceError: If ``x`` or ``batch_size`` has an invalid type.
    """
    # x must be a TensorFlow tensor or variable.
    if not isinstance(x, (tf.Tensor, tf.Variable)):
        raise TensorforceError.type(name='TensorSpec.tf_assert', argument='x', dtype=type(x))

    # batch_size, when given, must be a scalar int tensor.
    if batch_size is not None:
        if not isinstance(batch_size, tf.Tensor):
            raise TensorforceError.type(
                name='TensorSpec.tf_assert', argument='batch_size', dtype=type(batch_size)
            )
        if tf_util.dtype(x=batch_size) != 'int' or tf_util.shape(x=batch_size) != ():
            raise TensorforceError.value(
                name='TensorSpec.tf_assert', argument='batch_size', value=batch_size
            )

    # Pre-fill the '{name}' placeholder, leaving '{issue}' for the per-check fill.
    if message is not None and '{name}' in message:
        message = message.format(name='', issue='{issue}')

    def issue_message(issue):
        # Per-check message, or None when no template was given.
        return None if message is None else message.format(issue=issue)

    checks = []

    # Type: static check that raises on mismatch, hence not appended to checks.
    tf.debugging.assert_type(
        tensor=x, tf_type=self.tf_type(), message=issue_message('type')
    )

    # Shape, prepending the batch dimension when batch_size is given.
    expected_shape = tf_util.constant(value=self.shape, dtype='int')
    if batch_size is not None:
        expected_shape = tf.concat(
            values=(tf.expand_dims(input=batch_size, axis=0), expected_shape), axis=0
        )
    checks.append(tf.debugging.assert_equal(
        x=tf_util.cast(x=tf.shape(input=x), dtype='int'), y=expected_shape,
        message=issue_message('shape')
    ))

    # Float values must be finite (assert_all_finite requires a str message).
    if self.type == 'float':
        checks.append(tf.debugging.assert_all_finite(
            x=x, message=('' if message is None else message.format(issue='inf/nan value'))
        ))

    # Min/max value (includes num_values)
    if self.type != 'bool' and self.min_value is not None:
        checks.append(tf.debugging.assert_greater_equal(
            x=x, y=tf_util.constant(value=self.min_value, dtype=self.type),
            message=issue_message('min value')
        ))
    if self.type != 'bool' and self.max_value is not None:
        checks.append(tf.debugging.assert_less_equal(
            x=x, y=tf_util.constant(value=self.max_value, dtype=self.type),
            message=issue_message('max value')
        ))

    return checks