def validated_tf_function(x, internals, return_internals=False):
    """Spec-checked wrapper around tf_function for network application.

    Validates inputs and internal states against their registered specs,
    applies the wrapped function, then validates the outputs the same way.
    """
    # Input validation: the inputs spec is either a single atomic spec or a
    # dict of per-name specs.
    if util.is_atomic_values_spec(values_spec=self.inputs_spec):
        inputs_ok = util.is_consistent_with_value_spec(value_spec=self.inputs_spec, x=x)
    else:
        inputs_ok = all(
            util.is_consistent_with_value_spec(value_spec=spec, x=x[key])
            for key, spec in self.inputs_spec.items()
        )
    if not inputs_ok:
        raise TensorforceError("Invalid input arguments for tf_apply.")

    # Incoming internal states must match the class-level internals spec.
    if any(
        not util.is_consistent_with_value_spec(value_spec=spec, x=internals[key])
        for key, spec in self.__class__.internals_spec(network=self).items()
    ):
        raise TensorforceError("Invalid input arguments for tf_apply.")

    # Apply the wrapped function; the return shape depends on return_internals.
    if return_internals:
        x, internals = tf_function(x=x, internals=internals, return_internals=True)
    else:
        x = tf_function(x=x, internals=internals, return_internals=False)

    # Output validation, mirroring the input checks.
    if not util.is_consistent_with_value_spec(value_spec=self.get_output_spec(), x=x):
        raise TensorforceError("Invalid output arguments for tf_apply.")
    if return_internals and any(
        not util.is_consistent_with_value_spec(value_spec=spec, x=internals[key])
        for key, spec in self.__class__.internals_spec(network=self).items()
    ):
        raise TensorforceError("Invalid output arguments for tf_apply.")

    if return_internals:
        return x, internals
    return x
def validated_tf_function(xs):
    """Spec-checked wrapper around tf_function for a sequence input.

    Only the first timestep slice of ``xs`` is checked against the input
    spec; the full sequence is passed to the wrapped function.
    """
    first_step = xs[:, 0, :]
    input_ok = util.is_consistent_with_value_spec(value_spec=self.input_spec, x=first_step)
    if not input_ok:
        raise TensorforceError("Invalid input arguments for tf_apply.")
    result = tf_function(xs=xs)
    output_ok = util.is_consistent_with_value_spec(value_spec=self.output_spec, x=result)
    if not output_ok:
        raise TensorforceError("Invalid output arguments for tf_apply.")
    return result
def validated_tf_function(x, previous):
    """Spec-checked wrapper around tf_function with a recurrent state.

    Note: only ``x`` is validated; ``previous`` has no spec check yet
    (see inline marker below, kept from the original).
    """
    input_ok = util.is_consistent_with_value_spec(value_spec=self.input_spec, x=x)
    if not input_ok:
        raise TensorforceError("Invalid input arguments for tf_apply.")
    # previous spec!
    x, previous = tf_function(x=x, previous=previous)
    output_ok = util.is_consistent_with_value_spec(value_spec=self.output_spec, x=x)
    if not output_ok:
        raise TensorforceError("Invalid output arguments for tf_apply.")
    return x, previous
def validated_tf_function(x):
    """Spec-checked wrapper around tf_function.

    Input/output checks are skipped entirely when the corresponding spec
    is None.
    """
    spec = self.input_spec
    if spec is not None:
        if not util.is_consistent_with_value_spec(value_spec=spec, x=x):
            raise TensorforceError("Invalid input arguments for tf_apply.")
    x = tf_function(x=x)
    spec = self.output_spec
    if spec is not None:
        if not util.is_consistent_with_value_spec(value_spec=spec, x=x):
            raise TensorforceError("Invalid output arguments for tf_apply.")
    return x
def validated_tf_function(x):
    """Spec-checked wrapper around tf_function.

    Raises structured value errors on spec mismatch; checks are skipped
    when the corresponding spec is None.
    """
    spec = self.input_spec
    if spec is not None:
        if not util.is_consistent_with_value_spec(value_spec=spec, x=x):
            raise TensorforceError.value(name='layer.apply', argument='input', value=x)
    x = tf_function(x=x)
    spec = self.output_spec
    if spec is not None:
        if not util.is_consistent_with_value_spec(value_spec=spec, x=x):
            raise TensorforceError.value(name='layer.apply', argument='output', value=x)
    return x
def update_tensor(name, tensor):
    """Overwrite the registered global tensor ``name`` with ``tensor``.

    Args:
        name: Unscoped tensor name; must be registered in
            ``Module.global_tensors_spec``.
        tensor: Replacement value; must be consistent with the registered
            value spec.

    Returns:
        The tensor previously stored under the current global scope, or
        None if nothing was stored there.

    Raises:
        TensorforceError: If ``name`` is not registered, or ``tensor``
            is inconsistent with the registered spec.
    """
    # NOTE(review): an earlier scope-walking resolution of `name` was left
    # commented out here; it has been removed as dead code.
    if name not in Module.global_tensors_spec:
        raise TensorforceError(
            "Global tensor is not registered: {}.".format(name))
    spec = Module.global_tensors_spec[name]
    if not util.is_consistent_with_value_spec(value_spec=spec, x=tensor):
        raise TensorforceError(
            "Invalid overwriting tensor: {}.".format(tensor))
    # Store under the fully-scoped name, remembering the displaced value.
    scoped_name = util.join_scopes(*Module.global_scope, name)
    previous = Module.global_tensors.get(scoped_name)
    Module.global_tensors[scoped_name] = tensor
    return previous
def validated_tf_function(xs):
    """Spec-checked wrapper around tf_function for a sequence input.

    Only the first timestep slice of ``xs`` is validated against the
    input spec; structured value errors are raised on mismatch.
    """
    first_step = xs[:, 0, :]
    input_ok = util.is_consistent_with_value_spec(
        value_spec=self.input_spec, x=first_step
    )
    if not input_ok:
        raise TensorforceError.value(name='layer.apply', argument='input', value=first_step)
    result = tf_function(xs=xs)
    output_ok = util.is_consistent_with_value_spec(
        value_spec=self.output_spec, x=result
    )
    if not output_ok:
        raise TensorforceError.value(name='layer.apply', argument='output', value=result)
    return result
def validated_tf_function(x):
    """Spec-checked wrapper around tf_function.

    The inputs spec may be a single atomic spec or a dict of per-name
    specs; validation branches accordingly.
    """
    if util.is_atomic_values_spec(values_spec=self.inputs_spec):
        inputs_ok = util.is_consistent_with_value_spec(value_spec=self.inputs_spec, x=x)
    else:
        inputs_ok = all(
            util.is_consistent_with_value_spec(value_spec=spec, x=x[key])
            for key, spec in self.inputs_spec.items()
        )
    if not inputs_ok:
        raise TensorforceError("Invalid input arguments for tf_apply.")
    x = tf_function(x=x)
    output_ok = util.is_consistent_with_value_spec(value_spec=self.get_output_spec(), x=x)
    if not output_ok:
        raise TensorforceError("Invalid output arguments for tf_apply.")
    return x
def validated_tf_function(x, previous):
    """Spec-checked wrapper around tf_function with a recurrent state.

    Only ``x`` is validated; ``previous`` has no spec check yet (see
    inline marker below, kept from the original).
    """
    input_ok = util.is_consistent_with_value_spec(
        value_spec=self.input_spec, x=x
    )
    if not input_ok:
        raise TensorforceError.value(name='layer.apply', argument='input', value=x)
    # previous spec!
    x, previous = tf_function(x=x, previous=previous)
    output_ok = util.is_consistent_with_value_spec(
        value_spec=self.output_spec, x=x
    )
    if not output_ok:
        raise TensorforceError.value(name='layer.apply', argument='output', value=x)
    return x, previous
def validated_tf_function(x, initial=None):
    """Spec-checked wrapper around tf_function with an optional initial state.

    With ``initial`` given, the wrapped function also returns a final
    state, which is forwarded to the caller alongside the output.
    """
    if not util.is_consistent_with_value_spec(value_spec=self.input_spec, x=x):
        raise TensorforceError("Invalid input arguments for tf_apply.")
    # initial spec!
    has_initial = initial is not None
    if has_initial:
        x, final = tf_function(x=x, initial=initial)
    else:
        x = tf_function(x=x)
    if not util.is_consistent_with_value_spec(value_spec=self.output_spec, x=x):
        raise TensorforceError("Invalid output arguments for tf_apply.")
    return (x, final) if has_initial else x
def validated_tf_function(x, initial=None):
    """Spec-checked wrapper around tf_function with an optional initial state.

    Raises structured value errors on spec mismatch. With ``initial``
    given, the wrapped function also returns a final state.
    """
    input_ok = util.is_consistent_with_value_spec(
        value_spec=self.input_spec, x=x
    )
    if not input_ok:
        raise TensorforceError.value(name='layer.apply', argument='input', value=x)
    # initial spec!
    has_initial = initial is not None
    if has_initial:
        x, final = tf_function(x=x, initial=initial)
    else:
        x = tf_function(x=x)
    output_ok = util.is_consistent_with_value_spec(
        value_spec=self.output_spec, x=x
    )
    if not output_ok:
        raise TensorforceError.value(name='layer.apply', argument='output', value=x)
    return (x, final) if has_initial else x
def update_tensor(name, tensor):
    """Overwrite the registered global tensor ``name`` with ``tensor``.

    Returns the tensor previously stored under the current global scope
    (or None). Also mirrors the value into the queryable registry when
    not inside a cond/while construct.
    """
    if name not in Module.global_tensors_spec:
        raise TensorforceError("Global tensor is not registered: {}.".format(name))
    registered_spec = Module.global_tensors_spec[name]
    if not util.is_consistent_with_value_spec(value_spec=registered_spec, x=tensor):
        raise TensorforceError("Invalid overwriting tensor: {}.".format(tensor))
    scoped_name = util.join_scopes(*Module.global_scope, name)
    previous = Module.global_tensors.get(scoped_name)
    Module.global_tensors[scoped_name] = tensor
    # Queryable only outside tf.cond / tf.while_loop bodies.
    inside_control_flow = Module.cond_counter != 0 or Module.while_counter != 0
    if not inside_control_flow:
        Module.queryable_tensors[scoped_name] = tensor
    return previous
def update_tensor(name, tensor):
    """Overwrite the registered global tensor ``name`` with ``tensor``.

    Raises structured value errors for unregistered names or
    spec-inconsistent tensors. Returns the previously stored tensor
    under the current global scope (or None), and mirrors the value into
    the queryable registry when not inside a cond/while construct.
    """
    if name not in Module.global_tensors_spec:
        raise TensorforceError.value(
            name='Module.update_tensor', argument='name', value=name
        )
    registered_spec = Module.global_tensors_spec[name]
    if not util.is_consistent_with_value_spec(value_spec=registered_spec, x=tensor):
        raise TensorforceError.value(
            name='Module.update_tensor', argument='tensor', value=tensor
        )
    scoped_name = util.join_scopes(*Module.global_scope, name)
    previous = Module.global_tensors.get(scoped_name)
    Module.global_tensors[scoped_name] = tensor
    # Queryable only outside tf.cond / tf.while_loop bodies.
    inside_control_flow = Module.cond_counter != 0 or Module.while_counter != 0
    if not inside_control_flow:
        Module.queryable_tensors[scoped_name] = tensor
    return previous