def register_tensor_wrapper_class(cls):
    """Register the specified TensorWrapper type into TensorFlow type system.

    Parameters
    ----------
    cls : type
        A subclass of `TensorWrapper`, which is to be registered.
    """
    is_wrapper_type = (isinstance(cls, six.class_types) and
                       issubclass(cls, TensorWrapper))
    if not is_wrapper_type:
        raise TypeError('`%s` is not a type, or not a subclass of '
                        '`TensorWrapper`.' % (cls,))

    def convert(value, dtype=None, name=None, as_ref=False):
        # Conversion must preserve the dtype and may not hand out a ref.
        if dtype and not dtype.is_compatible_with(value.dtype):
            raise ValueError('Incompatible type conversion requested to type '
                             '%s for tensor of type %s'
                             % (dtype.name, value.dtype.name))
        if as_ref:
            raise ValueError('%r: Ref type not supported.' % value)
        return value.__wrapped__

    tf.register_tensor_conversion_function(cls, convert)

    # Allow session.run(...) on wrapper instances, and allow them to be
    # used as keys in feed_dict.
    def unwrap(t):
        return getattr(t, '__wrapped__')

    register_session_run_conversion_functions(
        cls,
        fetch_function=lambda t: ([unwrap(t)], lambda val: val[0]),
        feed_function=lambda t, v: [(unwrap(t), v)],
        feed_function_for_partial_run=lambda t: [unwrap(t)])
def register_tensor_wrapper_class(cls):
    """
    Register a sub-class of :class:`TensorWrapper` into TensorFlow type
    system.

    Args:
        cls: The subclass of :class:`TensorWrapper` to be registered.
    """
    is_wrapper_type = (isinstance(cls, six.class_types) and
                       issubclass(cls, TensorWrapper))
    if not is_wrapper_type:
        raise TypeError('`{}` is not a type, or not a subclass of '
                        '`TensorWrapper`'.format(cls))

    def to_tensor(value, dtype=None, name=None, as_ref=False):
        # Conversion must preserve the dtype and may not hand out a ref.
        if dtype and not dtype.is_compatible_with(value.dtype):
            raise ValueError('Incompatible type conversion requested to type '
                             '{} for tensor of type {}'.format(
                                 dtype.name, value.dtype.name))
        if as_ref:  # pragma: no cover
            raise ValueError('{!r}: Ref type not supported'.format(value))
        return value.tensor

    tf.register_tensor_conversion_function(cls, to_tensor)

    # Allow session.run(...) on wrapper instances, and allow them to be
    # used as keys in feed_dict.
    def fetch_fn(t):
        return [t.tensor], lambda val: val[0]

    def feed_fn(t, v):
        return [(t.tensor, v)]

    def partial_feed_fn(t):
        return [t.tensor]

    register_session_run_conversion_functions(
        cls,
        fetch_function=fetch_fn,
        feed_function=feed_fn,
        feed_function_for_partial_run=partial_feed_fn)
def register_tensor_conversion(convertable, overload_operators=True,
                               priority=1):
    """Teach TensorFlow to treat `convertable` like a dense tensor.

    Registers session.run fetch/feed handlers based on ``read_value()``,
    registers a tensor-conversion function that delegates to the instance's
    own ``_dense_var_to_tensor``, and optionally overloads all operators.

    Args:
        convertable: The variable-like class to register.
        overload_operators: If True, call ``_OverloadAllOperators`` on it.
        priority: Conversion priority; 1 ranks higher than any built-in
            tf conversion.
    """
    def fetch_function(variable):
        # Fetch the current value; identity rewrap of the single result.
        return [variable.read_value()], lambda val: val[0]

    def feed_function(feed, feed_val):
        return [(feed.read_value(), feed_val)]

    def feed_function_for_partial_run(feed):
        return [feed.read_value()]

    register_session_run_conversion_functions(
        tensor_type=convertable,
        fetch_function=fetch_function,
        feed_function=feed_function,
        feed_function_for_partial_run=feed_function_for_partial_run)

    def _to_dense(var, dtype=None, name=None, as_ref=False):
        # Delegate conversion to the instance's own densifier.
        return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref)

    ops.register_tensor_conversion_function(convertable, _to_dense,
                                            priority=priority)
    if overload_operators:
        convertable._OverloadAllOperators()
# NOTE(review): chunk starts mid-function — these two lines are the tail of
# the ragged fetch function (presumably `_ragged_tensor_session_fetch`);
# its header is outside this view.
components = rt.nested_row_splits + (rt.inner_values,)
return (components, _ragged_tensor_value_from_components)


def _ragged_tensor_session_feed(feed_key, feed_val):
  # Pair each component tensor of the key with the matching component of
  # the fed value: all nested row-splits first, then the inner values.
  key_components = feed_key.nested_row_splits + (feed_key.inner_values,)
  val_components = feed_val.nested_row_splits + (feed_val.inner_values,)
  return zip(key_components, val_components)


def _ragged_tensor_session_feed_for_partial_run(feed_key):
  # Component tensors that must be declared up front for a partial run.
  return feed_key.nested_row_splits + (feed_key.inner_values,)


# Let session.run fetch and feed RaggedTensors by decomposing them into
# their component tensors.
session.register_session_run_conversion_functions(
    RaggedTensor,
    _ragged_tensor_session_fetch,
    _ragged_tensor_session_feed,
    _ragged_tensor_session_feed_for_partial_run)


class RaggedTensorType(object):
  """Encoding of a static type for a `RaggedTensor`.

  Use this type to express/declare that an output must have the type of
  `RaggedTensor`.
  """

  def __init__(self, dtype, ragged_rank):
    # NOTE(review): chunk ends mid-docstring — the remainder of __init__
    # is outside this view.
    """Initializes a RaggedTensorType object.

    Args:
      dtype: data type of the `RaggedTensor`'s inner values.
Return the Tensor representing the value of the variational objective.

:return: A Tensor.
"""
# NOTE(review): chunk starts mid-method — the `def` line and the opening
# of the docstring above are outside this view.  Lazily build and cache
# the objective's tensor on first access.
if not hasattr(self, '_tensor'):
    self._tensor = self._objective()
return self._tensor

@staticmethod
def _to_tensor(value, dtype=None, name=None, as_ref=False):
    # Conversion function for tf.convert_to_tensor: unwrap the underlying
    # tensor, refusing incompatible dtypes and ref-type requests.
    tensor = value.tensor
    if dtype and not dtype.is_compatible_with(tensor.dtype):
        raise ValueError("Incompatible type conversion requested to type "
                         "'{}' for variable of type '{}'".format(
                             dtype.name, tensor.dtype.name))
    if as_ref:
        raise ValueError("{}: Ref type not supported.".format(value))
    return tensor

# NOTE(review): the statements below are module-level in the original file
# (after the VariationalObjective class body).
tf.register_tensor_conversion_function(VariationalObjective,
                                       VariationalObjective._to_tensor)

# bring support for session.run(VariationalObjective), and for using as keys
# in feed_dict.
register_session_run_conversion_functions(
    VariationalObjective,
    fetch_function=lambda t: ([t.tensor], lambda val: val[0]),
    feed_function=lambda t, v: [(t.tensor, v)],
    feed_function_for_partial_run=lambda t: [t.tensor])
# NOTE(review): chunk starts mid-function — these lines are the tail of
# `_prensor_value_from_type_spec_and_component_values`; its header (and the
# definitions of `parent_index` / `component_values`) are outside this view.
values = next(component_values)
node = LeafNodeValue(parent_index, values, prensor_type_spec._is_repeated)
# Rebuild each child subtree recursively, consuming component values in
# the order given by the spec.
step_to_child = collections.OrderedDict()
for step, child_spec in prensor_type_spec._children_specs:
    step_to_child[
        step] = _prensor_value_from_type_spec_and_component_values(
            child_spec, component_values)
return PrensorValue(node, step_to_child)


def _prensor_value_fetch(prensor_tree: prensor.Prensor):
    """Fetch function for PrensorValue. See the document in session_lib."""
    # pylint: disable=protected-access
    # Decompose the prensor into its component tensors via its TypeSpec.
    type_spec = prensor_tree._type_spec
    components = type_spec._to_components(prensor_tree)

    def _construct_prensor_value(component_values):
        # Rewrap the fetched component values into a PrensorValue tree.
        return _prensor_value_from_type_spec_and_component_values(
            type_spec, iter(component_values))

    return components, _construct_prensor_value


# Let session.run fetch Prensor trees; feeding is not supported.
session_lib.register_session_run_conversion_functions(
    prensor.Prensor,
    _prensor_value_fetch,
    feed_function=None,
    feed_function_for_partial_run=None)
# NOTE(review): chunk starts mid-method — this is the tail of the session
# fetch function (its `def` line is outside this view): fetch the
# variable's value tensor, identity rewrap of the single result.
return ([tensor.value()], lambda val: val[0])

@staticmethod
def _session_run_conversion_feed_function(feed, feed_val):
    # Feed `feed_val` in place of the variable's value tensor.
    return [(feed.value(), feed_val)]

@staticmethod
def _session_run_conversion_feed_function_for_partial_run(feed):
    # Tensors that must be declared up front for a partial run.
    return [feed.value()]

@staticmethod
def _tensor_conversion_function(v, dtype=None, name=None, as_ref=False):
    # Conversion function for tf.convert_to_tensor; `name` and `as_ref`
    # are accepted only to satisfy the conversion-function signature.
    _ = name, as_ref
    if dtype and not dtype.is_compatible_with(v.dtype):
        raise ValueError(
            "Incompatible type conversion requested to type '%s' for variable "
            "of type '%s'" % (dtype.name, v.dtype.name))
    return v.value()

# NOTE(review): module-level registrations (after the class body in the
# original file).
RandomVariable._overload_all_operators()
register_session_run_conversion_functions(
    RandomVariable, RandomVariable._session_run_conversion_fetch_function,
    RandomVariable._session_run_conversion_feed_function,
    RandomVariable._session_run_conversion_feed_function_for_partial_run)
tf.register_tensor_conversion_function(
    RandomVariable, RandomVariable._tensor_conversion_function)
:param value:
:param dtype:
:param name:
:param as_ref:
:return:
"""
# NOTE(review): chunk starts mid-docstring of `tensor_conversion` — the
# `def` line and docstring opening are outside this view.  Delegates to
# the wrapped tensor; ref-type conversion is intentionally ignored (see
# the commented-out guard).
# if as_ref:
#     raise NotImplemented()
return tf.convert_to_tensor(value.tensor, dtype=dtype, name=name)

tf.register_tensor_conversion_function(MergedVariable,
                                       MergedVariable.tensor_conversion)

# import tensorflow.client.session as tf_pcs
# Fetch-only session.run support: no feed functions are registered.
register_session_run_conversion_functions(
    MergedVariable,
    lambda merged_var: ([merged_var.tensor], lambda val: val[0]))


#
def flatten_list(lst):
    # Flatten one nesting level: list of lists -> single flat list.
    from itertools import chain
    return list(chain(*lst))


def simple_size_of_with_pickle(obj):
    # Estimate an object's size by pickling it to a randomly named file
    # and reading the file size.  NOTE(review): chunk ends mid-function —
    # the cleanup/return of this function is outside this view.
    import pickle
    import os
    name = str(np.random.rand())
    with open(name, mode='bw') as f:
        pickle.dump(obj, f)
    size = os.stat(name).st_size
def _session_run_conversion_fetch_function(tensor):
    """Fetch handler: expose the backing ``value`` tensor, identity rewrap."""
    return [tensor.value], lambda val: val[0]


def _session_run_conversion_feed_function(feed, feed_val):
    """Feed handler: substitute ``feed_val`` for the backing tensor."""
    return [(feed.value, feed_val)]


def _session_run_conversion_feed_function_for_partial_run(feed):
    """Partial-run feed handler: declare the backing tensor up front."""
    return [feed.value]


def _tensor_conversion_function(v, dtype=None, name=None, as_ref=False):
    """Conversion handler for tf.convert_to_tensor: hand out ``v.value``."""
    del name, as_ref  # unused
    if not dtype or dtype.is_compatible_with(v.dtype):
        return v.value
    raise ValueError(
        "Incompatible type conversion requested to type '%s' for variable "
        "of type '%s'" % (dtype.name, v.dtype.name))


tf_session.register_session_run_conversion_functions(  # enable sess.run, eval
    RandomVariable,
    _session_run_conversion_fetch_function,
    _session_run_conversion_feed_function,
    _session_run_conversion_feed_function_for_partial_run)

tf.register_tensor_conversion_function(  # enable tf.convert_to_tensor
    RandomVariable, _tensor_conversion_function)
# NOTE(review): chunk starts mid-method — the tail of
# `StochasticTensor._to_tensor` (its `def` line and the dtype-compatibility
# check guarding this raise are outside this view).
raise ValueError("Incompatible type conversion requested to type "
                 "'{}' for variable of type '{}'".format(
                     dtype.name, value.dtype.name))
if as_ref:
    raise ValueError("{}: Ref type not supported.".format(value))
return value.tensor

tf.register_tensor_conversion_function(StochasticTensor,
                                       StochasticTensor._to_tensor)

# bring support for session.run(StochasticTensor), and for using as keys
# in feed_dict.
register_session_run_conversion_functions(
    StochasticTensor,
    fetch_function=lambda t: ([t.tensor], lambda val: val[0]),
    feed_function=lambda t, v: [(t.tensor, v)],
    feed_function_for_partial_run=lambda t: [t.tensor])


class BayesianNet(Context):
    # NOTE(review): chunk ends inside this class docstring — the rest of
    # the class is outside this view.
    """
    The :class:`BayesianNet` class is a context class supporting model
    construction in ZhuSuan as Bayesian Networks (Directed graphical models).

    A `BayesianNet` represents a DAG with two kinds of nodes:

    * Deterministic nodes, made up of any tensorflow operations.
    * Stochastic nodes, constructed by :class:`StochasticTensor`.

    To start a :class:`BayesianNet` context::
# register the variable, which is used to detect dependencies contextmanager.randvar_registry.register_parameter(self) contextmanager.randvar_registry.update_graph() def _tensor_conversion_function(p, dtype=None, name=None, as_ref=False): """ Function that converts the inferpy variable into a Tensor. This will enable the use of enable tf.convert_to_tensor(rv) If the variable needs to be broadcast_to, do it right now """ return tf.convert_to_tensor(p.var) # register the conversion function into a tensor tf.register_tensor_conversion_function( # enable tf.convert_to_tensor Parameter, _tensor_conversion_function) def _session_run_conversion_fetch_function(p): """ This will enable run and operations with other tensors """ return ([tf.convert_to_tensor(p)], lambda val: val[0]) tf_session.register_session_run_conversion_functions( # enable sess.run, eval Parameter, _session_run_conversion_fetch_function)
# NOTE(review): chunk starts mid-function — these two lines are the tail of
# the ragged fetch function (presumably `_ragged_tensor_session_fetch`);
# its header is outside this view.
components = rt.nested_row_splits + (rt.inner_values, )
return (components, _ragged_tensor_value_from_components)


def _ragged_tensor_session_feed(feed_key, feed_val):
  # Pair each component tensor of the key with the matching component of
  # the fed value: all nested row-splits first, then the inner values.
  key_components = feed_key.nested_row_splits + (feed_key.inner_values, )
  val_components = feed_val.nested_row_splits + (feed_val.inner_values, )
  return zip(key_components, val_components)


def _ragged_tensor_session_feed_for_partial_run(feed_key):
  # Component tensors that must be declared up front for a partial run.
  return feed_key.nested_row_splits + (feed_key.inner_values, )


# Let session.run fetch and feed RaggedTensors by decomposing them into
# their component tensors.
session.register_session_run_conversion_functions(
    RaggedTensor,
    _ragged_tensor_session_fetch,
    _ragged_tensor_session_feed,
    _ragged_tensor_session_feed_for_partial_run)


class RaggedTensorType(object):
  """Encoding of a static type for a `RaggedTensor`.

  Use this type to express/declare that an output must have the type of
  `RaggedTensor`.
  """

  def __init__(self, dtype, ragged_rank):
    # NOTE(review): chunk ends mid-docstring — the remainder of __init__
    # is outside this view.
    """Initializes a RaggedTensorType object.

    Args:
      dtype: data type of the `RaggedTensor`'s inner values.
      ragged_rank: ragged_rank of the declared `RaggedTensor`.
# NOTE(review): chunk starts mid-method — the tail of a composition method
# on ParametricFunction (its `def` line is outside this view).  The rule
# lambda applies comp_arg1 and matrix-multiplies (`__matmul__`) the result
# with comp_arg2 applied to the same input/parameters.
return ParametricFunction(
    other.x, [self.params, other.params], self.for_input(other.y),
    lambda _inp, _prm, **kwa: kwa['comp_arg1'].
    rule(_inp, _prm[0], **kwa['comp_arg1']._kwargs).__matmul__(kwa[
        'comp_arg2'].rule(_inp, _prm[1], **kwa['comp_arg2']._kwargs)),
    comp_arg1=self, comp_arg2=other)

# Convert a ParametricFunction to a tensor through its output `y`.
tf.register_tensor_conversion_function(
    ParametricFunction,
    lambda value, dtype=None, name=None, as_ref=False: tf.convert_to_tensor(
        value.y, dtype, name))
# Fetch-only session.run support: fetch `y`, identity rewrap.
register_session_run_conversion_functions(
    ParametricFunction, lambda pf: ([pf.y], lambda val: val[0]))


def _process_initializer(initializers, j, default):
    # Resolve the initializer for slot `j`: a callable applies to every
    # slot, a non-None container is indexed via maybe_get, else `default`.
    if callable(initializers):
        return initializers
    elif initializers is not None:
        return maybe_get(initializers, j)
    else:
        return default


def _pass_shape(shape, initializer, j):
    # Suppress the explicit shape when the initializer for slot `j`
    # already carries one (has a `.shape` attribute or is a list literal).
    init = maybe_get(initializer, j)
    return None if (hasattr(init, 'shape') or isinstance(init, list)) else shape
# NOTE(review): chunk starts mid-method — the tail of a children-finding
# method (its `def` line and the definitions of `graph`, `node_map` and
# `children` are outside this view).
for node in graph:
    parents = set([])

    def explore(v):
        # Walk the tf op-input graph upward until reaching known nodes.
        for v_parent in v.op.inputs:
            if v_parent in node_map:
                parents.add(node_map[v_parent])
            else:
                explore(v_parent)

    explore(node.sample())
    if self in parents:
        children.add(node)
return {context(c) for c in children}

def markov_blanket(self):
    # The Markov blanket of this node: its children, its parents, and the
    # co-parents (other parents of its children), excluding itself.
    children = self.children()
    parents = self.parents()
    coparents = set([p for node in children for p in node.parents()]) - {self}
    return (children, parents, coparents)

# NOTE(review): module-level registrations (after the class body in the
# original file).
register_session_run_conversion_functions(
    Tensor,
    Tensor._session_run_conversion_fetch_function,
    Tensor._session_run_conversion_feed_function,
    Tensor._session_run_conversion_feed_function_for_partial_run)

tf.register_tensor_conversion_function(Tensor,
                                       Tensor._tensor_conversion_function)
# Register a conversion function which reads the value of the variable, # allowing instances of the class to be used as tensors. def _tensor_conversion(var, dtype=None, name=None, as_ref=False): return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access def replicated_fetch_function(var): # pylint: disable=protected-access return ([var._dense_var_to_tensor()], lambda v: v[0]) # pylint: enable=protected-access ops.register_tensor_conversion_function(ReplicatedVariable, _tensor_conversion) ops.register_dense_tensor_like_type(ReplicatedVariable) session_lib.register_session_run_conversion_functions( ReplicatedVariable, replicated_fetch_function) def replicated_scope(num_replicas): """Variable scope for constructing replicated variables.""" def _replicated_variable_getter(getter, name, *args, **kwargs): """Getter that constructs replicated variables.""" collections = kwargs.pop("collections", None) if collections is None: collections = [ops.GraphKeys.GLOBAL_VARIABLES] kwargs["collections"] = [] logging.info("Constructing replicated variable %s", name) variables = [] index = {}
# NOTE(review): chunk starts mid-function — the tail of the fetch function
# (its `def` line and the construction of `unwrapped`/`rewrapper` are
# outside this view).
return unwrapped, rewrapper


def _feed_function(big_tensor, feed_value):
    # Feed `feed_value` for the raw tensor underlying a tf_big.Tensor.
    return [(big_tensor._raw, feed_value)]


def _feed_function_for_partial_run(big_tensor):
    # Tensors that must be declared up front for a partial run.
    return [big_tensor._raw]


# this allows tf_big.Tensor to be passed directly to tf.Session.run,
# unwrapping and converting the result as needed
tf_session.register_session_run_conversion_functions(
    tensor_type=Tensor,
    fetch_function=_fetch_function,
    feed_function=_feed_function,
    feed_function_for_partial_run=_feed_function_for_partial_run,
)


def _tensor_conversion_function(tensor, dtype=None, name=None, as_ref=False):
    # Only plain value conversion is supported: no naming, no refs, and
    # only int32 (or unspecified) as the target dtype.
    assert name is None, "Not implemented, name='{}'".format(name)
    assert not as_ref, "Not implemented, as_ref={}".format(as_ref)
    assert dtype in [tf.int32, None], dtype
    return export_tensor(tensor, dtype=dtype)


# TODO(Morten)
# this allows implicit convertion of tf_big.Tensor to tf.Tensor,
# but since the output dtype is determined by the outer context
# we essentially have to export with the implied risk of data loss
# Register a conversion function which reads the value of the variable, # allowing instances of the class to be used as tensors. def _tensor_conversion(var, dtype=None, name=None, as_ref=False): return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access def replicated_fetch_function(var): # pylint: disable=protected-access return ([var._dense_var_to_tensor()], lambda v: v[0]) # pylint: enable=protected-access ops.register_tensor_conversion_function(ReplicatedVariable, _tensor_conversion) ops.register_dense_tensor_like_type(ReplicatedVariable) session_lib.register_session_run_conversion_functions( ReplicatedVariable, replicated_fetch_function) def replicated_scope(num_replicas): """Variable scope for constructing replicated variables.""" def _replicated_variable_getter(getter, name, *args, **kwargs): """Getter that constructs replicated variables.""" collections = kwargs.pop("collections", None) if collections is None: collections = [ops.GraphKeys.GLOBAL_VARIABLES] kwargs["collections"] = [] variables = [] index = {} for i in range(num_replicas):
def _dense_var_to_tensor(var, dtype=None, name=None, as_ref=False):
    """Conversion function: delegate to the instance's own densifier."""
    return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref)


ops.register_tensor_conversion_function(ComposedVariable,
                                        _dense_var_to_tensor)


def fetch_function(variable):
    # sess.run support: fetch the read value, identity rewrap.
    return [variable.read_value()], lambda val: val[0]


def feed_function(feed, feed_val):
    return [(feed.read_value(), feed_val)]


def feed_function_for_partial_run(feed):
    return [feed.read_value()]


from tensorflow.python.client.session import register_session_run_conversion_functions

# Register the same fetch/feed handlers for both composed variable types.
for _composed_type in (ComposedResourceVariable, ComposedVariable):
    register_session_run_conversion_functions(
        tensor_type=_composed_type,
        fetch_function=fetch_function,
        feed_function=feed_function,
        feed_function_for_partial_run=feed_function_for_partial_run)

ComposedVariable._OverloadAllOperators()


class BaseParameter(ZfitParameter, metaclass=MetaBaseParameter):
    pass
def _dense_var_to_tensor(var, dtype=None, name=None, as_ref=False):
    """Conversion function: delegate to the instance's own densifier."""
    return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref)


ops.register_tensor_conversion_function(Data, _dense_var_to_tensor)


def fetch_function(data):
    # sess.run support: fetch the current value, identity rewrap.
    return [data.value()], lambda val: val[0]


def feed_function(data, feed_val):
    return [(data.value(), feed_val)]


def feed_function_for_partial_run(data):
    return [data.value()]


from tensorflow.python.client.session import register_session_run_conversion_functions

register_session_run_conversion_functions(
    tensor_type=Data,
    fetch_function=fetch_function,
    feed_function=feed_function,
    feed_function_for_partial_run=feed_function_for_partial_run)

Data._OverloadAllOperators()


class LightDataset:
    """Minimal dataset wrapper holding a single tensor."""

    def __init__(self, tensor):
        # Coerce non-Tensor inputs through ztf before storing.
        if not isinstance(tensor, tf.Tensor):
            tensor = ztf.convert_to_tensor(tensor)
        self.tensor = tensor

    @classmethod
    def from_tensor(cls, tensor):
        """Alternate constructor from an existing tensor."""
        return cls(tensor=tensor)
# NOTE(review): chunk starts mid-method — this is the tail of the session
# fetch function (its `def` line is outside this view): fetch the
# variable's value tensor, identity rewrap of the single result.
return ([tensor.value()], lambda val: val[0])

@staticmethod
def _session_run_conversion_feed_function(feed, feed_val):
    # Feed `feed_val` in place of the variable's value tensor.
    return [(feed.value(), feed_val)]

@staticmethod
def _session_run_conversion_feed_function_for_partial_run(feed):
    # Tensors that must be declared up front for a partial run.
    return [feed.value()]

@staticmethod
def _tensor_conversion_function(v, dtype=None, name=None, as_ref=False):
    # Conversion function for tf.convert_to_tensor: refuse incompatible
    # dtypes and ref-type requests; `name` is accepted but unused.
    _ = name
    if dtype and not dtype.is_compatible_with(v.dtype):
        raise ValueError(
            "Incompatible type conversion requested to type '%s' for variable "
            "of type '%s'" % (dtype.name, v.dtype.name))
    if as_ref:
        raise ValueError("%s: Ref type is not supported." % v)
    return v.value()

# NOTE(review): module-level registrations (after the class body in the
# original file).
register_session_run_conversion_functions(
    RandomVariable, RandomVariable._session_run_conversion_fetch_function,
    RandomVariable._session_run_conversion_feed_function,
    RandomVariable._session_run_conversion_feed_function_for_partial_run)

tf.register_tensor_conversion_function(
    RandomVariable, RandomVariable._tensor_conversion_function)
def __nonzero__(self):
    # Python 2 truthiness hook: using the wrapper as a bool is forbidden,
    # mirroring tf.Tensor's behavior.
    raise TypeError(
        'Using a `StochasticTensor` as a Python `bool` is not allowed. '
        'Use `if t is not None:` instead of `if t:` to test if a '
        'tensor is defined, and use TensorFlow ops such as '
        'tf.cond to execute subgraphs conditioned on the value of '
        'a tensor.'
    )

# NOTE(review): the definitions below are module-level in the original
# file (after the StochasticTensor class body).
def _to_tensor(value, dtype=None, name=None, as_ref=False):
    # Conversion function for tf.convert_to_tensor: unwrap the underlying
    # tensor, refusing incompatible dtypes and ref-type requests.
    if dtype and not dtype.is_compatible_with(value.dtype):
        raise ValueError('Incompatible type conversion requested to type '
                         '%s for tensor of type %s' %
                         (dtype.name, value.dtype.name))
    if as_ref:
        raise ValueError('%r: Ref type not supported.' % value)
    return value.__wrapped__

tf.register_tensor_conversion_function(StochasticTensor, _to_tensor)

# bring support for session.run(StochasticTensor), and for using as keys
# in feed_dict.
register_session_run_conversion_functions(
    StochasticTensor,
    fetch_function=lambda t: ([getattr(t, '__wrapped__')],
                              lambda val: val[0]),
    feed_function=lambda t, v: [(getattr(t, '__wrapped__'), v)],
    feed_function_for_partial_run=lambda t: [getattr(t, '__wrapped__')]
)