def compute_gradients(self, loss, var_list=None, **kwargs):
    """Compute gradients of `loss` with respect to `var_list`.

    When `var_list` is None, falls back to the graph's trainable
    variables plus the TRAINABLE_RESOURCE_VARIABLES collection.

    Returns:
        A list of (gradient, variable) pairs, one per variable.
    """
    if var_list is None:
        var_list = (variables.trainable_variables() +
                    ops.get_collection(ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
    return list(zip(gradients(loss, var_list), var_list))
def get_variables(scope=None, suffix=None, collection=ops.GraphKeys.GLOBAL_VARIABLES):
    """Fetch variables from `collection`, optionally filtered by scope and suffix.

    Args:
        scope: a scope name (or a VariableScope, whose name is used) to
            filter on; may be None.
        suffix: if given, the filter becomes a regex matching any name
            under `scope` that ends with `suffix`.
        collection: graph collection key to read from.

    Returns:
        The list of matching collection entries.
    """
    if isinstance(scope, variable_scope.VariableScope):
        scope = scope.name
    if suffix is not None:
        # Build the "<scope>.*<suffix>" filter; an absent scope contributes
        # an empty prefix so the pattern matches anywhere.
        prefix = scope if scope else ''
        scope = prefix + '.*' + suffix
    return ops.get_collection(collection, scope)
def get_variable_scope_store():
    """Return the variable scope store, creating and registering it on first use.

    The store is kept as the sole entry of the
    _GLOBAL_VARIABLE_SCOPE_STORE_KEY collection.
    """
    stored = ops.get_collection(_GLOBAL_VARIABLE_SCOPE_STORE_KEY)
    if stored:
        return stored[0]
    # First access: create the store and stash it in the collection.
    store = _VariableScopeStore()
    ops.add_to_collection(_GLOBAL_VARIABLE_SCOPE_STORE_KEY, store)
    return store
def compute_gradients(self, loss, var_list=None, **kwargs):
    """Compute symbolic gradients of `loss` via Theano's `T.grad`.

    When `var_list` is None, falls back to the graph's trainable
    variables plus the TRAINABLE_RESOURCE_VARIABLES collection.
    The loss expression is also recorded on the instance as `self.loss`.

    Returns:
        A list of (gradient, variable) pairs, one per variable.
    """
    if var_list is None:
        var_list = (variables.trainable_variables() +
                    ops.get_collection(ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
    self.loss = loss
    return list(zip(T.grad(loss, var_list), var_list))
def trainable_variables():
    """Return the contents of the TRAINABLE_VARIABLES graph collection."""
    key = ops.GraphKeys.TRAINABLE_VARIABLES
    return ops.get_collection(key)
def model_variables():
    """Return the contents of the MODEL_VARIABLES graph collection."""
    key = ops.GraphKeys.MODEL_VARIABLES
    return ops.get_collection(key)
def local_variables():
    """Return the contents of the LOCAL_VARIABLES graph collection."""
    key = ops.GraphKeys.LOCAL_VARIABLES
    return ops.get_collection(key)
def global_variables():
    """Return the contents of the GLOBAL_VARIABLES graph collection."""
    key = ops.GraphKeys.GLOBAL_VARIABLES
    return ops.get_collection(key)