def _projection_op(self, state, name=None):
  """Projects the internal state onto the feasible set of Lagrange multipliers.

  If a maximum multiplier radius was configured, the state is projected onto
  the Euclidean ball of that radius (intersected with the nonnegative
  orthant); otherwise the multipliers are simply clipped at zero.

  Args:
    state: tensor of Lagrange multipliers to project in place.
    name: optional name for the resulting assign op.

  Returns:
    The op assigning the projected multipliers back into `state`.
  """
  with ops.colocate_with(state):
    if self._maximum_multiplier_radius:
      projected = _project_multipliers_wrt_euclidean_norm(
          state, self._maximum_multiplier_radius)
    else:
      # No radius constraint: only enforce nonnegativity.
      projected = standard_ops.maximum(state, 0.0)
    return state_ops.assign(state, projected, name=name)
def _projection_op(self, state, name=None):
  """Returns an op that projects `state` back onto the feasible multiplier set.

  With a configured `_maximum_multiplier_radius`, the multipliers are
  projected w.r.t. the Euclidean norm; without one, they are clipped to be
  nonnegative.

  Args:
    state: tensor variable holding the Lagrange multipliers.
    name: optional name for the returned assign op.

  Returns:
    The assign op writing the projected values into `state`.
  """
  with ops.colocate_with(state):
    radius = self._maximum_multiplier_radius
    if not radius:
      # No ball constraint configured; clip at zero only.
      clipped = standard_ops.maximum(state, 0.0)
    else:
      clipped = _project_multipliers_wrt_euclidean_norm(state, radius)
    return state_ops.assign(state, clipped, name=name)
def while_loop_body(iteration, matrix, inactive, old_inactive):
  """Performs one iteration of the projection."""
  # The condition compares `inactive` against `old_inactive`; the body itself
  # never reads the old mask.
  del old_inactive
  iteration += 1
  # Redistribute each column's deficit (1 - column sum) evenly over its
  # still-inactive entries; the max(1, ...) guards against division by zero.
  deficit = 1.0 - standard_ops.reduce_sum(matrix, axis=0, keepdims=True)
  active_count = standard_ops.maximum(
      1.0, standard_ops.reduce_sum(inactive, axis=0, keepdims=True))
  matrix = matrix + (deficit / active_count) * inactive
  # Entries driven to (or below) zero become inactive and are zeroed out.
  new_inactive = standard_ops.cast(matrix > 0, matrix.dtype)
  matrix = matrix * new_inactive
  return (iteration, matrix, new_inactive, inactive)
def while_loop_body(iteration, matrix, inactive, old_inactive):
  """Performs one iteration of the projection."""
  del old_inactive  # Read only by the loop condition, never by the body.
  iteration += 1
  # Spread the shortfall from each column sum (relative to 1) across the
  # entries that are still inactive; max(1, ...) prevents dividing by zero.
  shortfall = 1.0 - standard_ops.reduce_sum(matrix, axis=0, keepdims=True)
  divisor = standard_ops.maximum(
      1.0, standard_ops.reduce_sum(inactive, axis=0, keepdims=True))
  scale = shortfall / divisor
  matrix += scale * inactive
  # Recompute the inactive mask: strictly-positive entries survive.
  new_inactive = standard_ops.cast(matrix > 0, matrix.dtype)
  matrix *= new_inactive
  return (iteration, matrix, new_inactive, inactive)
def while_loop_body(iteration, multipliers, inactive, old_inactive):
  """Performs one iteration of the projection."""
  # NOTE(review): `radius` is a free variable captured from the enclosing
  # scope (not visible in this block).
  del old_inactive  # Needed by the condition, but not the body.
  iteration += 1
  # If the multipliers exceed the radius budget, shrink the inactive entries
  # by the (nonpositive) excess divided among them; max(1, ...) avoids a
  # zero denominator.
  excess = radius - standard_ops.reduce_sum(multipliers)
  count = standard_ops.maximum(1.0, standard_ops.reduce_sum(inactive))
  scale = standard_ops.minimum(0.0, excess / count)
  multipliers = multipliers + (scale * inactive)
  # Entries pushed to (or below) zero drop out of the active adjustment.
  new_inactive = standard_ops.cast(multipliers > 0, multipliers.dtype)
  multipliers = multipliers * new_inactive
  return (iteration, multipliers, new_inactive, inactive)
def while_loop_body(iteration, multipliers, inactive, old_inactive):
  """Performs one iteration of the projection."""
  # NOTE(review): depends on `radius` captured from the enclosing function's
  # scope, which is outside this block.
  del old_inactive  # Only the while-loop condition inspects the old mask.
  iteration += 1
  # Nonpositive correction: shave the amount by which the multiplier sum
  # overshoots `radius`, split evenly over inactive entries (divisor floored
  # at 1 to stay finite).
  overshoot = radius - standard_ops.reduce_sum(multipliers)
  inactive_total = standard_ops.maximum(
      1.0, standard_ops.reduce_sum(inactive))
  scale = standard_ops.minimum(0.0, overshoot / inactive_total)
  multipliers += scale * inactive
  # Refresh the mask and zero out anything that fell to or below zero.
  new_inactive = standard_ops.cast(multipliers > 0, multipliers.dtype)
  multipliers *= new_inactive
  return (iteration, multipliers, new_inactive, inactive)
def _projection_op(self, state, name=None):
  """Projects the log-stochastic-matrix state and floors the log multipliers.

  The state is projected w.r.t. the KL divergence, then each log multiplier
  is bounded below by log(minimum_radius / dimension) so no entry can vanish.

  Args:
    state: square 2-D tensor of log multipliers (side = num_constraints + 1).
    name: optional name for the resulting assign op.

  Returns:
    The op assigning the projected log multipliers back into `state`.
  """
  with ops.colocate_with(state):
    # Shape sanity checks: the state always has the same (square, fully
    # defined) shape as that returned by _initial_state(), so none of these
    # assertions should ever fire.
    shape = state.get_shape()
    assert shape is not None
    assert shape.ndims == 2
    assert shape[0] == shape[1]
    dimension = shape.dims[0].value
    assert dimension is not None
    log_floor = standard_ops.log(
        self._minimum_multiplier_radius / standard_ops.to_float(dimension))
    projected = standard_ops.maximum(
        _project_log_stochastic_matrix_wrt_kl_divergence(state), log_floor)
    return state_ops.assign(state, projected, name=name)
def _projection_op(self, state, name=None):
  """Projects the log-stochastic-matrix state and floors the log multipliers.

  Projects `state` w.r.t. the KL divergence, then lower-bounds every log
  multiplier by log(minimum_radius / dimension).

  Args:
    state: square 2-D tensor of log multipliers (side = num_constraints + 1).
    name: optional name for the resulting assign op.

  Returns:
    The op assigning the projected log multipliers back into `state`.
  """
  with ops.colocate_with(state):
    # Gets the dimension of the state (num_constraints + 1)--all of these
    # assertions are of things that should be impossible, since the state
    # passed into this method will have the same shape as that returned by
    # _initial_state().
    state_shape = state.get_shape()
    assert state_shape is not None
    assert state_shape.ndims == 2
    assert state_shape[0] == state_shape[1]
    # Bug fix: under V2 shape behavior, TensorShape.__getitem__ returns a
    # plain int (not a Dimension), so `state_shape[0].value` raises
    # AttributeError. `state_shape.dims[0].value` always yields a Dimension,
    # matching the sibling _projection_op implementation.
    dimension = state_shape.dims[0].value
    assert dimension is not None
    minimum_log_multiplier = standard_ops.log(
        self._minimum_multiplier_radius / standard_ops.to_float(dimension))
    return state_ops.assign(
        state,
        standard_ops.maximum(
            _project_log_stochastic_matrix_wrt_kl_divergence(state),
            minimum_log_multiplier),
        name=name)