Example #1
def restore_variables(path, session=None):
    """Restore TensorFlow variables from the checkpoint at `path`; variable
    metadata and role collections are read from the `path + '.collections'`
    pickle written by the matching save routine."""
    if session is None:
        session = get_session()
    # ====== load and check var meta ====== #
    with open(path + '.collections', 'rb') as f:
        collections, var_meta = cPickle.load(f)
    var_list = []
    allvars = {v.name.split(':')[0]: v for v in get_all_variables()}
    for name, dtype, shape in var_meta:
        if name in allvars:  # found a predefined variable
            var_list.append(allvars[name])
        else:  # create a new variable from the saved metadata
            if tf.get_variable_scope().name:
                raise RuntimeError(
                    "The current variable scope is: %s, you can "
                    "only restore variables from the default scope." %
                    tf.get_variable_scope().name)
            var_list.append(
                tf.get_variable(shape=shape, name=name, dtype=dtype))
    # ====== restore the variables ====== #
    name = '|'.join(sorted([v.name for v in var_list]))
    if name in _saver:
        saver = _saver[name]
    else:
        saver = tf.train.Saver(var_list=var_list,
                               restore_sequentially=False,
                               allow_empty=False)
        _saver[name] = saver  # cache the saver for this exact variable set
    saver.restore(session, path)
    # ====== restore the collections (variable roles) ====== #
    for v in var_list:
        role.add_roles(v, collections[v.name])
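
A minimal usage sketch, assuming the checkpoint at `path` and its `path + '.collections'` pickle were written earlier by the library's matching save routine (the path below is illustrative):

# Restore into the default session and default variable scope.
# Variables already defined in the graph are reused; missing ones
# are recreated from the pickled metadata before restoring.
restore_variables('/tmp/my_model_checkpoint')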
Example #2
def placeholder(shape=None, dtype=floatX, name=None, roles=[]):
    """Create a `tf.placeholder`, or return an existing placeholder with the
    same `name` (validating its shape), and attach the given roles."""
    if shape is None and name is None:
        raise ValueError(
            "shape and name arguments cannot be None at the same time.")
    # ====== check for a duplicated placeholder ====== #
    if name is not None:
        all_placeholders = [
            o._outputs[0] for o in get_all_operations(otype='Placeholder')
        ]
        for v in all_placeholders:
            v_shape = tuple(v.shape.as_list())
            if v.name == name + ':0':  # found an existing placeholder
                # validate the requested shape against the existing one
                if shape is not None:
                    if v_shape == tuple(shape):  # normalize list vs tuple
                        return role.add_roles(v, roles)
                    else:
                        raise ValueError(
                            "A pre-defined placeholder with name '%s' has "
                            "shape %s, which differs from the given shape %s."
                            % (name, v_shape, shape))
                # no shape given, just return the existing placeholder
                else:
                    return role.add_roles(v, roles)
    # ====== create a new placeholder ====== #
    plh = tf.placeholder(dtype=dtype, shape=shape, name=name)
    return role.add_roles(plh, roles)
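
A short sketch of the deduplication behaviour (names and shapes are illustrative; `add_roles` is assumed to return the tensor it receives):

# First call creates a new tf.placeholder named 'X'.
x1 = placeholder(shape=(None, 32), dtype='float32', name='X')
# Same name and shape: the existing placeholder is returned.
x2 = placeholder(shape=(None, 32), dtype='float32', name='X')
assert x1 is x2
# Same name but a different shape raises ValueError.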
Example #3
def get_updates(self, loss_or_grads, params):
  grads_vars = self.get_gradients(loss_or_grads, params)
  with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE) as scope:
    scope_name = scope.name
    updates = self.algorithm.apply_gradients(grads_vars,
                                             global_step=self._step)
  # tag every variable created by the wrapped optimizer (e.g. slot
  # variables) with the OptimizerVariable role
  for v in get_all_variables(scope=scope_name):
    add_roles(v, roles=OptimizerVariable)
  return updates
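
A hedged usage sketch (`opt` stands for an instance of the wrapper class that owns `self.algorithm`, e.g. one wrapping `tf.train.AdamOptimizer`; its construction is not shown in these examples):

# One training step: compute (possibly clipped) gradients, apply them,
# and tag all optimizer-created variables with the OptimizerVariable role.
train_op = opt.get_updates(loss, params=tf.trainable_variables())
get_session().run(train_op)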
Example #4
def _as_variable(x, name, roles=None):
  # nothing to do
  if x is None:
    return None
  # create variable
  if not is_tensor(x):
    x = tf.Variable(x, dtype=floatX, name=name)
    get_session().run(x.initializer)
  return add_roles(x, roles)
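
The three paths through `_as_variable`, sketched (role arguments omitted; `add_roles(x, None)` is assumed to return `x` unchanged):

lr = _as_variable(0.001, name='learning_rate')  # python scalar -> initialized tf.Variable
lr2 = _as_variable(lr, name='unused')           # already a tensor -> returned as-is
nothing = _as_variable(None, name='unused')     # None -> None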
Example #5
def get_gradients(self, loss_or_grads, params):
  """
  Note
  ----
  The returned gradients may contain None values
  """
  # check that the wrapped optimizer is valid
  if self.algorithm is None or \
      not hasattr(self.algorithm, 'compute_gradients') or \
      not hasattr(self.algorithm, 'apply_gradients'):
    raise RuntimeError("Optimizer is None, or doesn't have the attributes: "
                       "compute_gradients and apply_gradients.")
  with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE) as scope:
    scope_name = scope.name
    # compute the gradients, dropping parameters that received no gradient
    grads_var = self.algorithm.compute_gradients(loss_or_grads,
                                                 var_list=params)
    grads_var = {g: v for g, v in grads_var if g is not None}
    grads = list(grads_var.keys())
    params = list(grads_var.values())
    # ====== clipnorm ====== #
    if self.clipnorm is not None:
      if self.clip_alg == 'norm':
        grads = [tf.clip_by_norm(g, self.clipnorm)
                 for g in grads]
      elif self.clip_alg == 'total_norm':
        grads, _ = tf.clip_by_global_norm(grads, self.clipnorm)
      elif self.clip_alg == 'avg_norm':
        grads = [tf.clip_by_average_norm(g, self.clipnorm)
                 for g in grads]
      else:
        raise ValueError("Unknown norm clipping algorithm: '%s'" % self.clip_alg)
    # ====== clipvalue ====== #
    if self.clipvalue is not None:
      grads = [tf.clip_by_value(g, -self.clipvalue, self.clipvalue)
               for g in grads]
    # ====== get the final norm value ====== #
    self._norm = add_roles(tf.global_norm(grads, name="GradientNorm"),
                           GradientsNorm)
  # ====== tag optimizer-created variables ====== #
  for v in get_all_variables(scope=scope_name):
    add_roles(v, roles=OptimizerVariable)
  return [(g, p) for g, p in zip(grads, params)]
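
For intuition, the three `clip_alg` modes map directly onto standard TensorFlow 1.x ops; a self-contained sketch with hand-picked norms:

import tensorflow as tf

grads = [tf.constant([3.0, 4.0]), tf.constant([6.0, 8.0])]  # L2 norms 5 and 10
clipnorm = 5.0
# 'norm': each tensor is clipped independently to norm <= 5
per_tensor = [tf.clip_by_norm(g, clipnorm) for g in grads]
# 'total_norm': the joint (global) norm of all gradients is clipped to <= 5
joint, _ = tf.clip_by_global_norm(grads, clipnorm)
# 'avg_norm': clips each tensor based on its average L2 norm
averaged = [tf.clip_by_average_norm(g, clipnorm) for g in grads]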
Example #6
    def test_computational_graph2(self):
        np.random.seed(1208)

        X = K.variable(np.zeros((8, 12)), name='X')
        Y = K.variable(np.random.rand(12, 8), name='Y')
        Z = K.placeholder(shape=(8, 8), name='Z')
        a = K.dot(X, Y)
        add_roles(a, Auxiliary)
        a = a + Z
        g1 = K.ComputationGraph(a)

        self.assertEqual(len(g1.trainable_variables), 2)
        self.assertEqual(len(g1.placeholders), 1)
        self.assertEqual(len(g1.updates), 1)
        self.assertEqual(len(g1.auxiliary_variables), 1)

        f = K.function(Z, [a] + g1.auxiliary_variables)

        output = f(np.random.rand(8, 8))
        self.assertEqual(repr(np.sum(output[0]))[:5], "32.20")
        self.assertEqual(np.sum(output[1]), 0)
        self.assertEqual(np.unique(K.eval(X)).tolist(), [12.])
Example #7
def confusion_matrix(y_true, y_pred, labels=None, normalize=False,
                     name=None):
  """
  Computes the confusion matrix of given vectors containing
  actual observations and predicted observations.

  Parameters
  ----------
  y_true : 1-d or 2-d tensor variable
      true values
  y_pred : 1-d or 2-d tensor variable
      prediction values
  labels : array of shape [nb_classes], or int (the number of classes)
      List of labels to index the matrix. This may be used to reorder
      or select a subset of labels.
      If none is given, the labels that appear at least once
      in ``y_true`` or ``y_pred`` are used in sorted order.
  normalize : bool
      if True, normalize each row to [0., 1.]

  Note
  ----
  If you want to calculate Precision, Recall, or F1 scores from the
  confusion matrix, set `normalize=False`.

  """
  # ====== numpy ndarray ====== #
  if isinstance(y_true, np.ndarray) or isinstance(y_pred, np.ndarray):
    from sklearn.metrics import confusion_matrix as sk_cm
    nb_classes = None
    if y_true.ndim > 1:
      nb_classes = y_true.shape[1]
      y_true = np.argmax(y_true, axis=-1)
    if y_pred.ndim > 1:
      nb_classes = y_pred.shape[1]
      y_pred = np.argmax(y_pred, axis=-1)
    # get number of classes
    if labels is None:
      if nb_classes is None:
        raise RuntimeError("Cannot infer the number of classes for confusion matrix")
      # sklearn expects a list of label values, not a class count
      labels = list(range(int(nb_classes)))
    elif is_number(labels):
      labels = list(range(int(labels)))
    cm = sk_cm(y_true=y_true, y_pred=y_pred, labels=labels)
    if normalize:
      cm = cm.astype('float32') / np.sum(cm, axis=1, keepdims=True)
    return cm
  # ====== tensorflow tensor ====== #
  with tf.name_scope(name, 'confusion_matrix', [y_true, y_pred]):
    from tensorflow.contrib.metrics import confusion_matrix as tf_cm
    nb_classes = None
    if y_true.shape.ndims == 2:
      nb_classes = y_true.shape.as_list()[-1]
      y_true = tf.argmax(y_true, -1)
    elif y_true.shape.ndims != 1:
      raise ValueError('actual must be 1-d or 2-d tensor variable')
    if y_pred.shape.ndims == 2:
      nb_classes = y_pred.shape.as_list()[-1]
      y_pred = tf.argmax(y_pred, -1)
    elif y_pred.shape.ndims != 1:
      raise ValueError('pred must be 1-d or 2-d tensor variable')
    # check valid labels
    if labels is None:
      if nb_classes is None:
        raise RuntimeError("Cannot infer the number of classes for confusion matrix")
      labels = int(nb_classes)
    elif is_number(labels):
      labels = int(labels)
    elif hasattr(labels, '__len__'):
      labels = len(labels)
    # rows are true labels, columns are predictions (same layout as sklearn)
    cm = tf_cm(labels=y_true, predictions=y_pred,
               num_classes=labels)
    if normalize:
      cm = tf.cast(cm, dtype='float32')
      cm = cm / tf.reduce_sum(cm, axis=1, keep_dims=True)
    return add_roles(cm, ConfusionMatrix)
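
A quick check of the numpy path, with values small enough to verify by hand:

import numpy as np

y_true = np.array([0, 1, 2, 2])
y_pred = np.array([0, 2, 2, 2])
cm = confusion_matrix(y_true, y_pred, labels=3)
# rows are true classes, columns are predictions:
# [[1 0 0]
#  [0 0 1]
#  [0 0 2]]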