Example #1
  def call(self, x):
    if not isinstance(x, list):
      input_shape = model_ops.int_shape(x)
    else:
      x = x[0]
      input_shape = model_ops.int_shape(x)
    self.build(input_shape)
    # Modes follow the Keras 1 convention: 0 = feature-wise normalization
    # with moving averages used at test time, 1 = sample-wise, 2 =
    # feature-wise using batch statistics in both phases.
    if self.mode == 0 or self.mode == 2:

      reduction_axes = list(range(len(input_shape)))
      del reduction_axes[self.axis]
      broadcast_shape = [1] * len(input_shape)
      broadcast_shape[self.axis] = input_shape[self.axis]

      x_normed, mean, std = model_ops.normalize_batch_in_training(
          x, self.gamma, self.beta, reduction_axes, epsilon=self.epsilon)

      if self.mode == 0:
        self.add_update([
            model_ops.moving_average_update(self.running_mean, mean,
                                            self.momentum),
            model_ops.moving_average_update(self.running_std, std,
                                            self.momentum)
        ], x)

        if sorted(reduction_axes) == list(range(model_ops.get_ndim(x)))[:-1]:
          x_normed_running = tf.nn.batch_normalization(
              x,
              self.running_mean,
              self.running_std,
              self.beta,
              self.gamma,
              epsilon=self.epsilon)
        else:
          # need broadcasting
          broadcast_running_mean = tf.reshape(self.running_mean,
                                              broadcast_shape)
          broadcast_running_std = tf.reshape(self.running_std, broadcast_shape)
          broadcast_beta = tf.reshape(self.beta, broadcast_shape)
          broadcast_gamma = tf.reshape(self.gamma, broadcast_shape)
          x_normed_running = tf.nn.batch_normalization(
              x,
              broadcast_running_mean,
              broadcast_running_std,
              broadcast_beta,
              broadcast_gamma,
              epsilon=self.epsilon)

        # pick the normalized form of x corresponding to the training phase
        x_normed = model_ops.in_train_phase(x_normed, x_normed_running)

    elif self.mode == 1:
      # sample-wise normalization
      m = model_ops.mean(x, axis=-1, keepdims=True)
      std = model_ops.sqrt(
          model_ops.var(x, axis=-1, keepdims=True) + self.epsilon)
      x_normed = (x - m) / (std + self.epsilon)
      x_normed = self.gamma * x_normed + self.beta
    return x_normed
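For reference, the mode == 1 branch above is plain per-sample standardization followed by a learned affine transform. A minimal NumPy sketch of just the normalization step, with illustrative values and gamma/beta omitted (the names here are not part of the layer):

import numpy as np

x = np.array([[1.0, 2.0, 3.0],
              [4.0, 6.0, 8.0]])
epsilon = 1e-5
m = x.mean(axis=-1, keepdims=True)              # per-sample mean
std = np.sqrt(x.var(axis=-1, keepdims=True) + epsilon)
x_normed = (x - m) / (std + epsilon)            # mirrors the mode == 1 branch
print(x_normed.mean(axis=-1))                   # ~0 for every row
print(x_normed.std(axis=-1))                    # ~1 for every row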
Example #7
def mean_absolute_error(y_true, y_pred):
  return model_ops.mean(tf.abs(y_pred - y_true), axis=-1)
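Like all of the losses in these examples, mean_absolute_error reduces with mean over the last axis only, so it returns one value per sample; the batch-level reduction is left to the caller. A quick NumPy check of that convention:

import numpy as np

y_true = np.array([[0.0, 1.0], [1.0, 1.0]])
y_pred = np.array([[0.1, 0.9], [0.8, 0.6]])
print(np.mean(np.abs(y_pred - y_true), axis=-1))
# [0.1 0.3] -- one loss per sample, not a single scalar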
Example #9
def cosine_proximity(y_true, y_pred):
  y_true = model_ops.l2_normalize(y_true, axis=-1)
  y_pred = model_ops.l2_normalize(y_pred, axis=-1)
  return -model_ops.mean(y_true * y_pred, axis=-1)
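cosine_proximity returns the negative mean of the element-wise product of the L2-normalized vectors, i.e. the cosine similarity scaled by 1/n (mean rather than sum over the last axis) and negated, so minimizing it aligns predictions with targets. A NumPy sketch:

import numpy as np

def l2_normalize(v, axis=-1):
  return v / np.linalg.norm(v, axis=axis, keepdims=True)

y_true = np.array([[1.0, 0.0]])
y_pred = np.array([[2.0, 0.0]])    # same direction, larger magnitude
t, p = l2_normalize(y_true), l2_normalize(y_pred)
print(-np.mean(t * p, axis=-1))    # [-0.5]: cosine 1.0 divided by n=2, negated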
Example #10
def poisson(y_true, y_pred):
  return model_ops.mean(
      y_pred - y_true * tf.log(y_pred + model_ops.epsilon()), axis=-1)
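The poisson loss is the negative log-likelihood of a Poisson distribution with rate y_pred, dropping the log(y_true!) term that does not depend on the prediction; epsilon guards the log against y_pred == 0. Per element, y_pred - y_true * log(y_pred) is minimized exactly at y_pred == y_true. A small check with illustrative values:

import numpy as np

eps = 1e-7
y_true = np.array([[2.0, 0.0]])
y_pred = np.array([[1.5, 0.1]])
print(np.mean(y_pred - y_true * np.log(y_pred + eps), axis=-1))  # ~[0.394]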
Example #11
def binary_crossentropy(y_true, y_pred):
  # Note the swapped arguments: the backend helper follows the old Keras
  # convention of (output, target) rather than (target, output).
  return model_ops.mean(model_ops.binary_crossentropy(y_pred, y_true), axis=-1)
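The element-wise formula behind the helper, sketched in NumPy with a hypothetical eps clip to keep the logs finite (clipping is presumably handled inside model_ops.binary_crossentropy itself):

import numpy as np

def bce(y_true, y_pred, eps=1e-7):
  p = np.clip(y_pred, eps, 1.0 - eps)  # avoid log(0)
  return np.mean(-(y_true * np.log(p) + (1.0 - y_true) * np.log(1.0 - p)),
                 axis=-1)

print(bce(np.array([[1.0, 0.0]]), np.array([[0.9, 0.2]])))
# ~[0.164]: -(log 0.9 + log 0.8) / 2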
Example #12
def hinge(y_true, y_pred):
  return model_ops.mean(tf.maximum(1. - y_true * y_pred, 0.), axis=-1)
Example #13
def squared_hinge(y_true, y_pred):
  return model_ops.mean(
      tf.square(tf.maximum(1. - y_true * y_pred, 0.)), axis=-1)
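Both hinge variants assume labels encoded as -1/+1: an element contributes zero loss only once y_true * y_pred >= 1, i.e. the prediction sits on the correct side of the margin, and squaring penalizes margin violations quadratically. For example:

import numpy as np

y_true = np.array([[1.0, -1.0, 1.0]])
y_pred = np.array([[2.0, -0.5, 0.3]])
margins = np.maximum(1.0 - y_true * y_pred, 0.0)  # [0.  0.5 0.7]
print(np.mean(margins, axis=-1))                  # hinge: [0.4]
print(np.mean(margins ** 2, axis=-1))             # squared hinge: ~[0.247]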
Example #14
def mean_squared_logarithmic_error(y_true, y_pred):
  first_log = tf.log(model_ops.clip(y_pred, model_ops.epsilon(), None) + 1.)
  second_log = tf.log(model_ops.clip(y_true, model_ops.epsilon(), None) + 1.)
  return model_ops.mean(tf.square(first_log - second_log), axis=-1)
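mean_squared_logarithmic_error compares log(1 + y) values, so it measures relative rather than absolute error; the clip keeps the log argument positive. A small check:

import numpy as np

eps = 1e-7
y_true = np.array([[100.0]])
y_pred = np.array([[110.0]])
err = (np.log(np.clip(y_pred, eps, None) + 1.0) -
       np.log(np.clip(y_true, eps, None) + 1.0))
print(np.mean(err ** 2, axis=-1))  # ~[0.0089], despite an absolute error of 10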
Example #15
def mean_absolute_percentage_error(y_true, y_pred):
  diff = tf.abs((y_true - y_pred) / model_ops.clip(
      tf.abs(y_true), model_ops.epsilon(), None))
  return 100. * model_ops.mean(diff, axis=-1)
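mean_absolute_percentage_error scales each absolute error by the target magnitude; clipping |y_true| away from zero prevents division by zero for targets of exactly 0. For example:

import numpy as np

eps = 1e-7
y_true = np.array([[200.0, 100.0]])
y_pred = np.array([[180.0, 110.0]])
diff = np.abs((y_true - y_pred) / np.clip(np.abs(y_true), eps, None))
print(100.0 * np.mean(diff, axis=-1))  # [10.]: two 10% errors average to 10%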
Example #18
    def call(self, x, mask=None):
        """Execute this layer on input tensors.

        This layer is meant to be executed on a Graph, so x is expected to
        be a list of placeholders: the first holds the atom_features
        (learned or input) at this level, the second the deg_slice, the
        third the membership, and the remainder the deg_adj_lists. The body
        below additionally reads a training flag from x[-2].

        Visually

        x = [atom_features, deg_slice, membership, deg_adj_list placeholders...]

        Parameters
        ----------
        x: list
          list of Tensors of form described above.
        mask: bool, optional
          Ignored. Present only to shadow superclass call() method.

        Returns
        -------
        atom_features: tf.Tensor
          Of shape (n_atoms, nb_filter)
        """
        # Add trainable weights
        # self.build()

        # Extract atom_features
        atom_features_ori = x[0]

        # Extract graph topology
        deg_slice, membership, deg_adj_lists = x[1], x[2], x[3:]
        # The training flag rides along as the second-to-last placeholder.
        training = x[-2]

        # Perform the mol conv
        atom_features, gather_feature = graph_conv(
            atom_features_ori, deg_adj_lists, deg_slice, self.max_deg,
            self.min_deg, self.W_list, self.b_list, membership,
            self.batch_size)

        atom_features = self.activation(atom_features)
        gather_feature = self.activation(gather_feature)

        xx = atom_features
        yy = gather_feature
        if not isinstance(xx, list):
            input_shape = model_ops.int_shape(xx)
        else:
            xx = xx[0]
            input_shape = model_ops.int_shape(xx)
        self.build_bn(input_shape)

        # Sample-wise normalization (cf. the mode == 1 branch above),
        # applied separately to the atom and gathered features.
        m = model_ops.mean(xx, axis=-1, keepdims=True)
        std = model_ops.sqrt(
            model_ops.var(xx, axis=-1, keepdims=True) + self.epsilon)
        x_normed = (xx - m) / (std + self.epsilon)
        x_normed = self.gamma * x_normed + self.beta
        m_1 = model_ops.mean(yy, axis=-1, keepdims=True)
        std_1 = model_ops.sqrt(
            model_ops.var(yy, axis=-1, keepdims=True) + self.epsilon)
        y_normed = (yy - m_1) / (std_1 + self.epsilon)
        y_normed = self.gamma * y_normed + self.beta

        atom_features = x_normed
        gather_norm = gather_node(x_normed, membership, self.batch_size)
        gather = tf.convert_to_tensor(gather_norm, dtype=tf.float32)

        if self.dropout is not None:
            # Blend the dropped-out and identity activations with the
            # training flag so dropout is only active when training == 1.
            atom_features = (training * tf.nn.dropout(atom_features,
                                                      1 - self.dropout) +
                             (1 - training) * atom_features)
            gather = (training * tf.nn.dropout(gather, 1 - self.dropout) +
                      (1 - training) * gather)
        return atom_features, y_normed, gather
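For orientation, here is a sketch of how the placeholder list x for call() might be assembled in a TF1-style graph. The shapes, the degree range, and the two trailing entries (call() reads the training flag from x[-2]; the final slot, named dropout_switch here, is purely hypothetical) are assumptions for illustration, not a verified API:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

n_feat = 75                                      # assumed feature width
min_deg, max_deg = 1, 10                         # assumed degree range
atom_features = tf.placeholder(tf.float32, [None, n_feat])
deg_slice = tf.placeholder(tf.int32, [None, 2])  # start/size per degree
membership = tf.placeholder(tf.int32, [None])    # molecule id per atom
deg_adj_lists = [tf.placeholder(tf.int32, [None, deg])
                 for deg in range(min_deg, max_deg + 1)]
training = tf.placeholder(tf.float32, [])        # 1.0 = train, 0.0 = eval
dropout_switch = tf.placeholder(tf.float32, [])  # hypothetical final slot
x = ([atom_features, deg_slice, membership] + deg_adj_lists
     + [training, dropout_switch])               # so training == x[-2]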
Example #20
def mean_squared_error(y_true, y_pred):
  return model_ops.mean(tf.square(y_pred - y_true), axis=-1)