Example #1
  def call(self, x):
    if not isinstance(x, list):
      input_shape = model_ops.int_shape(x)
    else:
      x = x[0]
      input_shape = model_ops.int_shape(x)
    self.build(input_shape)
    if self.mode == 0 or self.mode == 2:

      reduction_axes = list(range(len(input_shape)))
      del reduction_axes[self.axis]
      broadcast_shape = [1] * len(input_shape)
      broadcast_shape[self.axis] = input_shape[self.axis]

      x_normed, mean, std = model_ops.normalize_batch_in_training(
          x, self.gamma, self.beta, reduction_axes, epsilon=self.epsilon)

      if self.mode == 0:
        self.add_update([
            model_ops.moving_average_update(self.running_mean, mean,
                                            self.momentum),
            model_ops.moving_average_update(self.running_std, std,
                                            self.momentum)
        ], x)

        if sorted(reduction_axes) == list(range(model_ops.get_ndim(x)))[:-1]:
          x_normed_running = tf.nn.batch_normalization(
              x,
              self.running_mean,
              self.running_std,
              self.beta,
              self.gamma,
              epsilon=self.epsilon)
        else:
          # need broadcasting
          broadcast_running_mean = tf.reshape(self.running_mean,
                                              broadcast_shape)
          broadcast_running_std = tf.reshape(self.running_std, broadcast_shape)
          broadcast_beta = tf.reshape(self.beta, broadcast_shape)
          broadcast_gamma = tf.reshape(self.gamma, broadcast_shape)
          x_normed_running = tf.nn.batch_normalization(
              x,
              broadcast_running_mean,
              broadcast_running_std,
              broadcast_beta,
              broadcast_gamma,
              epsilon=self.epsilon)

        # pick the normalized form of x corresponding to the training phase
        x_normed = model_ops.in_train_phase(x_normed, x_normed_running)

    elif self.mode == 1:
      # sample-wise normalization
      m = model_ops.mean(x, axis=-1, keepdims=True)
      std = model_ops.sqrt(
          model_ops.var(x, axis=-1, keepdims=True) + self.epsilon)
      x_normed = (x - m) / (std + self.epsilon)
      x_normed = self.gamma * x_normed + self.beta
    return x_normed
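A minimal standalone sketch of what the mode-1 (sample-wise) branch above computes, written with NumPy instead of model_ops; the shapes, epsilon value, and the identity gamma/beta used here are illustrative assumptions, not values taken from the layer:

import numpy as np

def sample_wise_norm(x, gamma, beta, epsilon=1e-3):
  # Normalize each sample over its last (feature) axis, then scale and shift.
  m = x.mean(axis=-1, keepdims=True)
  std = np.sqrt(x.var(axis=-1, keepdims=True) + epsilon)
  x_normed = (x - m) / (std + epsilon)
  return gamma * x_normed + beta

x = np.random.randn(4, 8).astype(np.float32)                  # (batch, features)
out = sample_wise_norm(x, np.ones(8, np.float32), np.zeros(8, np.float32))
print(out.shape)                                              # (4, 8)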
Example #2
    def __init__(self, input_shape=None, batch_input_shape=None,
                 input_dtype=None, input_tensor=None, name=None):
      self.input_spec = None
      self.uses_learning_phase = False
      self.trainable = False
      self.built = True
      self._trainable_weights = []
      self._non_trainable_weights = []
      self.inbound_nodes = []
      self.outbound_nodes = []
      self.constraints = {}

      if not name:
        prefix = 'input'
        # TODO(rbharath): Keras uses a global var here to maintain
        # unique counts. This seems dangerous. How does tensorflow handle?
        name = prefix + '_' + str(model_ops.get_uid(prefix))
      self.name = name

      if input_shape and batch_input_shape:
        raise ValueError('Only provide the input_shape OR '
                         'batch_input_shape argument to '
                         'InputLayer, not both at the same time.')
      if input_tensor is not None:
        # Attempt automatic input shape inference.
        try:
          batch_input_shape = model_ops.int_shape(input_tensor)
        except Exception:
          if not input_shape and not batch_input_shape:
            raise ValueError('InputLayer was provided '
                             'an input_tensor argument, '
                             'but its input shape cannot be '
                             'automatically inferred. '
                             'You should pass an input_shape or '
                             'batch_input_shape argument.')
      if not batch_input_shape:
        if not input_shape:
          raise ValueError('An Input layer should be passed either '
                           'a `batch_input_shape` or an `input_shape`.')
        else:
          batch_input_shape = (None,) + tuple(input_shape)
      else:
        batch_input_shape = tuple(batch_input_shape)

      if not input_dtype:
        if input_tensor is None:
          input_dtype = tf.float32
        else:
          input_dtype = model_ops.get_dtype(input_tensor)

      self.batch_input_shape = batch_input_shape
      self.input_dtype = input_dtype

      if input_tensor is None:
        input_tensor = tf.placeholder(dtype=input_dtype,
                                      shape=batch_input_shape,
                                      name=self.name)
      else:
        input_tensor._keras_shape = batch_input_shape
      # Create an input node to add to self.outbound_nodes
      # and set output_tensors' _keras_history.
      input_tensor._uses_learning_phase = False
      input_tensor._keras_history = (self, 0, 0)
      Node(self,
           inbound_layers=[],
           node_indices=[],
           tensor_indices=[],
           input_tensors=[input_tensor],
           output_tensors=[input_tensor],
           input_shapes=[batch_input_shape],
           output_shapes=[batch_input_shape])
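A brief usage sketch of the constructor above, assuming the surrounding class definition and a TensorFlow 1.x graph-mode environment (the printed values follow directly from the code above; the generated name depends on model_ops.get_uid). Passing only input_shape prepends a batch dimension of None and creates a float32 placeholder under the generated name:

layer = InputLayer(input_shape=(100,))
print(layer.batch_input_shape)   # (None, 100)
print(layer.input_dtype)         # tf.float32
print(layer.name)                # e.g. 'input_1', depending on model_ops.get_uid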
Example #3
  def __call__(self, x):
    """Wrapper around self.call(), for handling
    internal Keras references.

    If a tensor is passed:
      - We call self.add_inbound_node().
      - If necessary, we `build` the layer to match
          the _keras_shape of the input(s).
      - We update the _keras_shape of every input tensor with
          its new shape (obtained via self.get_output_shape_for).
          This is done as part of add_inbound_node().
      - We update the _keras_history of the output tensor(s)
          with the current layer.
          This is done as part of add_inbound_node().

    Parameters
    ----------
    x: Can be a tensor or list/tuple of tensors.
    """
    if not self.built:
      # Collect input shapes to build layer.
      input_shapes = []
      for x_elem in to_list(x):
        if hasattr(x_elem, '_keras_shape'):
          input_shapes.append(x_elem._keras_shape)
        else:
          input_shapes.append(model_ops.int_shape(x_elem))
      if len(input_shapes) == 1:
        self.build(input_shapes[0])
      else:
        self.build(input_shapes)
      self.built = True

    input_tensors = to_list(x)
    inbound_layers = []
    node_indices = []
    tensor_indices = []
    for input_tensor in input_tensors:
      if hasattr(input_tensor, '_keras_history') and input_tensor._keras_history:
        # This is a tensor.
        previous_layer, node_index, tensor_index = input_tensor._keras_history
        inbound_layers.append(previous_layer)
        node_indices.append(node_index)
        tensor_indices.append(tensor_index)
      else:
        inbound_layers = None
        break

    if inbound_layers:
      # This will call layer.build() if necessary.
      self.add_inbound_node(inbound_layers, node_indices, tensor_indices)
      # Outputs were already computed when calling self.add_inbound_node.
      outputs = self.inbound_nodes[-1].output_tensors
    else:
      # This case appears if the input was not a tensor.
      outputs = to_list(self.call(x))

    # Apply activity regularizer if any:
    if hasattr(self, 'activity_regularizer') and self.activity_regularizer is not None:
      regularization_losses = [self.activity_regularizer(x) for x in outputs]
      self.add_loss(regularization_losses, input_tensors)

    # If single output tensor: return it,
    # else return a list (at least 2 elements).
    if len(outputs) == 1:
      return outputs[0]
    else:
      return outputs
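The __call__ wrapper above leans on a to_list helper that is not shown in these examples. A minimal sketch of what it presumably does, mirroring the Keras utility of the same name (treat this as an assumption rather than the project's actual implementation):

def to_list(x):
  """Wrap a single tensor in a list; pass lists and tuples through as lists."""
  if isinstance(x, (list, tuple)):
    return list(x)
  return [x]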
Example #4
    def __init__(self,
                 input_shape=None,
                 batch_input_shape=None,
                 input_dtype=None,
                 input_tensor=None,
                 name=None):
        self.input_spec = None
        self.uses_learning_phase = False
        self.trainable = False
        self.built = True
        self._trainable_weights = []
        self._non_trainable_weights = []
        self.inbound_nodes = []
        self.outbound_nodes = []
        self.constraints = {}

        if not name:
            prefix = 'input'
            # TODO(rbharath): Keras uses a global var here to maintain
            # unique counts. This seems dangerous. How does tensorflow handle?
            name = prefix + '_' + str(model_ops.get_uid(prefix))
        self.name = name

        if input_shape and batch_input_shape:
            raise ValueError('Only provide the input_shape OR '
                             'batch_input_shape argument to '
                             'InputLayer, not both at the same time.')
        if input_tensor is not None:
            # Attempt automatic input shape inference.
            try:
                batch_input_shape = model_ops.int_shape(input_tensor)
            except Exception:
                if not input_shape and not batch_input_shape:
                    raise ValueError('InputLayer was provided '
                                     'an input_tensor argument, '
                                     'but its input shape cannot be '
                                     'automatically inferred. '
                                     'You should pass an input_shape or '
                                     'batch_input_shape argument.')
        if not batch_input_shape:
            if not input_shape:
                raise ValueError('An Input layer should be passed either '
                                 'a `batch_input_shape` or an `input_shape`.')
            else:
                batch_input_shape = (None, ) + tuple(input_shape)
        else:
            batch_input_shape = tuple(batch_input_shape)

        if not input_dtype:
            if input_tensor is None:
                input_dtype = tf.float32
            else:
                input_dtype = model_ops.get_dtype(input_tensor)

        self.batch_input_shape = batch_input_shape
        self.input_dtype = input_dtype

        if input_tensor is None:
            input_tensor = tf.placeholder(dtype=input_dtype,
                                          shape=batch_input_shape,
                                          name=self.name)
        else:
            input_tensor._keras_shape = batch_input_shape
        # Create an input node to add to self.outbound_nodes
        # and set output_tensors' _keras_history.
        input_tensor._uses_learning_phase = False
        input_tensor._keras_history = (self, 0, 0)
        Node(self,
             inbound_layers=[],
             node_indices=[],
             tensor_indices=[],
             input_tensors=[input_tensor],
             output_tensors=[input_tensor],
             input_shapes=[batch_input_shape],
             output_shapes=[batch_input_shape])
Example #5
    def __call__(self, x):
        """Wrapper around self.call(), for handling
    internal Keras references.

    If a tensor is passed:
      - We call self.add_inbound_node().
      - If necessary, we `build` the layer to match
          the _keras_shape of the input(s).
      - We update the _keras_shape of every input tensor with
          its new shape (obtained via self.get_output_shape_for).
          This is done as part of add_inbound_node().
      - We update the _keras_history of the output tensor(s)
          with the current layer.
          This is done as part of add_inbound_node().

    Parameters
    ----------
    x: Can be a tensor or list/tuple of tensors.
    """
        if not self.built:
            # Collect input shapes to build layer.
            input_shapes = []
            for x_elem in to_list(x):
                if hasattr(x_elem, '_keras_shape'):
                    input_shapes.append(x_elem._keras_shape)
                else:
                    input_shapes.append(model_ops.int_shape(x_elem))
            if len(input_shapes) == 1:
                self.build(input_shapes[0])
            else:
                self.build(input_shapes)
            self.built = True

        input_tensors = to_list(x)
        inbound_layers = []
        node_indices = []
        tensor_indices = []
        for input_tensor in input_tensors:
            if hasattr(input_tensor,
                       '_keras_history') and input_tensor._keras_history:
                # This is a tensor.
                previous_layer, node_index, tensor_index = input_tensor._keras_history
                inbound_layers.append(previous_layer)
                node_indices.append(node_index)
                tensor_indices.append(tensor_index)
            else:
                inbound_layers = None
                break

        if inbound_layers:
            # This will call layer.build() if necessary.
            self.add_inbound_node(inbound_layers, node_indices, tensor_indices)
            # Outputs were already computed when calling self.add_inbound_node.
            outputs = self.inbound_nodes[-1].output_tensors
        else:
            # This case appears if the input was not a tensor.
            outputs = to_list(self.call(x))

        # Apply activity regularizer if any:
        if hasattr(self, 'activity_regularizer'
                   ) and self.activity_regularizer is not None:
            regularization_losses = [
                self.activity_regularizer(x) for x in outputs
            ]
            self.add_loss(regularization_losses, input_tensors)

        # If single output tensor: return it,
        # else return a list (at least 2 elements).
        if len(outputs) == 1:
            return outputs[0]
        else:
            return outputs
Example #6
    def call(self, x, mask=None):
        """Execute this layer on input tensors.

        This layer is meant to be executed on a Graph. So x is expected to
        be a list of placeholders, with the first placeholder the list of
        atom_features (learned or input) at this level, the second the deg_slice,
        the third the membership, and the remaining the deg_adj_lists.

        Visually

        x = [atom_features, deg_slice, membership, deg_adj_list placeholders...]

        Parameters
        ----------
        x: list
          list of Tensors of form described above.
        mask: bool, optional
          Ignored. Present only to shadow superclass call() method.

        Returns
        -------
        atom_features: tf.Tensor
          Of shape (n_atoms, nb_filter)
        """
        # Add trainable weights
        # self.build()

        # Extract atom_features
        atom_features_ori = x[0]

        # Extract graph topology
        deg_slice, membership, deg_adj_lists = x[1], x[2], x[3:]
        training = x[-2]

        # Perform the mol conv
        atom_features, gather_feature = graph_conv(
            atom_features_ori, deg_adj_lists, deg_slice, self.max_deg,
            self.min_deg, self.W_list, self.b_list, membership, self.batch_size)

        atom_features = self.activation(atom_features)
        gather_feature = self.activation(gather_feature)

        xx = atom_features
        yy = gather_feature
        if not isinstance(xx, list):
            input_shape = model_ops.int_shape(xx)
        else:
            xx = xx[0]
            input_shape = model_ops.int_shape(xx)
        self.build_bn(input_shape)

        m = model_ops.mean(xx, axis=-1, keepdims=True)
        std = model_ops.sqrt(
            model_ops.var(xx, axis=-1, keepdims=True) + self.epsilon)
        x_normed = (xx - m) / (std + self.epsilon)
        x_normed = self.gamma * x_normed + self.beta
        m_1 = model_ops.mean(yy, axis=-1, keepdims=True)
        std_1 = model_ops.sqrt(
            model_ops.var(yy, axis=-1, keepdims=True) + self.epsilon)
        y_normed = (yy - m_1) / (std_1 + self.epsilon)
        y_normed = self.gamma * y_normed + self.beta

        atom_features = x_normed
        gather_norm = gather_node(x_normed, membership, self.batch_size)
        gather = tf.convert_to_tensor(gather_norm, dtype=tf.float32)

        if self.dropout is not None:
            atom_features = (training * tf.nn.dropout(atom_features, 1 - self.dropout) +
                             (1 - training) * atom_features)
            gather = (training * tf.nn.dropout(gather_feature, 1 - self.dropout) +
                      (1 - training) * gather_feature)
        return atom_features, y_normed, gather
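The last lines of call() blend dropped and untouched activations with a 0/1 training indicator, so dropout only takes effect at training time. A minimal TensorFlow 1.x sketch of that pattern (the placeholder names and dropout rate are illustrative; tf.nn.dropout's second argument is a keep probability in TF 1.x):

import tensorflow as tf

dropout_rate = 0.2
training = tf.placeholder(tf.float32, shape=())           # 1.0 at train time, 0.0 at test time
features = tf.placeholder(tf.float32, shape=(None, 64))

dropped = tf.nn.dropout(features, keep_prob=1.0 - dropout_rate)
# training == 1.0 selects the dropped activations; training == 0.0 passes
# the original activations through unchanged.
blended = training * dropped + (1.0 - training) * features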