Example #1
    def call(self, inputs, **kwargs):
        input_shape = K.int_shape(inputs)
        # Prepare broadcasting shape.
        ndim = len(input_shape)
        reduction_axes = list(range(len(input_shape)))
        del reduction_axes[self.axis]
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis]

        reshape_group_shape = list(input_shape)
        reshape_group_shape[self.axis] = input_shape[self.axis] // self.groups
        group_shape = [tf.shape(inputs)[0], self.groups]
        group_shape.extend(reshape_group_shape[1:])
        group_shape = [n if n is not None else -1 for n in group_shape]
        group_reduction_axes = list(range(len(group_shape)))

        # Determines whether broadcasting is needed.
        needs_broadcasting = sorted(reduction_axes) != list(range(ndim))[:-1]

        inputs = K.reshape(inputs, group_shape)

        mean = K.mean(inputs, axis=group_reduction_axes[2:], keepdims=True)
        variance = K.var(inputs, axis=group_reduction_axes[2:], keepdims=True)

        inputs = (inputs - mean) / (K.sqrt(variance + self.epsilon))

        original_shape = [tf.shape(inputs)[0]] + list(input_shape[1:])
        original_shape = [n if n is not None else -1 for n in original_shape]
        inputs = K.reshape(inputs, original_shape)

        if needs_broadcasting:
            outputs = inputs

            # In this case we must explicitly broadcast all parameters.
            if self.scale:
                broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
                outputs = outputs * broadcast_gamma

            if self.center:
                broadcast_beta = K.reshape(self.beta, broadcast_shape)
                outputs = outputs + broadcast_beta
        else:
            outputs = inputs

            if self.scale:
                outputs = outputs * self.gamma

            if self.center:
                outputs = outputs + self.beta

        return outputs
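To make the grouped reduction above concrete, here is a minimal self-contained sketch of the same reshape-and-reduce idea. The channels_first layout and the 4-channels/2-groups sizes are illustrative assumptions, not taken from the example:

import tensorflow as tf
from tensorflow.keras import backend as K

# Illustrative shapes: batch 2, 4 channels split into 2 groups, 3x3 spatial.
x = tf.random.normal([2, 4, 3, 3])
grouped = K.reshape(x, [-1, 2, 2, 3, 3])               # (N, G, C//G, H, W)
mean = K.mean(grouped, axis=[2, 3, 4], keepdims=True)  # one value per (sample, group)
var = K.var(grouped, axis=[2, 3, 4], keepdims=True)
normed = K.reshape((grouped - mean) / K.sqrt(var + 1e-5), [-1, 4, 3, 3])
print(normed.shape)  # (2, 4, 3, 3), same as the input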
Example #2
    def call(self, inputs, **kwargs):
        input_shape = K.int_shape(inputs)
        tensor_input_shape = K.shape(inputs)

        # Prepare broadcasting shape.
        reduction_axes = list(range(len(input_shape)))
        del reduction_axes[self.axis]
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis] // self.groups
        broadcast_shape.insert(1, self.groups)

        reshape_group_shape = K.shape(inputs)
        group_axes = [reshape_group_shape[i] for i in range(len(input_shape))]
        group_axes[self.axis] = input_shape[self.axis] // self.groups
        group_axes.insert(1, self.groups)

        # reshape inputs to new group shape
        group_shape = [group_axes[0], self.groups] + group_axes[2:]
        group_shape = K.stack(group_shape)
        inputs = K.reshape(inputs, group_shape)

        group_reduction_axes = list(range(len(group_axes)))
        group_reduction_axes = group_reduction_axes[2:]

        mean = K.mean(inputs, axis=group_reduction_axes, keepdims=True)
        variance = K.var(inputs, axis=group_reduction_axes, keepdims=True)

        inputs = (inputs - mean) / (K.sqrt(variance + self.epsilon))

        # The tensor is still in the grouped layout, which broadcast_shape was
        # built to match, so gamma and beta must be broadcast explicitly.
        outputs = inputs

        if self.scale:
            broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
            outputs = outputs * broadcast_gamma

        if self.center:
            broadcast_beta = K.reshape(self.beta, broadcast_shape)
            outputs = outputs + broadcast_beta

        outputs = K.reshape(outputs, tensor_input_shape)

        return outputs
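One detail worth isolating from this variant is the K.stack call at the reshape step: it packs the symbolic batch size and the static group sizes into a single shape tensor that K.reshape can consume. A small sketch with made-up sizes:

import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.zeros([3, 8])
dynamic_batch = K.shape(x)[0]             # scalar tensor, unknown at build time
new_shape = K.stack([dynamic_batch, 4, 2])
print(K.reshape(x, new_shape).shape)      # (3, 4, 2)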
Example #3
import tensorflow as tf
from tensorflow.keras import backend as keras_backend

def apply_local_cmvn(feats, epsilon=1e-9):
    '''Local CMVN over axis 1. feats: (N, H, W, C).'''
    mean = tf.expand_dims(keras_backend.mean(feats, axis=1), axis=1)
    var = tf.expand_dims(keras_backend.var(feats, axis=1), axis=1)
    feats = (feats - mean) * tf.math.rsqrt(var + epsilon)
    return feats
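A toy usage of the function above, assuming speech-style features laid out as (batch, time, freq, channel) so that axis 1 is time; the shapes are illustrative:

import tensorflow as tf

feats = tf.random.normal([2, 100, 40, 1]) * 5.0 + 3.0  # shifted, scaled input
normed = apply_local_cmvn(feats)
per_bin_mean = tf.reduce_mean(normed, axis=1)          # ~0 for every (freq, channel)
print(float(tf.reduce_max(tf.abs(per_bin_mean))))      # small value near 0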
Example #4
def metric_var_y(_y_true_unused, _y_pred_unused):
    # Ignores the required (y_true, y_pred) arguments; reports the variance
    # of the tensor `representation_layer_yx` captured from the enclosing scope.
    return K.mean(K.var(representation_layer_yx))
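For context, here is one hedged way such a metric could be attached to a model; the model and layer names below are illustrative stand-ins. Closures over intermediate tensors work in graph-mode Keras; under TF2's eager execution, model.add_metric is the more reliable route to the same quantity:

import tensorflow as tf
from tensorflow.keras import backend as K

inputs = tf.keras.Input(shape=(16,))
representation_layer_yx = tf.keras.layers.Dense(8)(inputs)
outputs = tf.keras.layers.Dense(1)(representation_layer_yx)
model = tf.keras.Model(inputs, outputs)

# Track the same scalar as metric_var_y: the variance of the representation.
model.add_metric(K.mean(K.var(representation_layer_yx)), name='var_y')
model.compile(optimizer='adam', loss='mse')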
Example #5
    def call(self, x, mask=None):
        """Execute this layer on input tensors.

        This layer is meant to be executed on a Graph. So x is expected to
        be a list of placeholders, with the first placeholder the list of
        atom_features (learned or input) at this level, the second the deg_slice,
        the third the membership, the following ones the deg_adj_lists, and
        the training flag at x[-2].

        Visually

        x = [atom_features, deg_slice, membership, deg_adj_list placeholders...,
             training]

        Parameters
        ----------
        x: list
          list of Tensors of form described above.
        mask: bool, optional
          Ignored. Present only to shadow superclass call() method.

        Returns
        -------
        list
          [atom_features, y_normed, gather]; atom_features has shape
          (n_atoms, nb_filter)
        """

        # Extract atom_features
        atom_features_ori = x[0]

        # Extract graph topology. The training flag occupies x[-2], so the
        # adjacency-list placeholders stop two entries short of the end.
        deg_slice, membership, deg_adj_lists = x[1], x[2], x[3:-2]
        training = x[-2]

        # Perform the mol conv
        atom_features, gather_feature = graph_conv(
            atom_features_ori, deg_adj_lists, deg_slice, self.max_deg,
            self.min_deg, self.W_list, self.b_list, self.gather_W_list,
            self.gather_b_list, membership, self.batch_size)

        atom_features = self.activation(atom_features)
        gather_feature = self.activation(gather_feature)

        xx = atom_features
        yy = gather_feature
        if isinstance(xx, list):
            xx = xx[0]

        m = model_ops.mean(xx, axis=-1, keepdims=True)
        std = model_ops.sqrt(
            model_ops.var(xx, axis=-1, keepdims=True) + self.epsilon)
        x_normed = (xx - m) / (std + self.epsilon)
        x_normed = self.gamma * x_normed + self.beta
        m_1 = model_ops.mean(yy, axis=-1, keepdims=True)
        std_1 = model_ops.sqrt(
            model_ops.var(yy, axis=-1, keepdims=True) + self.epsilon)
        y_normed = (yy - m_1) / (std_1 + self.epsilon)
        y_normed = self.gamma * y_normed + self.beta

        atom_features = x_normed
        gather_norm = gather_node(x_normed, membership, self.batch_size)
        gather = tf.convert_to_tensor(gather_norm, dtype=tf.float32)

        if self.dropout is not None:
            # Blend the dropped-out and identity branches with the 0/1
            # `training` flag (tf.nn.dropout takes keep_prob here, TF1-style).
            atom_features = training * tf.nn.dropout(
                atom_features,
                1 - self.dropout) + (1 - training) * atom_features
            gather = training * tf.nn.dropout(
                gather, 1 - self.dropout) + (1 - training) * gather
        return [atom_features, y_normed, gather]
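The normalization applied twice in the body above is a plain layer-norm style rescale over the feature axis. Isolated as a sketch, where gamma, beta, and epsilon are stand-ins for the layer's attributes (note the example adds epsilon both under the square root and in the denominator, which is mirrored here):

import tensorflow as tf
from tensorflow.keras import backend as K

def layer_norm_last_axis(x, gamma=1.0, beta=0.0, epsilon=1e-5):
    m = K.mean(x, axis=-1, keepdims=True)
    std = K.sqrt(K.var(x, axis=-1, keepdims=True) + epsilon)
    return gamma * ((x - m) / (std + epsilon)) + beta

x = tf.random.normal([4, 10])
print(tf.reduce_mean(layer_norm_last_axis(x), axis=-1))  # ~0 per row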
Example #6
    def call(self, inputs, training=None):
        input_shape = K.int_shape(inputs)

        # Prepare broadcasting shape.
        reduction_axes = list(range(len(input_shape)))
        del reduction_axes[self.axis]

        if self.axis != 0:
            del reduction_axes[0]

        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis]

        mean_instance = K.mean(inputs, reduction_axes, keepdims=True)
        variance_instance = K.var(inputs, reduction_axes, keepdims=True)

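        # Law of total variance: the layer statistics below are derived from
        # the per-instance statistics as E[var + mean^2] - (E[mean])^2.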
        mean_layer = K.mean(mean_instance, self.axis, keepdims=True)
        temp = variance_instance + K.square(mean_instance)
        variance_layer = K.mean(temp, self.axis,
                                keepdims=True) - K.square(mean_layer)

        def training_phase():
            mean_batch = K.mean(mean_instance, axis=0, keepdims=True)
            variance_batch = K.mean(temp, axis=0,
                                    keepdims=True) - K.square(mean_batch)

            mean_batch_reshaped = K.flatten(mean_batch)
            variance_batch_reshaped = K.flatten(variance_batch)

            if K.backend() != 'cntk':
                sample_size = K.prod(
                    [K.shape(inputs)[axis] for axis in reduction_axes])
                sample_size = K.cast(sample_size, dtype=K.dtype(inputs))

                # sample variance - unbiased estimator of population variance
                variance_batch_reshaped *= sample_size / (sample_size -
                                                          (1.0 + self.epsilon))

            self.add_update([
                K.moving_average_update(self.moving_mean, mean_batch_reshaped,
                                        self.momentum),
                K.moving_average_update(self.moving_variance,
                                        variance_batch_reshaped, self.momentum)
            ], inputs)

            return normalize_func(mean_batch, variance_batch)

        def inference_phase():
            mean_batch = self.moving_mean
            variance_batch = self.moving_variance

            return normalize_func(mean_batch, variance_batch)

        def normalize_func(mean_batch, variance_batch):
            mean_batch = K.reshape(mean_batch, broadcast_shape)
            variance_batch = K.reshape(variance_batch, broadcast_shape)

            mean_weights = K.softmax(self.mean_weights, axis=0)
            variance_weights = K.softmax(self.variance_weights, axis=0)

            mean = (mean_weights[0] * mean_instance +
                    mean_weights[1] * mean_layer +
                    mean_weights[2] * mean_batch)

            variance = (variance_weights[0] * variance_instance +
                        variance_weights[1] * variance_layer +
                        variance_weights[2] * variance_batch)

            outputs = (inputs - mean) / (K.sqrt(variance + self.epsilon))

            if self.scale:
                broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
                outputs = outputs * broadcast_gamma

            if self.center:
                broadcast_beta = K.reshape(self.beta, broadcast_shape)
                outputs = outputs + broadcast_beta

            return outputs

        if training in {0, False}:
            return inference_phase()

        return K.in_train_phase(training_phase,
                                inference_phase,
                                training=training)
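What distinguishes this last example is the softmax-weighted blend of instance, layer, and batch statistics. In isolation, with made-up weight logits and scalar stand-ins for the three means:

import tensorflow as tf
from tensorflow.keras import backend as K

mean_weights = K.softmax(tf.constant([0.5, 0.1, -0.2]), axis=0)
mean_instance, mean_layer, mean_batch = 1.0, 2.0, 3.0
mean = (mean_weights[0] * mean_instance +
        mean_weights[1] * mean_layer +
        mean_weights[2] * mean_batch)
print(float(tf.reduce_sum(mean_weights)))  # 1.0: the weights form a convex blend
print(float(mean))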