def single_mode_dot(A, B):
    a_sparse = K.is_sparse(A)
    b_sparse = K.is_sparse(B)
    if a_sparse and b_sparse:
        raise ValueError('Sparse x Sparse matmul is not implemented yet.')
    elif a_sparse:
        output = tf.sparse_tensor_dense_matmul(A, B)
    elif b_sparse:
        output = transpose(
            tf.sparse_tensor_dense_matmul(transpose(B), transpose(A)))
    else:
        output = tf.matmul(A, B)

    return output
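A minimal usage sketch of single_mode_dot (not part of the original listing), assuming a TF 1.x-style environment where tf.sparse_tensor_dense_matmul and the transpose() helper shown later in this listing are available:

import numpy as np
import tensorflow as tf

# Hypothetical inputs: a sparse 4x4 matrix and dense matrices.
A_sp = tf.sparse.SparseTensor(indices=[[0, 1], [2, 3]],
                              values=[1.0, 2.0], dense_shape=[4, 4])
A_dense = tf.constant(np.random.rand(4, 4).astype('float32'))
B = tf.constant(np.random.rand(4, 3).astype('float32'))

out1 = single_mode_dot(A_sp, B)      # sparse x dense branch
out2 = single_mode_dot(A_dense, B)   # dense x dense branch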
Example #2
    def test_sparse_concat(self):
        x_d = np.array([0, 7, 2, 3], dtype=np.float32)
        x_r = np.array([0, 2, 2, 3], dtype=np.int64)
        x_c = np.array([4, 3, 2, 3], dtype=np.int64)

        x_sparse_1 = sparse.csr_matrix((x_d, (x_r, x_c)), shape=(4, 5))

        x_d = np.array([0, 7, 2, 3], dtype=np.float32)
        x_r = np.array([0, 2, 2, 3], dtype=np.int64)
        x_c = np.array([4, 3, 2, 3], dtype=np.int64)

        x_sparse_2 = sparse.csr_matrix((x_d, (x_r, x_c)), shape=(4, 5))

        x_dense_1 = x_sparse_1.toarray()
        x_dense_2 = x_sparse_2.toarray()

        backends = [KTF]
        if KTH.th_sparse_module:
            # Theano has some dependency issues for sparse
            backends.append(KTH)

        for K in backends:
            k_s = K.concatenate(
                [K.variable(x_sparse_1),
                 K.variable(x_sparse_2)])
            assert K.is_sparse(k_s)

            k_s_d = K.eval(k_s)

            k_d = K.eval(
                K.concatenate([K.variable(x_dense_1),
                               K.variable(x_dense_2)]))

            assert k_s_d.shape == k_d.shape
            assert_allclose(k_s_d, k_d, atol=1e-05)
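For reference, a short SciPy-only sketch (not part of the test) of what the assertions compare against: K.concatenate defaults to the last axis, so concatenating the two sparse fixtures corresponds to an hstack of the CSR matrices.

import numpy as np
from scipy import sparse

x_d = np.array([0, 7, 2, 3], dtype=np.float32)
x_r = np.array([0, 2, 2, 3], dtype=np.int64)
x_c = np.array([4, 3, 2, 3], dtype=np.int64)
x_sparse = sparse.csr_matrix((x_d, (x_r, x_c)), shape=(4, 5))

# Dense reference for the backend concatenation along the default (last) axis.
expected = sparse.hstack([x_sparse, x_sparse]).toarray()  # shape (4, 10)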
Example #3
    def test_sparse_concat(self):
        x_d = np.array([0, 7, 2, 3], dtype=np.float32)
        x_r = np.array([0, 2, 2, 3], dtype=np.int64)
        x_c = np.array([4, 3, 2, 3], dtype=np.int64)

        x_sparse_1 = sparse.csr_matrix((x_d, (x_r, x_c)), shape=(4, 5))

        x_d = np.array([0, 7, 2, 3], dtype=np.float32)
        x_r = np.array([0, 2, 2, 3], dtype=np.int64)
        x_c = np.array([4, 3, 2, 3], dtype=np.int64)

        x_sparse_2 = sparse.csr_matrix((x_d, (x_r, x_c)), shape=(4, 5))

        x_dense_1 = x_sparse_1.toarray()
        x_dense_2 = x_sparse_2.toarray()

        backends = [KTF]
        if KTH.th_sparse_module:
            # Theano has some dependency issues for sparse
            backends.append(KTH)

        for K in backends:
            k_s = K.concatenate([K.variable(x_sparse_1), K.variable(x_sparse_2)])
            assert K.is_sparse(k_s)

            k_s_d = K.eval(k_s)

            k_d = K.eval(K.concatenate([K.variable(x_dense_1), K.variable(x_dense_2)]))

            assert k_s_d.shape == k_d.shape
            assert_allclose(k_s_d, k_d, atol=1e-05)
Example #4
    def __call__(self, inputs):

        current_step = K.eval(self.global_step)
        run_summary = ((self.summary_frequency > 0)
                       and (current_step % self.summary_frequency == 0)
                       and (self.summary_writer is not None))

        if not isinstance(inputs, (list, tuple)):
            raise TypeError('`inputs` should be a list or tuple.')
        feed_dict = {}
        for tensor, value in zip(self.inputs, inputs):
            if K.is_sparse(tensor):
                sparse_coo = value.tocoo()
                indices = numpy.concatenate(
                    (numpy.expand_dims(sparse_coo.row, 1),
                     numpy.expand_dims(sparse_coo.col, 1)), 1)
                value = (indices, sparse_coo.data, sparse_coo.shape)
            feed_dict[tensor] = value

        fetches = self.outputs + [self.updates_op]
        if run_summary:
            fetches += [self.summary_operation]

        session = K.get_session()
        returned_fetches = session.run(fetches, feed_dict=feed_dict)
        if run_summary:
            self.summary_writer.add_summary(returned_fetches[-1], current_step)

        return returned_fetches[:len(self.outputs)]
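The sparse feed conversion used in __call__ above, shown in isolation (a sketch, not part of the original): a SciPy sparse matrix is turned into the (indices, values, shape) triple that a TF1 session accepts as the feed value for a sparse placeholder.

import numpy as np
from scipy import sparse

value = sparse.csr_matrix(np.eye(3, dtype=np.float32))
sparse_coo = value.tocoo()
indices = np.concatenate((np.expand_dims(sparse_coo.row, 1),
                          np.expand_dims(sparse_coo.col, 1)), 1)
feed_value = (indices, sparse_coo.data, sparse_coo.shape)  # ready for feed_dict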
def matmul_AT_B_A(A, B):
    mode = autodetect_mode(A, B)
    if mode == modes['S']:
        # Single (rank(A)=2, rank(B)=2)
        output = single_mode_dot(single_mode_dot(transpose(A), B), A)
    elif mode == modes['M']:
        # Mixed (rank(A)=2, rank(B)=3)
        output = mixed_mode_dot(transpose(A), B)
        if K.is_sparse(A):
            output = transpose(
                mixed_mode_dot(transpose(A), transpose(output, (0, 2, 1))),
                (0, 2, 1))
        else:
            output = K.dot(output, A)
    elif mode == modes['iM']:
        # Inverted mixed (rank(A)=3, rank(B)=2)
        # Works only with dense tensors
        output = mixed_mode_dot(B, A)
        output = K.batch_dot(transpose(A, (0, 2, 1)), output)
    elif mode == modes['B']:
        # Batch (rank(A)=3, rank(B)=3)
        # Works only with dense tensors
        output = K.batch_dot(K.batch_dot(transpose(A, (0, 2, 1)), B), A)
    else:
        raise ValueError('A and B must have rank 2 or 3.')

    return output
def reshape(A, shape=None, name=None):
    if K.is_sparse(A):
        reshape_op = tf.sparse.reshape
    else:
        reshape_op = tf.reshape

    return reshape_op(A, shape=shape, name=name)
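A small usage sketch (an illustration, assuming tf.sparse.reshape is available, i.e. TF 1.15/2.x): the helper picks the sparse or dense reshape op based on K.is_sparse, so both calls below return a (2, 6) result.

import tensorflow as tf

dense = tf.ones((3, 4))
sp = tf.sparse.from_dense(dense)

dense_r = reshape(dense, shape=(2, 6))   # tf.reshape path
sparse_r = reshape(sp, shape=(2, 6))     # tf.sparse.reshape path, still sparse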
Example #7
def matmul_AT_B_A(A, B):
    """
    Computes A.T * B * A, dealing with sparsity and single/batch/mixed modes
    automatically. Mixed mode multiplication also works when A has rank 3 and
    B has rank 2. Sparse multiplication does not work with batch mode.
    :param A: Tensor or SparseTensor with rank 2 or 3.
    :param B: Tensor or SparseTensor with rank 2 or 3.
    :return:
    """
    mode = autodetect_mode(A, B)
    if mode == modes['S']:
        # Single (rank(A)=2, rank(B)=2)
        output = single_mode_dot(single_mode_dot(transpose(A), B), A)
    elif mode == modes['M']:
        # Mixed (rank(A)=2, rank(B)=3)
        output = mixed_mode_dot(transpose(A), B)
        if K.is_sparse(A):
            output = transpose(
                mixed_mode_dot(transpose(A), transpose(output, (0, 2, 1))),
                (0, 2, 1))
        else:
            output = K.dot(output, A)
    elif mode == modes['iM']:
        # Inverted mixed (rank(A)=3, rank(B)=2)
        # Works only with dense tensors
        output = mixed_mode_dot(B, A)
        output = K.batch_dot(transpose(A, (0, 2, 1)), output)
    elif mode == modes['B']:
        # Batch (rank(A)=3, rank(B)=3)
        # Works only with dense tensors
        output = K.batch_dot(K.batch_dot(transpose(A, (0, 2, 1)), B), A)
    else:
        raise ValueError('A and B must have rank 2 or 3.')

    return output
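A rough single-mode usage sketch (assuming the helpers from this listing, autodetect_mode, modes, transpose, single_mode_dot and mixed_mode_dot, are in scope): for rank-2 dense inputs the result should match a plain NumPy A.T @ B @ A.

import numpy as np
import tensorflow as tf

A = np.random.rand(4, 3).astype('float32')
B = np.random.rand(4, 4).astype('float32')

out = matmul_AT_B_A(tf.constant(A), tf.constant(B))  # shape (3, 3)
expected = A.T @ B @ A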
def degrees(A):
    if K.is_sparse(A):
        D = tf.sparse.reduce_sum(A, axis=-1)
    else:
        D = tf.reduce_sum(A, axis=-1)

    return D
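A quick sketch of degrees() on a small undirected graph (an illustration, not from the original project): reducing the adjacency matrix along its last axis gives the per-node degrees [1, 2, 1] through either branch.

import numpy as np
import tensorflow as tf

A = tf.constant(np.array([[0, 1, 0],
                          [1, 0, 1],
                          [0, 1, 0]], dtype=np.float32))
D_dense = degrees(A)                          # tf.reduce_sum path
D_sparse = degrees(tf.sparse.from_dense(A))   # tf.sparse.reduce_sum path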
def transpose(A, perm=None, name=None):
    if K.is_sparse(A):
        transpose_op = tf.sparse.transpose
    else:
        transpose_op = tf.transpose

    if perm is None:
        perm = (1, 0)  # Make explicit so that shape will always be preserved
    return transpose_op(A, perm=perm, name=name)
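Usage sketch (an illustration under the same TF assumptions as the helper): with perm=None the default (1, 0) is applied, so a (2, 3) input comes back as (3, 2) on both branches.

import tensorflow as tf

x = tf.reshape(tf.range(6, dtype=tf.float32), (2, 3))
xt = transpose(x)                            # dense path, shape (3, 2)
xt_sp = transpose(tf.sparse.from_dense(x))   # sparse path, shape (3, 2)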
Example #10
    def test_sparse_concat_partial_dense(self):
        test_sparse_matrix_1 = self.generate_test_sparse_matrix()
        test_sparse_matrix_2 = self.generate_test_sparse_matrix()

        assert K.is_sparse(K.variable(test_sparse_matrix_1))
        assert K.is_sparse(K.variable(test_sparse_matrix_2))

        test_dense_matrix_1 = test_sparse_matrix_1.toarray()
        test_dense_matrix_2 = test_sparse_matrix_2.toarray()

        k_s = K.concatenate(tensors=[K.variable(test_sparse_matrix_1), K.variable(test_dense_matrix_2)], axis=0)
        k_s_d = K.eval(k_s)

        # mx.sym.sparse.concat only supported for axis=0
        k_d = K.eval(K.concatenate(tensors=[K.variable(test_dense_matrix_1), K.variable(test_dense_matrix_2)], axis=0))

        assert k_s_d.shape == k_d.shape
        assert_allclose(k_s_d, k_d, atol=1e-05)
def expand(tensor):
  rank = ndims(tensor)
  if K.is_sparse(tensor):
    new_shape = expand_idxs(tf.shape(tensor.indices)[0:1], 0, (1,))

    new_idxs = expand_idxs(tensor.indices, rank-1, new_shape)
    new_shape = expand_idxs(tensor.shape, 0, (1,))
    return tf.SparseTensor(indices=new_idxs, values=tensor.values, shape=new_shape)
  else:
    return tf.expand_dims(tensor, rank)
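A dense-branch-only sketch of expand() (the sparse branch relies on an expand_idxs helper and an older SparseTensor constructor not shown here; the ndims helper at the end of this listing is assumed to be in scope): a trailing unit dimension is appended.

import tensorflow as tf

x = tf.ones((2, 3))
y = expand(x)   # dense path -> tf.expand_dims(x, 2), shape (2, 3, 1)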
Example #12
    def call(self, inputs):
        if len(inputs) == 3:
            X, A, I = inputs
            self.data_mode = 'graph'
        else:
            X, A = inputs
            I = tf.zeros(tf.shape(X)[:1], dtype=tf.int32)
            self.data_mode = 'single'
        if K.ndim(I) == 2:
            I = I[:, 0]

        A_is_sparse = K.is_sparse(A)

        # Get mask
        y = K.dot(X, self.kernel)
        y = filter_dot(A, y)
        N = K.shape(X)[-2]
        indices = ops.segment_top_k(y[:, 0], I, self.ratio, self.top_k_var)
        mask = tf.scatter_nd(tf.expand_dims(indices, 1), tf.ones_like(indices),
                             (N, ))

        # Multiply X and y to make layer differentiable
        features = X * self.gating_op(y)

        # Cannot use negative axis in tf.boolean_mask
        axis = 0 if len(K.int_shape(A)) == 2 else 1
        # Reduce X
        X_pooled = tf.boolean_mask(features, mask, axis=axis)

        # Compute A^2
        if A_is_sparse:
            A_dense = tf.sparse.to_dense(A)
        else:
            A_dense = A
        A_squared = K.dot(A, A_dense)

        # Reduce A
        A_pooled = tf.boolean_mask(A_squared, mask, axis=axis)
        A_pooled = tf.boolean_mask(A_pooled, mask, axis=axis + 1)
        if A_is_sparse:
            A_pooled = tf.contrib.layers.dense_to_sparse(A_pooled)

        output = [X_pooled, A_pooled]

        # Reduce I
        if self.data_mode == 'graph':
            I_pooled = tf.boolean_mask(I[:, None], mask)[:, 0]
            output.append(I_pooled)

        if self.return_mask:
            output.append(mask)

        return output
Example #13
def single_mode_dot(A, B):
    """
    Dot product between two rank 2 matrices. Deals automatically with either A
    or B being sparse.
    :param A: rank 2 Tensor or SparseTensor.
    :param B: rank 2 Tensor or SparseTensor.
    :return: rank 2 Tensor or SparseTensor.
    """
    a_sparse = K.is_sparse(A)
    b_sparse = K.is_sparse(B)
    if a_sparse and b_sparse:
        raise ValueError('Sparse x Sparse matmul is not implemented yet.')
    elif a_sparse:
        output = tf.sparse_tensor_dense_matmul(A, B)
    elif b_sparse:
        output = transpose(
            tf.sparse_tensor_dense_matmul(transpose(B), transpose(A)))
    else:
        output = tf.matmul(A, B)

    return output
Example #14
def degrees(A):
    """
    Computes the degrees of each node in A, dealing with sparse A and batch mode
    automatically.
    :param A: Tensor or SparseTensor with rank k = {2, 3}.
    :return: Tensor or SparseTensor of rank k - 1.
    """
    if K.is_sparse(A):
        D = tf.sparse.reduce_sum(A, axis=-1)
    else:
        D = tf.reduce_sum(A, axis=-1)

    return D
Example #15
    def test_sparse_mean_axis_none(self):
        test_sparse_matrix = self.generate_test_sparse_matrix()
        test_dense_matrix = test_sparse_matrix.toarray()

        sparse_var = K.variable(test_sparse_matrix)
        dense_var = K.variable(test_dense_matrix)

        k_s = K.eval(K.mean(sparse_var))
        k_d = K.eval(K.mean(dense_var))

        assert K.is_sparse(sparse_var)
        assert k_s.shape == k_d.shape
        assert_allclose(k_s, k_d, atol=1e-05)
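The property asserted above, restated as a tiny SciPy-only sketch (an illustration): the mean of a sparse matrix over all elements equals the mean of its dense counterpart.

import numpy as np
from scipy import sparse

m = sparse.random(4, 5, density=0.3, format='csr', dtype=np.float32)
assert np.isclose(m.toarray().mean(), m.mean())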
Example #16
def reshape(A, shape=None, name=None):
    """
    Reshapes A according to shape, dealing with sparse A automatically.
    :param A: Tensor or SparseTensor.
    :param shape: new shape.
    :param name: name for the operation.
    :return: Tensor or SparseTensor.
    """
    if K.is_sparse(A):
        reshape_op = tf.sparse.reshape
    else:
        reshape_op = tf.reshape

    return reshape_op(A, shape=shape, name=name)
Example #17
def transpose(A, perm=None, name=None):
    """
    Transposes A according to perm, dealing with sparse A automatically.
    :param A: Tensor or SparseTensor with rank k.
    :param perm: permutation indices of size k.
    :param name: name for the operation.
    :return: Tensor or SparseTensor with rank k.
    """
    if K.is_sparse(A):
        transpose_op = tf.sparse_transpose
    else:
        transpose_op = tf.transpose

    return transpose_op(A, perm=perm, name=name)
Example #18
def transpose(A, perm=None, name=None):
    """
    Transposes A according to perm, dealing with sparse A automatically.
    :param A: Tensor or SparseTensor with rank k.
    :param perm: permutation indices of size k.
    :param name: name for the operation.
    :return: Tensor or SparseTensor with rank k.
    """
    if K.is_sparse(A):
        transpose_op = tf.sparse.transpose
    else:
        transpose_op = tf.transpose

    if perm is None:
        perm = (1, 0)  # Make explicit so that shape will always be preserved
    return transpose_op(A, perm=perm, name=name)
def matrix_power(x, k):
    if K.ndim(x) != 2:
        raise ValueError('x must have rank 2.')
    sparse = K.is_sparse(x)
    if sparse:
        x_dense = tf.sparse.to_dense(x)
    else:
        x_dense = x

    x_k = x_dense
    for _ in range(k - 1):
        x_k = K.dot(x_k, x_dense)

    if sparse:
        return tf.contrib.layers.dense_to_sparse(x_k)
    else:
        return x_k
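A usage sketch (illustrative; the sparse branch additionally needs tf.contrib, i.e. TF 1.x): on a dense rank-2 input the result should agree with np.linalg.matrix_power.

import numpy as np
import tensorflow as tf

x = np.array([[0., 1.],
              [1., 1.]], dtype=np.float32)
out = matrix_power(tf.constant(x), 3)
expected = np.linalg.matrix_power(x, 3)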
Example #20
    def call(self, inputs):
        rank = inputs.shape.rank
        if rank is not None and rank > 2:
            h_prob = standard_ops.sigmoid(
                standard_ops.tensordot(inputs, self.h, [[rank - 1], [0]]))
            h_state = tf.nn.relu(
                tf.sign(h_prob - backend.random_uniform(tf.shape(h_prob))))
        else:
            inputs = math_ops.cast(inputs, self._compute_dtype)
            if K.is_sparse(inputs):
                h_prob = sparse_ops.sigmoid(
                    sparse_ops.sparse_tensor_dense_matmul(inputs, self.h))
                h_state = tf.nn.relu(
                    tf.sign(h_prob - backend.random_uniform(tf.shape(h_prob))))
            else:
                h_prob = gen_math_ops.sigmoid(
                    gen_math_ops.mat_mul(inputs, self.h))
                h_state = tf.nn.relu(
                    tf.sign(h_prob - backend.random_uniform(tf.shape(h_prob))))

        return h_state
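The Bernoulli sampling trick used in all three branches above, shown in isolation (a sketch): relu(sign(p - u)) with u ~ Uniform(0, 1) draws a 0/1 sample per element with probability p.

import tensorflow as tf

p = tf.constant([[0.1, 0.9],
                 [0.5, 0.5]])
sample = tf.nn.relu(tf.sign(p - tf.random.uniform(tf.shape(p))))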
Example #21
    def call(self, inputs):
        features = inputs[0]
        fltr = inputs[1]

        if not K.is_sparse(fltr):
            fltr = tf.sparse.from_dense(tf.transpose(fltr, [0,2,1]))

        weights_neigh = tf.sparse.softmax(fltr)
        weights_neigh = tf.sparse.to_dense(weights_neigh)
        features_neigh = tf.matmul(weights_neigh, features)

        output = K.concatenate([features, features_neigh])
        output = K.dot(output, self.kernel)

        if self.use_bias:
            output = K.bias_add(output, self.bias)
        if self.activation is not None:
            output = self.activation(output)

        if self.BN is not None:
            output = self.BN(output)
        return output
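The neighbourhood-weighting step above in isolation (a sketch, assuming TF 2.x sparse ops): a dense filter is converted to sparse, row-softmaxed over its stored entries, and densified again before the matmul with the node features.

import tensorflow as tf

fltr = tf.constant([[0., 1., 2.],
                    [3., 0., 0.],
                    [0., 0., 1.]])
weights_neigh = tf.sparse.softmax(tf.sparse.from_dense(fltr))
weights_neigh = tf.sparse.to_dense(weights_neigh)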
Example #22
def matrix_power(x, k):
    """
    Computes the k-th power of a square matrix.
    :param x: a square matrix (Tensor or SparseTensor)
    :param k: exponent
    :return: matrix of same type and dtype as the input
    """
    if K.is_sparse(x):
        sparse = True
        x_dense = tf.sparse.to_dense(x)
    else:
        sparse = False
        x_dense = x

    x_k = x_dense
    for _ in range(k - 1):
        x_k = K.dot(x_k, x_dense)

    if sparse:
        return tf.contrib.layers.dense_to_sparse(x_k)
    else:
        return x_k
Example #23
def matrix_power(x, k):
    """
    Computes the k-th power of a square matrix.
    :param x: a square matrix (Tensor or SparseTensor)
    :param k: exponent
    :return: matrix of same type and dtype as the input
    """
    if K.ndim(x) != 2:
        raise ValueError('x must have rank 2.')
    sparse = K.is_sparse(x)
    if sparse:
        x_dense = tf.sparse.to_dense(x)
    else:
        x_dense = x

    x_k = x_dense
    for _ in range(k - 1):
        x_k = K.dot(x_k, x_dense)

    if sparse:
        return tf.contrib.layers.dense_to_sparse(x_k)
    else:
        return x_k
Example #24
def batch(X, batch_size, seed=0, iterator=True):
    """"
    Partitions a dataset into batches, returning a batch dataset or an iterator.
    :param X: The dataset to batch
    :param batch_size: The size of each batch
    :param seed: The shuffle seed
    :retrun: A tensor batch dataset, or as a numpy iterator.
    """
    # buffer_size = int(1e6)
    buffer_size = X.shape[0]  # For perfect shuffle, buff is the size of X

    if K.is_sparse(X):  # If a sparse tensor
        X = K.to_dense(X)
    elif sp.sparse.issparse(X):  # If a sparse matrix
        X = X.todense()

    batches = Dataset.from_tensor_slices(X). \
        shuffle(buffer_size=buffer_size, seed=seed). \
        batch(batch_size, drop_remainder=True)

    if iterator:
        batches = batches.as_numpy_iterator()

    return batches
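A hedged usage sketch of batch() (assumes the module-level imports the helper relies on, Dataset, K and sp, are present): a 10x2 array with batch_size=4 and drop_remainder=True yields two NumPy batches of shape (4, 2).

import numpy as np

X = np.arange(20, dtype=np.float32).reshape(10, 2)
for b in batch(X, batch_size=4, seed=0):
    print(b.shape)   # (4, 2), printed twice; the remainder is dropped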
Example #25
def to_dense(x):
    if K.is_sparse(x):
        return tf.sparse_tensor_to_dense(x, default_value=-1)
    return x
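Usage sketch (assumes the TF 1.x alias tf.sparse_tensor_to_dense used by the helper): missing entries of the sparse input come back as the default value -1.

import tensorflow as tf

sp = tf.sparse.SparseTensor(indices=[[0, 0], [1, 2]], values=[5, 7],
                            dense_shape=[2, 3])
dense = to_dense(sp)   # [[5, -1, -1], [-1, -1, 7]]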
Example #26
    def compile(self,
                optimizer,
                loss,
                metrics=[],
                loss_weights=None,
                sample_weight_mode=None,
                **kwargs):
        #super(sModel, self).compile(optimizer, loss, metrics, loss_weights,
        #                            sample_weights_mode, **kwargs)
        self.optimizer = optimizers.get(optimizer)
        self.sample_weight_mode = sample_weight_mode
        self.loss = loss
        self.loss_weights = loss_weights

        # prepare loss weights
        if loss_weights is None:
            loss_weights_list = [1. for _ in range(len(self.outputs))]
        elif isinstance(loss_weights, dict):
            for name in loss_weights:
                if name not in self.output_names:
                    raise ValueError('Unknown entry in loss_weights '
                                     'dictionary: "' + name + '". '
                                     'Only expected the following keys: ' +
                                     str(self.output_names))
            loss_weights_list = []
            for name in self.output_names:
                loss_weights_list.append(loss_weights.get(name, 1.))
        elif isinstance(loss_weights, list):
            if len(loss_weights) != len(self.outputs):
                raise ValueError('When passing a list as loss_weights, '
                                 'it should have one entry per model outputs. '
                                 'The model has ' + str(len(self.outputs)) +
                                 ' outputs, but you passed loss_weights=' +
                                 str(loss_weights))
            loss_weights_list = loss_weights
        else:
            raise TypeError('Could not interpret loss_weights argument: ' +
                            str(loss_weights) + ' - expected a list of dicts.')

        # prepare loss functions
        if isinstance(loss, dict):
            for name in loss:
                if name not in self.output_names:
                    raise ValueError('Unknown entry in loss '
                                     'dictionary: "' + name + '". '
                                     'Only expected the following keys: ' +
                                     str(self.output_names))
            loss_functions = []
            for name in self.output_names:
                if name not in loss:
                    raise ValueError('Output "' + name +
                                     '" missing from loss dictionary.')
                loss_functions.append(objectives.get(loss[name]))
        elif isinstance(loss, list):
            if len(loss) != len(self.outputs):
                raise ValueError('When passing a list as loss, '
                                 'it should have one entry per model outputs. '
                                 'The model has ' + str(len(self.outputs)) +
                                 ' outputs, but you passed loss=' + str(loss))
            loss_functions = [objectives.get(l) for l in loss]
        else:
            loss_function = objectives.get(loss)
            loss_functions = [loss_function for _ in range(len(self.outputs))]
        self.loss_functions = loss_functions
        weighted_losses = [weighted_objective(fn) for fn in loss_functions]

        # prepare output masks
        masks = self.compute_mask(self.inputs, mask=None)
        if masks is None:
            masks = [None for _ in self.outputs]
        if not isinstance(masks, list):
            masks = [masks]

        # prepare sample weights
        if isinstance(sample_weight_mode, dict):
            for name in sample_weight_mode:
                if name not in self.output_names:
                    raise ValueError('Unknown entry in '
                                     'sample_weight_mode dictionary: "' +
                                     name + '". '
                                     'Only expected the following keys: ' +
                                     str(self.output_names))
            sample_weights = []
            sample_weight_modes = []
            for name in self.output_names:
                if name not in sample_weight_mode:
                    raise ValueError('Output "' + name +
                                     '" missing from sample_weight_modes '
                                     'dictionary')
                if sample_weight_mode.get(name) == 'temporal':
                    weight = K.placeholder(ndim=2,
                                           name=name + '_sample_weights')
                    sample_weight_modes.append('temporal')
                else:
                    weight = K.placeholder(ndim=1,
                                           name=name + '_sample_weights')
                    sample_weight_modes.append(None)
                sample_weights.append(weight)
        elif isinstance(sample_weight_mode, list):
            if len(sample_weight_mode) != len(self.outputs):
                raise ValueError('When passing a list as sample_weight_mode, '
                                 'it should have one entry per model outputs. '
                                 'The model has ' + str(len(self.outputs)) +
                                 ' outputs, but you passed '
                                 'sample_weight_mode=' +
                                 str(sample_weight_mode))
            sample_weights = []
            sample_weight_modes = []
            for mode, name in zip(sample_weight_mode, self.output_names):
                if mode == 'temporal':
                    weight = K.placeholder(ndim=2,
                                           name=name + '_sample_weights')
                    sample_weight_modes.append('temporal')
                else:
                    weight = K.placeholder(ndim=1,
                                           name=name + '_sample_weights')
                    sample_weight_modes.append(None)
                sample_weights.append(weight)
        else:
            if sample_weight_mode == 'temporal':
                sample_weights = [
                    K.placeholder(ndim=2, name=name + '_sample_weights')
                    for name in self.output_names
                ]
                sample_weight_modes = [
                    'temporal' for name in self.output_names
                ]
            else:
                sample_weights = [
                    K.placeholder(ndim=1, name=name + '_sample_weights')
                    for name in self.output_names
                ]
                sample_weight_modes = [None for name in self.output_names]
        self.sample_weight_modes = sample_weight_modes

        # prepare targets of model
        self.targets = []
        for i in range(len(self.outputs)):
            shape = self.internal_output_shapes[i]
            name = self.output_names[i]
            self.targets.append(
                K.placeholder(ndim=len(shape),
                              name=name + '_target',
                              sparse=K.is_sparse(self.outputs[i]),
                              dtype=K.dtype(self.outputs[i])))

        # prepare metrics
        self.metrics = metrics
        self.metrics_names = ['loss']
        self.metrics_tensors = []

        # compute total loss
        total_loss = None
        for i in range(len(self.outputs)):
            y_true = self.targets[i]
            y_pred = self.outputs[i]
            weighted_loss = weighted_losses[i]
            sample_weight = sample_weights[i]
            mask = masks[i]
            loss_weight = loss_weights_list[i]
            output_loss = weighted_loss(y_true, y_pred, sample_weight, mask)
            if len(self.outputs) > 1:
                self.metrics_tensors.append(output_loss)
                self.metrics_names.append(self.output_names[i] + '_loss')
            if total_loss is None:
                total_loss = loss_weight * output_loss
            else:
                total_loss += loss_weight * output_loss

        # add regularization penalties
        # and other layer-specific losses
        for loss_tensor in self.losses:
            total_loss += loss_tensor

        # list of same size as output_names.
        # contains tuples (metrics for output, names of metrics)
        nested_metrics = collect_metrics(metrics, self.output_names)

        def append_metric(layer_num, metric_name, metric_tensor):
            """Helper function, used in loop below"""
            if len(self.output_names) > 1:
                metric_name = self.output_layers[
                    layer_num].name + '_' + metric_name

            self.metrics_names.append(metric_name)
            self.metrics_tensors.append(metric_tensor)

        for i in range(len(self.outputs)):
            y_true = self.targets[i]
            y_pred = self.outputs[i]
            output_metrics = nested_metrics[i]

            for metric in output_metrics:
                if metric == 'accuracy' or metric == 'acc':
                    # custom handling of accuracy
                    # (because of class mode duality)
                    output_shape = self.internal_output_shapes[i]
                    acc_fn = None
                    if output_shape[-1] == 1 or self.loss_functions[
                            i] == objectives.binary_crossentropy:
                        # case: binary accuracy
                        acc_fn = metrics_module.binary_accuracy
                    elif self.loss_functions[
                            i] == objectives.sparse_categorical_crossentropy:
                        # case: categorical accuracy with sparse targets
                        acc_fn = metrics_module.sparse_categorical_accuracy
                    else:
                        acc_fn = metrics_module.categorical_accuracy

                    append_metric(i, 'acc', acc_fn(y_true, y_pred))
                else:
                    metric_fn = metrics_module.get(metric)
                    metric_result = metric_fn(y_true, y_pred)

                    if not isinstance(metric_result, dict):
                        metric_result = {metric_fn.__name__: metric_result}

                    for name, tensor in six.iteritems(metric_result):
                        append_metric(i, name, tensor)

        # prepare gradient updates and state updates
        self.total_loss = total_loss
        self.sample_weights = sample_weights

        # functions for train, test and predict will
        # be compiled lazily when required.
        # This saves time when the user is not using all functions.
        self._function_kwargs = kwargs

        self.train_function = None
        self.test_function = None
        self.predict_function = None

        # collected trainable weights and sort them deterministically.
        trainable_weights = self.trainable_weights
        # Sort weights by name
        if trainable_weights:
            if K.backend() == 'theano':
                trainable_weights.sort(
                    key=lambda x: x.name if x.name else x.auto_name)
            else:
                trainable_weights.sort(key=lambda x: x.name)
        self._collected_trainable_weights = trainable_weights
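The target-placeholder step of compile() in isolation (a sketch assuming TF1-style graph mode and the standalone Keras backend; 'dense_1' is a hypothetical output name used only for illustration): a sparse placeholder is created whenever the corresponding model output is itself sparse, so sparse labels can be fed directly.

from keras import backend as K

# Hypothetical sparse model output and its matching target placeholder.
output = K.placeholder(shape=(None, 10), sparse=True)
target = K.placeholder(ndim=2,
                       name='dense_1_target',
                       sparse=K.is_sparse(output),
                       dtype=K.dtype(output))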
Example #27
def predict_loop(model, f, ins, batch_size=32, verbose=0, steps=None):
    """Abstract method to loop over some data in batches.

    # Arguments
        model: Keras model instance.
        f: Keras function returning a list of tensors.
        ins: list of tensors to be fed to `f`.
        batch_size: integer batch size.
        verbose: verbosity mode.
        steps: Total number of steps (batches of samples)
            before declaring `predict_loop` finished.
            Ignored with the default value of `None`.

    # Returns
        Array of predictions (if the model has a single output)
        or list of arrays of predictions
        (if the model has multiple outputs).
    """
    num_samples = check_num_samples(ins,
                                    batch_size=batch_size,
                                    steps=steps,
                                    steps_name='steps')
    if verbose == 1:
        if steps is not None:
            progbar = Progbar(target=steps)
        else:
            progbar = Progbar(target=num_samples)

    indices_for_conversion_to_dense = []
    is_sparse = False
    for i in range(len(model._feed_inputs)):
        if issparse(ins[i]) and not K.is_sparse(model._feed_inputs[i]):
            indices_for_conversion_to_dense.append(i)
        elif issparse(ins[i]) and K.is_sparse(model._feed_inputs[i]):
            is_sparse = True
    if steps is not None:
        # Step-based predictions.
        # Since we do not know how many samples
        # we will see, we cannot pre-allocate
        # the returned Numpy arrays.
        # Instead, we store one array per batch seen
        # and concatenate them upon returning.
        unconcatenated_outs = []
        for step in range(steps):
            batch_outs = f(ins)
            batch_outs = to_list(batch_outs)
            if step == 0:
                for batch_out in batch_outs:
                    unconcatenated_outs.append([])
            for i, batch_out in enumerate(batch_outs):
                unconcatenated_outs[i].append(batch_out)
            if verbose == 1:
                progbar.update(step + 1)
        if is_sparse:
            if len(unconcatenated_outs) == 1:
                return vstack(unconcatenated_outs[0], 'csr')
            return [
                vstack(unconcatenated_outs[i], 'csr')
                for i in range(len(unconcatenated_outs))
            ]
        if len(unconcatenated_outs) == 1:
            return np.concatenate(unconcatenated_outs[0], axis=0)
        return [
            np.concatenate(unconcatenated_outs[i], axis=0)
            for i in range(len(unconcatenated_outs))
        ]
    else:
        # Sample-based predictions.
        outs = []
        batches = make_batches(num_samples, batch_size)
        index_array = np.arange(num_samples)
        for batch_index, (batch_start, batch_end) in enumerate(batches):
            batch_ids = index_array[batch_start:batch_end]
            if ins and isinstance(ins[-1], float):
                # Do not slice the training phase flag.
                ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
            else:
                ins_batch = slice_arrays(ins, batch_ids)
            for i in indices_for_conversion_to_dense:
                ins_batch[i] = ins_batch[i].toarray()

            batch_outs = f(ins_batch)
            batch_outs = to_list(batch_outs)
            if batch_index == 0:
                # Pre-allocate the results arrays.
                for batch_out in batch_outs:
                    shape = (num_samples, ) + batch_out.shape[1:]
                    if is_sparse:
                        outs.append(lil_matrix(shape, dtype=batch_out.dtype))
                    else:
                        outs.append(np.zeros(shape, dtype=batch_out.dtype))
            for i, batch_out in enumerate(batch_outs):
                outs[i][batch_start:batch_end] = batch_out
            if verbose == 1:
                progbar.update(batch_end)
        if is_sparse:
            return unpack_singleton(list(map(lambda oo: oo.tocsr(), outs)))
        return unpack_singleton(outs)
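The sparse pre-allocation pattern used in the sample-based branch above, in isolation (a sketch): batch outputs are written into a LIL matrix and converted to CSR once at the end.

import numpy as np
from scipy.sparse import lil_matrix

outs = lil_matrix((6, 3), dtype=np.float32)         # pre-allocated result
outs[0:3] = np.ones((3, 3), dtype=np.float32)       # first "batch"
outs[3:6] = 2 * np.ones((3, 3), dtype=np.float32)   # second "batch"
result = outs.tocsr()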
Example #28
    def call(self, inputs):
        # Note that I is useless, because the layer cannot be used in graph
        # batch mode.
        if len(inputs) == 3:
            X, A, I = inputs
        else:
            X, A = inputs
            I = None

        N = K.shape(A)[-1]
        # Check if the layer is operating in batch mode (X and A have rank 3)
        mode = ops.autodetect_mode(A, X)
        self.reduce_loss = mode in (ops._modes['M'], ops._modes['B'])

        # Get normalized adjacency
        if K.is_sparse(A):
            I_ = tf.sparse.eye(N, dtype=A.dtype)
            A_ = tf.sparse.add(A, I_)
        else:
            I_ = tf.eye(N, dtype=A.dtype)
            A_ = A + I_
        fltr = ops.normalize_A(A_)

        # Node embeddings
        Z = K.dot(X, self.kernel_emb)
        Z = ops.filter_dot(fltr, Z)
        if self.activation is not None:
            Z = self.activation(Z)

        # Compute cluster assignment matrix
        S = K.dot(X, self.kernel_pool)
        S = ops.filter_dot(fltr, S)
        S = activations.softmax(S, axis=-1)  # softmax applied row-wise

        # Link prediction loss
        S_gram = ops.matmul_A_BT(S, S)
        if K.is_sparse(A):
            LP_loss = tf.sparse.add(A, -S_gram)  # A/tf.norm(A) - S_gram/tf.norm(S_gram)
        else:
            LP_loss = A - S_gram
        LP_loss = tf.norm(LP_loss, axis=(-1, -2))
        if self.reduce_loss:
            LP_loss = K.mean(LP_loss)
        self.add_loss(LP_loss)

        # Entropy loss
        entr = tf.negative(tf.reduce_sum(tf.multiply(S, K.log(S + K.epsilon())), axis=-1))
        entr_loss = K.mean(entr, axis=-1)
        if self.reduce_loss:
            entr_loss = K.mean(entr_loss)
        self.add_loss(entr_loss)

        # Pooling
        X_pooled = ops.matmul_AT_B(S, Z)
        A_pooled = ops.matmul_AT_B_A(S, A)

        if K.ndim(A_pooled) == 3:
            self.mixed_mode = True

        output = [X_pooled, A_pooled]

        if I is not None:
            I_mean = tf.segment_mean(I, I)
            I_pooled = ops.repeat(I_mean, tf.ones_like(I_mean) * self.k)
            output.append(I_pooled)

        if self.return_mask:
            output.append(S)

        return output
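The self-loop step near the top of call() in isolation (a sketch assuming TF 2.x sparse ops): the identity is added to A through the sparse or the dense path depending on the input type.

import tensorflow as tf

A = tf.constant([[0., 1.],
                 [1., 0.]])
A_dense_path = A + tf.eye(2)
A_sparse_path = tf.sparse.add(tf.sparse.from_dense(A), tf.sparse.eye(2))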
def get_nonzero_vals(tensor):
  if K.is_sparse(tensor):
    return tensor.values
  else:
    return tensor
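Tiny usage sketch (illustrative): for a SparseTensor only the stored values are returned; dense inputs pass through unchanged.

import tensorflow as tf

sp = tf.sparse.from_dense(tf.constant([[0., 3.],
                                       [4., 0.]]))
vals = get_nonzero_vals(sp)   # tensor [3., 4.]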
Example #30
def model_iteration(model,
                    inputs,
                    targets=None,
                    sample_weights=None,
                    batch_size=None,
                    epochs=1,
                    verbose=1,
                    callbacks=None,
                    val_inputs=None,
                    val_targets=None,
                    val_sample_weights=None,
                    shuffle=True,
                    initial_epoch=0,
                    steps_per_epoch=None,
                    validation_steps=None,
                    validation_freq=1,
                    mode=ModeKeys.TRAIN,
                    validation_in_fit=False,
                    prepared_feed_values_from_dataset=False,
                    steps_name="steps",
                    **kwargs):
    """Loop function for arrays of data with modes TRAIN/TEST/PREDICT.

    Args:
        model: Keras Model instance.
        inputs: Either a list or dictionary of arrays, or a dataset instance.
        targets: List/dictionary of input arrays.
        sample_weights: Optional list of sample weight arrays.
        batch_size: Integer batch size or None if unknown.
        epochs: Number of times to iterate over the data
        verbose: 0, 1, or 2. Verbosity mode.
          0 = silent, 1 = progress bar, 2 = one line per epoch.
          Note that the progress bar is not particularly useful when
          logged to a file, so verbose=2 is recommended when not running
          interactively (eg, in a production environment).
        callbacks: List of callbacks to be called during training
        val_inputs: Either a list or dictionary of arrays, or a dataset
          instance.
        val_targets: List/dictionary of target arrays.
        val_sample_weights: Optional list of sample weight arrays.
        shuffle: Whether to shuffle the data at the beginning of each epoch.
        initial_epoch: Epoch at which to start training (useful for resuming a
          previous training run)
        steps_per_epoch: Total number of steps (batches of samples) before
          declaring one epoch finished and starting the next epoch. Ignored with
          the default value of `None`.
        validation_steps: Number of steps to run validation for (only if doing
          validation from data tensors). Ignored with the default value of
          `None`.
        validation_freq: Only relevant if validation data is provided. Integer
          or `collections.abc.Container` instance (e.g. list, tuple, etc.). If
          an integer, specifies how many training epochs to run before a new
          validation run is performed, e.g. `validation_freq=2` runs validation
          every 2 epochs. If a Container, specifies the epochs on which to run
          validation, e.g. `validation_freq=[1, 2, 10]` runs validation at the
          end of the 1st, 2nd, and 10th epochs.
        mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.
        validation_in_fit: if true, then this method is invoked from within
          training iteration (for validation). In the case where `val_inputs` is
          a dataset, this flag indicates that its iterator and feed values are
          already created so should properly reuse resources.
        prepared_feed_values_from_dataset: if True, `inputs` is a list of feed
          tensors returned from `_prepare_feed_values` call on the validation
          dataset, so do not call it again on `inputs`. Should only be used for
          inline validation (i.e., only if `validation_in_fit` is also True).
        steps_name: The string name of the steps argument, either `steps`,
          `validation_steps`, or `steps_per_epoch`. Only used for error message
          formatting.
        **kwargs: Additional arguments for backwards compatibility.

    Returns:
        - In TRAIN mode: `History` object.
        - In TEST mode: Evaluation metrics.
        - In PREDICT mode: Outputs of the Model called on inputs.

    Raises:
        ValueError: in case of invalid arguments.
    """
    # Backwards compatibility.
    if "steps" in kwargs:
        steps_per_epoch = kwargs.pop("steps")
    if kwargs:
        raise TypeError("Unknown arguments: %s" % (kwargs, ))

    # In case we were passed a dataset, we extract symbolic tensors from it.
    reset_dataset_after_each_epoch = False
    input_iterator = None
    is_dataset = isinstance(inputs,
                            (tf.compat.v1.data.Dataset, tf.data.Dataset))
    # TODO(fchollet): consider moving `steps_per_epoch` inference to
    # _standardize_user_data and set reset_dataset_after_each_epoch as an
    # attribute on the dataset instance.
    if is_dataset:
        if steps_per_epoch is None:
            reset_dataset_after_each_epoch = True
            steps_per_epoch = training_utils_v1.infer_steps_for_dataset(
                model,
                inputs,
                steps_per_epoch,
                epochs=epochs,
                steps_name=steps_name,
            )
        input_iterator = _get_iterator(inputs, model._distribution_strategy)

    # Enter tf.distribute.Strategy scope.
    if model._distribution_strategy:
        scope = distributed_training_utils_v1.distributed_scope(
            strategy=model._distribution_strategy,
            learning_phase=(1 if mode == ModeKeys.TRAIN else 0),
        )
        scope.__enter__()

    use_steps = is_dataset or steps_per_epoch is not None
    do_validation = val_inputs is not None

    # Prepare input data.
    inputs = input_iterator or inputs
    if validation_in_fit and prepared_feed_values_from_dataset:
        # When invoking validation in training loop, avoid creating iterator
        # and list of feed values for the same validation dataset multiple
        # times (which essentially would call `iterator.get_next()` that slows
        # down execution and leads to OOM errors eventually).
        ins = inputs
    else:
        ins = _prepare_feed_values(model, inputs, targets, sample_weights,
                                   mode)
        # `ins` is a function when a distribute strategy is used in Eager mode.
        # In that case `is_dataset` is True.  The code branches that have
        # requirements about the type of `ins` do not trigger in the distributed
        # case.

    if not is_dataset:
        num_samples_or_steps = _get_num_samples_or_steps(
            ins, batch_size, steps_per_epoch)
    else:
        num_samples_or_steps = steps_per_epoch

    # Update sample_weight_mode of the model if sample_weights is specified by
    # the user. We need to call this function after we have a handle on the
    # inputs (both numpy arrays and datasets) in order to determine if the user
    # has specified sample_weights.
    _update_sample_weight_mode(model, mode, ins)

    # Get step function and loop type. As part of building the execution
    # function we recompile the metrics based on the updated
    # sample_weight_mode value.
    f = _make_execution_function(model, mode)

    # Prepare validation data. Hold references to the iterator and the input
    # list to properly reinitialize and reuse in multiple validation passes.
    val_iterator = None
    if isinstance(val_inputs, (tf.compat.v1.data.Dataset, tf.data.Dataset)):
        if validation_steps is None:
            # Because we pass an iterator feed instead of a Dataset to the eval
            # model_iteration() call, it will not trigger the dataset-input path
            # that determines the number of steps required. To avoid this issue,
            # set validation_steps here if validation_steps is None.
            validation_steps = training_utils_v1.infer_steps_for_dataset(
                model,
                val_inputs,
                validation_steps,
                epochs=epochs,
                steps_name="validation_steps",
            )
        val_iterator = _get_iterator(val_inputs, model._distribution_strategy)
        val_inputs = _prepare_feed_values(model, val_iterator, val_targets,
                                          val_sample_weights, ModeKeys.TEST)
        # Get num steps for printing.
        val_samples_or_steps = validation_steps
    else:
        # Get num samples for printing.
        val_samples_or_steps = (val_inputs
                                and tf.nest.flatten(val_inputs)[0].shape[0]
                                or None)

    if mode == ModeKeys.TRAIN and verbose:
        _print_train_info(num_samples_or_steps, val_samples_or_steps,
                          is_dataset)

    # Configure callbacks.
    count_mode = "steps" if use_steps else "samples"
    callbacks = cbks.configure_callbacks(
        callbacks,
        model,
        do_validation=do_validation,
        batch_size=batch_size,
        epochs=epochs,
        steps_per_epoch=steps_per_epoch,
        samples=num_samples_or_steps,
        count_mode=count_mode,
        verbose=verbose,
        mode=mode,
    )

    # Find beforehand arrays that need sparse-to-dense conversion.
    if issparse is not None and not use_steps:
        indices_for_conversion_to_dense = []
        feed = _get_model_feed(model, mode)
        for i, (input_data, feed_tensor) in enumerate(zip(ins, feed)):
            if issparse(input_data) and not backend.is_sparse(feed_tensor):
                indices_for_conversion_to_dense.append(i)

    # Select aggregation method.
    if mode == ModeKeys.PREDICT:
        aggregator = training_utils_v1.OutputsAggregator(
            use_steps,
            num_samples=None if steps_per_epoch else num_samples_or_steps,
            steps=steps_per_epoch,
        )
    else:
        aggregator = training_utils_v1.MetricsAggregator(
            use_steps,
            num_samples=None if steps_per_epoch else num_samples_or_steps,
            steps=steps_per_epoch,
        )

    if model._compile_distribution:
        distributed_training_utils_v1._copy_weights_to_distributed_model(
            model, mode)

    callbacks.model.stop_training = False
    callbacks._call_begin_hook(mode)

    initial_epoch = model._maybe_load_initial_epoch_from_ckpt(
        initial_epoch, mode)

    for epoch in range(initial_epoch, epochs):
        if callbacks.model.stop_training:
            break

        # Setup work for each epoch
        epoch_logs = {}
        if mode != ModeKeys.PREDICT:
            # Collecting and resetting metrics has non-zero cost and will
            # needlessly slow down model.predict.
            model.reset_metrics()
        if mode == ModeKeys.TRAIN:
            callbacks.on_epoch_begin(epoch, epoch_logs)

        if use_steps:
            # Step-wise loop.
            if steps_per_epoch is None:
                # Loop over dataset until `OutOfRangeError` is raised.
                target_steps = np.inf
            else:
                # Loop over dataset for the specified number of steps.
                target_steps = steps_per_epoch

            step = 0
            while step < target_steps:
                batch_logs = {"batch": step, "size": 1}
                callbacks._call_batch_hook(mode, "begin", step, batch_logs)

                # Get outputs.
                try:
                    # `ins` can be callable in tf.distribute.Strategy + eager
                    # case.
                    if not callable(ins) or (
                            model._distribution_strategy
                            and not distributed_training_utils_v1.
                            is_distributing_by_cloning(  # noqa: E501
                                model)):
                        actual_inputs = ins
                    else:
                        actual_inputs = ins()
                    batch_outs = f(actual_inputs)
                except tf.errors.OutOfRangeError:
                    if is_dataset:
                        # The dataset passed by the user ran out of batches.
                        # Now we know the cardinality of the dataset.  If
                        # steps_per_epoch was specified, then running out of
                        # data is unexpected, so we stop training and inform the
                        # user.
                        if steps_per_epoch:
                            callbacks.model.stop_training = True
                            logging.warning(
                                "Your dataset ran out of data; interrupting "
                                "training. Make sure that your dataset can "
                                "generate at least `%s * epochs` batches (in "
                                "this case, %d batches). You may need to use "
                                "the repeat() function when building your "
                                "dataset." %
                                (steps_name, steps_per_epoch * epochs))
                        elif step > 0:
                            steps_per_epoch = step
                            aggregator.steps = steps_per_epoch
                    else:
                        # We ran out of batches while the user passed an
                        # iterator (legacy).
                        callbacks.model.stop_training = True
                        logging.warning(
                            "Your dataset iterator ran out of data; "
                            "interrupting training. Make sure that your "
                            "iterator can generate at least `%s * epochs` "
                            "batches (in this case, %d batches). You may need "
                            "to use the repeat() function when building your "
                            "dataset." %
                            (steps_name, steps_per_epoch * epochs))
                    break

                if not isinstance(batch_outs, list):
                    batch_outs = [batch_outs]

                if model._distribution_strategy:
                    batch_outs = distributed_training_utils_v1._per_replica_aggregate_batch(  # noqa: E501
                        model._distribution_strategy, batch_outs, model, mode)

                # Aggregate results.
                if step == 0:
                    aggregator.create(batch_outs)
                aggregator.aggregate(batch_outs)

                # Callbacks batch end.
                batch_logs = cbks.make_logs(model, batch_logs, batch_outs,
                                            mode)
                callbacks._call_batch_hook(mode, "end", step, batch_logs)
                step += 1

                if callbacks.model.stop_training:
                    break
        else:
            # Sample-wise loop.
            index_array = np.arange(num_samples_or_steps)
            if shuffle == "batch":
                index_array = training_utils_v1.batch_shuffle(
                    index_array, batch_size)
            elif shuffle:
                np.random.shuffle(index_array)
            batches = make_batches(num_samples_or_steps, batch_size)
            for batch_index, (batch_start, batch_end) in enumerate(batches):
                batch_ids = index_array[batch_start:batch_end]
                # Slice into a batch.
                if len(batches) == 1:
                    # If we only have one batch, do not slice. This takes care
                    # of composite tensors in non-Dataset modes; we currently
                    # don't support slicing them.
                    # TODO(b/133517906): Add slicing support.
                    ins_batch = ins
                else:
                    try:
                        if ins and isinstance(ins[-1], int):
                            # Do not slice the training phase flag.
                            ins_batch = slice_arrays(ins[:-1],
                                                     batch_ids) + [ins[-1]]
                        else:
                            ins_batch = slice_arrays(ins, batch_ids)
                    except TypeError:
                        raise TypeError("TypeError while preparing batch. "
                                        "If using HDF5 input data, "
                                        'pass shuffle="batch".')

                # Sparse to dense conversion.
                if issparse is not None:
                    for i in indices_for_conversion_to_dense:
                        ins_batch[i] = ins_batch[i].toarray()

                # Callbacks batch_begin.
                batch_logs = {"batch": batch_index, "size": len(batch_ids)}
                callbacks._call_batch_hook(mode, "begin", batch_index,
                                           batch_logs)

                # Get outputs.
                batch_outs = f(ins_batch)
                if not isinstance(batch_outs, list):
                    batch_outs = [batch_outs]

                # Aggregate results.
                if batch_index == 0:
                    aggregator.create(batch_outs)
                aggregator.aggregate(batch_outs, batch_start, batch_end)

                # Callbacks batch end.
                batch_logs = cbks.make_logs(model, batch_logs, batch_outs,
                                            mode)
                callbacks._call_batch_hook(mode, "end", batch_index,
                                           batch_logs)

                if callbacks.model.stop_training:
                    break

        aggregator.finalize()
        results = aggregator.results
        epoch_logs = cbks.make_logs(model, epoch_logs, results, mode)
        if len(results) == 1:
            results = results[0]

        # Run the test loop every `validation_freq` epochs during training.
        if (do_validation and training_utils_v1.should_run_validation(
                validation_freq, epoch) and not callbacks.model.stop_training):

            if model._compile_distribution:
                # Since we create a new clone from the original model we need to
                # copy the weights back to the original model before we can run
                # validation.
                distributed_training_utils_v1._copy_weights_to_original_model(
                    model, ModeKeys.TRAIN)

            val_results = model_iteration(
                model,
                val_inputs,
                targets=val_targets,
                sample_weights=val_sample_weights,
                batch_size=batch_size,
                steps_per_epoch=validation_steps,
                callbacks=callbacks,
                verbose=0,
                mode=ModeKeys.TEST,
                validation_in_fit=True,
                prepared_feed_values_from_dataset=(val_iterator is not None),
                steps_name="validation_steps",
            )
            if not isinstance(val_results, list):
                val_results = [val_results]
            epoch_logs = cbks.make_logs(model,
                                        epoch_logs,
                                        val_results,
                                        mode,
                                        prefix="val_")
            if val_iterator and epoch < epochs - 1:
                _reinitialize_iterator(val_iterator,
                                       model._distribution_strategy)

        if mode == ModeKeys.TRAIN:
            # Epochs only apply to `fit`.
            callbacks.on_epoch_end(epoch, epoch_logs)

        # Reinitialize dataset iterator for the next epoch.
        if reset_dataset_after_each_epoch and epoch < epochs - 1:
            _reinitialize_iterator(input_iterator,
                                   model._distribution_strategy)

    model._successful_loop_finish = True
    callbacks._call_end_hook(mode)

    if model._distribution_strategy:
        if model._compile_distribution:
            # TODO(priyag, psv): Copy back metrics to the original model as
            # well?
            distributed_training_utils_v1._copy_weights_to_original_model(
                model, mode)
        scope.__exit__(None, None, None)

    if mode == ModeKeys.TRAIN:
        return model.history
    return results
def ndims(tensor):
  if K.is_sparse(tensor):
    return tensor.shape.get_shape()[0]
  else:
    return len(tensor.get_shape()._dims)