Example #1
    def report_gradient(self, grads):
        """
        Report gradients to the parameter server (PS) and return
        (accepted, model_version) from the RPC call.
        """
        req = elasticdl_pb2.ReportGradientRequest()
        origin_vars = self._model.trainable_variables
        origin_var_n = len(origin_vars)
        # should keep the same order as self.get_trainable_items()
        for g, v in zip(grads[:origin_var_n], origin_vars):
            if isinstance(g, tf.IndexedSlices):
                req.gradient[v.name].CopyFrom(
                    ndarray_to_tensor(
                        g.values.numpy(), tuple(g.indices.numpy())
                    )
                )
            else:
                req.gradient[v.name].CopyFrom(ndarray_to_tensor(g.numpy()))

        # deal with gradients of ElasticDL embedding layer
        # should keep the same order as self.get_trainable_items()
        if self._embedding_layers:
            grads_edlembedding = grads[origin_var_n:]

            bet_number = 0
            for layer in self._embedding_layers:
                bet_number += len(layer.bet_ids_pair)
            if len(grads_edlembedding) != bet_number:
                raise ValueError(
                    "elasticdl.layers.embedding related gradient number %d "
                    "does not match the number of its output tensor %d."
                    % (len(grads_edlembedding), bet_number)
                )

            it = 0
            for layer in self._embedding_layers:
                g_values = None
                g_indices = None
                for _, ids in layer.bet_ids_pair:
                    grad = grads_edlembedding[it]
                    it += 1
                    # ElasticDL embedding layer with Sparse Gradients
                    if isinstance(grad, tf.IndexedSlices):
                        grad = grad.values
                    if g_values is not None:
                        g_values = tf.concat([g_values, grad], axis=0)
                        g_indices = tf.concat([g_indices, ids], axis=0)
                    else:
                        g_values = grad
                        g_indices = ids

                req.gradient[layer.name].CopyFrom(
                    ndarray_to_tensor(
                        g_values.numpy(), tuple(g_indices.numpy())
                    )
                )

        req.model_version = self._model_version
        res = self._stub.ReportGradient(req)
        return res.accepted, res.model_version
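For context, here is a minimal sketch of how a worker training step could drive report_gradient. The model, loss, and worker objects below are hypothetical stand-ins and not part of the example above; only the gradient ordering requirement (non-embedding variables first, then the ElasticDL embedding outputs, matching get_trainable_items()) comes from the code.

import tensorflow as tf

def train_step_sketch(worker, features, labels):
    # Hypothetical driver: compute gradients locally, then push them to the
    # parameter server through report_gradient.
    with tf.GradientTape() as tape:
        outputs = worker._model.call(features, training=True)
        loss = tf.reduce_mean(
            tf.keras.losses.sparse_categorical_crossentropy(labels, outputs)
        )
    # The gradient order must match self.get_trainable_items(), as the
    # comments in the example stress.
    grads = tape.gradient(loss, worker.get_trainable_items())
    accepted, model_version = worker.report_gradient(grads)
    if not accepted:
        # The PS rejected the push (e.g. a stale model version); a real
        # worker would pull the latest model and retry.
        pass
    return loss, accepted, model_version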
Example #2
    def report_gradient(self, grads):
        """
        Report gradients to the parameter server (PS) and return
        (accepted, model_version) from the RPC call.
        """
        req = elasticdl_pb2.ReportGradientRequest()
        non_embed_vars_n = len(self._non_embed_vars)
        # The first `non_embed_vars_n` items in `grads` are gradients for
        # `self._non_embed_vars`
        for g, v in zip(grads[:non_embed_vars_n], self._non_embed_vars):
            if isinstance(g, tf.IndexedSlices):
                req.gradient[v.name].CopyFrom(
                    ndarray_to_tensor(g.values.numpy(),
                                      tuple(g.indices.numpy())))
            else:
                req.gradient[v.name].CopyFrom(ndarray_to_tensor(g.numpy()))

        # Accumulate gradients of ElasticDL embedding layer
        if self._embedding_layers:
            # The `edl_embedding_grads` are gradients for bets in
            # `self._embedding_layers`
            edl_embedding_grads = grads[non_embed_vars_n:]

            # Check that the number of bets equals the number of gradients.
            # Please note that every embedding layer may have more than one
            # entry in `embedding_and_ids`.
            bet_number = 0
            for layer in self._embedding_layers:
                bet_number += len(layer.embedding_and_ids)
            if len(edl_embedding_grads) != bet_number:
                raise ValueError(
                    "elasticdl.layers.embedding related gradient number %d "
                    "does not match the number of its output tensor %d." %
                    (len(edl_embedding_grads), bet_number))

            grad_accum_iter = 0
            for layer in self._embedding_layers:
                g_values = None
                g_indices = None
                for _, ids in layer.embedding_and_ids:
                    grad = edl_embedding_grads[grad_accum_iter]
                    grad_accum_iter += 1
                    # ElasticDL embedding layer with Sparse Gradients
                    if isinstance(grad, tf.IndexedSlices):
                        grad = grad.values
                    if g_values is not None:
                        g_values = tf.concat([g_values, grad], axis=0)
                        g_indices = tf.concat([g_indices, ids], axis=0)
                    else:
                        g_values = grad
                        g_indices = ids

                req.gradient[layer.name].CopyFrom(
                    ndarray_to_tensor(g_values.numpy(),
                                      tuple(g_indices.numpy())))

        req.model_version = self._model_version
        res = self._stub.ReportGradient(req)
        return res.accepted, res.model_version
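The per-layer loop above concatenates each layer's gradients and ids into a single flat (values, indices) pair before packing them into the request. A self-contained sketch of that accumulation pattern in plain TensorFlow (the tensors are made up for illustration):

import tensorflow as tf

# Two gradient/ids pairs for one embedding layer, as if the layer had been
# called twice in the forward pass and therefore has two bet/ids pairs.
pairs = [
    (tf.constant([[0.1, 0.2], [0.3, 0.4]]), tf.constant([0, 5], tf.int64)),
    (tf.constant([[0.5, 0.6]]), tf.constant([3], tf.int64)),
]

g_values, g_indices = None, None
for grad, ids in pairs:
    if isinstance(grad, tf.IndexedSlices):
        grad = grad.values
    if g_values is not None:
        g_values = tf.concat([g_values, grad], axis=0)
        g_indices = tf.concat([g_indices, ids], axis=0)
    else:
        g_values, g_indices = grad, ids

# g_values is a (3, 2) dense tensor and g_indices is [0 5 3]; this pair is
# what gets serialized into req.gradient[layer.name].
print(g_values.numpy().shape, g_indices.numpy())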
Example #3
def makeGrad():
    """Make a ReportGradientRequest compatible with the model."""
    req = elasticdl_pb2.ReportGradientRequest()
    req.gradient["x"].CopyFrom(
        ndarray_to_tensor(np.array([0.1], dtype=np.float32)))
    req.gradient["y"].CopyFrom(
        ndarray_to_tensor(np.array([0.03, 0.06], dtype=np.float32)))
    req.model_version = 1
    return req
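A possible way to exercise such a hand-built request in a test, mirroring the worker-side call from Examples #1 and #2; the stub here is a hypothetical ReportGradient-capable gRPC stub and the assertions are illustrative:

def report_and_check_sketch(stub):
    # Hypothetical test driver: send the hand-built gradients and inspect
    # the (accepted, model_version) pair returned by the RPC.
    req = makeGrad()
    res = stub.ReportGradient(req)
    assert res.accepted
    assert res.model_version >= req.model_version
    return res.accepted, res.model_version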
Example #4
def makeGrad():
    """Make a ReportGradientRequest compatible with the model."""
    req = elasticdl_pb2.ReportGradientRequest()
    emplace_tensor_pb_from_ndarray(req.gradient,
                                   np.array([0.1], np.float32),
                                   name="x")
    emplace_tensor_pb_from_ndarray(req.gradient,
                                   np.array([0.03, 0.06], np.float32),
                                   name="y")
    req.model_version = 1
    return req
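Example #4 builds the same request as Example #3 but routes the numpy-to-tensor conversion through the emplace_tensor_pb_from_ndarray helper instead of calling CopyFrom(ndarray_to_tensor(...)) by hand. One plausible shape for such a helper, expressed in terms of ndarray_to_tensor from Example #3; this is an assumption for illustration, not the actual ElasticDL implementation:

import numpy as np

def emplace_tensor_pb_sketch(tensor_pb_map, ndarray, name):
    # Hypothetical wrapper: convert the numpy array to a tensor protobuf
    # and store it in the request's gradient map under the given name.
    tensor_pb_map[name].CopyFrom(ndarray_to_tensor(ndarray))

# Usage equivalent to Example #3:
# emplace_tensor_pb_sketch(req.gradient, np.array([0.1], np.float32), name="x")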