Code Example #1
    def _test_correctness(self, optimizer_class, X, Y, seed, **kwargs):
        """Test the correctness of specific TensorFlow optimizer."""
        _model_file = get_module_file_path(
            os.path.dirname(os.path.realpath(__file__)),
            "embedding_test_module.KerasEmbeddingModel",
        )
        model_module = load_module(_model_file).__dict__

        # train model with TensorFlow optimizer
        weights = self._random_init_model_weight(
            [(4, 4), (4, 4), (72, 1), (1,)], seed
        )
        loss_fn = model_module["loss"]
        model1 = model_module["KerasEmbeddingModel"](4, 4, weights)
        opt1 = optimizer_class(**kwargs)
        _train(model1, opt1, X, Y, loss_fn, random_seed=seed)

        model2 = model_module["EdlEmbeddingModel"](4, weights[2:])
        opt2 = optimizer_class(**kwargs)

        layer_names = [layer.name for layer in find_layer(model2, Embedding)]
        embed_dims = {layer_name: 4 for layer_name in layer_names}

        # initialize embedding vectors in kv store
        mock_kv_store = MockKvStore({})
        for layer, embed_table in zip(layer_names, weights[:2]):
            for i, embed_vector in enumerate(embed_table):
                mock_kv_store.update(["%s-%d" % (layer, i)], [embed_vector])

        # train model with optimizer wrapper
        with mock.patch.object(
            EmbeddingService, "lookup_embedding", mock_kv_store.lookup
        ), mock.patch.object(
            EmbeddingService, "update_embedding", mock_kv_store.update
        ):
            _train_edl_embedding_with_optimizer_wrapper(
                model2, opt2, X, Y, loss_fn, embed_dims, random_seed=seed
            )

        # compare trained parameters
        wrong_msg = (
            "The updated parameters of Optimizer Wrapper and TensorFlow "
            "optimizer %s differ." % opt1.get_config()["name"]
        )

        for layer1, layer2 in zip(model1.layers, model2.layers):
            if "embedding" in layer2.name:
                w1 = layer1.weights[0].numpy()
                keys = [Embedding.get_key([layer2.name, i]) for i in range(4)]
                w2 = np.concatenate(mock_kv_store.lookup(keys)[0]).reshape(
                    4, -1
                )
                self.assertTrue((abs(w1 - w2) < 0.0001).all(), msg=wrong_msg)
            else:
                for w1, w2 in zip(layer1.weights, layer2.weights):
                    self.assertTrue(
                        (abs(w1 - w2) < 0.0001).numpy().all(), msg=wrong_msg
                    )
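
All of the tests in this section fake the parameter server with `MockKvStore` and patch `EmbeddingService.lookup_embedding` / `EmbeddingService.update_embedding` onto it. The real helper lives in the test utilities; the following is only a minimal sketch of the interface the tests exercise. The `update`/`lookup` signatures and the second return value reporting missing keys are assumptions inferred from the calls above.

import numpy as np


class MockKvStore(object):
    """Minimal in-memory stand-in for the embedding kv store (a sketch;
    the real `MockKvStore` used above may differ in details)."""

    def __init__(self, store=None):
        self._store = store if store is not None else {}

    def update(self, keys, values):
        # write one float32 vector per key, overwriting existing entries
        for key, value in zip(keys, values):
            self._store[key] = np.array(value, dtype=np.float32)

    def lookup(self, keys):
        # return vectors in key order plus the keys that were not found;
        # missing keys yield None placeholders
        values, unknown_keys = [], []
        for key in keys:
            value = self._store.get(key)
            values.append(value)
            if value is None:
                unknown_keys.append(key)
        return values, unknown_keys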
Code Example #2
    def test_initialize_in_lookup(self):
        opt = Adam()
        opt_wrapper = OptimizerWrapper(opt, None, {"test_1": 4})
        grads_and_vars = [(tf.IndexedSlices(None, tf.constant([0])), "test_1")]
        mock_kv_store = MockKvStore({})
        mock_kv_store.update(
            keys=[Embedding.get_key(["test_1", 0])],
            values=[np.random.rand(4).astype(np.float32)],
        )
        with mock.patch.object(
            EmbeddingService, "lookup_embedding", mock_kv_store.lookup
        ):
            embeddings, slot_values = opt_wrapper._lookup_embeddings_and_slots(
                grads_and_vars
            )
        self.assertTrue((abs(slot_values["test_1"]["m"]) < 0.0001).all())
        self.assertTrue((abs(slot_values["test_1"]["v"]) < 0.0001).all())
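
This test stores only the embedding vector for key `test_1-0` and asserts that the Adam slot values `m` and `v` come back as (near) zero, i.e. `_lookup_embeddings_and_slots` zero-initializes slots that are missing from the kv store. A hypothetical helper showing that lookup-with-zero-fill logic; the name `lookup_or_zero_init` and the flat key layout are assumptions, not the wrapper's actual code:

import numpy as np


def lookup_or_zero_init(kv_store, layer, slot, ids, dim):
    # hypothetical: fetch slot vectors for `ids`, zero-filling any key
    # that is absent from the kv store
    keys = ["{}-{}-{}".format(layer, slot, i) for i in ids]
    values, _ = kv_store.lookup(keys)
    return np.stack(
        [
            v if v is not None else np.zeros(dim, dtype=np.float32)
            for v in values
        ]
    )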
Code Example #3
    def test_report_to_kv_store(self):
        opt = SGD(momentum=0.1)
        opt_wrapper = OptimizerWrapper(opt, None, {})

        ids_list = [[1, 5], [10]]
        opt_wrapper._unique_ids_all_layers = {
            "test_1": np.array(ids_list[0]),
            "test_2": np.array(ids_list[1]),
        }
        t = np.array([1.0, 1.0, 1.0])
        opt_wrapper._embed_variables = {
            "test_1": tf.Variable([t, t * 5]),
            "test_2": tf.Variable([t * 10]),
        }
        opt_wrapper._slot_variables = {
            "test_1": {"momentum": tf.Variable([t / 10.0, t / 2.0])},
            "test_2": {"momentum": tf.Variable([t])},
        }

        mock_kv_store = MockKvStore({})
        with mock.patch.object(EmbeddingService, "update_embedding",
                               mock_kv_store.update):
            opt_wrapper._report_to_kv_store()

        expected_mock_kv_store = MockKvStore({})
        expected_mock_kv_store.update(
            keys=["test_1-1", "test_1-5", "test_2-10"],
            values=[t, t * 5.0, t * 10.0],
        )
        expected_mock_kv_store.update(
            keys=[
                "test_1-momentum-1",
                "test_1-momentum-5",
                "test_2-momentum-10",
            ],
            values=[t / 10.0, t / 2.0, t],
        )
        for k, ids in zip(["test_1", "test_2"], ids_list):
            for id in ids:
                key = Embedding.get_key([k, id])
                v, _ = mock_kv_store.lookup([key])
                expected_v, _ = expected_mock_kv_store.lookup([key])
                self.assertTrue((v[0] == expected_v[0]).all())
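
The expected keys above (`test_1-1`, `test_1-momentum-5`, and so on) indicate that `Embedding.get_key` simply joins its arguments with `-`. A sketch of just that key-building contract, under that assumption:

class Embedding(object):  # sketch of the key-building contract only
    @staticmethod
    def get_key(name_list):
        # ["test_1", "momentum", 5] -> "test_1-momentum-5" (assumed format)
        return "-".join(str(name) for name in name_list)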
Code Example #4
    def test_lookup(self):
        opt = Adam()
        opt_wrapper = OptimizerWrapper(opt, None, {})
        embedding_dim = 4
        layers = ["embedding_0", "embedding_1"]
        grads = [
            tf.IndexedSlices(None, tf.constant([2, 0, 2])),
            tf.IndexedSlices(None, tf.constant([1, 2, 0, 2])),
        ]
        ids_list = [[2, 0], [1, 2, 0]]
        grads_and_vars = list(zip(grads, layers))
        mock_kv_store = MockKvStore({})
        for layer in layers:
            for id in range(3):
                mock_kv_store.update(
                    keys=[Embedding.get_key([layer, id])],
                    values=[np.random.rand(embedding_dim).astype(np.float32)],
                )
                for slot in ["m", "v"]:
                    mock_kv_store.update(
                        keys=[Embedding.get_key([layer, slot, id])],
                        values=[
                            np.random.rand(embedding_dim).astype(np.float32)
                        ],
                    )

        with mock.patch.object(
            EmbeddingService, "lookup_embedding", mock_kv_store.lookup
        ):
            embeddings, slot_values = opt_wrapper._lookup_embeddings_and_slots(
                grads_and_vars
            )

        grad0 = grads_and_vars[0][0]
        self.assertTrue((grad0.indices.numpy() == [0, 1, 0]).all())
        grad1 = grads_and_vars[1][0]
        self.assertTrue((grad1.indices.numpy() == [0, 1, 2, 1]).all())

        for ids, layer in zip(ids_list, layers):
            self.assertTrue(
                (opt_wrapper._unique_ids_all_layers[layer] == ids).all()
            )

            values, _ = mock_kv_store.lookup(
                keys=[Embedding.get_key([layer, id]) for id in ids]
            )
            values = np.concatenate(values).reshape(-1, embedding_dim)
            self.assertTrue((abs(embeddings[layer] - values) < 0.0001).all())

            for slot in ["m", "v"]:
                values, _ = mock_kv_store.lookup(
                    keys=[Embedding.get_key([layer, slot, id]) for id in ids]
                )
                values = np.concatenate(values).reshape(-1, embedding_dim)
                self.assertTrue(
                    (abs(slot_values[layer][slot] - values) < 0.0001).all()
                )
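
The assertions on `grad0.indices` and `grad1.indices` show the wrapper deduplicating each layer's ids in first-occurrence order and rewriting the `IndexedSlices` indices to point into the deduplicated table. `tf.unique` produces exactly this pair, so the remapping implied by the test can be sketched as:

import tensorflow as tf

# sketch of the id remapping implied by the assertions above
indices = tf.constant([1, 2, 0, 2])
unique_ids, remapped = tf.unique(indices)
print(unique_ids.numpy())  # [1 2 0]: the rows to fetch from the kv store
print(remapped.numpy())    # [0 1 2 1]: the new IndexedSlices indices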
Code Example #5
    def test_train_acceleration_with_embedding(self):
        kv_store = MockKvStore()
        model_inst = CustomModel()
        master = MasterServicer(
            2,
            2,
            tf.optimizers.SGD(0.1),
            None,
            init_var=model_inst.trainable_variables,
            checkpoint_filename_for_init=None,
            checkpoint_service=None,
            evaluation_service=None,
        )
        arguments = [
            "--worker_id",
            1,
            "--job_type",
            JobType.TRAINING_ONLY,
            "--minibatch_size",
            32,
            "--model_zoo",
            _model_zoo_path,
            "--model_def",
            "embedding_test_module.EdlEmbeddingModel",
        ]
        args = parse_worker_args(arguments)
        worker = Worker(args)
        worker._stub = InProcessMaster(master)

        inputs_list = [
            {
                "f1": tf.constant([[0], [1], [2]], tf.int64),
                "f2": tf.constant([[2], [1], [0]], tf.int64),
            },
            {
                "f1": tf.constant([[3], [4], [3]], tf.int64),
                "f2": tf.constant([[2], [1], [0]], tf.int64),
            },
        ]
        labels_list = [[0, 1, 0], [1, 1, 0]]
        input_dim = 5
        embedding_dim = 16
        worker.set_model(model_inst)

        # initialize kv store
        for layer in model_inst.layers:
            if isinstance(layer, Embedding):
                name = layer.name
                keys = [Embedding.get_key([name, i]) for i in range(input_dim)]
                values = [
                    np.random.rand(embedding_dim).astype(np.float32)
                    for _ in range(input_dim)
                ]
                kv_store.update(keys, values)

        with mock.patch.object(
            EmbeddingService, "lookup_embedding", kv_store.lookup
        ), mock.patch.object(
            EmbeddingService, "update_embedding", kv_store.update
        ):
            worker._init_embedding_layer()
            worker._run_model_call_before_training(inputs_list[0])

            # run training process without tf.function
            correct_grads = []
            correct_ids_list = []
            for features, labels in zip(inputs_list, labels_list):
                loss, grads = worker.training_process_eagerly(features, labels)
                correct_grads.append(grads)
                ids = {}
                for layer in worker._embedding_layers:
                    ids[layer.name] = layer.embedding_and_ids[0].batch_ids
                correct_ids_list.append(ids)
                worker._reset_embedding()

            # run training process with tf.function
            test_grads = []
            test_ids_list = []
            for features, labels in zip(inputs_list, labels_list):
                self.assertFalse(worker._train_eagerly)
                loss, grads = worker.training_process(features, labels)
                test_grads.append(grads)
                ids = {}
                for layer in worker._embedding_layers:
                    ids[layer.name] = copy.deepcopy(
                        layer.embedding_and_ids[0].batch_ids
                    )
                test_ids_list.append(ids)
                worker._reset_embedding()

        # compare the gradients
        for test_g, correct_g in zip(test_grads, correct_grads):
            for g1, g2 in zip(test_g, correct_g):
                if isinstance(g1, tf.IndexedSlices):
                    self.assertTrue(np.isclose(g1.values, g2.values).all())
                    self.assertTrue(np.isclose(g1.indices, g2.indices).all())
                else:
                    self.assertTrue(np.isclose(g1, g2).all())

        for test_ids, correct_ids in zip(test_ids_list, correct_ids_list):
            for layer_name in correct_ids.keys():
                self.assertTrue(
                    tf.equal(test_ids[layer_name], correct_ids[layer_name])
                    .numpy()
                    .all()
                )
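
The test runs the same minibatches through an eager path (`training_process_eagerly`) and a traced path (`training_process`) and asserts that the gradients and the recorded embedding ids match. Stripped of the embedding bookkeeping, the split can be sketched as follows; these are hypothetical free functions, not the worker's actual methods, and the `loss_fn` argument order is assumed:

import tensorflow as tf


def train_step_eagerly(model, loss_fn, features, labels):
    # plain eager execution: easier to debug, slower per step
    with tf.GradientTape() as tape:
        outputs = model(features, training=True)
        loss = loss_fn(outputs, labels)
    grads = tape.gradient(loss, model.trainable_variables)
    return loss, grads


# tracing the same body with tf.function builds a graph once and reuses
# it, which is the acceleration this test guards against breaking
train_step = tf.function(train_step_eagerly)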
Code Example #6
    def _test_async_correctness(
        self,
        grads_and_vars_batches,
        embed_values,
        expected_non_embed_values,
        expected_embed_values=None,
    ):
        """Checks the correctness of async OptimizerWrapper. This function
        creates many threads and these threads call
        `OptimizerWrapper.apply_gradients` simultaneously.

        Args:
            grads_and_vars_batches: A python list of `grads_and_vars`. Every
                thread takes a `grads_and_vars` and calls `apply_gradients`.
            embed_values: A python dictionary of
                `(layer_name, embedding table)`.
            expected_non_embed_values: A python list of expected non-embedding
                values after applying gradients.
            expected_embed_values: A python dictionary of expected embedding
                values after applying gradients. If None, embedding values
                are not checked.
        """
        thread_num = len(grads_and_vars_batches)
        embed_dims = {}
        embed_var_n = len(embed_values)
        mock_kv_store = MockKvStore()
        for layer, values in embed_values.items():
            embed_dims[layer] = values.shape[1]
            input_dim = values.shape[0]

            keys = [
                Embedding.get_key([layer, idx]) for idx in range(input_dim)
            ]
            mock_kv_store.update(keys, values)

        opt = SGD(0.1)
        opt_wrapper = OptimizerWrapper(opt, None, embed_dims, True)

        with mock.patch.object(
            EmbeddingService, "lookup_embedding", mock_kv_store.lookup
        ), mock.patch.object(
            EmbeddingService, "update_embedding", mock_kv_store.update
        ):
            # call optimizer_wrapper.apply_gradients asynchronously
            def _apply_gradients(opt_wrapper, grads_and_vars):
                # sleep 1s so that all threads reach this call together
                time.sleep(1)
                opt_wrapper.apply_gradients(grads_and_vars)

            executor = ThreadPoolExecutor(max_workers=thread_num)
            tasks = [
                executor.submit(_apply_gradients, opt_wrapper, grads_and_vars)
                for grads_and_vars in grads_and_vars_batches
            ]
            _ = [task.result() for task in tasks]

            # check updated results of non-embedding variables
            non_embed_vars = [
                var for grad, var in grads_and_vars_batches[0][:-embed_var_n]
            ]
            for var, expected_value in zip(non_embed_vars,
                                           expected_non_embed_values):
                self.assertTrue(np.isclose(var.numpy(), expected_value).all())

            # `expected_embed_values=None` means there is no need to check
            # the embedding table
            if not expected_embed_values:
                return
            # check updated results of embedding table
            for layer, expected_values in expected_embed_values.items():
                keys = [
                    Embedding.get_key([layer, idx]) for idx in range(input_dim)
                ]
                raw_value, _ = mock_kv_store.lookup(keys)
                value = np.concatenate(raw_value).reshape(input_dim, -1)

                self.assertTrue(
                    any(
                        np.isclose(value, expected).all()
                        for expected in expected_values
                    )
                )
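
The final `any(...)` accepts several candidate embedding tables because asynchronous read-modify-write updates through the kv store can interleave. For example, with two threads, SGD(0.1), an initial embedding row of all ones, and each thread applying a gradient of all ones: serialized updates leave the row at 0.8, while two stale reads leave it at 0.9. A caller would then pass both outcomes; the layer name and values below are hypothetical:

import numpy as np

# both interleavings of two async SGD(0.1) updates on a row of ones
expected_embed_values = {
    "embedding_0": [
        np.full((1, 2), 0.8, dtype=np.float32),  # updates serialized
        np.full((1, 2), 0.9, dtype=np.float32),  # both threads read stale 1.0
    ]
}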