Example #1
    def test_same_fft_optimized_median_initializations_for_transform(self):
        """Transform with interpolation based optimization should be deterministic."""
        x_train, x_test = train_test_split(self.iris,
                                           test_size=0.33,
                                           random_state=42)

        embedding = fastTSNE.TSNE(
            early_exaggeration_iter=10,
            n_iter=10,
            neighbors="exact",
            negative_gradient_method="fft",
            random_state=42,
        ).fit(x_train)

        for init in self.transform_initializations:
            new_embedding_1 = embedding.transform(x_test,
                                                  initialization=init,
                                                  n_iter=10,
                                                  learning_rate=10)
            new_embedding_2 = embedding.transform(x_test,
                                                  initialization=init,
                                                  n_iter=10,
                                                  learning_rate=10)

            # `init` doubles as the failure message identifying the case
            np.testing.assert_equal(new_embedding_1, new_embedding_2, init)
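The determinism being tested comes from the fixed `random_state`. A minimal standalone sketch of the same property for `fit`, assuming scikit-learn's iris loader is available:

    import numpy as np
    import fastTSNE
    from sklearn import datasets

    x = datasets.load_iris().data

    def run_tsne():
        # A fixed random_state should make the whole pipeline reproducible.
        return fastTSNE.TSNE(n_iter=50, neighbors="exact", random_state=42).fit(x)

    np.testing.assert_equal(run_tsne(), run_tsne())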
Example #2
    def test_iris_bh_transform_equivalency_with_one_by_one(self):
        """Compare one by one embedding vs all at once using BH gradients."""
        x_train, x_test = train_test_split(self.iris.data,
                                           test_size=0.33,
                                           random_state=42)

        # Set up the initial embedding
        embedding = fastTSNE.TSNE(
            early_exaggeration_iter=0,
            n_iter=50,
            neighbors="exact",
            negative_gradient_method="bh",
        ).fit(x_train)

        params = dict(n_iter=100, perplexity=5)
        # Build up an embedding by adding points one by one
        new_embedding_1 = np.vstack([
            embedding.transform(np.atleast_2d(point), **params)
            for point in x_test
        ])
        # Add all new points at once
        new_embedding_2 = embedding.transform(x_test, **params)

        # Verify that the embedding has actually been optimized
        self.assertRaises(
            AssertionError,
            np.testing.assert_almost_equal,
            embedding.prepare_partial(x_test, perplexity=params["perplexity"]),
            new_embedding_1,
        )
        # Check that both methods produced the same embedding
        np.testing.assert_almost_equal(new_embedding_1, new_embedding_2)
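The `assertRaises` block above works because `prepare_partial` only places the new points (no optimization), while `transform` also optimizes them; if the two results matched, the optimization did nothing. A minimal sketch of that relationship, using the same API:

    import numpy as np
    import fastTSNE
    from sklearn import datasets
    from sklearn.model_selection import train_test_split

    iris = datasets.load_iris()
    x_train, x_test = train_test_split(iris.data, test_size=0.33, random_state=42)

    embedding = fastTSNE.TSNE(n_iter=50, neighbors="exact",
                              negative_gradient_method="bh").fit(x_train)

    initial = embedding.prepare_partial(x_test, perplexity=5)   # placement only
    optimized = embedding.transform(x_test, n_iter=100, perplexity=5)

    # Optimization should have moved the points away from their initialization.
    assert not np.allclose(initial, optimized)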
Example #3
    def test_random_state_parameter_is_propagated_pca_init_approx(
            self, init, neighbors):
        """`random_state` should reach both the PCA init and the neighbor search."""
        random_state = 1

        tsne = fastTSNE.TSNE(
            neighbors="approx",
            initialization="pca",
            random_state=random_state,
        )
        tsne.prepare_initial(self.x)

        # Verify that `random_state` was passed
        init.assert_called_once()
        check_mock_called_with_kwargs(init, {"random_state": random_state})
        neighbors.assert_called_once()
        check_mock_called_with_kwargs(neighbors,
                                      {"random_state": random_state})
Example #4
    def test_iris_fft_transform_equivalency_with_one_by_one(self):
        """Compare one by one embedding vs all at once using FFT gradients.

        Note that this won't return the exact same embedding both times because
        the grid placed over the embedding will differ when placing points one
        by one vs. when placing them at once. The min/max coords will differ,
        thus changing the overall approximation. They should be quite similar
        though.

        """
        x_train, x_test = train_test_split(self.iris.data,
                                           test_size=0.33,
                                           random_state=42)

        # Set up the initial embedding
        embedding = fastTSNE.TSNE(
            early_exaggeration_iter=0,
            n_iter=50,
            neighbors="exact",
            negative_gradient_method="fft",
        ).fit(x_train)

        params = dict(n_iter=100, perplexity=5)
        # Build up an embedding by adding points one by one
        new_embedding_1 = np.vstack([
            embedding.transform(np.atleast_2d(point), **params)
            for point in x_test
        ])
        # Add all new points at once
        new_embedding_2 = embedding.transform(x_test, **params)

        # Verify that the embedding has actually been optimized
        self.assertRaises(
            AssertionError,
            np.testing.assert_almost_equal,
            embedding.prepare_partial(x_test, perplexity=params["perplexity"]),
            new_embedding_1,
        )
        # Check that both methods produced the same embedding
        np.testing.assert_almost_equal(new_embedding_1,
                                       new_embedding_2,
                                       decimal=1)
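For reference, numpy's `assert_almost_equal` with `decimal=1` passes whenever `abs(desired - actual) < 1.5 * 10**-1`, i.e. per-coordinate differences under 0.15; that slack is what absorbs the grid-placement effect described in the docstring:

    import numpy as np

    # abs(1.10 - 1.00) = 0.10 < 0.15, so this passes under decimal=1.
    np.testing.assert_almost_equal(1.00, 1.10, decimal=1)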
Example #5
    def test_same_unoptimized_initializations_for_transform(self):
        """Initializations should be deterministic."""
        x_train, x_test = train_test_split(self.iris,
                                           test_size=0.33,
                                           random_state=42)

        embedding = fastTSNE.TSNE(
            early_exaggeration_iter=50,
            n_iter=50,
            neighbors="exact",
            negative_gradient_method="bh",
            random_state=42,
        ).fit(x_train)

        for init in self.transform_initializations:
            new_embedding_1 = embedding.prepare_partial(x_test,
                                                        initialization=init)
            new_embedding_2 = embedding.prepare_partial(x_test,
                                                        initialization=init)

            np.testing.assert_equal(new_embedding_1, new_embedding_2, init)
Example #6
    def test_iris_fft_transform_correctness(self):
        """Transformed test points should land near same-class training points."""
        x_train, x_test, y_train, y_test = train_test_split(self.iris.data,
                                                            self.iris.target,
                                                            test_size=0.33,
                                                            random_state=42)

        # Set up the initial embedding
        embedding = fastTSNE.TSNE(
            neighbors="exact",
            negative_gradient_method="fft",
            early_exaggeration_iter=0,
            n_iter=50,
        ).fit(x_train)

        # Evaluate t-SNE optimization using a KNN classifier
        knn = KNeighborsClassifier(n_neighbors=10)
        knn.fit(embedding, y_train)

        new_embedding = embedding.transform(x_test, n_iter=100, perplexity=100)
        predictions = knn.predict(new_embedding)
        self.assertGreater(accuracy_score(y_test, predictions), 0.99)
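The KNN check generalizes to any embedding: fit a classifier on the embedded training points and score it on the embedded test points. A small helper capturing the pattern (the function name is illustrative, not part of fastTSNE):

    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.metrics import accuracy_score

    def knn_embedding_accuracy(train_emb, y_train, test_emb, y_test, k=10):
        # High accuracy means nearby embedded points tend to share labels.
        knn = KNeighborsClassifier(n_neighbors=k)
        knn.fit(train_emb, y_train)
        return accuracy_score(y_test, knn.predict(test_emb))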
Example #7
    def __init__(self,
                 n_components=2,
                 perplexity=30,
                 learning_rate=200,
                 early_exaggeration_iter=250,
                 early_exaggeration=12,
                 n_iter=750,
                 exaggeration=None,
                 theta=0.5,
                 min_num_intervals=10,
                 ints_in_interval=1,
                 initialization='random',
                 metric='euclidean',
                 n_jobs=1,
                 neighbors='exact',
                 negative_gradient_method='bh',
                 callbacks=None,
                 callbacks_every_iters=50,
                 random_state=None,
                 preprocessors=None):
        super().__init__(preprocessors=preprocessors)
        self.tsne = fastTSNE.TSNE(
            n_components=n_components,
            perplexity=perplexity,
            learning_rate=learning_rate,
            early_exaggeration=early_exaggeration,
            early_exaggeration_iter=early_exaggeration_iter,
            n_iter=n_iter,
            exaggeration=exaggeration,
            theta=theta,
            min_num_intervals=min_num_intervals,
            ints_in_interval=ints_in_interval,
            initialization=initialization,
            metric=metric,
            n_jobs=n_jobs,
            neighbors=neighbors,
            negative_gradient_method=negative_gradient_method,
            callbacks=callbacks,
            callbacks_every_iters=callbacks_every_iters,
            random_state=random_state)
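Only `__init__` of the enclosing class is shown; the pattern is a thin wrapper that mirrors fastTSNE's constructor and delegates the work to a stored `fastTSNE.TSNE` instance. A minimal sketch of that delegation pattern (the class name and `fit` method are illustrative assumptions, not the original source):

    import fastTSNE

    class TSNEWrapper:
        # Illustrative stand-in for the unnamed class above.
        def __init__(self, n_components=2, perplexity=30, random_state=None):
            self.tsne = fastTSNE.TSNE(n_components=n_components,
                                      perplexity=perplexity,
                                      random_state=random_state)

        def fit(self, data):
            # All embedding work is deferred to the wrapped fastTSNE.TSNE.
            return self.tsne.fit(data)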