Example no. 1
def test_reduction_to_one_component():
    # t-SNE should allow reduction to one component (issue #4154).
    random_state = check_random_state(0)
    tsne = TSNE(n_components=1)
    X = random_state.randn(5, 2)
    X_embedded = tsne.fit(X).embedding_
    assert(np.all(np.isfinite(X_embedded)))
Example no. 2
def test_accessible_kl_divergence():
    # Ensures that the accessible kl_divergence matches the computed value
    random_state = check_random_state(0)
    X = random_state.randn(100, 2)
    tsne = TSNE(n_iter_without_progress=2, verbose=2,
                random_state=0, method='exact')

    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        tsne.fit_transform(X)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout

    # The output needs to contain the accessible kl_divergence as the error at
    # the last iteration
    for line in out.split('\n')[::-1]:
        if 'Iteration' in line:
            _, _, error = line.partition('error = ')
            if error:
                error, _, _ = error.partition(',')
                break
    assert_almost_equal(tsne.kl_divergence_, float(error), decimal=5)
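Note: the manual sys.stdout swap used in this and several later examples can be written more compactly with contextlib.redirect_stdout, which restores stdout automatically. A minimal sketch, assuming the tsne and X objects from the test above:

from io import StringIO
from contextlib import redirect_stdout

buf = StringIO()
with redirect_stdout(buf):
    tsne.fit_transform(X)  # all verbose output is captured in buf
out = buf.getvalue()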
Example no. 3
def check_uniform_grid(method, seeds=[0, 1, 2], n_iter=1000):
    """Make sure that TSNE can approximately recover a uniform 2D grid

    Due to ties in distances between points in X_2d_grid, this test is
    platform-dependent for ``method='barnes_hut'`` because of numerical
    imprecision.

    Also, t-SNE is not guaranteed to converge to the right solution because
    a bad initialization can lead to convergence to a bad local minimum (the
    optimization problem is non-convex). To avoid breaking the test too often,
    we re-run t-SNE from the final point when the convergence is not good
    enough.
    """
    for seed in seeds:
        tsne = TSNE(n_components=2, init='random', random_state=seed,
                    perplexity=20, n_iter=n_iter, method=method)
        Y = tsne.fit_transform(X_2d_grid)

        try_name = "{}_{}".format(method, seed)
        try:
            assert_uniform_grid(Y, try_name)
        except AssertionError:
            # If the test fails a first time, re-run with init=Y to see if
            # this was caused by a bad initialization. Note that this will
            # also run an early_exaggeration step.
            try_name += ":rerun"
            tsne.init = Y
            Y = tsne.fit_transform(X_2d_grid)
            assert_uniform_grid(Y, try_name)
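The assert_uniform_grid helper is not included in this example. A plausible sketch, reconstructed from the inline checks of Example no. 28 below (the helper name and signature are assumptions):

import numpy as np
from sklearn.neighbors import NearestNeighbors

def assert_uniform_grid(Y, try_name=None):
    # For a uniform grid embedding, the distance from each point to its
    # nearest neighbor should be non-zero and approximately constant.
    nn = NearestNeighbors(n_neighbors=1).fit(Y)
    dist_to_nn = nn.kneighbors(return_distance=True)[0].ravel()
    assert dist_to_nn.min() > 0.1

    smallest_to_mean = dist_to_nn.min() / np.mean(dist_to_nn)
    largest_to_mean = dist_to_nn.max() / np.mean(dist_to_nn)
    assert smallest_to_mean > .5, try_name
    assert largest_to_mean < 2, try_name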
Example no. 4
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
    # Nearest neighbors should be preserved approximately.
    random_state = check_random_state(0)
    X = random_state.randn(100, 2)
    D = squareform(pdist(X), "sqeuclidean")
    tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0, metric="precomputed", random_state=0, verbose=0)
    X_embedded = tsne.fit_transform(D)
    assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1, precomputed=True), 1.0, decimal=1)
Example no. 5
def test_64bit():
    # Ensure 64bit arrays are handled correctly.
    random_state = check_random_state(0)
    methods = ["barnes_hut", "exact"]
    for method in methods:
        for dt in [np.float32, np.float64]:
            X = random_state.randn(100, 2).astype(dt)
            tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0, random_state=0, method=method)
            tsne.fit_transform(X)
Example no. 6
def test_fit_csr_matrix():
    # X can be a sparse matrix.
    random_state = check_random_state(0)
    X = random_state.randn(100, 2)
    # use the seeded RNG so the zeroed entries are reproducible
    X[(random_state.randint(0, 100, 50), random_state.randint(0, 2, 50))] = 0.0
    X_csr = sp.csr_matrix(X)
    tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0, random_state=0, method="exact")
    X_embedded = tsne.fit_transform(X_csr)
    assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0, decimal=1)
Example no. 7
def test_preserve_trustworthiness_approximately():
    """Nearest neighbors should be preserved approximately."""
    random_state = check_random_state(0)
    X = random_state.randn(100, 2)
    for init in ('random', 'pca'):
        tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
                    init=init, random_state=0)
        X_embedded = tsne.fit_transform(X)
        assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 1.0,
                            decimal=1)
Example no. 8
def test_kl_divergence_not_nan(method):
    # Ensure kl_divergence_ is computed at last iteration
    # even though n_iter % n_iter_check != 0, i.e. 1003 % 50 != 0
    random_state = check_random_state(0)

    X = random_state.randn(50, 2)
    tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
                random_state=0, method=method, verbose=0, n_iter=1003)
    tsne.fit_transform(X)

    assert not np.isnan(tsne.kl_divergence_)
Example no. 9
def test_optimization_minimizes_kl_divergence():
    """t-SNE should give a lower KL divergence with more iterations."""
    random_state = check_random_state(0)
    X, _ = make_blobs(n_features=3, random_state=random_state)
    kl_divergences = []
    for n_iter in [200, 250, 300]:
        tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0, n_iter=n_iter, random_state=0)
        tsne.fit_transform(X)
        kl_divergences.append(tsne.kl_divergence_)
    assert_less_equal(kl_divergences[1], kl_divergences[0])
    assert_less_equal(kl_divergences[2], kl_divergences[1])
Example no. 10
def TSNE_Gist(name, csvfilename):
    idsT = imagesHandler.get_all_img_ids()
    ids = []
    for id in idsT:
        ids.append(str(id[0]))
    print(ids)
    gistVals = util.loadCSV(csvfilename)
    X = np.array(gistVals)
    model = TSNE(n_components=2, random_state=0)
    tsne_vals = model.fit_transform(X)
    tsneHandler.storeTsneValsWIds(name, tsne_vals, ids)
    return tsne_vals, ids
Example no. 11
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
    # Nearest neighbors should be preserved approximately.
    random_state = check_random_state(0)
    for i in range(3):
        X = random_state.randn(100, 2)
        D = squareform(pdist(X), "sqeuclidean")
        tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
                    early_exaggeration=2.0, metric="precomputed",
                    random_state=i, verbose=0)
        X_embedded = tsne.fit_transform(D)
        t = trustworthiness(D, X_embedded, n_neighbors=1, metric="precomputed")
        assert t > .95
Example no. 12
def test_64bit(method, dt):
    # Ensure 64bit arrays are handled correctly.
    random_state = check_random_state(0)

    X = random_state.randn(50, 2).astype(dt)
    tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
                random_state=0, method=method, verbose=0)
    X_embedded = tsne.fit_transform(X)
    effective_type = X_embedded.dtype

    # The t-SNE Cython code is single precision only, so the output will
    # always be single precision, irrespective of the input dtype.
    assert effective_type == np.float32
Example no. 13
def test_preserve_trustworthiness_approximately():
    # Nearest neighbors should be preserved approximately.
    random_state = check_random_state(0)
    n_components = 2
    methods = ['exact', 'barnes_hut']
    X = random_state.randn(50, n_components).astype(np.float32)
    for init in ('random', 'pca'):
        for method in methods:
            tsne = TSNE(n_components=n_components, init=init, random_state=0,
                        method=method)
            X_embedded = tsne.fit_transform(X)
            t = trustworthiness(X, X_embedded, n_neighbors=1)
            assert_greater(t, 0.9)
Example no. 14
def test_n_iter_used():
    # check that the ``n_iter`` parameter has an effect
    random_state = check_random_state(0)
    n_components = 2
    methods = ['exact', 'barnes_hut']
    X = random_state.randn(25, n_components).astype(np.float32)
    for method in methods:
        for n_iter in [251, 500]:
            tsne = TSNE(n_components=n_components, perplexity=1,
                        learning_rate=0.5, init="random", random_state=0,
                        method=method, early_exaggeration=1.0, n_iter=n_iter)
            tsne.fit_transform(X)

            assert tsne.n_iter_ == n_iter - 1
Example no. 15
def TSNE_General(tablename):
    conn = sqlite3.connect(dirm.sqlite_file)
    c = conn.cursor()
    cmd = "SELECT * FROM {tn}".format(tn=tablename)
    c.execute(cmd)
    all_rows = c.fetchall()
    ids = []
    data = []
    for row in all_rows:
        ids.append(str(row[0]))
        data.append(row[1:])
    X = np.array(data)
    model = TSNE(n_components=2, random_state=0)
    tsne_vals = model.fit_transform(X)
    tsneHandler.storeTsneValsWIds(tablename, tsne_vals, ids)
    return tsne_vals, ids
Example no. 16
def TSNE_sift(name):
    conn = sqlite3.connect(dirm.sqlite_file)
    c = conn.cursor()
    dist = sift_cb_handler.get_distributions()
    X_Ids = []
    X_data = []
    for d in dist:
        x_id = d[0]
        x_data = d[1:]
        X_Ids.append(x_id)
        X_data.append(x_data)
    X_data = np.array(X_data)
    model = TSNE(n_components=2)
    tsne_x = model.fit_transform(X_data)
    tsneHandler.storeTsneValsWIds(name, tsne_x, X_Ids)
    return tsne_x, X_Ids
Example no. 17
def test_early_exaggeration_used():
    # check that the ``early_exaggeration`` parameter has an effect
    random_state = check_random_state(0)
    n_components = 2
    methods = ['exact', 'barnes_hut']
    X = random_state.randn(25, n_components).astype(np.float32)
    for method in methods:
        tsne = TSNE(n_components=n_components, perplexity=1,
                    learning_rate=100.0, init="pca", random_state=0,
                    method=method, early_exaggeration=1.0)
        X_embedded1 = tsne.fit_transform(X)
        tsne = TSNE(n_components=n_components, perplexity=1,
                    learning_rate=100.0, init="pca", random_state=0,
                    method=method, early_exaggeration=10.0)
        X_embedded2 = tsne.fit_transform(X)

        assert not np.allclose(X_embedded1, X_embedded2)
Example no. 18
def test_preserve_trustworthiness_approximately():
    # Nearest neighbors should be preserved approximately.
    random_state = check_random_state(0)
    # The Barnes-Hut approximation uses a different method to estimate
    # P_ij, using only a number of nearest neighbors instead of all
    # points (so that k = 3 * perplexity). With perplexity=50 on 100
    # samples this covers every other point, so both methods should agree.
    n_components = 2
    methods = ['exact', 'barnes_hut']
    X = random_state.randn(100, n_components).astype(np.float32)
    for init in ('random', 'pca'):
        for method in methods:
            tsne = TSNE(n_components=n_components, perplexity=50,
                        learning_rate=100.0, init=init, random_state=0,
                        method=method)
            X_embedded = tsne.fit_transform(X)
            T = trustworthiness(X, X_embedded, n_neighbors=1)
            assert_almost_equal(T, 1.0, decimal=1)
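For context on the comment above: scikit-learn's Barnes-Hut implementation derives the neighbor count from the perplexity, roughly k = 3 * perplexity capped at n_samples - 1 (the exact expression may vary across versions). A quick sketch of the arithmetic:

# With perplexity=50 and 100 samples, every other point is a neighbor,
# so the approximation has the full neighborhood available.
n_samples, perplexity = 100, 50
k = min(n_samples - 1, int(3. * perplexity + 1))
print(k)  # 99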
Example no. 19
def test_n_iter_without_progress():
    # Use a dummy negative n_iter_without_progress and check output on stdout
    random_state = check_random_state(0)
    X = random_state.randn(100, 2)
    tsne = TSNE(n_iter_without_progress=-1, verbose=2,
                random_state=1, method='exact')

    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        tsne.fit_transform(X)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout

    # The output needs to contain the value of n_iter_without_progress
    assert_in("did not make any progress during the "
              "last -1 episodes. Finished.", out)
Example no. 20
def test_n_iter_without_progress():
    # Make sure that the parameter n_iter_without_progress is used correctly
    random_state = check_random_state(0)
    X = random_state.randn(100, 2)
    tsne = TSNE(n_iter_without_progress=2, verbose=2,
                random_state=0, method='exact')

    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        tsne.fit_transform(X)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout

    # The output needs to contain the value of n_iter_without_progress
    assert_in("did not make any progress during the "
              "last 2 episodes. Finished.", out)
Example no. 21
def test_bh_match_exact():
    # check that the ``barnes_hut`` method matches the exact one when
    # ``angle = 0`` and ``perplexity > n_samples / 3``
    random_state = check_random_state(0)
    n_features = 10
    X = random_state.randn(30, n_features).astype(np.float32)
    X_embeddeds = {}
    n_iter = {}
    for method in ['exact', 'barnes_hut']:
        tsne = TSNE(n_components=2, method=method, learning_rate=1.0,
                    init="random", random_state=0, n_iter=251,
                    perplexity=30.0, angle=0)
        # Kill the early_exaggeration
        tsne._EXPLORATION_N_ITER = 0
        X_embeddeds[method] = tsne.fit_transform(X)
        n_iter[method] = tsne.n_iter_

    assert n_iter['exact'] == n_iter['barnes_hut']
    assert_array_almost_equal(X_embeddeds['exact'], X_embeddeds['barnes_hut'],
                              decimal=3)
Example no. 22
def test_verbose():
    # Verbose options write to stdout.
    random_state = check_random_state(0)
    tsne = TSNE(verbose=2)
    X = random_state.randn(5, 2)

    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        tsne.fit_transform(X)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout

    assert("[t-SNE]" in out)
    assert("nearest neighbors..." in out)
    assert("Computed conditional probabilities" in out)
    assert("Mean sigma" in out)
    assert("early exaggeration" in out)
Example no. 23
def test_min_grad_norm():
    # Make sure that the parameter min_grad_norm is used correctly
    random_state = check_random_state(0)
    X = random_state.randn(100, 2)
    min_grad_norm = 0.002
    tsne = TSNE(min_grad_norm=min_grad_norm, verbose=2,
                random_state=0, method='exact')

    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        tsne.fit_transform(X)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout

    lines_out = out.split('\n')

    # extract the gradient norm from the verbose output
    gradient_norm_values = []
    for line in lines_out:
        # Once the computation is finished, the last gradient norm value is
        # just repeated; we do not need to store it
        if 'Finished' in line:
            break

        start_grad_norm = line.find('gradient norm')
        if start_grad_norm >= 0:
            line = line[start_grad_norm:]
            line = line.replace('gradient norm = ', '').split(' ')[0]
            gradient_norm_values.append(float(line))

    # Compute how often the gradient norm is smaller than min_grad_norm
    gradient_norm_values = np.array(gradient_norm_values)
    n_smaller_gradient_norms = \
        len(gradient_norm_values[gradient_norm_values <= min_grad_norm])

    # The gradient norm can be smaller than min_grad_norm at most once,
    # because the moment it becomes smaller, the optimization stops
    assert_less_equal(n_smaller_gradient_norms, 1)
Example no. 24
def test_verbose():
    random_state = check_random_state(0)
    tsne = TSNE(verbose=2)
    X = random_state.randn(5, 2)

    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        tsne.fit_transform(X)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout

    assert("[t-SNE]" in out)
    assert("Computing pairwise distances" in out)
    assert("Computed conditional probabilities" in out)
    assert("Mean sigma" in out)
    assert("Finished" in out)
    assert("early exaggeration" in out)
    assert("Finished" in out)
Example no. 25
def tsne_view(trainset, volume_manager):

    batch_scheduler = TractographyBatchScheduler(trainset,
                                                 batch_size=20000,
                                                 noisy_streamlines_sigma=False,
                                                 seed=1234,
                                                 normalize_target=True)
    rng = np.random.RandomState(42)
    rng.shuffle(batch_scheduler.indices)

    bundle_name_pattern = "CST_Left"
    # batch_inputs, batch_targets, batch_mask = batch_scheduler._prepare_batch(trainset.get_bundle(bundle_name_pattern, return_idx=True))
    inputs, targets, mask = batch_scheduler._next_batch(3)
    mask = mask.astype(bool)
    idx = np.arange(mask.sum())
    rng.shuffle(idx)

    coords = T.matrix('coords')
    eval_at_coords = theano.function([coords], volume_manager.eval_at_coords(coords))

    M = 2000 * len(trainset.subjects)
    coords = inputs[mask][idx[:M]]
    X = eval_at_coords(coords)

    from sklearn.manifold import TSNE
    tsne = TSNE(n_components=2, verbose=2, random_state=42)
    Y = tsne.fit_transform(X)

    import matplotlib.pyplot as plt
    plt.figure()
    ids = range(len(trainset.subjects))
    markers = ['s', 'o', '^', 'v', '<', '>', 'h']
    colors = ['cyan', 'darkorange', 'darkgreen', 'magenta', 'pink', 'k']
    for i, marker, color in zip(ids, markers, colors):
        idx = coords[:, -1] == i
        print("Subject #{}: ".format(i), idx.sum())
        plt.scatter(Y[idx, 0], Y[idx, 1], 20, color=color, marker=marker, label="Subject #{}".format(i))

    plt.legend()
    plt.show()
Example no. 26
def test_n_iter_without_progress():
    # Use a dummy negative n_iter_without_progress and check output on stdout
    random_state = check_random_state(0)
    X = random_state.randn(100, 10)
    for method in ["barnes_hut", "exact"]:
        tsne = TSNE(n_iter_without_progress=-1, verbose=2, learning_rate=1e8,
                    random_state=0, method=method, n_iter=351, init="random")
        tsne._N_ITER_CHECK = 1
        tsne._EXPLORATION_N_ITER = 0

        old_stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            tsne.fit_transform(X)
        finally:
            out = sys.stdout.getvalue()
            sys.stdout.close()
            sys.stdout = old_stdout

        # The output needs to contain the value of n_iter_without_progress
        assert_in("did not make any progress during the "
                  "last -1 episodes. Finished.", out)
Example no. 27
def load_and_transform(con, cur, tag):
    # erase old data
    cur.execute("delete from computed_viz where id = ?",(tag,))
    con.commit()

    # load/transform data structure
    words, mtx, topics = load_data_structures(cur, tag)

    # compute
    matrix = mtx.toarray()

    # PCA
    pca = PCA(n_components=2)
    X_r = pca.fit(matrix).transform(matrix)
    save_transformation(cur, tag, "pca", X_r, topics)
    con.commit()

    # T-SNE
    t_sne = TSNE(n_components=2, random_state=0, verbose=1)
    X_r = t_sne.fit_transform(matrix)
    save_transformation(cur, tag, "tsne", X_r, topics)
    con.commit()
Example no. 28
def check_uniform_grid(method, seeds=[0, 1, 2], n_iter=1000):
    """Make sure that TSNE can approximately recover a uniform 2D grid"""
    for seed in seeds:
        tsne = TSNE(n_components=2, init='random', random_state=seed,
                    perplexity=10, n_iter=n_iter, method=method)
        Y = tsne.fit_transform(X_2d_grid)

        # Ensure that the convergence criterion has been triggered
        assert tsne.n_iter_ < n_iter

        # Ensure that the resulting embedding leads to approximately
        # uniformly spaced points: the distance to the closest neighbors
        # should be non-zero and approximately constant.
        nn = NearestNeighbors(n_neighbors=1).fit(Y)
        dist_to_nn = nn.kneighbors(return_distance=True)[0].ravel()
        assert dist_to_nn.min() > 0.1

        smallest_to_mean = dist_to_nn.min() / np.mean(dist_to_nn)
        largest_to_mean = dist_to_nn.max() / np.mean(dist_to_nn)

        try_name = "{}_{}".format(method, seed)
        assert_greater(smallest_to_mean, .5, msg=try_name)
        assert_less(largest_to_mean, 2, msg=try_name)
Example no. 29
from scipy.io import loadmat
from sklearn.manifold import TSNE

chris_data = loadmat('/home/itskov/Downloads/dataForEyal.mat')
tsne_manifold = TSNE().fit_transform(chris_data['PCs'])
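A minimal follow-up to inspect the embedding (an illustrative sketch; the original snippet stops after the fit):

import matplotlib.pyplot as plt

plt.scatter(tsne_manifold[:, 0], tsne_manifold[:, 1], s=5)
plt.title('t-SNE of PCs')
plt.show()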
Example no. 30
def test_distance_not_available():
    # 'metric' must be valid.
    tsne = TSNE(metric="not available")
    assert_raises_regexp(ValueError, "Unknown metric not available.*",
                         tsne.fit_transform, np.array([[0.0], [1.0]]))
Example no. 31
def test_bad_precomputed_distances(method, D, retype, message_regex):
    tsne = TSNE(metric="precomputed", method=method)
    with pytest.raises(ValueError, match=message_regex):
        tsne.fit_transform(retype(D))
Example no. 32
def test_init_not_available():
    # 'init' must be 'pca', 'random', or numpy array.
    tsne = TSNE(init="not available")
    m = "'init' must be 'pca', 'random', or a numpy array"
    with pytest.raises(ValueError, match=m):
        tsne.fit_transform(np.array([[0.0], [1.0]]))
Example no. 33
def test_chebyshev_metric():
    # t-SNE should allow metrics that cannot be squared (issue #3526).
    random_state = check_random_state(0)
    tsne = TSNE(metric="chebyshev")
    X = random_state.randn(5, 2)
    tsne.fit_transform(X)
Example no. 34
def test_n_components_range():
    # barnes_hut method should only be used with n_components <= 3
    tsne = TSNE(n_components=4, method="barnes_hut")
    with pytest.raises(ValueError, match="'n_components' should be .*"):
        tsne.fit_transform(np.array([[0.0], [1.0]]))
Example no. 35
def test_n_components_range():
    # barnes_hut method should only be used with n_components <= 3
    tsne = TSNE(n_components=4, method="barnes_hut")
    assert_raises_regexp(ValueError, "'n_components' should be .*",
                         tsne.fit_transform, np.array([[0.0], [1.0]]))
Example no. 36
def test_angle_out_of_range_checks():
    # check the angle parameter range
    for angle in [-1, -1e-6, 1 + 1e-6, 2]:
        tsne = TSNE(angle=angle)
        assert_raises_regexp(ValueError, "'angle' must be between 0.0 - 1.0",
                             tsne.fit_transform, np.array([[0.0], [1.0]]))
Example no. 37
def test_init_ndarray():
    # Initialize TSNE with ndarray and test fit
    tsne = TSNE(init=np.zeros((100, 2)))
    X_embedded = tsne.fit_transform(np.ones((100, 5)))
    assert_array_equal(np.zeros((100, 2)), X_embedded)
Example no. 38
def test_init_ndarray_precomputed():
    # Initialize TSNE with ndarray and metric 'precomputed'
    # Make sure no FutureWarning is thrown from _fit
    tsne = TSNE(init=np.zeros((100, 2)), metric="precomputed")
    tsne.fit(np.zeros((100, 100)))
Example no. 39
def test_method_not_available():
    # 'method' must be 'barnes_hut' or 'exact'
    tsne = TSNE(method='not available')
    assert_raises_regexp(ValueError, "'method' must be 'barnes_hut' or ",
                         tsne.fit_transform, np.array([[0.0], [1.0]]))
Example no. 40
def test_too_few_iterations():
    # Number of gradient descent iterations must be at least 200.
    tsne = TSNE(n_iter=199)
    with pytest.raises(ValueError, match="n_iter .*"):
        tsne.fit_transform(np.array([[0.0], [0.0]]))
Example no. 41
def test_init_ndarray_precomputed():
    # Initialize TSNE with ndarray and metric 'precomputed'
    # Make sure no FutureWarning is thrown from _fit
    tsne = TSNE(init=np.zeros((100, 2)), metric="precomputed")
    tsne.fit(np.zeros((100, 100)))
Example no. 42
def test_init_not_available():
    # 'init' must be 'pca', 'random', or numpy array.
    tsne = TSNE(init="not available")
    m = "'init' must be 'pca', 'random', or a numpy array"
    assert_raises_regexp(ValueError, m, tsne.fit_transform,
                         np.array([[0.0], [1.0]]))
Example no. 43
def test_method_not_available():
    # 'method' must be 'barnes_hut' or 'exact'
    tsne = TSNE(method='not available')
    with pytest.raises(ValueError, match="'method' must be 'barnes_hut' or "):
        tsne.fit_transform(np.array([[0.0], [1.0]]))
Example no. 44
def test_non_square_precomputed_distances():
    # Precomputed distance matrices must be square matrices.
    tsne = TSNE(metric="precomputed")
    assert_raises_regexp(ValueError, ".* square distance matrix",
                         tsne.fit_transform, np.array([[0.0], [1.0]]))
Example no. 45
def test_chebyshev_metric():
    # t-SNE should allow metrics that cannot be squared (issue #3526).
    random_state = check_random_state(0)
    tsne = TSNE(metric="chebyshev")
    X = random_state.randn(5, 2)
    tsne.fit_transform(X)
Example no. 46
def test_too_few_iterations():
    # Number of gradient descent iterations must be at least 200.
    tsne = TSNE(n_iter=199)
    assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
                         np.array([[0.0]]))
Example no. 47
def test_early_exaggeration_too_small():
    # Early exaggeration factor must be >= 1.
    tsne = TSNE(early_exaggeration=0.99)
    assert_raises_regexp(ValueError, "early_exaggeration .*",
                         tsne.fit_transform, np.array([[0.0]]))
Example no. 48
from sklearn.linear_model import LogisticRegression

# create reduced feature matrix
reader = csv.reader(open("reduced_features.csv", "r"), delimiter=",")
X = list(reader)
X = np.array(X)
X = X.astype(float)

# create result vector
reader = csv.reader(open("target_output.csv", "r"), delimiter=",")
y = list(reader)
y = np.array(y)
y = y.astype(int)
y = y.ravel()

X_Train_embedded = TSNE(n_components=2).fit_transform(X)
print(X_Train_embedded.shape)
model = LogisticRegression().fit(X, y)
y_predicted = model.predict(X)
# replace the above by your data and model

# create meshgrid
resolution = 1024  # 1024x1024 background pixels
X2d_xmin, X2d_xmax = np.min(X_Train_embedded[:, 0]), np.max(X_Train_embedded[:, 0])
X2d_ymin, X2d_ymax = np.min(X_Train_embedded[:, 1]), np.max(X_Train_embedded[:, 1])
xx, yy = np.meshgrid(np.linspace(X2d_xmin, X2d_xmax, resolution),
                     np.linspace(X2d_ymin, X2d_ymax, resolution))
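The snippet ends after building the meshgrid. A common continuation, sketched here under the assumption that the goal is to shade classifier decision regions in the embedded space, labels each background pixel by a 1-nearest-neighbor vote among the embedded training points:

from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt

# Fit a 1-NN "background model" on the 2D embedding, labelled with the
# logistic regression's predictions, then classify every meshgrid pixel.
background_model = KNeighborsClassifier(n_neighbors=1).fit(
    X_Train_embedded, y_predicted)
voronoi = background_model.predict(
    np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)

plt.contourf(xx, yy, voronoi, alpha=0.3)
plt.scatter(X_Train_embedded[:, 0], X_Train_embedded[:, 1], c=y, s=10)
plt.show()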
Example no. 49
def test_exact_no_precomputed_sparse():
    tsne = TSNE(metric='precomputed', method='exact')
    with pytest.raises(TypeError, match='sparse'):
        tsne.fit_transform(sp.csr_matrix([[0, 5], [5, 0]]))
Example no. 50
def test_pca_initialization_not_compatible_with_precomputed_kernel():
    # init="pca" cannot be used with metric="precomputed".
    tsne = TSNE(metric="precomputed", init="pca")
    assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
                         "used with metric=\"precomputed\".",
                         tsne.fit_transform, np.array([[0.0], [1.0]]))
Example no. 51
            name = image.split('/')[-1]
            if name in valid_names:
                img = open_image(image)
                pred = learn.predict(img)
                embedding = pred[-1].detach().numpy()

                x.append(embedding)
                y.append(n_class)

        n_class += 1

    x, y = np.asarray(x), np.asarray(y)
    print(x.shape, y.shape)
    print(np.unique(y))

    x_tsne = TSNE(n_components=2).fit_transform(x)
    print(x_tsne.shape)

    color = ['darkcyan', 'g', 'y', 'magenta', 'lightgreen', 'mediumslateblue', 'yellow', 'brown', 'k', 'b']
    labels = [str(i) for i in range(10)]
    times = [0 for i in range(10)]
    for i in range(len(x_tsne)):
        if times[y[i]] == 0:
            plt.scatter(x_tsne[i, 0], x_tsne[i, 1], color=color[y[i]], label=labels[y[i]])
            times[y[i]] += 1
        else:
            plt.scatter(x_tsne[i, 0], x_tsne[i, 1], color=color[y[i]])
    plt.legend()
    plt.title('TSNE validation')
    plt.savefig('tsne_val.png')
Example no. 52
def test_early_exaggeration_too_small():
    # Early exaggeration factor must be >= 1.
    tsne = TSNE(early_exaggeration=0.99)
    with pytest.raises(ValueError, match="early_exaggeration .*"):
        tsne.fit_transform(np.array([[0.0], [0.0]]))