Example #1
def test_f1_fitness():
    # Generate a small labelled random-walk dataset and flatten the
    # (n_ts, sz, 1) output of random_walk_blobs into a 2-D (n_ts, sz) array.
    X, y = random_walk_blobs(n_ts_per_blob=20, sz=64, noise_level=0.1)
    X = np.reshape(X, (X.shape[0], X.shape[1]))
    # Fit the genetic shapelet extractor with the F1-score based fitness.
    extractor = GeneticExtractor(iterations=5,
                                 n_jobs=1,
                                 population_size=10,
                                 fitness=f1_fitness)
    extractor.fit(X, y)
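
The gendis test snippets on this page (Examples #1 to #3) are shown without their import header. A minimal sketch of the imports they assume is given below; the gendis module paths, in particular the location of f1_fitness, are assumptions based on the project's usual layout.

import os
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from tslearn.generators import random_walk_blobs
from gendis.genetic import GeneticExtractor   # assumed module path
from gendis.fitness import f1_fitness         # assumed module path
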
Example #2
def test_serialization():
    X, y = random_walk_blobs(n_ts_per_blob=20, sz=64, noise_level=0.1)
    X = np.reshape(X, (X.shape[0], X.shape[1]))
    extractor = GeneticExtractor(iterations=5, n_jobs=1, population_size=10)
    distances = extractor.fit_transform(X, y)
    # Round-trip the fitted extractor through disk and check that the
    # reloaded model yields identical shapelet distances.
    extractor.save('temp.p')
    new_extractor = GeneticExtractor.load('temp.p')
    new_distances = new_extractor.transform(X)
    np.testing.assert_array_equal(distances, new_distances)
    os.remove('temp.p')
Example #3
def test_pipeline():
    X, y = random_walk_blobs(n_ts_per_blob=20, sz=64, noise_level=0.1)
    X = np.reshape(X, (X.shape[0], X.shape[1]))
    extractor = GeneticExtractor(iterations=5, n_jobs=1, population_size=10)
    lr = LogisticRegression()
    # Chain the shapelet extractor (feature construction) with a
    # logistic-regression classifier in a scikit-learn Pipeline.
    pipeline = Pipeline([
        ('shapelets', extractor),
        ('log_reg', lr)
    ])
    pipeline.fit(X, y)
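
Once fitted, the pipeline behaves like any scikit-learn estimator. A minimal sketch of using it for prediction follows; scoring on the training data is only for illustration, a real test would use a held-out split.

from sklearn.metrics import accuracy_score

# Distances to the discovered shapelets are computed by the extractor step
# and fed to the logistic-regression step inside pipeline.predict.
preds = pipeline.predict(X)
print('Accuracy on the training blobs:', accuracy_score(y, preds))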
	
Example #4
from __future__ import print_function
import numpy
from sklearn.metrics import accuracy_score
from sklearn.pipeline import Pipeline

from tslearn.generators import random_walk_blobs
from tslearn.preprocessing import TimeSeriesScalerMinMax
from tslearn.neighbors import KNeighborsTimeSeriesClassifier, KNeighborsTimeSeries
from tslearn.piecewise import SymbolicAggregateApproximation

numpy.random.seed(0)
n_ts_per_blob, sz, d, n_blobs = 20, 100, 1, 2

# Prepare data
X, y = random_walk_blobs(n_ts_per_blob=n_ts_per_blob,
                         sz=sz,
                         d=d,
                         n_blobs=n_blobs)
scaler = TimeSeriesScalerMinMax(min=0., max=1.)  # Rescale time series to [0, 1] (recent tslearn versions take value_range=(0., 1.) instead of min/max)
X_scaled = scaler.fit_transform(X)

indices_shuffle = numpy.random.permutation(n_ts_per_blob * n_blobs)
X_shuffle = X_scaled[indices_shuffle]
y_shuffle = y[indices_shuffle]

X_train = X_shuffle[:n_ts_per_blob * n_blobs // 2]
X_test = X_shuffle[n_ts_per_blob * n_blobs // 2:]
y_train = y_shuffle[:n_ts_per_blob * n_blobs // 2]
y_test = y_shuffle[n_ts_per_blob * n_blobs // 2:]

# Nearest neighbor search
knn = KNeighborsTimeSeries(n_neighbors=3, metric="dtw")
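
The snippet is truncated at this point. As a rough sketch of how such a nearest-neighbour search is typically completed with tslearn's KNeighborsTimeSeries (fit on the training series, then query the neighbours of the test series):

knn.fit(X_train)
# For every test series, retrieve the DTW distances to, and the indices of,
# its 3 nearest neighbours among the training series.
distances, indices = knn.kneighbors(X_test, return_distance=True)
print(indices[:5])
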
Example #5
def _create_large_ts_dataset():
    return random_walk_blobs(n_ts_per_blob=50, n_blobs=3, random_state=1,
                             sz=20, noise_level=0.025)
Example #6
def _create_small_ts_dataset():
    return random_walk_blobs(n_ts_per_blob=5, n_blobs=3, random_state=1,
                             sz=10, noise_level=0.025)
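
Both helpers simply wrap random_walk_blobs. For reference, a minimal sketch of the shapes the generator returns, as documented by tslearn:

from tslearn.generators import random_walk_blobs

X, y = random_walk_blobs(n_ts_per_blob=5, n_blobs=3, sz=10,
                         random_state=1, noise_level=0.025)
print(X.shape)  # (15, 10, 1): n_ts_per_blob * n_blobs series of length sz, univariate
print(y.shape)  # (15,): one blob label per series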