Example #1
import numpy as np
from tslearn.datasets import CachedDatasets
from tslearn.preprocessing import TimeSeriesScalerMeanVariance, TimeSeriesResampler


def load_tslearn_data():
    """Load the "Trace" time series dataset and prepare it for clustering."""
    X_train, y_train, X_test, y_test = CachedDatasets().load_dataset("Trace")
    X_train = X_train[y_train < 4]  # Keep first 3 classes
    np.random.shuffle(X_train)
    X_train = TimeSeriesScalerMeanVariance().fit_transform(X_train[:50])  # Keep only 50 time series
    X_train = TimeSeriesResampler(sz=40).fit_transform(X_train)  # Make time series shorter
    X_train = X_train.reshape(50, -1)  # Flatten to a 2-D array of shape (50, 40)
    return X_train
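
A minimal usage sketch for the function above (the seed value and the shape check are illustrative additions, not part of the original example):

np.random.seed(0)  # illustrative seed so the shuffle inside load_tslearn_data is reproducible
X = load_tslearn_data()
print(X.shape)  # expected (50, 40): 50 scaled, resampled series of length 40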
Example #2
import numpy as np
from tslearn.datasets import CachedDatasets
from Sloth import cluster

seed = 0
np.random.seed(seed)
X_train, y_train, X_test, y_test = CachedDatasets().load_dataset("Trace")
X_train = X_train[y_train < 4]  # Keep first 3 classes
np.random.shuffle(X_train)

#X_train = TimeSeriesScalerMeanVariance().fit_transform(X_train[:50])  # Keep only 50 time series

X_train = X_train[:50]

sz = X_train.shape[1]

X_train = X_train.reshape((X_train.shape[0], X_train.shape[1]))  # Drop trailing singleton dimension: (50, sz, 1) -> (50, sz)

#Sloth = Sloth()
eps = 20
min_samples = 2
LOAD = False  # Flag for loading similarity matrix from file if it has been computed before
if LOAD:
    SimilarityMatrix = cluster.LoadSimilarityMatrix()
else:
    SimilarityMatrix = cluster.GenerateSimilarityMatrix(X_train)
    cluster.SaveSimilarityMatrix(SimilarityMatrix)
nclusters, labels, cnt = cluster.ClusterSimilarityMatrix(
    SimilarityMatrix, eps, min_samples)

print("DEBUG::number of clusters found =")
print(nclusters)
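
A short follow-up sketch of one way the result could be summarized; it assumes `labels` holds one cluster id per time series, with -1 marking noise as in DBSCAN-style clustering (an assumption about Sloth's output, not confirmed above):

# Hypothetical follow-up: summarize cluster sizes from the returned labels.
# Assumes `labels` is an array-like of per-series cluster ids (-1 = noise, by assumption).
unique_labels, sizes = np.unique(np.asarray(labels), return_counts=True)
for lab, size in zip(unique_labels, sizes):
    tag = "noise" if lab == -1 else "cluster %d" % lab
    print(tag, "contains", size, "time series")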