Example #1
def ComputeSax(sample_data, sample_data2):

    # Work on the underlying NumPy arrays rather than the DataFrames
    sample_data = sample_data.values
    sample_data2 = sample_data2.values

    #########################################
    # SAX - Symbolic Aggregate approXimation
    # http://www.cs.ucr.edu/~eamonn/SAX.pdf
    #########################################
    # PARAMETERS:
    # W: the number of PAA segments representing the time series, i.e. the length
    #    of the string representing the time series - useful for dimensionality reduction
    # Alphabet size: the number of symbols in the alphabet (e.g. for {a, b, c} it is 3)

    downsample_ratio = 200
    word_length = len(sample_data[:, 1]) // downsample_ratio
    alphabet_size = 7

    s = SAX(word_length, alphabet_size)

    mic_distances = []
    for mic in range(1, 5):
        (x1String, x1Indices) = s.to_letter_rep(sample_data[:, mic])
        (x2String, x2Indices) = s.to_letter_rep(sample_data2[:, mic])

        # print(x1String)

        x1x2ComparisonScore = s.compare_strings(x1String, x2String)

        mic_distances.append(x1x2ComparisonScore)
        #print "Mic: " + str(mic) + ", distance= " + str(x1x2ComparisonScore)
    return mic_distances
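The comment block above describes the two SAX parameters: the word length W (the number of PAA segments, here derived from a fixed downsampling ratio) and the alphabet size. A minimal self-contained sketch of the same pattern, using synthetic data and assuming the SAX class from https://github.com/nphoff/saxpy is importable as shown:

import numpy as np
from saxpy import SAX  # assumed import path

series_a = np.sin(np.linspace(0, 6 * np.pi, 2000))        # 2000 raw samples
series_b = np.sin(np.linspace(0, 6 * np.pi, 2000) + 0.5)  # phase-shifted copy

downsample_ratio = 200
word_length = len(series_a) // downsample_ratio  # 10 PAA segments -> 10-letter words
alphabet_size = 7

s = SAX(word_length, alphabet_size)
word_a, _ = s.to_letter_rep(series_a)
word_b, _ = s.to_letter_rep(series_b)

# Lower scores mean the two symbolic representations are more similar.
print(word_a, word_b, s.compare_strings(word_a, word_b))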
Example #2
    def _get_SAX_spikes(cls, timeseries, timestamps, threshold):
        """
        Returns spikes counting how many times a timestamp is a maximum
        in a SAX conversion
        """

        # Seconds between measurements
        retention = (timestamps[-1] - timestamps[0]) / len(timestamps)

        # Number of entries per window
        entries_per_word = cls.WINDOW_SECONDS_COUNT / retention

        num_windows = len(timeseries) / entries_per_word
        window_size = len(timeseries) / num_windows

        num_symbols = window_size * retention / cls.SECONDS_PER_SYMBOL

        sax_generator = SAX(wordSize=num_symbols,
                            alphabetSize=cls.ALPHABET_SIZE)

        symbols_per_datapoint = int(
            round(cls.SECONDS_PER_SYMBOL / float(retention)))

        # Convert timeseries into SAX notation
        words, intervals = sax_generator.sliding_window(
            timeseries, num_windows, .8)

        # Times index i is a maximal value
        maximum_count = {i: 0 for i in xrange(len(timeseries))}
        # Times index i is passed by a window
        window_count = {i: 0 for i in xrange(len(timeseries))}

        # Count in how many windows a timestamp is a local maximum
        for i in xrange(len(words)):
            word = words[i]
            interval = intervals[i]

            for j in xrange(len(word)):
                index = j * symbols_per_datapoint + interval[0]
                if word[j] == string.ascii_lowercase[cls.ALPHABET_SIZE - 1]:
                    maximum_count[index] += 1
                window_count[index] += 1

        spikes = {}
        for key, value in maximum_count.iteritems():
            if value == window_count[key] and value and \
               timeseries[key] > threshold:
                val = timeseries[key]
                spikes[timestamps[key]] = cls._get_basic_spike_prio(
                    val, threshold)

        return spikes
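The spike detector above relies on sliding_window returning one SAX word per overlapping window together with the index interval each window covers; a timestamp is then reported as a spike only if it maps to the highest letter of the alphabet in every window where it appears as a symbol position and its value exceeds the threshold. A small sketch of inspecting that output, under the same saxpy assumption as above:

import numpy as np
from saxpy import SAX  # assumed import path

ts = np.abs(np.random.randn(600)).tolist()
sax_generator = SAX(wordSize=10, alphabetSize=5)

# One SAX word per overlapping window, plus the interval of indices it covers.
words, intervals = sax_generator.sliding_window(ts, 6, .8)
for word, interval in zip(words, intervals):
    print(word, interval[0])  # the word and the start index of its window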
Example #3
def saxify_and_export(df, csvf, alphabet=5):
    nrows, ncols = df.shape
    sample_size = ncols - 1
    sax = SAX(sample_size, alphabet, 1e-6)
    cols = ['label', 'sax']
    nv = []
    for i in range(nrows):
        values = df.iloc[i, 1:].values.tolist()
        v = {}
        v['label'] = int(df.iloc[i, 0])

        letters, _ = sax.to_letter_rep(values)
        v['sax'] = letters
        nv.append(v)
    return pd.DataFrame(nv, columns=cols).to_csv(csvf, index=False)
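saxify_and_export treats column 0 of the DataFrame as the class label and the remaining columns as the series values, emitting one SAX word per row. A hypothetical usage sketch with a small synthetic dataset (the file name and data are invented for illustration):

import numpy as np
import pandas as pd

labels = np.array([[0], [1], [0], [1]])
series = np.random.randn(4, 60)                 # four 60-point series
df = pd.DataFrame(np.hstack([labels, series]))

# Writes a CSV with columns label,sax - one SAX word per input row.
saxify_and_export(df, 'sax_words.csv', alphabet=5)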
Example #4
 def __init__(self,
              segmentLength=20,
              paaSize=5,
              alphabetSize=3,
              upperBound=100,
              lowerBound=-100):
     self.segmentLength = segmentLength
     self.paaSize = paaSize
     self.alphabetSize = alphabetSize
     self.upperBound = upperBound
     self.lowerBound = lowerBound
     self.sax = SAX(wordSize=paaSize,
                    alphabetSize=alphabetSize,
                    lowerBound=lowerBound,
                    upperBound=upperBound,
                    epsilon=1e-6)
     self.grammar = Grammar()
     self.segmentIndexes = []
     self.rule_set = []
     self.tsCount = 0
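The constructor above (its enclosing class is not shown in this example) pairs a SAX encoder with a Grammar object, which suggests grammar induction over the SAX words of fixed-length segments. A hypothetical instantiation sketch; the class name SAXGrammarModel is invented purely for illustration:

# Hypothetical class name - the real one is not shown in this example.
model = SAXGrammarModel(segmentLength=30,   # points per segment
                        paaSize=6,          # letters per SAX word
                        alphabetSize=4,
                        upperBound=50,      # value bounds forwarded to SAX
                        lowerBound=-50)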
Example #5
def sax_kmeans(X, K, wordSize, alphabetSize):
    '''Cluster by SAX k-means
    
    Args:
        X: 2D np array of dimension (n_households, time)
        K: Number of clusters
        See https://github.com/nphoff/saxpy

    Returns:
        List of K centroids
        List of SAX k-means cluster assignments for each load in X
    '''
    
    np.random.seed(NUM)

    # Initialize to K random centers
    sax = SAX(wordSize=wordSize, alphabetSize=alphabetSize)
    idx = np.random.randint(X.shape[0], size=K)
    xmu =  list(X[idx, :])
    mu = []
    
    for i in range(len(xmu)):
        mu.append(sax.to_letter_rep(xmu[i])[0])   
    oldmu = []

    strX = []
    for i in range(X.shape[0]):
        strX.append(sax.to_letter_rep(X[i])[0])

    #i = 1
    while not has_converged(mu, oldmu):
        oldmu = mu
        # Assign all points in X to clusters
        clusters, mu_ind = cluster_points(X, strX, mu, sax)
        # Reevaluate centers
        mu = reevaluate_centers(oldmu, clusters, sax)

    return mu, mu_ind
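The k-means loop above depends on three helpers that are not shown here (has_converged, cluster_points, reevaluate_centers). A hedged sketch of what cluster_points could look like under the assumptions implied by the code: each series is assigned to the centroid whose SAX word is closest according to compare_strings, and both the cluster contents and the per-series assignments are returned:

import numpy as np

def cluster_points(X, strX, mu, sax):
    # X: raw series, strX: their SAX words, mu: centroid SAX words.
    clusters = {}      # centroid index -> list of raw series in that cluster
    assignments = []   # cluster index for each series, in input order
    for series, word in zip(X, strX):
        distances = [sax.compare_strings(word, centroid) for centroid in mu]
        best = int(np.argmin(distances))
        clusters.setdefault(best, []).append(series)
        assignments.append(best)
    return clusters, assignments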
Example #6
 def setUp(self):
     # All tests will be run with 6 letter words
     # and 5 letter alphabet
     self.sax = SAX(6, 5, 1e-6)
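With the 6-letter-word, 5-letter-alphabet fixture above, a hypothetical companion test sketch (the test name and data are invented): a strictly increasing series yields non-decreasing letters, because the PAA segment means increase monotonically.

 def test_increasing_series_gives_nondecreasing_letters(self):
     letters, _ = self.sax.to_letter_rep(list(range(60)))
     # PAA means of an increasing series increase, so the word is already sorted.
     self.assertEqual(letters, ''.join(sorted(letters)))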
Example #7
def sax_rep(word, letter, ary):
    ary = np.asarray(ary)
    sax = SAX(word, letter)
    return sax.to_letter_rep(ary)
Example #8

import sys

import numpy as np
from matplotlib.lines import Line2D
from matplotlib.pyplot import figure, gca, show
from sklearn.preprocessing import normalize
from saxpy import SAX  # assumed import path for the SAX implementation used below

# Project-local helpers (Time_series, WindowSliding, Fitting, read_para, DrawPlot)
# are defined elsewhere in this project.

def DrawLines(lines):
    ax = gca()
    for line in lines:
        tline = Line2D((line[0], line[2]), (line[1], line[3]))
        ax.add_line(tline)


n, w, a = read_para(sys.argv[1:])

# 1. Represent the series with SAX and calculate the symbol frequency

x = Time_series.Time_series_CAR(n)
data = x.tolist()
sax = SAX(w, a, 1e-6)
(letters, indices) = sax.to_letter_rep(data)
frq = sax.symbol_frequency(data)

# 2. Dimensionality reduction with linear interpolation

a = np.asarray(data, dtype=np.float64)
newdata = (a + np.random.normal(0, 3, n)).tolist()
nordata = normalize(a[:, np.newaxis], axis=0).ravel()

figure()
lines = WindowSliding.WindowSliding(nordata, Fitting.Fitting,
                                    Fitting.SumofSquaredError)
DrawPlot(nordata, 'Piecewise linear approximation with Sliding Window')
DrawLines(lines)
show()
Example #9
def convert_sax(ts, word, alpha, eps=0.000001):
    s = SAX(word, alpha, eps)
    (t1String, t1Indices) = s.to_letter_rep(ts)
    return t1String
Example #10
def min_dist_sax(t1String, t2String, word, alpha, eps=0.000001):
    s = SAX(word, alpha, eps)
    return s.compare_strings(t1String, t2String)
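Examples #9 and #10 are two halves of the same workflow: convert each raw series into a SAX word, then measure the distance between the words. A usage sketch tying them together with synthetic data; both series must be converted with the same word length and alphabet for the comparison to be meaningful:

import numpy as np

ts1 = np.sin(np.linspace(0, 4 * np.pi, 500))
ts2 = np.cos(np.linspace(0, 4 * np.pi, 500))

word, alpha = 8, 5
s1 = convert_sax(ts1, word, alpha)
s2 = convert_sax(ts2, word, alpha)

# Lower values indicate more similar symbolic representations.
print(s1, s2, min_dist_sax(s1, s2, word, alpha))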