def dominanceRankingTotal(self, currentEvent, nApp, domIndex, j, minPred, diffPred):
    """Rank the candidate appliances in domIndex[j:nApp] against one event
    and return the index of the best (lowest-distance) candidate.

    For each candidate, the event is located inside the appliance signature
    with UCR-DTW, the aligned signature chunk is extracted, and a scaled
    95th-percentile absolute difference is used as the distance.

    Args:
        currentEvent: 1-D sequence of samples for the detected event.
        nApp: number of candidate appliances (upper bound of the scan).
        domIndex: current dominance-ordered appliance indices.
        j: first position in domIndex to consider.
        minPred: per-appliance offset subtracted from the signature chunk.
        diffPred: per-appliance difference score; values < 2 short-circuit
            the distance to 0.

    Returns:
        The domIndex entry of the best-matching candidate.

    NOTE(review): `self.signatures.keys()[currentApp]` / `.items()[...]`
    index a dict view — this only works on Python 2, where keys()/items()
    return lists. TODO confirm the target interpreter before porting.
    """
    dom = []
    for i in range(j, nApp):
        currentApp = domIndex[i]
        # Py2-only: keys() is a list, indexable by position.
        signature = self.signatures.keys()[currentApp]
        # Best-match start offset of the event within the full signature
        # (warp window = 10% of the query length).
        initPath, dist = _ucrdtw.ucrdtw(self.signatures[signature], currentEvent, 0.1)
        path = range(initPath, initPath + len(currentEvent))
        # Signature samples aligned with the event.
        signatureChunk = self.signatures.items()[currentApp][1][path]
        # Boolean mask: keep only non-negative event samples.
        eventTemp = np.array(currentEvent) >= 0
        signatureChunkTemp = signatureChunk[eventTemp]
        eventTemp = np.array(currentEvent)[eventTemp]
        if len(eventTemp) == 0:
            # All samples negative: fall back to the raw event sum.
            dist = np.sum(currentEvent)
        else:
            if diffPred[currentApp] < 2:
                # Low predicted difference: treat as perfect match.
                dist = 0
            else:
                signatureChunkTemp = np.subtract(signatureChunkTemp, minPred[currentApp])
                dist = np.divide(
                    np.percentile(
                        np.abs(np.subtract(eventTemp, signatureChunkTemp)), 95),
                    np.max(self.signatures.items()[currentApp][1])
                )  # Scaled 95-th percentile distance on signature max value
        dom.append(dist)
    # Candidates sorted by ascending distance; pick the best one.
    domIndexNew = np.argsort(dom)
    del dom, nApp
    toReturn = domIndex[domIndexNew[0] + j]
    return toReturn
def calc_distances(self, ex_key):
    """Return the weighted UCR-DTW distance of one example against every
    test series.

    For each column of the selected example, the (reversed) column is used
    as a DTW query against the same (reversed) column of every test frame;
    the per-column distance vectors are combined with the weights self.w.

    Args:
        ex_key: key into self.example_data selecting the example frame.

    Returns:
        ndarray of length self.n with one accumulated distance per test
        series (iteration order of self.test_data).
    """
    example = self.example_data[ex_key]
    total = np.zeros(self.n)
    for idx, column in enumerate(example):
        query = example[column].values[::-1]
        # UCR-DTW distance (20% warp window) of the query against the
        # matching column of each test frame.  Py2: dict.iteritems().
        per_series = [ucrdtw(frame[column].values[::-1], query, 0.20, False)[1]
                      for key, frame in self.test_data.iteritems()]
        total = total + self.w[idx] * np.array(per_series)
    return total
def dtw_totalVsSig_singleEvent(self, event, signature):
    """Match a single event against one stored signature with UCR-DTW and
    cache the result on the instance.

    Args:
        event: 1-D sequence of samples for the detected event.
        signature: key into self.signatures selecting the template.

    Side effects:
        self.totDistances[signature] is set to the distance divided by the
        number of stored paths (1 here, kept for symmetry with the
        multi-event variants); self.totPaths[signature] is set to the list
        of alignment paths.

    Fixes vs. original: removed the never-used `totCosts` list and the
    no-op `del` of local names at the end of the function.
    """
    # Best-match start index and DTW distance within the full signature
    # (warp window = 10% of the query length).
    initPath, dist = _ucrdtw.ucrdtw(self.signatures[signature], event, 0.1)
    # Indices of the signature chunk the event aligned to.
    path = range(initPath, initPath + len(event))
    totDistances = [dist]
    totPaths = [path]
    self.totDistances[signature] = np.divide(totDistances, len(totPaths))
    self.totPaths[signature] = totPaths
def featurize(ts, dat, templates):
    """
    Get the features from the raw tennis data
    :param ts: numpy array of size (samples,sensor readings) which contains the raw data
    :param dat: tuple with start and stop indices to slice data
    :param templates: iterable of reference swings; one DTW distance per
        template is appended to the feature vector
    :return: 1-D feature vector (extrema, acceleration stats, OLS slopes,
        correlations, DTW distances, zero-crossing rates)
    :rtype : numpy.ndarray
    """
    # Start/stop indices of the swing window.
    s = dat[0]
    t = dat[1]
    data = ts[s:t, :]
    length = 1.0 * len(data[:, 0])
    # Total acceleration magnitude per sample.
    a = np.sqrt(np.sum(np.array(data**2, dtype=float), axis=1))
    max_index = np.argmax(a)
    # NOTE(review): the argmax above is immediately overwritten with the
    # constant 8 — looks like leftover code; confirm which is intended.
    max_index = 8
    if s - max_index < 0:
        s = 16
    # OLS slope of each axis over the 8 samples after the (fixed) peak.
    model1x = sm.OLS(range(1, 9), ts[s + max_index:s + max_index + 8, 0])
    model1y = sm.OLS(range(1, 9), ts[s + max_index:s + max_index + 8, 1])
    model1z = sm.OLS(range(1, 9), ts[s + max_index:s + max_index + 8, 2])
    reg1 = np.array([
        model1x.fit().params[0], model1y.fit().params[0], model1z.fit().params[0]
    ])
    # OLS slope of each axis over the 8 samples before the peak.
    model2x = sm.OLS(range(1, 9), ts[s + max_index - 8:s + max_index, 0])
    model2y = sm.OLS(range(1, 9), ts[s + max_index - 8:s + max_index, 1])
    model2z = sm.OLS(range(1, 9), ts[s + max_index - 8:s + max_index, 2])
    reg2 = np.array([
        model2x.fit().params[0], model2y.fit().params[0], model2z.fit().params[0]
    ])
    # UCR-DTW distance of the swing against every template (50% warp).
    dist = np.zeros((len(templates)))
    for j, template in enumerate(templates):
        dist[j] = _ucrdtw.ucrdtw(data, template, 0.5, False)[1]
    # Cross-correlation between sensor axes and total acceleration.
    correlation = np.corrcoef(np.c_[data, a], rowvar=0)
    # NOTE(review): np.argmax(dat, ...)/np.argmin(dat, ...) below operate on
    # the (s, t) index tuple, not on `data` — presumably a typo; confirm.
    return np.hstack([
        np.max(data, axis=0),
        np.min(data, axis=0), np.mean(a),
        np.std(a),
        np.min(a), np.max(a),
        reg1, reg2,
        np.sign(ts[8, 0:2]),
        (np.argmax(dat, axis=0) - np.argmin(dat, axis=0)) / length,
        np.array([correlation[0, 1], correlation[0, 2], correlation[1, 2], correlation[0, 3], correlation[1, 3], correlation[2, 3]]),
        dist,
        np.sum(np.diff(np.sign(data), axis=0) > 0, axis=0) / length,
        np.sum(np.diff(np.sign(data), axis=0) < 0, axis=0) / length])
def predict(self, X):
    """
    The method predicts over the input data

    Args:
        X (ndarray<float>): The data to predict over, one query per row

    Returns:
        (list<int>) The predictions over the data, one label per row
    """
    X = np.array(X)
    out = []
    for i in range(X.shape[0]):
        # Best-match location of the i-th query within the concatenated
        # training sequences (5% warp window, verbose output enabled).
        loc, dist = _ucrdtw.ucrdtw(self.sequences, X[i, :], 0.05, True)
        # Map the match location back to a label index — assumes each
        # stored sequence spans 2 * X.shape[1] samples; TODO confirm
        # against the fitting code.
        # Fix: np.floor returns a float, which is not a valid list index
        # (TypeError on lists, error in modern NumPy) — cast to int.
        out.append(self.labels[int(np.floor(loc / (2.0 * X.shape[1])))])
    return out
def predict(self, X):
    """
    The method predicts over the input data

    Args:
        X (ndarray<float>): The data to predict over, one query per row

    Returns:
        (list<int>) The predictions over the data, one label per row
    """
    X = np.array(X)
    out = []
    for i in range(X.shape[0]):
        # Locate the i-th query within the concatenated training
        # sequences via UCR-DTW (5% warp window, verbose).
        loc, dist = _ucrdtw.ucrdtw(self.sequences, X[i, :], 0.05, True)
        # Convert match location to a label index — presumably each
        # stored sequence spans 2 * X.shape[1] samples (verify in fit()).
        # Fix: cast the float from np.floor to int, since float indices
        # raise TypeError on lists and are rejected by modern NumPy.
        out.append(self.labels[int(np.floor(loc / (2.0 * X.shape[1])))])
    return out
def ComputeDTW(sample_data, sample_data2):
    """Compute per-microphone UCR-DTW distances between two gesture tables.

    Each of microphones 1..4 (columns 1-4) is z-score normalized and the
    DTW distance between the two recordings is computed column by column.

    :param sample_data: first gesture table (pandas DataFrame).
    :param sample_data2: second gesture table (pandas DataFrame).
    :return: list of four DTW distances, one per microphone column.

    NOTE(review): uses Python 2 `print` statements and the deprecated
    DataFrame.as_matrix(); both must change for a Python 3 / modern
    pandas port.
    """
    #Read tsFresh tables for all gestures
    sample_data = sample_data.as_matrix()
    sample_data2 = sample_data2.as_matrix()
    mic_distances = []
    for mic in range(1, 5):
        #distance, path = dtw(sample_data[:,mic], sample_data2[:,mic], dist=euclidean)
        # Z-score each column so amplitude differences do not dominate
        # the DTW distance (5% warp window).
        loc, distance = _ucrdtw.ucrdtw(stats.zscore(sample_data[:, mic]), stats.zscore(sample_data2[:, mic]), 0.05)
        #Print results
        mic_distances.append(distance)
        print "Mic: " + str(mic) + ", distance= " + str(distance)
    return mic_distances
def featurize(ts,dat,templates): """ Get the features from the raw tennis data :param ts: numpy array of size (samples,sensor readings) which contains the raw data :param dat: tuple with start and stop indices to slice data :return: :rtype : """ #start and stop indices s=dat[0] t=dat[1] #slices the time series data=ts[s:t,:] length=1.0*len(data[:,0]) #this handles the edge case in which the swings is at the very end of the collection #computes total acceleration a=np.sqrt(np.sum(np.array(data**2,dtype=float),axis=1)) #finds the index which corresponds to the maximum acceleration max_index=8 model1x=sm.OLS(range(1,9),ts[s:s+max_index,0]) model1y=sm.OLS(range(1,9),ts[s:s+max_index,1]) model1z=sm.OLS(range(1,9),ts[s:s+max_index,2]) reg1=np.array([model1x.fit().params[0],model1y.fit().params[0],model1z.fit().params[0]]) model2x=sm.OLS(range(1,9),ts[s+max_index:t,0]) model2y=sm.OLS(range(1,9),ts[s+max_index:t,1]) model2z=sm.OLS(range(1,9),ts[s+max_index:t,2]) reg2=np.array([model2x.fit().params[0],model2y.fit().params[0],model2z.fit().params[0]]) dist=np.zeros((len(templates))) for j,template in enumerate(templates): dist[j]=_ucrdtw.ucrdtw(data, template, 0.5, False)[1] #cross correlation between each sensor reading correlation=np.corrcoef(np.c_[data,a],rowvar=0) return np.hstack([ np.max(data,axis=0),\ np.min(data,axis=0),np.mean(a),\ np.std(a),\ np.min(a),np.max(a), reg1,reg2,#reg3,reg4, np.sign(ts[8,0:2]), (np.argmax(dat,axis=0)-np.argmin(dat,axis=0))/length, np.array([correlation[0,1],correlation[0,2],correlation[1,2],correlation[0,3],correlation[1,3],correlation[2,3]]), dist, np.sum(np.diff(np.sign(data),axis=0)>0,axis=0)/length,\ np.sum(np.diff(np.sign(data),axis=0)<0,axis=0)/length])
metavar='DATA_FILE', type=str, help='Path to data file') parser.add_argument('query', metavar='QUERY_FILE', type=str, help='Path to query file') parser.add_argument('query_size', metavar='QUERY_SIZE', type=int, default=0, help='Max size of query') parser.add_argument('warp_width', metavar='WARP_WIDTH', type=float, default=0.05, help='Width of allowed warp as fraction of query size') parser.add_argument('-v', '--verbose', action='store_true', default=False, help='Print verbose info') args = parser.parse_args() data = numpy.fromfile(args.data, sep=' ') query = numpy.fromfile( args.query, sep=' ', count=args.query_size if args.query_size > 0 else -1) print _ucrdtw.ucrdtw(data, query, args.warp_width, True)
# CLI tail of the self-similarity variant: remaining argument definitions,
# input loading, and the UCR-DTW call.  `parser`, `numpy`, and `_ucrdtw`
# are defined earlier in the script (outside this chunk).
parser.add_argument('warp_width', metavar='WARP_WIDTH', type=float, default=0.05,
                    help='Width of allowed warp as fraction of query size')
# Index of the query inside the data itself; -1 disables self-matching logic.
parser.add_argument('curr_ind', metavar='CURR_IND', type=int, default=-1,
                    help='Current index for self similarity')
# Half-width of the region around curr_ind excluded from matching.
parser.add_argument('ez', metavar='EZ', type=int, default=2,
                    help='Exclusion Zone for self similarity')
parser.add_argument('-v', '--verbose', action='store_true', default=False,
                    help='Print verbose info')
args = parser.parse_args()
# Whitespace-separated numeric series; query optionally truncated to query_size.
data = numpy.fromfile(args.data, sep=' ')
query = numpy.fromfile(
    args.query, sep=' ', count=args.query_size if args.query_size > 0 else -1)
# Prints the (location, distance) tuple of the best DTW match.
print(
    _ucrdtw.ucrdtw(data, query, args.warp_width, args.curr_ind, args.ez, True))
# Demo: locate a sine query inside a noisy cosine series with UCR-DTW
# and plot the query shifted to the matched position.
import _ucrdtw
import sys
import numpy as np
import matplotlib.pyplot as plt
import math

if __name__ == '__main__':
    # Three-period cosine with small uniform noise, and a clean
    # one-period sine used as the query.
    data = np.cos(np.linspace(0.0, math.pi * 6, 600)) + (np.random.uniform(-0.5, 0.5, 600) / 10)
    query = np.sin(np.linspace(0.0, math.pi * 2, 200))
    plt.figure()
    plt.plot(data)
    plt.plot(query)
    # Best-match location and DTW distance (5% warp window, verbose).
    loc, dist = _ucrdtw.ucrdtw(data, query, 0.05, True)
    # Prepend `loc` zeros so the query plots aligned with its match.
    query = np.concatenate((np.linspace(0.0, 0.0, loc), query))
    plt.plot(query)
    plt.show()
text_file.write("\n" + str(amp + 1) + "," + str(window + 1) + ",ftw," + str(ftwdistance) + "," + str(timet.microseconds) + ',' + str(ftwpath[1][0] + count) + ',' + str(ftwpath[1][-1] + count)) path1 = np.savetxt('paths/' + "amp_" + str(amp + 1) + "_window_" + str(window + 1) + '_query_ftw.txt', ftwpath[0], delimiter=',') path2 = np.savetxt('paths/' + "amp_" + str(amp + 1) + "_window_" + str(window + 1) + '_ref_ftw.txt', ftwpath[1], delimiter=',') #ucrdtw timeb = datetime.now() ucrloc, ucrdist = _ucrdtw.ucrdtw(y, x, 0.05) timet = datetime.now() - timeb print("ucr complete on amp " + str(amp + 1)) with open("bench_log.txt", "a") as text_file: text_file.write("\n" + str(amp + 1) + "," + str(window + 1) + ",ucr," + str(ucrdist) + "," + str(timet.microseconds) + ',' + str(ucrloc + count) + ',' + str(ucrloc + count)) with open( 'paths/' + str(amp + 1) + "," + str(window + 1) + '_loc_ucr.txt', "w") as text_file: text_file.write(str(ucrloc)) #cydtw timeb = datetime.now() cdtw_master = pydtw.dtw( x,
def use_dtw(signal, chunk):
    """Locate the chunk's first-axis series inside the signal's via UCR-DTW.

    Args:
        signal: object whose axes_data[0] holds the reference series.
        chunk: object whose axes_data[0] holds the query series.

    Returns:
        (index, dist): best-match start index and DTW distance
        (5% warp window, non-verbose).
    """
    reference = signal.axes_data[0]
    query = chunk.axes_data[0]
    return dtw.ucrdtw(reference, query, 0.05, False)
def calc_dtw(scrappie_df_1, scrappie_df_2, warp_width=0.1):
    """Return the UCR-DTW distance between two scrappie traces.

    Args:
        scrappie_df_1: reference series.
        scrappie_df_2: query series.
        warp_width: allowed warp as a fraction of the query length.

    Returns:
        np.float32 DTW distance (match location is discarded).
    """
    distance = ucrdtw(scrappie_df_1, scrappie_df_2, warp_width, False)[1]
    return np.float32(distance)
def transform_dist(x, y, row):
    """DTW distance between the `row` columns of two frames.

    Args:
        x: first frame; x[row].values is the reference series.
        y: second frame; y[row].values is the query series.
        row: column/key selecting the series to compare.

    Returns:
        DTW distance (5% warp window); match location is discarded.
    """
    location_and_dist = dtw.ucrdtw(x[row].values, y[row].values, 0.05, False)
    return location_and_dist[1]
# CLI: load a data series and a query from text files and print the best
# UCR-DTW match (location, distance).
# NOTE(review): the final `print` is a Python 2 statement — this script
# does not run under Python 3 as written.
import _ucrdtw
import numpy
import sys
import argparse
import time

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Calculate best DTW location and distance")
    parser.add_argument("data", metavar="DATA_FILE", type=str, help="Path to data file")
    parser.add_argument("query", metavar="QUERY_FILE", type=str, help="Path to query file")
    # query_size <= 0 means "read the whole query file".
    parser.add_argument("query_size", metavar="QUERY_SIZE", type=int, default=0, help="Max size of query")
    parser.add_argument(
        "warp_width",
        metavar="WARP_WIDTH",
        type=float,
        default=0.05,
        help="Width of allowed warp as fraction of query size",
    )
    parser.add_argument("-v", "--verbose", action="store_true", default=False, help="Print verbose info")
    args = parser.parse_args()
    # Whitespace-separated numeric series.
    data = numpy.fromfile(args.data, sep=" ")
    query = numpy.fromfile(args.query, sep=" ", count=args.query_size if args.query_size > 0 else -1)
    print _ucrdtw.ucrdtw(data, query, args.warp_width, True)