def feature_vector_communicate(data, iscommunicate=False, test=False):
    """Build the 'communicate' gesture feature vector.

    Concatenates per-column feature sub-vectors for both wrists in both
    axes. Every call but the last is forced into test mode (test=True)
    so that only the final sub-vector can carry the class label.

    Args:
        data: raw keypoint frame data accepted by trim_or_pad_data.
        iscommunicate: class label appended when not in test mode.
        test: when True, no label is appended to the returned vector.

    Returns:
        1-D numpy array of concatenated features (plus label unless test).
    """
    trimmed_data = trim_or_pad_data(data, TRIM_DATA_SIZE_COMMUNICATE)
    featureVector = feature_vector_communicate_ind(
        trimmed_data, 'rightWrist_x', iscommunicate, test=True)
    featureVector = np.append(
        featureVector,
        feature_vector_communicate_ind(
            trimmed_data, 'rightWrist_y', iscommunicate, test=True))
    # BUG FIX: the third call previously duplicated 'leftWrist_y', so the
    # left-wrist x-axis was never featurized; use 'leftWrist_x' here.
    featureVector = np.append(
        featureVector,
        feature_vector_communicate_ind(
            trimmed_data, 'leftWrist_x', iscommunicate, test=True))
    # Final sub-vector honors the caller's test flag so the label (if any)
    # is appended exactly once.
    featureVector = np.append(
        featureVector,
        feature_vector_communicate_ind(
            trimmed_data, 'leftWrist_y', iscommunicate, test))
    return featureVector
def feature_vector_buy(data, isBuy=False, test=False):
    """Build the 'buy' gesture feature vector.

    Concatenates right-wrist x and y feature sub-vectors. The first call
    is forced into test mode (test=True) so that only the final
    sub-vector may append the class label.

    Args:
        data: raw keypoint frame data accepted by trim_or_pad_data.
        isBuy: class label appended when not in test mode.
        test: when True, no label is appended to the returned vector.

    Returns:
        1-D numpy array of concatenated features (plus label unless test).
    """
    trimmed = trim_or_pad_data(data, TRIM_DATA_SIZE_BUY)
    x_features = feature_vector_buy_ind(trimmed, 'rightWrist_x', isBuy, test=True)
    y_features = feature_vector_buy_ind(trimmed, 'rightWrist_y', isBuy, test)
    return np.append(x_features, y_features)
def feature_vector_mother(data, isMother=False, test=False):
    """Build the 'mother' gesture feature vector from right-wrist y motion.

    Normalizes the right-wrist y column, differences it, then extracts the
    top-5 zero-crossing positions and top-5 windowed peak-to-peak spans of
    the differenced signal, pads/truncates to TRIM_DATA_SIZE_MOTHER - 1,
    and optionally appends the class label.

    Args:
        data: raw keypoint frame data accepted by trim_or_pad_data.
        isMother: class label (1/0) appended when not in test mode.
        test: when True, no label is appended to the returned vector.

    Returns:
        1-D numpy array of fixed length TRIM_DATA_SIZE_MOTHER - 1
        (plus one label element unless test).
    """
    trimmed_data = trim_or_pad_data(data, TRIM_DATA_SIZE_MOTHER)
    rY = trimmed_data['rightWrist_y']
    # Normalize against the full frame, then rescale to a common range.
    normRawColumn = universal_normalization(rY, trimmed_data, x_norm=False)
    normRawColumn = general_normalization(normRawColumn)
    # First difference: sign changes here are direction reversals of the wrist.
    diffNormRawData = np.diff(normRawColumn)
    zeroCrossingArray = np.array([])
    maxDiffArray = np.array([])
    # Track the sign of the first difference to detect crossings.
    if diffNormRawData[0] > 0:
        initSign = 1
    else:
        initSign = 0
    windowSize = 5
    for x in range(1, len(diffNormRawData)):
        if diffNormRawData[x] > 0:
            newSign = 1
        else:
            newSign = 0
        # Record the index of every sign change (zero crossing).
        if initSign != newSign:
            zeroCrossingArray = np.append(zeroCrossingArray, x)
            initSign = newSign
        # Peak-to-peak amplitude within a +/- windowSize neighborhood,
        # clamped to the array bounds; appended for every x.
        maxIndex = np.minimum(len(diffNormRawData), x + windowSize)
        minIndex = np.maximum(0, x - windowSize)
        maxVal = np.amax(diffNormRawData[minIndex:maxIndex])
        minVal = np.amin(diffNormRawData[minIndex:maxIndex])
        maxDiffArray = np.append(maxDiffArray, (maxVal - minVal))
    # Indices of maxDiffArray sorted by descending peak-to-peak span.
    index = np.argsort(-maxDiffArray)
    featureVector = np.array([])
    # NOTE(review): `index` ranges over maxDiffArray (length
    # len(diffNormRawData) - 1), but it is used to index zeroCrossingArray,
    # which only holds one entry per sign change and is usually shorter.
    # This can raise IndexError whenever a top-5 index exceeds
    # len(zeroCrossingArray) - 1 — confirm intended behavior.
    featureVector = np.append(featureVector, zeroCrossingArray[index[0:5]])
    featureVector = np.append(featureVector, maxDiffArray[index[0:5]])
    # Zero-pad to the fixed feature length, then truncate defensively.
    if TRIM_DATA_SIZE_MOTHER - 1 > featureVector.shape[0]:
        featureVector = np.pad(
            featureVector,
            (0, TRIM_DATA_SIZE_MOTHER - featureVector.shape[0] - 1),
            'constant')
    featureVector = featureVector[:TRIM_DATA_SIZE_MOTHER - 1]
    # Append the class label only when building training data.
    if not test:
        if isMother:
            featureVector = np.append(featureVector, 1)
        else:
            featureVector = np.append(featureVector, 0)
    return featureVector
def feature_vector_fun(data, isFun=False, test=False):
    """Build the 'fun' gesture feature vector from right-wrist x motion.

    Normalizes the right-wrist x column and summarizes it with six scalar
    statistics, then zero-pads/truncates to TRIM_DATA_SIZE_FUN - 1 and
    optionally appends the class label.

    Args:
        data: raw keypoint frame data accepted by trim_or_pad_data.
        isFun: class label (1/0) appended when not in test mode.
        test: when True, no label is appended to the returned vector.

    Returns:
        1-D numpy array of fixed length TRIM_DATA_SIZE_FUN - 1
        (plus one label element unless test).
    """
    trimmed = trim_or_pad_data(data, TRIM_DATA_SIZE_FUN)
    norm = universal_normalization(trimmed['rightWrist_x'], trimmed, x_norm=True)
    norm = general_normalization(norm)

    stats = [
        abs(integrate.simps(norm, dx=5)),   # area under curve
        fc.absolute_sum_of_changes(norm),   # abs sum of consecutive diffs
        fc.approximate_entropy(norm, 2, 3), # entropy
        fc.autocorrelation(norm, lag=5),    # autocorrelation at lag 5
        fc.count_above_mean(norm),          # samples above the mean
        fc.count_below_mean(norm),          # samples below the mean
    ]
    featureVector = np.asarray(stats, dtype=float)

    # Zero-pad to the fixed feature length, then truncate defensively.
    target_len = TRIM_DATA_SIZE_FUN - 1
    if featureVector.shape[0] < target_len:
        featureVector = np.pad(
            featureVector, (0, target_len - featureVector.shape[0]), 'constant')
    featureVector = featureVector[:target_len]

    # Append the class label only when building training data.
    if not test:
        featureVector = np.append(featureVector, 1 if isFun else 0)
    return featureVector
def feature_vector_hope(data, isHope=False, test=False):
    """Build the 'hope' gesture feature vector from right-wrist y motion.

    Normalizes and differences the right-wrist y column, then concatenates:
    the real parts of the first five non-DC FFT coefficients, the area
    under the differenced curve, its kurtosis, and its top-5 windowed
    peak-to-peak spans. Pads/truncates to TRIM_DATA_SIZE_HOPE - 1 and
    optionally appends the class label.

    Args:
        data: raw keypoint frame data accepted by trim_or_pad_data.
        isHope: class label (1/0) appended when not in test mode.
        test: when True, no label is appended to the returned vector.

    Returns:
        1-D numpy array of fixed length TRIM_DATA_SIZE_HOPE - 1
        (plus one label element unless test).
    """
    trimmed_data = trim_or_pad_data(data, TRIM_DATA_SIZE_HOPE)
    # CLEANUP: removed unused 'leftWrist_y' read and the zero-crossing
    # tracking (zeroCrossingArray/initSign) — neither contributed to the
    # returned vector in the original implementation.
    rY = trimmed_data['rightWrist_y']
    normRawColumn = universal_normalization(rY, trimmed_data, x_norm=False)
    normRawColumn = general_normalization(normRawColumn)
    diffNormRawData = np.diff(normRawColumn)

    # Real parts of the first five non-DC FFT coefficients (n=6, drop bin 0).
    fftArray = np.array([value.real for value in fft(diffNormRawData, n=6)[1:]])

    # Area under the differenced curve (Simpson's rule, dx=5).
    auc = np.array([abs(integrate.simps(diffNormRawData, dx=5))])

    # Kurtosis of the differenced signal.
    kur = np.array([kurtosis(diffNormRawData)])

    # Windowed peak-to-peak span of the differenced signal at each sample,
    # using a +/- windowSize neighborhood clamped to the array bounds.
    windowSize = 5
    maxDiffArray = np.array([])
    for x in range(1, len(diffNormRawData)):
        maxIndex = np.minimum(len(diffNormRawData), x + windowSize)
        minIndex = np.maximum(0, x - windowSize)
        maxVal = np.amax(diffNormRawData[minIndex:maxIndex])
        minVal = np.amin(diffNormRawData[minIndex:maxIndex])
        maxDiffArray = np.append(maxDiffArray, (maxVal - minVal))
    # Indices sorted by descending peak-to-peak span; keep the top 5.
    index = np.argsort(-maxDiffArray)

    featureVector = np.array([])
    featureVector = np.append(featureVector, fftArray)
    featureVector = np.append(featureVector, auc)
    featureVector = np.append(featureVector, kur)
    featureVector = np.append(featureVector, maxDiffArray[index[0:5]])

    # Zero-pad to the fixed feature length, then truncate defensively.
    if TRIM_DATA_SIZE_HOPE - 1 > featureVector.shape[0]:
        featureVector = np.pad(
            featureVector,
            (0, TRIM_DATA_SIZE_HOPE - featureVector.shape[0] - 1),
            'constant')
    featureVector = featureVector[:TRIM_DATA_SIZE_HOPE - 1]

    # Append the class label only when building training data.
    if not test:
        if isHope:
            featureVector = np.append(featureVector, 1)
        else:
            featureVector = np.append(featureVector, 0)
    return featureVector