def __next__(self):
    batch_x = np.zeros((BATCH_SIZE, N_CHUNK, WIN * N_SENSOR))
    batch_y = np.zeros((BATCH_SIZE, N_CHUNK))
    min_duration = timedelta(seconds=MAX_LENGTH_SEC)

    if self.toggleNegativeSequence:
        logger.info("Generate non-chewing sequence")
        # make sure the fold contains at least one MAX_LENGTH sequence
        while True:
            fold = self.continuous_ranges[random.randint(
                0, len(self.continuous_ranges) - 1)]
            if fold[1] - fold[0] > min_duration:
                break
    else:
        logger.info("Generate chewing sequence")
        # make sure the fold contains at least one MAX_LENGTH sequence
        while True:
            fold = self.gt_ranges[random.randint(0, len(self.gt_ranges) - 1)]
            if fold[1] - fold[0] > min_duration:
                break

    # load a random sub-interval of the fold into memory
    random_interval = _random_datetime_interval(
        fold[0], fold[1], timedelta(seconds=4 * MAX_LENGTH_SEC))
    df = get_necklace(self.subj,
                      datetime_to_epoch(random_interval[0]),
                      datetime_to_epoch(random_interval[1]))

    # point-wise chewing label
    df['label_chewing'] = np.zeros((df.shape[0], ))
    for seg in self.gt_ranges:
        df.loc[(df.index >= seg[0]) & (df.index <= seg[1]),
               'label_chewing'] = 1
    label_chewing = df['label_chewing'].values

    for b in range(BATCH_SIZE):
        start = random.randint(0, df.shape[0] - MAX_LENGTH)
        # slice sensor data into one sequence of N_CHUNK windows
        x = df[self.key_list].iloc[start:start + MAX_LENGTH].values
        batch_x[b, :, :] = np.reshape(x, (N_CHUNK, WIN * N_SENSOR))
        # downsample the label: keep the center sample of each window
        query_chewing = label_chewing[start:start + MAX_LENGTH]
        batch_y[b, :] = query_chewing[(WIN // 2)::WIN]

    self.toggleNegativeSequence = not self.toggleNegativeSequence
    return batch_x.astype(float), batch_y.astype(int)
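# Standalone illustration (synthetic data, not part of the pipeline) of the
# label-downsampling step above: each chunk of WIN samples gets the point-wise
# label of its center sample.
import numpy as np

WIN, N_CHUNK = 4, 3
point_labels = np.array([0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0])  # MAX_LENGTH = WIN * N_CHUNK samples
chunk_labels = point_labels[(WIN // 2)::WIN]
print(chunk_labels)  # -> [1 1 0], one label per chunk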
def __next__(self):
    if self.counter > len(self.test_folds) - 1:
        raise StopIteration
    fold = self.test_folds[self.counter]
    logger.debug("Test fold duration: {}".format(fold[1] - fold[0]))
    self.counter = self.counter + 1

    # slice continuous folds and chewing folds
    df = get_necklace("P120",
                      datetime_to_epoch(fold[0]),
                      datetime_to_epoch(fold[1]))

    # point-wise chewing label
    df['label_chewing'] = np.zeros((df.shape[0], ))
    for c in self.gt_ranges:
        df.loc[(df.index >= c[0]) & (df.index <= c[1]), 'label_chewing'] = 1
    label_chewing = df['label_chewing'].values

    # truncate so the samples split evenly into BATCH_SIZE rows of WIN-sized windows
    N = df.shape[0]
    N_even = ((N - 1) // (BATCH_SIZE * WIN)) * BATCH_SIZE * WIN
    logger.debug(
        "Number of samples before truncation: {} and after: {}".format(
            N, N_even))
    df = df.iloc[:N_even]
    label_chewing = label_chewing[:N_even]

    batch_x = df[self.key_list].values
    batch_x = np.reshape(batch_x, (BATCH_SIZE, -1, WIN * N_SENSOR))

    # majority voting: a window is labeled chewing if more than half its samples are
    label_chewing = np.reshape(label_chewing, (-1, WIN))
    label_sum_chewing = np.sum(label_chewing, axis=1)
    batch_y_chewing = label_sum_chewing > WIN // 2
    batch_y_chewing = np.reshape(batch_y_chewing, (BATCH_SIZE, -1))

    return batch_x.astype(float), batch_y_chewing.astype(int)
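# Standalone illustration (synthetic data, not part of the pipeline) of the
# majority-voting step above: a window counts as chewing only when more than
# half of its WIN point-wise labels are 1.
import numpy as np

WIN = 4
point_labels = np.array([1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0])
windows = np.reshape(point_labels, (-1, WIN))
window_labels = (np.sum(windows, axis=1) > WIN // 2).astype(int)
print(window_labels)  # -> [1 0 0]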
import sys
import datetime
import logging

import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy.signal import argrelextrema

# Project helpers such as get_necklace_timestr, datetime_to_epoch,
# peak_detection and periodic_subsequence are imported from the project's
# own modules (import paths omitted here).

logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
matplotlib.rcParams.update({'font.size': 20})

data = get_necklace_timestr('P120',
                            "2017-08-26 08:18:15.000",
                            "2017-08-26 08:19:00.000",
                            reliability=0.01)
# data = pd.read_csv('data/test_fft.csv')

# baseline-shift the proximity signal
proximity = data['proximity'].values
proximity = proximity - 2000
time = np.array([datetime_to_epoch(t) for t in data.index])

# prominent peaks (custom detector) vs. plain local maxima
peaks_index = np.array(peak_detection(proximity, min_prominence=6))
peaks_time = time[peaks_index]
other_peaks_index = argrelextrema(proximity, np.greater, order=2, mode='clip')[0]
other_peaks_time = time[other_peaks_index]

# segments = periodic_subsequence(peaks_index, peaks_time, min_length=4,
#                                 max_length=100, eps=0.1, alpha=0.45,
#                                 low=400, high=1200)

time_obj = [datetime.datetime.fromtimestamp(t / 1000) for t in time]
plt.figure(figsize=(10, 5))
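# The original snippet stops right after plt.figure(); a plausible
# continuation (not in the original; styling choices are assumptions) that
# overlays both peak sets on the baseline-shifted proximity signal:
plt.plot(time_obj, proximity, label='proximity (baseline-shifted)')
plt.plot([time_obj[i] for i in peaks_index], proximity[peaks_index],
         'rx', label='prominent peaks (min_prominence=6)')
plt.plot([time_obj[i] for i in other_peaks_index], proximity[other_peaks_index],
         'g.', label='local maxima (order=2)')
plt.xlabel('time')
plt.ylabel('proximity')
plt.legend()
plt.show()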
label_bite_path = "{}/{}/visualize/SYNC/{}/labelbites.json".format(
    settings.CLEAN_FOLDER, subj, meal)
label_bite_dict, bite_epoch, bite_chewing_count = read_label_bite(
    label_bite_path)

# build bite ranges from consecutive bite timestamps, skipping intervals
# whose chewing-count label contains 'e'
bite_ranges = []
tmp = list(zip(bite_epoch, bite_epoch[1:]))
for interval, c in zip(tmp, bite_chewing_count):
    if 'e' not in c:
        bite_ranges.append(interval)

df = get_necklace(subj, bite_ranges[0][0], bite_ranges[-1][1])
peaks_index = peak_detection(df['proximity'], min_prominence=2)
peaks_time = df.index[peaks_index]
peaks_epoch = np.array([datetime_to_epoch(p) for p in peaks_time])

# count the prominent peaks falling inside each bite
for b1, b2 in bite_ranges:
    count = len(np.where((peaks_epoch >= b1) & (peaks_epoch < b2))[0])
    print(b1, b2, count)
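# Hypothetical follow-up (not in the original): collect the per-bite peak
# counts computed above and print a compact summary instead of raw values.
counts = [int(np.sum((peaks_epoch >= b1) & (peaks_epoch < b2)))
          for b1, b2 in bite_ranges]
print("bites: {}, mean prominent peaks per bite: {:.2f} (std {:.2f})".format(
    len(counts), np.mean(counts), np.std(counts)))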
chewing_rate_list = []
chewing_rate_std_list = []
for m in valid_meals:
    # get chewing ground truth for this meal
    gt = read_SYNC("{}/{}/visualize/SYNC/{}/labelchewing.json".format(
        settings.CLEAN_FOLDER, subj, m))
    print(gt)

    rate_list = []
    rate_std_list = []
    for i in range(gt.shape[0]):
        df = get_necklace(subj,
                          datetime_to_epoch(gt['start'].iloc[i]),
                          datetime_to_epoch(gt['end'].iloc[i]))
        print(df.shape)
        if df.empty or df.shape[0] < 3:
            print("EMPTY DATAFRAME, DOUBLE CHECK!!!")
            continue
        rate, rate_std = get_chewing_rate(df)
        rate_list.append(rate)
        rate_std_list.append(rate_std)

    chewing_rate_list.append(np.mean(np.array(rate_list)))
    chewing_rate_std_list.append(np.mean(np.array(rate_std_list)))
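# Hypothetical per-meal report (not in the original); valid_meals stays
# aligned one-to-one with chewing_rate_list / chewing_rate_std_list because
# the loop above appends exactly once per meal.
for m, rate, rate_std in zip(valid_meals, chewing_rate_list, chewing_rate_std_list):
    print("meal {}: mean chewing rate {:.3f} (mean std {:.3f})".format(m, rate, rate_std))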