def test_resample_median_bug_1688(self):
    df = DataFrame(
        [1, 2],
        index=[datetime(2012, 1, 1, 0, 0, 0), datetime(2012, 1, 1, 0, 5, 0)],
    )

    result = df.resample("T", how=lambda x: x.mean())
    exp = df.asfreq("T")
    tm.assert_frame_equal(result, exp)

    result = df.resample("T", how="median")
    exp = df.asfreq("T")
    tm.assert_frame_equal(result, exp)
def test_resample_median_bug_1688(self): for dtype in ["int64", "int32", "float64", "float32"]: df = DataFrame([1, 2], index=[datetime(2012, 1, 1, 0, 0, 0), datetime(2012, 1, 1, 0, 5, 0)], dtype=dtype) result = df.resample("T", how=lambda x: x.mean()) exp = df.asfreq("T") tm.assert_frame_equal(result, exp) result = df.resample("T", how="median") exp = df.asfreq("T") tm.assert_frame_equal(result, exp)
def test_asfreq_ts(self, frame_or_series):
    index = period_range(freq="A", start="1/1/2001", end="12/31/2010")
    obj = DataFrame(np.random.randn(len(index), 3), index=index)
    obj = tm.get_obj(obj, frame_or_series)

    result = obj.asfreq("D", how="end")
    exp_index = index.asfreq("D", how="end")
    assert len(result) == len(obj)
    tm.assert_index_equal(result.index, exp_index)

    result = obj.asfreq("D", how="start")
    exp_index = index.asfreq("D", how="start")
    assert len(result) == len(obj)
    tm.assert_index_equal(result.index, exp_index)
def test_dataframe(self):
    bts = DataFrame({'a': tm.makePeriodSeries()})
    ts = bts.asfreq('D')
    ax = bts.plot()
    self.assertEqual(ax.get_lines()[0].get_xydata()[0, 0],
                     ts.index[0].ordinal)
    idx = ax.get_lines()[0].get_xdata()
    self.assert_(idx.freqstr == 'D')
def test_asfreq_datetimeindex(self): df = DataFrame({"A": [1, 2, 3]}, index=[datetime(2011, 11, 1), datetime(2011, 11, 2), datetime(2011, 11, 3)]) df = df.asfreq("B") tm.assertIsInstance(df.index, DatetimeIndex) ts = df["A"].asfreq("B") tm.assertIsInstance(ts.index, DatetimeIndex)
def data_validation(series):
    """
    add gaps sum & nan sum to the time series

    Args:
        series (pandas.Series): time-series

    Returns:
        pandas.DataFrame: tags with columns 'nans', 'gaps', ...
    """
    ts = series.copy()
    ts = ts.append(Series(index=[
        ts.index[0].replace(day=1, month=1, hour=0, minute=0),
        ts.index[-1].replace(day=31, month=12, hour=23, minute=59)
    ], data=[NaN, NaN]))
    ts = ts[~ts.index.duplicated()].copy().sort_index()

    tags = DataFrame(index=ts.index)
    tags['nans'] = isna(ts).astype(int)
    tags = tags.reindex(tags.asfreq('T').index)
    tags['gaps'] = isna(ts.fillna(0).reindex(tags.index)).astype(int)
    return tags
def data_validation(series):
    """
    add gaps sum & nan sum to the time series

    Args:
        series (pandas.Series): time-series

    Returns:
        pandas.DataFrame: tags with columns 'nans', 'gaps', ...
    """
    ts = series.copy()

    first_index = ts.index[0].replace(day=1, month=1, hour=0, minute=0)
    if first_index not in series.index:
        ts = Series(index=[first_index]).append(ts)

    last_index = ts.index[-1].replace(day=31, month=12, hour=23, minute=59)
    if last_index not in ts.index:
        ts = ts.append(Series(index=[last_index]))

    if ts.index.has_duplicates:
        # very slow on large data sets
        ts = ts[~ts.index.duplicated()].copy()

    if not ts.index.is_monotonic_increasing:
        raise UserWarning(
            'Series timestamps are not monotonically increasing.')
    ts = ts.sort_index()

    tags = DataFrame(index=ts.index)
    tags['nans'] = isna(ts).astype(int)
    tags = tags.reindex(tags.asfreq('T').index)
    tags['gaps'] = isna(ts.fillna(0).reindex(tags.index)).astype(int)
    return tags
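# A minimal usage sketch for data_validation (hypothetical data; assumes the
# Series/DataFrame/isna/NaN names used above are imported from pandas/numpy,
# and an older pandas where Series.append still exists):
import numpy as np
import pandas as pd

idx = pd.date_range('2020-06-01 00:00', periods=5, freq='T').delete(2)  # drop one minute
demo = pd.Series([1.0, np.nan, 3.0, 4.0], index=idx)  # one explicit NaN
demo_tags = data_validation(demo)
# 'nans' marks explicit NaN values; 'gaps' marks minutes absent from the index
print(demo_tags[['nans', 'gaps']].sum())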
def test_resample_median_bug_1688(): for dtype in ["int64", "int32", "float64", "float32"]: df = DataFrame( [1, 2], index=[datetime(2012, 1, 1, 0, 0, 0), datetime(2012, 1, 1, 0, 5, 0)], dtype=dtype, ) result = df.resample("T").apply(lambda x: x.mean()) exp = df.asfreq("T") tm.assert_frame_equal(result, exp) result = df.resample("T").median() exp = df.asfreq("T") tm.assert_frame_equal(result, exp)
def test_asfreq_keep_index_name(self):
    # GH#9854
    index_name = "bar"
    index = date_range("20130101", periods=20, name=index_name)
    df = DataFrame(list(range(20)), columns=["foo"], index=index)

    assert index_name == df.index.name
    assert index_name == df.asfreq("10D").index.name
def fill_dates(data: pd.DataFrame, interp_vars: List[str]) -> pd.DataFrame:
    data = data.set_index('date').sort_index()
    data = data.asfreq('D').reset_index()
    data[interp_vars] = data[interp_vars].interpolate(axis=0)
    data['location_id'] = (data['location_id']
                           .fillna(method='pad')
                           .astype(int))
    return data[['location_id', 'date'] + interp_vars]
def test_asfreq_keep_index_name(self, frame_or_series):
    # GH#9854
    index_name = "bar"
    index = date_range("20130101", periods=20, name=index_name)
    obj = DataFrame(list(range(20)), columns=["foo"], index=index)
    obj = tm.get_obj(obj, frame_or_series)

    assert index_name == obj.index.name
    assert index_name == obj.asfreq("10D").index.name
def fill_dates(df: pd.DataFrame, interp_var: str = None) -> pd.DataFrame:
    """Forward fill data by date."""
    df = df.sort_values('Date').set_index('Date')
    df = df.asfreq('D').reset_index()
    if interp_var:
        df[interp_var] = df[interp_var].interpolate()
    df = df.fillna(method='pad')
    df['location_id'] = df['location_id'].astype(int)
    return df
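# Hedged example of the asfreq('D') gap-filling pattern in fill_dates above,
# on a hypothetical two-row frame with a missing day in between:
import pandas as pd

demo = pd.DataFrame({'Date': pd.to_datetime(['2020-03-01', '2020-03-03']),
                     'location_id': [10, 10],
                     'Deaths': [4.0, 8.0]})  # 'Deaths' is an assumed column name
print(fill_dates(demo, interp_var='Deaths'))
# 2020-03-02 is inserted by asfreq('D'); 'Deaths' interpolates to 6.0 and
# 'location_id' is forward-filled before the int cast.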
def test_asfreq_datetimeindex(self):
    df = DataFrame(
        {"A": [1, 2, 3]},
        index=[datetime(2011, 11, 1), datetime(2011, 11, 2), datetime(2011, 11, 3)],
    )
    df = df.asfreq("B")
    assert isinstance(df.index, DatetimeIndex)

    ts = df["A"].asfreq("B")
    assert isinstance(ts.index, DatetimeIndex)
def test_asfreq_fillvalue(self):
    # test for fill value during upsampling, related to issue 3715

    # setup
    rng = date_range("1/1/2016", periods=10, freq="2S")
    ts = Series(np.arange(len(rng)), index=rng)
    df = DataFrame({"one": ts})

    # insert pre-existing missing value
    df.loc["2016-01-01 00:00:08", "one"] = None

    actual_df = df.asfreq(freq="1S", fill_value=9.0)
    expected_df = df.asfreq(freq="1S").fillna(9.0)
    expected_df.loc["2016-01-01 00:00:08", "one"] = None
    tm.assert_frame_equal(expected_df, actual_df)

    expected_series = ts.asfreq(freq="1S").fillna(9.0)
    actual_series = ts.asfreq(freq="1S", fill_value=9.0)
    tm.assert_series_equal(expected_series, actual_series)
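# A quick standalone illustration of the behavior the test above pins down:
# asfreq's fill_value applies only to rows *created* by upsampling, never to
# NaNs already present in the data.
import numpy as np
import pandas as pd

s = pd.Series([1.0, np.nan], index=pd.date_range('2016-01-01', periods=2, freq='2S'))
print(s.asfreq('1S', fill_value=9.0))
# 00:00:00    1.0  <- original value
# 00:00:01    9.0  <- row created by upsampling, gets fill_value
# 00:00:02    NaN  <- pre-existing NaN is left untouched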
def expanding_moving_average(data: pd.DataFrame, measure: str, window: int) -> pd.Series:
    """Expands a dataset over date and performs a moving average.

    Parameters
    ----------
    data
        The dataset to perform the moving average over.
    measure
        The column name in the dataset to average.
    window
        The number of days to average over.

    Returns
    -------
        A series indexed by the expanded date with the measure averaged
        over the window.

    """
    required_columns = [COLUMNS.date, measure]
    data = data.loc[:, required_columns].set_index(COLUMNS.date).loc[:, measure]

    buffer_window = 5
    if len(data) <= buffer_window:
        return data

    # extend triangular weighted diffs over last/first days of window
    # data = np.exp(data)
    w = np.array([1, 2, 3, 2, 1])
    w = w / w.sum()

    first_diff = np.diff(data[:buffer_window + 1])
    # second_diff = np.diff(np.diff(data[:buffer_window + 2]))
    pre = data[0] - (first_diff * w).sum()  # - (second_diff * w).sum()
    pre = pd.Series(pre, [data.index.min() - pd.Timedelta(days=1)], name=measure)
    pre.index.name = COLUMNS.date

    first_diff = np.diff(data[len(data) - (buffer_window + 1):])
    # second_diff = np.diff(np.diff(data[len(data) - (buffer_window + 2):]))
    post = data[len(data) - 1] + (first_diff * w).sum()  # + (second_diff * w).sum()
    post = pd.Series(post, [data.index.max() + pd.Timedelta(days=1)], name=measure)
    post.index.name = COLUMNS.date

    data = pd.concat([pre, data, post])
    moving_average = (data
                      .asfreq('D', method='pad')
                      .rolling(window=window, min_periods=1, center=True)
                      .mean())
    moving_average = moving_average[1:-1]
    # moving_average = np.log(moving_average)
    return moving_average
def make_monthly(df: pd.DataFrame, method='ffill',
                 add_endpoints: list = None) -> pd.DataFrame:
    if add_endpoints:
        ind = df.index.to_series()
        if add_endpoints[0]:
            ind[0] = ind[0] - MonthEnd(1)
        if add_endpoints[1]:
            ind[-1] = ind[-1] + MonthEnd(1)
        df.set_index(ind, inplace=True)
    df = df.asfreq(freq='1M', method=method, how="e")
    df.index += MonthEnd(0)
    return df
def test_asfreq_normalize(self, frame_or_series):
    rng = date_range("1/1/2000 09:30", periods=20)
    norm = date_range("1/1/2000", periods=20)

    vals = np.random.randn(20, 3)

    obj = DataFrame(vals, index=rng)
    expected = DataFrame(vals, index=norm)
    if frame_or_series is Series:
        obj = obj[0]
        expected = expected[0]

    result = obj.asfreq("D", normalize=True)
    tm.assert_equal(result, expected)
def test_asfreq_ts(self):
    index = period_range(freq="A", start="1/1/2001", end="12/31/2010")
    ts = Series(np.random.randn(len(index)), index=index)
    df = DataFrame(np.random.randn(len(index), 3), index=index)

    result = ts.asfreq("D", how="end")
    df_result = df.asfreq("D", how="end")
    exp_index = index.asfreq("D", how="end")
    assert len(result) == len(ts)
    tm.assert_index_equal(result.index, exp_index)
    tm.assert_index_equal(df_result.index, exp_index)

    result = ts.asfreq("D", how="start")
    assert len(result) == len(ts)
    tm.assert_index_equal(result.index, index.asfreq("D", how="start"))
def test_asfreq_ts(self):
    index = PeriodIndex(freq='A', start='1/1/2001', end='12/31/2010')
    ts = Series(np.random.randn(len(index)), index=index)
    df = DataFrame(np.random.randn(len(index), 3), index=index)

    result = ts.asfreq('D', how='end')
    df_result = df.asfreq('D', how='end')
    exp_index = index.asfreq('D', how='end')
    assert len(result) == len(ts)
    tm.assert_index_equal(result.index, exp_index)
    tm.assert_index_equal(df_result.index, exp_index)

    result = ts.asfreq('D', how='start')
    assert len(result) == len(ts)
    tm.assert_index_equal(result.index, index.asfreq('D', how='start'))
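# The two tests above exercise the how= argument; a small sketch of what
# 'start' and 'end' anchor to when an annual period is converted to daily:
import pandas as pd

p = pd.Period('2001', freq='A')
print(p.asfreq('D', how='start'))  # 2001-01-01
print(p.asfreq('D', how='end'))    # 2001-12-31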
def test_asfreq_resample_set_correct_freq(self, frame_or_series):
    # GH#5613
    # we test if .asfreq() and .resample() set the correct value for .freq
    dti = to_datetime(["2012-01-01", "2012-01-02", "2012-01-03"])
    obj = DataFrame({"col": [1, 2, 3]}, index=dti)
    obj = tm.get_obj(obj, frame_or_series)

    # testing the settings before calling .asfreq() and .resample()
    assert obj.index.freq is None
    assert obj.index.inferred_freq == "D"

    # does .asfreq() set .freq correctly?
    assert obj.asfreq("D").index.freq == "D"

    # does .resample() set .freq correctly?
    assert obj.resample("D").asfreq().index.freq == "D"
def test_asfreq_resample_set_correct_freq(self):
    # GH#5613
    # we test if .asfreq() and .resample() set the correct value for .freq
    df = DataFrame(
        {"date": ["2012-01-01", "2012-01-02", "2012-01-03"], "col": [1, 2, 3]}
    )
    df = df.set_index(to_datetime(df.date))

    # testing the settings before calling .asfreq() and .resample()
    assert df.index.freq is None
    assert df.index.inferred_freq == "D"

    # does .asfreq() set .freq correctly?
    assert df.asfreq("D").index.freq == "D"

    # does .resample() set .freq correctly?
    assert df.resample("D").asfreq().index.freq == "D"
def prepare_plot_data(df: pd.DataFrame, daily_spending: float,
                      extend: datetime = None) -> pd.DataFrame:
    df['Total'] = df['Amount'].cumsum()
    df = df.select_dtypes('number').drop('Amount', axis=1)
    df = (df.reset_index()
            .drop_duplicates(df.index.name, keep='last')
            .set_index(df.index.name))
    if extend is not None:
        df.loc[extend] = 0
    df = df.asfreq('1D', 'pad')
    df['Planned'] = np.nan
    df['Planned'].iloc[0] = 0
    df['Planned'].iloc[-1] = (df.index[-1] - df.index[0]).days * daily_spending
    df['Planned'] = df['Planned'].interpolate()
    df['Difference'] = df['Total'] - df['Planned']
    return df
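# A hedged usage sketch for prepare_plot_data (hypothetical expense data; the
# chained .iloc assignments above assume an older pandas that permits them):
import pandas as pd

demo = pd.DataFrame({'Amount': [-10.0, -5.0, -20.0]},
                    index=pd.DatetimeIndex(['2021-01-01', '2021-01-01', '2021-01-04'],
                                           name='Date'))
print(prepare_plot_data(demo, daily_spending=-10.0))
# asfreq('1D', 'pad') fills 01-02 and 01-03 with the last cumulative total,
# while 'Planned' is interpolated linearly from 0 to 3 days * daily_spending.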
def operation1(vehicle_df: pd.DataFrame) -> pd.DataFrame:
    # 1. Resample to one-second intervals
    print("Resampling missing data...")
    res = vehicle_df.asfreq('1S')
    # 2. Only fill gaps of one second or less
    res = res[~res.加速度.isnull() | ~res.加速度.shift().isnull()]
    print("Performing Lagrange interpolation...")
    # 3. Lagrange interpolation
    for i in res.columns:
        for j in range(len(res)):
            if (res[i].isnull()).iloc[j]:
                res[i][j] = ployinterp_column(res[i], j)
    res['时间'] = res.index
    print('Row count after filling missing data: {}'.format(res.shape[0]))
    return res
def df_to_ts(df: pd.DataFrame, datetime_label: str, freq, copy=False) -> pd.DataFrame:
    # Copy the dataframe to avoid side effects (if needed).
    df = copy_or_not_copy(df, copy)
    # Set the datetime column as the dataframe index and check there are no duplicates.
    df.set_index(datetime_label, inplace=True, verify_integrity=True)
    # Convert the index into a datetime.
    convert_df_index_to_datetime_if_needed(df, copy=False)
    # Sort the index.
    sort_df_index_if_needed(df, copy=False)
    # Set the dataframe frequency.
    df = df.asfreq(freq)
    return df
def transform(self, X: pandas.DataFrame) -> pandas.DataFrame:
    """
    Set a frequency on the dataset

    Parameters
    ----------
    X : pandas.DataFrame
        DataFrame without index frequency

    Returns
    -------
    X : pandas.DataFrame
        DataFrame with index frequency
    """
    if not isinstance(X.index, pandas.DatetimeIndex):
        raise TypeError('Index must be a DatetimeIndex')

    # Convert the timeseries to the given frequency
    X = X.asfreq(self._frequency)
    return X
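# The heart of transform() above is DataFrame.asfreq on a DatetimeIndex; a
# standalone before/after sketch of the index freq attribute:
import pandas

X = pandas.DataFrame({'y': [1.0, 2.0]},
                     index=pandas.DatetimeIndex(['2021-01-01', '2021-01-02']))
assert X.index.freq is None  # no frequency set yet
X = X.asfreq('D')
assert X.index.freq == 'D'   # asfreq stamps the frequency on the index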
CLKS = ['a57_clk', 'a53_clk', 'oxili_gfx3d_clk']

trace = Ftrace(r'C:\Users\c00759961\Documents\temp\nina-MDA35B-camera-UHD-recording-after.html',
               ['tsens_read', 'tsens_threshold_clear', 'tsens_threshold_hit',
                'clock_set_rate', 'clock_enable', 'clock_disable'])

start = Timestamp('1/1/1970')
# end = start + Second(trace.duration)
NAMES = [TSENS_ALIAS[tsens] for tsens in trace.thermal.names if tsens in TSENS_ALIAS] + CLKS
df_therm = DataFrame(columns=NAMES)  # index=period_range(start=start, end=end, freq='1U')

for tsens in trace.thermal.names:
    for therm in trace.thermal.temp_intervals(tsens=tsens):
        df_therm.loc[start + Micro(therm.interval.end * 1e6), TSENS_ALIAS[tsens]] = therm.temp

# lets look at clocks.
for clk in CLKS:
    for freq_event in trace.clock.frequency_intervals(clock=clk):
        df_therm.loc[start + Micro(freq_event.interval.end * 1e6), clk] = freq_event.frequency
    for clk_event in trace.clock.clock_intervals(clock=clk, state=ftrace.clock.ClockState.DISABLED):
        df_therm.loc[start + Micro(clk_event.interval.end * 1e6), clk] = 0

df_therm.sort(inplace=True)
# Resample to every 100 milliseconds
df_therm = df_therm.asfreq('100L', method='ffill').fillna(method='ffill').fillna(-1)
df_therm.to_csv(r'C:\Users\c00759961\Documents\temp\nina-MDA35B-camera-UHD-recording-after-thermal-timeline.csv')
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict

from pandas import DataFrame
from pandas_datareader import data as web

plt.interactive(True)

names = ['AAPL', 'GOOG', 'MSFT', 'DELL', 'GS', 'MS', 'BAC', 'C']


def get_px(stock, start, end):
    print('Get ' + stock)
    return web.get_data_yahoo(stock, start, end)['Adj Close']


px = DataFrame({n: get_px(n, '1/1/2009', '6/1/2012') for n in names})
px = px.asfreq('B').fillna(method='pad')
rets = px.pct_change()
((1 + rets).cumprod() - 1).plot()
print('block')


def calc_mom(price, lookback, lag):
    mon_ret = price.shift(lag).pct_change(lookback)
    ranks = mon_ret.rank(axis=1, ascending=False)
    # demean and standardize row-wise (axis=0 aligns the row means/stds)
    demeaned = ranks.subtract(ranks.mean(axis=1), axis=0)
    return demeaned.divide(demeaned.std(axis=1), axis=0)


compound = lambda x: (1 + x).prod() - 1
daily_sr = lambda x: x.mean() / x.std()
def _resample_data(self, time_series_data: pd.DataFrame,
                   holes_filling_method: str = 'ffill'):
    return time_series_data.asfreq(self.forecast_frequency, holes_filling_method)
filter_ = pd.read_pickle('/Users/harbes/data/xccdata/filter')  # 4,672,606 valid data points (out of an original 6,140,094)
key = lambda x: x.year * 100 + x.month
size = data['size_tot'].unstack()[filter_ == 1]
size_month = size.groupby(key).mean()
size_month.index = pd.to_datetime(size_month.index.values.astype(str), format=('%Y%m'))
num_by_ = 10
label_ = [i + 1 for i in range(num_by_)]
percentile = np.linspace(0, 1, num_by_ + 1)
# Group by same-period size (or alternatively by previous-period size)
mark_ = DataFrame([pd.qcut(size_month.loc[m], q=percentile, labels=label_) for m in size_month.index],
                  index=size_month.index, columns=size_month.columns)
mark_ = mark_.asfreq(freq='D', method='ffill').loc[size.index]
# Returns of the different size portfolios, equally weighted
opnprc = data['adj_open'].unstack()  # [filter_==1]
clsprc = data['adj_close'].unstack()  # [filter_==1]
rtn = (clsprc - opnprc) / opnprc
rtn_port = DataFrame([[rtn.loc[m][mark_.loc[m] == l_].mean() for l_ in label_] for m in mark_.index],
                     index=mark_.index, columns=label_)
# Market portfolio
# rtn_port['M'] = (size * rtn).mean(axis=1) / (size[~np.isnan(size * rtn)]).mean(axis=1)  # value-weighted
rtn_port['M'] = rtn.mean(axis=1)  # equally-weighted
# Set a second index
rtn_port['index'] = (rtn_port.index.year - 2005) * 12 + rtn_port.index.month - 1
max_month = rtn_port['index'][-1]
rtn_port['trddt'] = rtn_port.index
def plot_diff_GenPort_CW(saved_propags: pd.DataFrame,
                         market_CW: pd.DataFrame,
                         eval_dates: list,
                         savefile: bool = False,
                         namefile: str = "ResultDifference.png") -> None:
    """
    Computes and plots the difference between the portfolios of the
    genetic algorithm and the Cap-Weighted Portfolio.

    Parameters
    ----------
    saved_propags : DataFrame
        Propagations that have been saved.
    market_CW : DataFrame
        Cap-Weighted portfolio to compare with.
    eval_dates : List of Period dates
        Evaluation dates for display.
    savefile : bool
        Option to save the plot.
    namefile : str
        Name of the file to save in.

    Returns
    -------
    None
        None
    """
    # Checks that all frequencies are the same
    if saved_propags.index.freq != market_CW.index.freq:
        market_CW = market_CW.asfreq(saved_propags.index.freq)
    if saved_propags.index.freq != eval_dates.freq:
        eval_dates = eval_dates.asfreq(saved_propags.index.freq)

    # Computing values
    diff_array = (saved_propags.to_numpy() - market_CW.to_numpy()) \
        / market_CW.to_numpy() * 100
    Diff_GenPort_CW = pd.DataFrame(data=diff_array,
                                   columns=saved_propags.columns,
                                   index=saved_propags.index)

    # Plotting
    fig, axis = plt.subplots(nrows=1, ncols=1)
    Diff_GenPort_CW.plot(legend=False, figsize=(20, 7), ax=axis)

    # Adding evaluation dates
    for ed in eval_dates:
        axis.axvline(x=ed, color='grey', linestyle='--', linewidth=1)

    # Setting axes
    axis.axhline(y=0, color='grey', linestyle='--')
    plt.title("Difference Genetic Portfolios - CW Portfolio")
    plt.xlabel("Time")
    plt.ylabel("Difference in %")

    # Saving plot as a png file
    if savefile:
        plt.savefig('./' + namefile)

    return None
plt.rc('figure', figsize=(12, 6))

from pandas_datareader import data, wb

# Downloading data
names = ['AAPL', 'GOOG', 'MSFT', 'DELL', 'GS', 'MS', 'BAC', 'C']


def get_px(stock, start, end):
    return data.DataReader(stock, 'yahoo', start=start, end=end)['Adj Close']


px = DataFrame({n: get_px(n, None, None) for n in names})

# Business day
px = px.asfreq('B').fillna(method='pad')
rets = px.pct_change()
((1 + rets).cumprod() - 1).plot()


# Calculate momentum
def calc_mom(price, lookback, lag):
    mom_ret = price.shift(lag).pct_change(lookback)
    ranks = mom_ret.rank(axis=1, ascending=False)
    # (momentum return - mean momentum return) / standard deviation
    demeaned = ranks.subtract(ranks.mean(axis=1), axis=0)
    return demeaned.divide(demeaned.std(axis=1), axis=0)


compound = lambda x: (1 + x).prod() - 1
# daily sharpe ratio
daily_sr = lambda x: x.mean() / x.std()
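# A hedged toy check of calc_mom above: on each date the cross-sectional
# scores are mean-zero by construction (random-walk prices, arbitrary tickers):
import numpy as np
import pandas as pd

prices = pd.DataFrame(np.cumsum(np.random.randn(60, 4), axis=0) + 100,
                      index=pd.date_range('2012-01-02', periods=60, freq='B'),
                      columns=['AAPL', 'GOOG', 'MSFT', 'DELL'])
scores = calc_mom(prices, lookback=20, lag=1)
print(scores.dropna().mean(axis=1).abs().max())  # ~0 up to float error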
if __name__ == '__main__':
    fp, trace = parse_file(FILEPATH)

    # duration
    total_duration = trace.duration if not INTERVAL else INTERVAL.duration

    # Thermal
    NAMES = [TSENS_ALIAS[tsens] for tsens in trace.thermal.names if tsens in TSENS_ALIAS] + CLKS
    df_therm = DataFrame(columns=NAMES)

    for tsens in trace.thermal.names:
        for therm in trace.thermal.temp_intervals(tsens=tsens, interval=INTERVAL):
            df_therm.loc[start + Micro(therm.interval.start * 1e6), TSENS_ALIAS[tsens]] = therm.temp

    # lets look at clocks.
    for clk in CLKS:
        for freq_event in trace.clock.frequency_intervals(clock=clk, interval=INTERVAL):
            i_start = start + Micro(freq_event.interval.start * 1e6)
            i_end = start + Micro(freq_event.interval.end * 1e6)
            try:
                df_therm.loc[i_start:i_end, clk] = freq_event.frequency
            except KeyError:
                print "Error logging " + str(freq_event)
        for clk_event in trace.clock.clock_intervals(clock=clk, state=ftrace.clock.ClockState.DISABLED, interval=INTERVAL):
            df_therm.loc[start + Micro(clk_event.interval.start * 1e6):
                         start + Micro(clk_event.interval.end * 1e6), clk] = 0

    df_therm.sort(inplace=True)
    df_therm = df_therm.asfreq(THERMAL_TIMELINE_RESOLUTION, method='ffill').fillna(method='ffill').fillna(-1)
    df_therm.to_csv(r'C:\Users\c00759961\Documents\Charters\congitive-thermal-engine\res\thermal_timeline.csv')
class Person(object):
    MOVEMENT_SMOOTHING_WINDOW_SIZE_SECONDS = 0.1

    def __init__(self):
        # lh_y, rh_y, head_y, body_z, hand_dist
        self.param_values = [0, 0, 0, 0, 0]
        self.saved_joint_distances = DataFrame()
        self.last_joint_positions = None
        self.skeleton = None
        self.mean_joint_distance = 0
        self.role = None

    def update_skeleton(self, skeleton, timestamp):
        self.skeleton = nite2.Skeleton(skeleton)

        # Round to milliseconds
        timestamp = round(timestamp, 3)
        timestamp_datetime = pd.to_datetime(timestamp, unit="s")

        skeleton = nite2.Skeleton(skeleton)
        joint_positions = self.get_joint_positions()
        #print "Joint positions:", joint_positions

        # TODO: ignore low confidence joints
        if self.last_joint_positions is not None:
            joint_distances = [
                calcDist(p1, p2)
                for p1, p2 in zip(joint_positions, self.last_joint_positions)
            ]
            #print "Joint distances:", joint_distances
            joint_distances_series = Series(joint_distances, name=timestamp_datetime)
            self.saved_joint_distances = self.saved_joint_distances.append(
                joint_distances_series)

        self.last_joint_positions = joint_positions

        window_start_epoch = time.time() - self.MOVEMENT_SMOOTHING_WINDOW_SIZE_SECONDS
        window_start = pd.to_datetime(window_start_epoch, unit="s")
        #self.saved_joint_distances.sort(inplace=True)
        self.saved_joint_distances = self.saved_joint_distances.truncate(
            before=window_start)

        # Weighted average
        #print "Saved:"
        #print self.saved_joint_distances
        resampled = self.saved_joint_distances.asfreq("1ms").fillna(0)
        self.mean_joint_distance = resampled.mean().mean()
        #self.normalized_mean_joint_distance = self.mean_joint_distance /
        #print "Mean joint movement:", mean_joint_distance
        #if len(self.saved_joint_distances.shape) == 25:
        #    print "yoo"

        head, neck, left_shoulder, right_shoulder, left_elbow, right_elbow, left_hand, right_hand, \
            torso, left_hip, right_hip, left_knee, right_knee, left_foot, right_foot = self.get_joints()

        ################# get hands y height #################
        min_hands = -300
        max_hands = 820
        if left_hand.positionConfidence >= 0.5:
            left_hand_pos = float(left_hand.position.y - min_hands) / (max_hands - min_hands)
            left_hand_pos = min(1, max(0, left_hand_pos))
            self.param_values[0] = left_hand_pos
            # print "~~~~ Left Hand Y cord: ", left_hand.position.y, " ~~~~"

        if right_hand.positionConfidence >= 0.5:
            right_hand_pos = float(right_hand.position.y - min_hands) / (max_hands - min_hands)
            right_hand_pos = min(1, max(0, right_hand_pos))
            self.param_values[1] = right_hand_pos
            # print "~~~~ Right Hand Y cord: ", right_hand.position.y, " ~~~~"

        ################# get head position #################
        max_head = 450
        min_head = -140
        if head.positionConfidence >= 0.5:
            relitive_head = min(
                1, max(0, float(head.position.y - min_head) / float(max_head - min_head)))
            # print "~~~~ Raw head position: ", head.position.y
            # print "~~~~ Relative head position: ", relitive_head, " ~~~~"
            self.param_values[2] = relitive_head

        ################# get body position (front-back) #################
        if torso.positionConfidence >= 0.5:
            body_pos = torso.position.z
            min_dist = 1200
            max_dist = 2750
            relative_body_distance = float(body_pos - min_dist) / (max_dist - min_dist)
            relative_body_distance = min(1, max(0, relative_body_distance))
            self.param_values[3] = relative_body_distance
            # print "~~~~ Torso position: ", body_pos, " ~~~~"
            # print "~~~~ Relative torso position: ", relative_body_distance

        ################# get hands distance #################
        if right_hand.positionConfidence >= 0.5 and left_hand.positionConfidence >= 0.5:
            hands_distance = calcDist(right_hand.position, left_hand.position)
            hands_distance_pos = min(float(hands_distance) / float(1000), 1)
            # print "~~~~ Hands Position: ", hands_distance, " ~~~~"
            # print "~~~~ Relative hand distance: ", hands_distance_pos
            self.param_values[4] = hands_distance_pos

        if neck.positionConfidence >= 0.5 and torso.positionConfidence >= 0.5:
            torso_vector = get_vector(neck.position, torso.position)
            if right_hand.positionConfidence >= 0.5 and right_shoulder.positionConfidence >= 0.5:
                rh_vector = get_vector(right_hand.position, right_shoulder.position)
                rh_angle_rad = angle_between(torso_vector, rh_vector)
                self.rh_angle = rh_angle_rad / math.pi * 180
            if left_hand.positionConfidence >= 0.5 and left_shoulder.positionConfidence >= 0.5:
                lh_vector = get_vector(left_hand.position, left_shoulder.position)
                lh_angle_rad = angle_between(torso_vector, lh_vector)
                self.lh_angle = lh_angle_rad / math.pi * 180

    def get_joint_positions(self):
        return [joint.position for joint in self.get_joints()]

    def get_joints(self):
        return [self.skeleton.get_joint(i) for i in xrange(15)]