# conductor.py -- OpenKinect Symphony Conductor demo
# (executable file, 222 lines / 189 loc, 6.69 KB)
#!/usr/bin/env python
import freenect
import numpy as np
from freenect import sync_get_depth as get_depth, sync_get_video as get_video
from constants import *
import time
import argparse
from utilities import view_frame, set_params, avg_depth, normalize_depth,\
INVALID_DEPTH, view_depth
import pickle
from multiprocessing import Process, Value, Lock
from audiotest3 import stretch_audio, load_song
# constants
RIGHT = 0  # direction code: hand moving right
LEFT = 1   # direction code: hand moving left

# Parameters for Kinect
tilt = 0        # tilt angle (degrees) applied by body()
led = LED_OFF   # LED state applied by body(); LED_OFF comes from constants
MIN_DEPTH = 300   # depth display range passed to set_params — units per Kinect depth map (mm, presumably — TODO confirm)
MAX_DEPTH = 1200
BACK_DEPTH_FNAME = 'back_depth.pkl'  # written by calibrate(), read at import time
VERBOSE = 0      # when truthy, print per-frame direction votes
REDUCTION = 90   # depth margin (relative to the body) used to isolate the arms

# load the approximate depth of the background (produced by `--calibrate`);
# use `with` so the file handle is closed instead of leaked
with open(BACK_DEPTH_FNAME, 'r') as _back_f:
    BACK_DEPTH = pickle.load(_back_f)

# Parameters for conducting
THRESH = 4       # measurements in other direction before we change directions
DIST_THRESH = 2  # minimum per-frame horizontal movement (pixels) that counts as a vote
# use for testing the multiprocessing package
def test_print(args):
    """Debug helper: print the shared rate value once per second, forever.

    NOTE(review): relies on the module-level multiprocessing Value `r`
    created under __main__; `args` matches the Process(target=...) calling
    convention but is unused. (Removed a dead loop counter that was
    incremented and never read.)
    """
    while True:
        print(r.value)
        time.sleep(1)
def body(dev, ctx):
    """freenect.runloop callback: apply the configured LED state and tilt.

    dev -- freenect device handle; ctx -- freenect context (unused here).
    Reads the module-level `led` and `tilt` settings.
    """
    freenect.set_led(dev, led)
    freenect.set_tilt_degs(dev, tilt)
def filter_arms(depth):
    """Return a depth map with the background and torso masked out.

    depth -- 2-D depth array from the Kinect.
    Pixels at or beyond BACK_DEPTH (background) and pixels within REDUCTION
    of the average body depth (torso) are set to INVALID_DEPTH, leaving
    (approximately) only the arms.

    Fix: the original mutated the caller's array in place when masking the
    background; we now operate on a copy so `depth` is left untouched.
    """
    work = depth.copy()
    # filter out background
    work[work >= BACK_DEPTH] = INVALID_DEPTH
    # filter out body: average the remaining (foreground) depths
    body_depth = avg_depth(work)
    arms_depth = work.copy()
    arms_depth[work >= body_depth - REDUCTION] = INVALID_DEPTH
    # TODO: consider filtering out forearm
    return arms_depth
def calibrate():
    """Sample one depth frame and persist its average as the background depth.

    Run with no one in front of the Kinect. The value is pickled to
    BACK_DEPTH_FNAME and loaded at module import time as BACK_DEPTH.
    (Fixes: 'calibated' typo in the message; use `with` so the file is
    closed even if pickling raises.)
    """
    (depth, _) = get_depth()
    background_depth = avg_depth(depth)
    print('calibrated depth of background: %d' % background_depth)
    with open(BACK_DEPTH_FNAME, 'w') as f:
        pickle.dump(background_depth, f)
# Parse command line arguments
def get_args():
    """Build and parse the command-line interface for the conductor demo.

    Returns the argparse.Namespace with flags: init, calibrate, filename,
    kid, load, list. Echoes the parsed namespace for debugging.
    """
    parser = argparse.ArgumentParser(description='OpenKinect Symphony Conductor'
                                     + ' Demo')
    parser.add_argument('-i', '--init', action='store_true',
                        help='Adjusts Kinect to a good angle')
    ## calibrate background
    parser.add_argument('-c', '--calibrate', action='store_true',
                        help='Calibrate background depth. Be sure to run this with no one in view of the Kinect.')
    ## specify file in Music directory
    parser.add_argument('-f', '--filename')
    ## child mode
    parser.add_argument('-k', '--kid', action='store_true',
                        help='Toggle child mode')
    ## load a new song as pkl dump
    parser.add_argument('-l', '--load', action='store_true',
                        help='Load songs (to make them available)')
    parser.add_argument('-s', '--list', action='store_true',
                        help='List available songs')
    args = parser.parse_args()
    # echo the parsed options for debugging
    print(args)
    return args
if __name__ == '__main__':
    args = get_args()
    ## SETUP for command line options
    # -------------------------------------------------------------------------
    # -l/--load: import the named song into the library, then exit immediately.
    if args.load:
        load_song(args.filename)
        import sys
        sys.exit()
    # -s/--list: show songs found in 'mdata' and pick one interactively;
    # otherwise the song name comes from -f/--filename.
    if args.list:
        import os
        # List available songs
        songs = os.listdir('mdata')
        for i in range(len(songs)):
            print "%s: %s" % (i,songs[i])
        # User input to select song
        select = raw_input("Select a song by number: ")
        fname = songs[int(select)].split('.pkl')[0]
    else:
        fname = args.filename
    # -k/--kid: widen the arm-isolation depth margin (see filter_arms)
    if args.kid:
        REDUCTION = 100
        global REDUCTION  # NOTE(review): `global` at module scope is a no-op; looks vestigial
    #TODO fix this comment: make rate accessible to audio thread
    # -i/--init: run the freenect loop once so body() sets LED/tilt
    if args.init:
        freenect.runloop(body=body)
    # calculates average depth and stores it
    # use it for calibrating depth of background
    if args.calibrate:
        calibrate()
    ## SETUP for multiprocessing
    # -------------------------------------------------------------------------
    # share rate with other audio process
    r = Value('f', 1)
    # spawn audio process
    p = Process(target=stretch_audio, args=(r,fname,))
    p.start()
    ## INITIALIZATION for conducting
    # -------------------------------------------------------------------------
    # for image output
    set_params(MIN_DEPTH, MAX_DEPTH)
    # frame counter, used only to throttle the preview display below
    c = 0
    (depth,_) = get_depth()
    #TODO make class for state
    # Hand-tracking state. `y` is the current horizontal hand estimate,
    # `y_minus1` the previous one; everything starts at the frame's midpoint.
    y = depth.shape[1]/2
    y_minus1 = depth.shape[1]/2
    # accumulated per-direction motion votes
    r_votes = 0
    l_votes = 0
    # ring buffers of the last THRESH hand positions / timestamps per direction
    r_saved_y = [depth.shape[1]/2] * THRESH
    l_saved_y = [depth.shape[1]/2] * THRESH
    now = time.time()
    r_saved_time = [now] * THRESH
    l_saved_time = [now] * THRESH
    # ring-buffer write indices
    r_ind = 0
    l_ind = 0
    # estimated y-coordinates where the last right/left stroke ended
    r_end = 0
    l_end = depth.shape[1]
    # NOTE(review): r_time/l_time start in seconds but are later overwritten
    # with millisecond averages (time.time()*1000); the `now + 1000` offset
    # seeds a nonzero |r_time - l_time| for the first beat — confirm intent.
    r_time = now
    l_time = now + 1000
    direction = RIGHT # 0 - right, 1 - left
    y_left = depth.shape[1]/2
    y_right = depth.shape[1]/2
    ## CONDUCTING
    # -------------------------------------------------------------------------
    while True:
        # Get a fresh frame
        (depth,_) = get_depth()
        c+=1
        arms_depth = filter_arms(depth)
        # background filtered out, body filtered out, rgb views
        # (show the filtered view only every 20th frame)
        if c % 20 == 0:
            view_depth(normalize_depth(arms_depth))
        # estimate location of hand (only supports 1 hand for now):
        # centroid column of all pixels that survived filtering
        (hand_x, hand_y) = np.where(arms_depth < INVALID_DEPTH)
        # hand_x = hand_x.sum() / hand_x.shape[0] # x is vertical
        hand_y = hand_y.sum() / hand_y.shape[0] # y is horizontal
        #print "estimated hand position: (%d,%d)" % (hand_x,hand_y)
        # update history
        y_minus1 = y
        y = hand_y
        # guess which direction we're going in: a per-frame displacement of
        # at least DIST_THRESH pixels casts a vote; smaller moves are ignored
        if y-y_minus1 <= -DIST_THRESH: # Right
            r_votes+=1
            r_saved_y[r_ind] = float(y)
            # timestamps recorded in milliseconds
            r_saved_time[r_ind] = time.time()*1000
            r_ind=(r_ind+1) % THRESH
            if VERBOSE:
                print "RIGHT @ (%d)" % (hand_y)
        elif y-y_minus1 >= DIST_THRESH: # Left
            l_votes+=1
            l_saved_y[l_ind] = float(y)
            l_saved_time[l_ind] = time.time()*1000
            l_ind=(l_ind+1) % THRESH
            if VERBOSE:
                print "LEFT @ (%d)" % (hand_y)
        else:
            # no significant movement this frame
            continue
        # guess if there is a transition in direction: once both directions
        # have at least THRESH votes, the majority wins; stroke-end position
        # and time are averaged from the saved ring buffers
        if r_votes >= THRESH and l_votes >= THRESH:
            if r_votes > l_votes:
                r_end = ((min(r_saved_y) + min(l_saved_y))/2)
                r_time = (sum(r_saved_time) + sum(l_saved_time))/(2*THRESH)
                print "RIGHT --> LEFT @ %d \tdy=%d \t(dt=%d)" % (r_end, abs(l_end-r_end), abs(r_time-l_time))
                # NOTE(review): only the winning side's votes reset — confirm intentional
                r_votes = 0
                # publish the beat interval (ms) to the audio process
                r.value = abs(r_time-l_time)
            else:
                l_end = ((max(r_saved_y) + max(l_saved_y))/2)
                l_time = (sum(r_saved_time) + sum(l_saved_time))/(2*THRESH)
                print "LEFT --> RIGHT @ %d \tdy=%d \t(dt=%d)" % (l_end, abs(l_end-r_end), abs(r_time-l_time))
                l_votes = 0
                r.value = abs(r_time-l_time)