"""Motion detection and speed estimation from a video stream using OpenCV.

Moving regions are found by differencing each frame against a running-average
background model; per-object speed is then estimated by tracking Shi-Tomasi
corners with pyramidal Lucas-Kanade optical flow and accumulating per-track
displacement.

Usage: python objdet.py [<video source>]   (defaults to camera 0)
"""
import numpy as np
import cv2
from datetime import datetime
import time
import video
import scipy.spatial
from common import anorm2, draw_str
# Pyramidal Lucas-Kanade optical-flow parameters (cv2.calcOpticalFlowPyrLK).
lk_params = {
    "winSize": (15, 15),
    "maxLevel": 2,
    "criteria": (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03),
}

# Shi-Tomasi corner-detection parameters (cv2.goodFeaturesToTrack).
feature_params = {
    "maxCorners": 500,
    "qualityLevel": 0.3,
    "minDistance": 7,
    "blockSize": 7,
}
class App:
    """Motion detector with sparse optical-flow speed estimation.

    Reads frames from a capture source, maintains a running-average
    background model, extracts moving blobs by frame differencing, and
    tracks Shi-Tomasi corners with Lucas-Kanade optical flow to estimate
    a per-track displacement that is overlaid on the displayed frame.
    """

    def onChange(self, val):
        """Trackbar callback: let the user adjust the detection threshold live."""
        self.threshold = val

    def __init__(self, src, threshold=25, doRecord=True, showWindows=True):
        """Open the capture source and initialise detector state.

        src         -- camera index or video path (passed to video.create_capture)
        threshold   -- motion-detection threshold, adjustable via onChange
        doRecord    -- kept for interface compatibility; recording itself is
                       not implemented in this file
        showWindows -- when True, display the annotated frames in a window
        """
        self.doRecord = doRecord
        self.show = showWindows
        self.frame = None
        self.cap = video.create_capture(src)
        # Request a capture resolution (properties 3/4 = width/height).
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 2316)
        self.ret, self.frame = self.cap.read()  # grab one frame to initialise state
        self.frame_rate = self.cap.get(cv2.CAP_PROP_FPS)
        print(self.frame_rate)
        # numpy images are indexed (rows=height, cols=width); the original
        # transposed these and passed floats, which modern numpy rejects.
        height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.gray_frame = np.zeros((height, width, 1), np.uint8)
        self.average_frame = np.zeros((height, width, 3), np.float32)
        self.absdiff_frame = None
        self.previous_frame = None
        self.prev_gray = None  # previous motion mask; None until a frame is processed
        self.surface = width * height
        self.currentsurface = 0
        self.currentcontours = None
        self.threshold = threshold
        self.isRecording = False
        self.tracks = []       # point history per tracked feature
        self.tracks_dist = []  # [x, y, distance] overlays for the current frame
        self.track_len = 3     # max points kept per track
        self.frame_idx = 0
        self.detect_interval = 5  # re-detect features every N frames
        self.trigger_time = 0
        if showWindows:
            cv2.namedWindow("Image", cv2.WINDOW_AUTOSIZE)
            # cv2.createTrackbar("Detection treshold: ", "Image", self.threshold, 100, self.onChange)

    def run(self):
        """Main loop: read frames, detect motion, estimate speed, display."""
        started = time.time()
        print("started")
        print(started)
        while True:
            ret, frame = self.cap.read()
            if not ret or frame is None:
                # End of stream / camera failure; the original crashed here
                # on frame.copy() with frame == None.
                break
            currentframe = frame.copy()
            instant = time.time()
            self.processImage(currentframe)
            if not self.isRecording:
                if self.somethingHasMoved():
                    self.speedEstimation()
                cv2.drawContours(currentframe, self.currentcontours, -1, (0, 0, 255), 2)
                if self.show:
                    for dist in self.tracks_dist:
                        if dist[2] > 0:
                            # 9 * 5 // 30 == 1 pins the original Python 2
                            # integer division (plain / would give 1.5 on
                            # Python 3); presumably a pixels-per-interval to
                            # rate conversion -- TODO confirm formula.
                            draw_str(currentframe, (dist[0], dist[1]),
                                     str(dist[2] / (9 * 5 // 30)))
                    cv2.imshow("Image", currentframe)
            self.prev_gray = self.gray_frame
            self.frame_idx += 1
            c = cv2.waitKey(1) % 0x100
            if c == 27 or c == 10:  # quit on Esc (27) or Enter (10)
                break

    def processImage(self, curframe):
        """Update the background model and build a binary motion mask.

        Leaves the thresholded, dilated mask in self.gray_frame.
        """
        curframe = cv2.GaussianBlur(curframe, (5, 5), 0)  # suppress sensor noise
        # NOTE: the original compared `self.absdiff_frame == None`, which
        # raises ValueError once the attribute holds a numpy array.
        if self.absdiff_frame is None:
            # First frame: seed difference, previous and running-average images.
            self.absdiff_frame = curframe.copy()
            self.previous_frame = curframe.copy()
            # accumulateWeighted needs a float32 accumulator.
            self.average_frame = np.float32(curframe)
        else:
            cv2.accumulateWeighted(curframe, self.average_frame, 0.05)  # update background
        self.previous_frame = np.uint8(self.average_frame)  # back to 8-bit for absdiff
        self.absdiff_frame = cv2.absdiff(curframe, self.previous_frame)
        self.gray_frame = cv2.cvtColor(self.absdiff_frame, cv2.COLOR_BGR2GRAY)
        ret, self.gray_frame = cv2.threshold(self.gray_frame, 50, 255, cv2.THRESH_BINARY)
        # iterations must be passed by keyword: the third positional argument
        # of dilate/erode is dst, so the original 15/10 were misapplied.
        self.gray_frame = cv2.dilate(self.gray_frame, None, iterations=15)  # merge blobs
        self.gray_frame = cv2.erode(self.gray_frame, None, iterations=10)

    def somethingHasMoved(self):
        """Store contours of the motion mask and report motion.

        The contour-area thresholding was disabled upstream, so this always
        returns True; contours are kept only for drawing.
        """
        contours, hierarchy = cv2.findContours(
            self.gray_frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        self.currentcontours = contours
        return True

    def speedEstimation(self):
        """Track features with LK optical flow and accumulate per-track distance."""
        if self.frame_idx % self.detect_interval == 0:
            # Periodically (re)detect corners, masking out existing track heads.
            mask = np.zeros_like(self.gray_frame)
            mask[:] = 255
            for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                cv2.circle(mask, (x, y), 5, 0, -1)
            p = cv2.goodFeaturesToTrack(self.gray_frame, mask=mask, **feature_params)
            if p is not None:
                for x, y in np.float32(p).reshape(-1, 2):
                    self.tracks.append([(x, y)])
        # Guard on prev_gray: the original read it before run() ever assigned
        # it, raising AttributeError on the very first frame.
        if self.prev_gray is not None and len(self.tracks) > 0:
            img0, img1 = self.prev_gray, self.gray_frame
            p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
            # Forward-backward LK check: keep points whose round trip is < 1px.
            p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
            p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
            d = abs(p0 - p0r).reshape(-1, 2).max(-1)
            good = d < 1
            new_tracks = []
            self.tracks_dist = []
            for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                if not good_flag:
                    continue
                tr.append((x, y))
                dist = 0
                if len(tr) > self.track_len:
                    del tr[0]
                    # Sums the distance from every retained point to the
                    # track's second point (row [0] of the cdist matrix);
                    # presumably intended as total path length over
                    # consecutive pairs -- TODO confirm.
                    XA = np.reshape(tr[:9], (-1, 2))
                    XB = np.reshape(tr[1:], (-1, 2))
                    eu_dists = scipy.spatial.distance.cdist(XA, XB, 'euclidean')
                    for eu_dist in eu_dists:
                        dist += eu_dist[0]
                    self.tracks_dist.append([int(x), int(y), round(dist, 2)])
                new_tracks.append(tr)
            self.tracks = new_tracks
if __name__ == '__main__':
    print(__doc__)
    import sys
    # First CLI argument is the video source; default to camera index 0.
    try:
        video_src = sys.argv[1]
    except IndexError:  # was a bare except, which also swallowed SystemExit etc.
        video_src = 0
    App(video_src).run()