import cv2
import numpy as np

# Person, people, convertForKeras and queryNeuralNetwork are defined
# elsewhere in the repository; hypothetical sketches of these helpers
# follow each function below.


def whichPerson(img):
    # frames from cv2 capture are BGR, so convert from BGR to grayscale
    # (the original used COLOR_RGB2GRAY)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    personROI = convertForKeras(img)

    if len(people) == 0:
        # first detection ever: start person 0
        newPerson = Person(0)
        newPerson.addPrevious(personROI)
        people.append(newPerson)
        return newPerson.getIdentifier()
    else:
        # compare this crop against every stored frame of every known
        # person; the siamese network returns a distance (lower = more
        # similar), so track the global minimum
        closest = 100
        closestPerson = None
        closestFrame = None
        save = False
        for person in people:
            print(str(person.getIdentifier()) + " activity is "
                  + str(person.isActive()))
            if not person.isActive():
                save = True
            currentPerson = person.getIdentifier()
            for previousFrame in person.getPrevious():
                prediction = queryNeuralNetwork(personROI, previousFrame)
                print("person " + str(person.getIdentifier())
                      + " distance = " + str(prediction))
                if prediction < closest:
                    closest = prediction
                    closestPerson = currentPerson
                    closestFrame = previousFrame

        # if save:
        #     concat = np.concatenate((personROI, closestFrame), axis=1)
        #     concat *= 255
        #     cv2.imwrite("classificationsREID/" + str(closest) + " "
        #                 + str(closestPerson) + ".jpg", concat)

        # count the number of times these are different people and < 0.5,
        # or the same person and > 0.5
        if closest < 0.5:
            # distance below threshold: re-identified an existing person
            person = people[closestPerson]
            person.addPrevious(personROI)
            return person.getIdentifier()
        else:
            # no match close enough: register a new person
            nextPerson = Person(len(people))
            nextPerson.addPrevious(personROI)
            people.append(nextPerson)
            return nextPerson.getIdentifier()
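# ---------------------------------------------------------------------------
# NOT part of the original file: Person, convertForKeras and the module-level
# state below live elsewhere in the repository. This is a minimal hypothetical
# sketch, inferred purely from how whichPerson and runOnSingleCamera call them
# (getIdentifier, addPrevious, getPrevious, isActive, getColour). The 60x160
# network input size, the isActive rule, the window name and the minimum
# detection size are all guesses, not the original code.
# ---------------------------------------------------------------------------
import random

# assumed module-level state shared by whichPerson and runOnSingleCamera
people = []                        # every Person seen so far
cap = cv2.VideoCapture()           # opened per video in runOnSingleCamera
windowName = "Pedestrian Re-Identification"
padding = 10                       # percent to grow each contour box by
width, height = 64, 128            # minimum usable detection size (guess)


class Person:
    """One tracked pedestrian plus a gallery of past appearance crops."""

    def __init__(self, identifier):
        self.identifier = identifier
        self.previous = []         # gallery of preprocessed ROI crops
        # random display colour used by draw_detections
        self.colour = tuple(random.randint(0, 255) for _ in range(3))

    def getIdentifier(self):
        return self.identifier

    def getColour(self):
        return self.colour

    def addPrevious(self, roi):
        self.previous.append(roi)

    def getPrevious(self):
        return self.previous

    def isActive(self):
        # assumed: "active" while the person still holds gallery frames
        return len(self.previous) > 0


def convertForKeras(img):
    # assumed preprocessing for the siamese network: resize to a fixed
    # input size and scale pixels to [0, 1]; the 60x160 shape is a guess
    img = cv2.resize(img, (60, 160))
    return img.astype("float32") / 255.0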
def runOnSingleCamera(video_file):
    # cap, windowName, padding, width, height and people are module-level
    # globals shared with whichPerson
    cap.open(video_file)

    # create window by name (as resizable)
    cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(windowName, 640, 480)

    # set up background subtraction and the HoG pedestrian detector
    mog = cv2.createBackgroundSubtractorMOG2(
        history=2000, varThreshold=16, detectShadows=True)
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

    keep_processing = True
    # the frame size is unknown until the first read, so start with None
    # rather than a fixed-size zero image
    previousImg = None

    while keep_processing:
        ret, img = cap.read()
        if not ret:
            break

        # skip frames that are near-duplicates of the last processed frame
        if previousImg is None or not is_similar(img, previousImg):
            displayImage = img
            # capture frames are BGR, so convert from BGR to grayscale
            # (the original used COLOR_RGB2GRAY)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

            # foreground mask; threshold at 200 to discard the shadow
            # pixels that MOG2 marks with value 127
            fgmask = mog.apply(img)
            fgthres = cv2.threshold(fgmask.copy(), 200, 255,
                                    cv2.THRESH_BINARY)[1]
            fgdilated = cv2.dilate(
                fgthres,
                kernel=cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)),
                iterations=3)

            # findContours returns 3 values in OpenCV 3.x and 2 in 4.x;
            # taking the second-to-last element works for both (the
            # original searched fgthres, leaving the dilated mask unused)
            contours = cv2.findContours(fgdilated, cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)[-2]

            for i in contours:
                x, y, w, h = cv2.boundingRect(i)
                originalx, originaly, originalw, originalh = x, y, w, h

                # grow the box by `padding` percent on each side, clamped
                # to the image bounds
                x = int(max(0, x - padding / 100.0 * w))
                y = int(max(0, y - padding / 100.0 * h))
                w = int(min(img.shape[1] - 1, w + 2 * padding / 100.0 * w))
                h = int(min(img.shape[0] - 1, h + 2 * padding / 100.0 * h))

                if ((w >= width) and (h >= height)
                        and (x + w < img.shape[1])
                        and (y + h < img.shape[0])):
                    # cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
                    roi = img[y:y + h, x:x + w]

                    # perform HoG based pedestrian detection on the ROI
                    # (weights was named w originally, shadowing the box width)
                    found, weights = hog.detectMultiScale(
                        roi, winStride=(8, 8), padding=(8, 16), scale=1.05)

                    # keep only detections not nested inside another one
                    found_filtered = []
                    for ri, r in enumerate(found):
                        for qi, q in enumerate(found):
                            if ri != qi and inside(r, q):
                                break
                        else:
                            found_filtered.append(r)

                    # draw_detections(img, found)
                    # draw_detections(displayImage, found_filtered, colour, 3)

                    for x, y, w, h in found_filtered:
                        # w, h = int(0.15 * w), int(0.05 * h)
                        personROI = roi[y:y + h, x:x + w]
                        personROI = convertForKeras(personROI)

                        if len(people) == 0:
                            # first detection ever: start person 0
                            newPerson = Person(0)
                            newPerson.addPrevious(personROI)
                            people.append(newPerson)
                            draw_detections(displayImage, originalx,
                                            originaly, originalw, originalh,
                                            newPerson.getColour(), 3)
                        else:
                            # compare against every stored frame of every
                            # known person and keep the smallest distance
                            closest = 100
                            closestPerson = None
                            for person in people:
                                currentPerson = person.getIdentifier()
                                for previousFrame in person.getPrevious():
                                    prediction = queryNeuralNetwork(
                                        personROI, previousFrame)
                                    print(currentPerson, prediction)
                                    if prediction < closest:
                                        closest = prediction
                                        closestPerson = currentPerson

                            if closest < 0.5:
                                # re-identification: same person as before
                                person = people[closestPerson]
                                person.addPrevious(personROI)
                                draw_detections(displayImage, originalx,
                                                originaly, originalw,
                                                originalh,
                                                person.getColour(), 3)
                                print("REID")
                            else:
                                # no close match: register a new person
                                newPerson = Person(len(people))
                                newPerson.addPrevious(personROI)
                                people.append(newPerson)
                                draw_detections(displayImage, originalx,
                                                originaly, originalw,
                                                originalh,
                                                newPerson.getColour(), 3)
                                print("NEW")

            # display image
            cv2.imshow(windowName, displayImage)
            # remember the colour frame so the next similarity check
            # compares like with like (the original stored the grayscale)
            previousImg = displayImage

        # if user presses "x" then exit; waitKey(1) polls events for ~1ms
        key = cv2.waitKey(1) & 0xFF
        if key == ord('x'):
            keep_processing = False

    # close all windows
    cv2.destroyAllWindows()
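# ---------------------------------------------------------------------------
# Also NOT part of the original file: is_similar, inside and draw_detections
# are sketched below purely from how they are called above; inside() follows
# the helper of the same name in the OpenCV peopledetect.py sample.
# queryNeuralNetwork (the Keras siamese distance, returning < 0.5 for "same
# person") must still be supplied by the repository's model code before the
# usage example at the bottom will run; the video filename is illustrative.
# ---------------------------------------------------------------------------

def is_similar(frameA, frameB, threshold=5.0):
    # assumed: treat frames as "similar" when the mean absolute pixel
    # difference falls below a small threshold, so near-duplicate frames
    # can be skipped; mismatched shapes are never similar
    if frameA is None or frameB is None or frameA.shape != frameB.shape:
        return False
    return np.mean(cv2.absdiff(frameA, frameB)) < threshold


def inside(r, q):
    # True when rectangle r lies entirely inside rectangle q (both given
    # as x, y, w, h), as in the OpenCV HOG people-detection sample
    rx, ry, rw, rh = r
    qx, qy, qw, qh = q
    return rx > qx and ry > qy and rx + rw < qx + qw and ry + rh < qy + qh


def draw_detections(img, x, y, w, h, colour, thickness):
    # assumed: draw a single bounding box in the person's display colour
    cv2.rectangle(img, (x, y), (x + w, y + h), colour, thickness)


if __name__ == "__main__":
    # example usage; the filename is illustrative only
    runOnSingleCamera("pedestrians.avi")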