Code Example #1
File: HOG_NMS.py    Project: timshine/EagleEye
def read_video_stream():
    global outputFrame, lock
    # initialize the HOG descriptor/person detector
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    #hog.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
    #hog.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
    cv2.startWindowThread()

    # open webcam video stream
    capture = cv2.VideoCapture(0)

    while (True):
        # Capture frame-by-frame
        ret, frame = capture.read()
        # load the image and resize it to (1) reduce detection time
        # and (2) improve detection accuracy
        if ret == True:
            frame = imutils.resize(frame, width=min(1080, frame.shape[1]))
            orig = frame.copy()

            # detect people in the image
            (rects, weights) = hog.detectMultiScale(frame,
                                                    winStride=(12, 12),
                                                    padding=(14, 14),
                                                    scale=1.05)

            # draw the original bounding boxes
            for (x, y, w, h) in rects:
                cv2.rectangle(orig, (x, y), (x + w, y + h), (0, 0, 255), 2)

            # apply non-maxima suppression to the bounding boxes using a
            # fairly large overlap threshold to try to maintain overlapping
            # boxes that are still people
            rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
            # alternative: pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)
            pick = non_max_suppression_fast(rects, 0.65)

            # draw the final bounding boxes
            for (xA, yA, xB, yB) in pick:
                im = frame[yA:yB, xA:xB]
                #print((xA, yA, xB, yB))
                if detect_red(im, .1):
                    cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 0, 255), 2)
                else:
                    cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 0, 0), 2)

            # acquire lock, set the output frame
            with lock:
                outputFrame = frame.copy()
                (flag, encodedImage) = cv2.imencode(".jpg", outputFrame)

            # Display the resulting frame
            # cv2.imshow('Eye Sight (original)',orig)
            # cv2.imshow('Eye Sight',frame)
            # if cv2.waitKey(1) & 0xFF == ord('q'):
            #     break

            # yield the encoded frame only when one was successfully read and encoded
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + bytearray(encodedImage) +
                   b'\r\n')

    # When everything done, release the capture
    capture.release()
    # finally, close the window
    cv2.destroyAllWindows()
    cv2.waitKey(1)
Code Example #2
File: proyecto.py    Project: agmc13/Proyecto-integrado
class detection:
    def draw_detections(img, rects, thickness=1):
        for x, y, w, h in rects:
            pad_w, pad_h = int(0.15 * w), int(0.05 * h)
            # Draw the rectangle that is shown when a person is detected.
            cv2.rectangle(img, (x + pad_w, y + pad_h),
                          (x + w - pad_w, y + h - pad_h), (0, 0, 255), 2)
        # Report whether any person was drawn.
        return len(rects) > 0

    if __name__ == '__main__':
        # Create the HOG descriptor that takes care of detecting objects, in this case people.
        hog = cv2.HOGDescriptor()
        hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
        # Open the camera; the first captured frame serves as a reference.
        cap = cv2.VideoCapture(0)
        # Counters for detections and for the periodic reset below.
        counter = 0
        countdown = 0

        while True:
            # Start capturing the image from the camera.
            _, frame = cap.read()

            # Search for people with the given detection parameters.
            # scale must be greater than 1 so the image pyramid actually shrinks.
            found, w = hog.detectMultiScale(frame,
                                            winStride=(4, 4),
                                            padding=(8, 8),
                                            scale=1.05)
            # If anything matches, draw the rectangles and remember whether a person was found.
            person_found = draw_detections(frame, found)

            # imshow displays the window with the image being streamed.
            cv2.imshow('reconocimiento', frame)

            if person_found:
                counter += 1

            print(counter)
            # Pressing the "q" key ends the program.
            k = 0xFF & cv2.waitKey(1)
            if k == ord('q'):
                break

            # Once the counter passes the threshold, send a text message via Telegram.
            if (counter > 5):
                r = requests.post(
                    'https://api.telegram.org/bot' + token + '/sendMessage',
                    data={
                        'chat_id':
                        ID_canal,
                        'text':
                        'conflicto en el area del recreo a las ' +
                        time.strftime("%I:%M:%S") + ' horas'
                    })
                print(r.text)
                counter = 0

            # Periodically reset the counter variable.
            if (countdown < 7):
                countdown += 1
            else:
                counter = 0
                countdown = 0

        cap.release()
        cv2.destroyAllWindows()
Code Example #3
def main():
    try:
        # initialize leds
        gpio.setmode(gpio.BCM)
        gpio.setup(17, gpio.OUT)
        gpio.setup(27, gpio.OUT)
        gpio.output(27, True)

        # initialize the HOG descriptor/person detector
        camera = PiCamera()
        camera.hflip = True
        camera.vflip = True
        camera.resolution = (320, 240)
        camera.framerate = 32
        rawCapture = PiRGBArray(camera, size=(320, 240))
        time.sleep(0.25)
        hog = cv2.HOGDescriptor()
        hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

        Threshold = 0
        features_number = 0

        tracked_features = None
        detected = False

        for frame in camera.capture_continuous(rawCapture,
                                               format="bgr",
                                               use_video_port=True):

            if not detected:  # detection block
                gpio.output(17, False)
                Threshold = 0
                unchangedPointsMap = dict()

                current_frame = frame.array
                current_frame = imutils.resize(current_frame, width=300)
                current_frame_copy = current_frame.copy()
                current_frame = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)

                # detect people in the image
                (rects, weights) = hog.detectMultiScale(current_frame,
                                                        winStride=(4, 4),
                                                        padding=(8, 8),
                                                        scale=1.5)

                # draw the original bounding boxes
                for i in range(len(rects)):
                    x, y, w, h = rects[i]
                    rects[i][0] = x + 15
                    rects[i][1] = y + 40
                    rects[i][2] = w - 30
                    rects[i][3] = h - 40

                for (x, y, w, h) in rects:
                    cv2.rectangle(current_frame_copy, (x, y), (x + w, y + h),
                                  (0, 0, 255), 2)

                # Filter boxes
                rects = np.array([[x, y, x + w, y + h]
                                  for (x, y, w, h) in rects])
                pick = non_max_suppression(rects,
                                           probs=None,
                                           overlapThresh=0.65)

                # draw the final bounding boxes
                for (xA, yA, xB, yB) in pick:
                    cv2.rectangle(current_frame, (xA, yA), (xB, yB),
                                  (0, 255, 0), 2)

                print("{} original boxes, {} after suppression".format(
                    len(rects), len(pick)))

                if len(rects) > 0:
                    features, height_from_floor = find_features(
                        current_frame, rects[0], 0)
                    #print(features)
                    detected = True
                    gpio.output(17, True)

            if detected:  # Tracking block
                if Threshold == 0:
                    features_number = len(features)
                    Threshold = features_number * threshold_percent

                #print ("Threshold" + str(Threshold))
                if features_number < Threshold:
                    print("Features less than threshold")
                    detected = False
                else:
                    rawCapture.truncate(0)
                    next_frame = frame.array
                    next_frame = imutils.resize(next_frame, width=300)

                    current_frame_copy = next_frame.copy()
                    next_frame = cv2.cvtColor(next_frame, cv2.COLOR_BGR2GRAY)

                    #-----------Tracking using LK ---------------------------

                    try:
                        features = np.array(features)

                        (tracked_features, status,
                         feature_errors) = cv2.calcOpticalFlowPyrLK(
                             current_frame, next_frame, features, None,
                             **lk_params)

                        arr_x = []
                        arr_y = []

                        for i in range(len(tracked_features)):
                            f = tracked_features[i]
                            x = f[0][0]
                            y = f[0][1]

                            arr_x.append(x)
                            arr_y.append(y)

                        arr_x = sorted(arr_x)
                        arr_y = sorted(arr_y)

                        mid = len(arr_x) // 2
                        X = arr_x[mid]
                        mid = len(arr_y) // 2
                        Y = arr_y[mid]

                        print(X)
                        #bus.write_i2c_block_data(address, X & 0xff, ((i >> 8) & 0xff,))
                        #bus.write_byte_data(address, int(X) & 0xff, (int(X) >> 8) & 0xff)
                        Q = int(X)
                        msb = Q // 256
                        lsb = Q % 256
                        bus.write_i2c_block_data(address, msb, [lsb])
                        new_feature_number = 0
                        temp_set_number = []
                        temp_distance = []
                        j = 0

                        # print ("Height_from_floor" + str(height_from_floor))
                        # print ("num" + str(features_number))
                        # print ("Status" + str(status))
                        # print ("Status[0]   " + str(status[0]))
                        # print ("Status[1]   " + str(status[1]))
                        # print ("Status[0][0]   " + str(status[0][0]))

                        for i in range(features_number):
                            if status[i][0] == 1:
                                new_feature_number += 1
                                # temp_distance[j] = height_from_floor[i]
                                j += 1

                    # height_from_floor = []

                    # print ("Here")

                    # for i in range(features_number):
                    # 	height_from_floor.append(temp_distance[i])

                    # print ("Here2")

                        features_number = new_feature_number
                        #print("Features_num" + str(features_number))
                        features = []

                        for i in range(features_number):
                            features.append(tracked_features[i])

                        features = np.array(features)
                        tracked_features = []
                        current_frame = next_frame.copy()

                    except Exception as e:
                        raise e

                    #-------Compute Distance --------------------
                    # status, v = scaled_people_floor(features_number, features, height_from_floor)

                    # if status:
                    #     distance = compute_distance(v)
                    #     print (distance)

                    #-------Showing Points ------------------------
                    for i in range(features_number):
                        cv2.circle(current_frame_copy, tuple(features[i][0]),
                                   3, 255, -1)

                    cv2.circle(current_frame_copy, (X, Y), 5, (0, 0, 255), -1)

            # show the output images
            cv2.imshow("HOG", current_frame_copy)
            key = cv2.waitKey(1) & 0xFF
            rawCapture.truncate(0)

            if key == ord("w"):
                break
    except (KeyboardInterrupt, SystemExit):
        gpio.output(27, False)
        gpio.output(17, False)
        camera.close()  # PiCamera objects are closed rather than released
        cv2.destroyAllWindows()
        raise
Code Example #4
 def __init__(self):
     # initialize detector
     self.hog = cv2.HOGDescriptor()
     self.hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
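This fragment only constructs the detector. A minimal sketch of how such a class is typically completed and used (the PersonDetector name and detect method below are illustrative assumptions, not part of the original project):

import cv2

class PersonDetector:
    def __init__(self):
        # initialize the HOG descriptor/person detector
        self.hog = cv2.HOGDescriptor()
        self.hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

    def detect(self, frame):
        # run the multi-scale sliding-window detector and return (x, y, w, h) boxes
        rects, weights = self.hog.detectMultiScale(frame,
                                                   winStride=(8, 8),
                                                   padding=(16, 16),
                                                   scale=1.05)
        return rects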
Code Example #5
def run_kalman_filter(kf, imgs_dir, noise, sensor, save_frames={},
                      template_loc=None):

    imgs_list = [f for f in os.listdir(imgs_dir)
                 if f[0] != '.' and f.endswith('.jpg')]
    imgs_list.sort()

    frame_num = 0

    if sensor == "hog":
        hog = cv2.HOGDescriptor()
        hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

    elif sensor == "matching":
        frame = cv2.imread(os.path.join(imgs_dir, imgs_list[0]))
        template = frame[template_loc['y']:
                         template_loc['y'] + template_loc['h'],
                         template_loc['x']:
                         template_loc['x'] + template_loc['w']]

    else:
        raise ValueError("Unknown sensor name. Choose between 'hog' or "
                         "'matching'")

    for img in imgs_list:

        frame = cv2.imread(os.path.join(imgs_dir, img))

        # Sensor
        if sensor == "hog":
            (rects, weights) = hog.detectMultiScale(frame, winStride=(4, 4),
                                                    padding=(8, 8), scale=1.05)

            if len(weights) > 0:
                max_w_id = np.argmax(weights)
                z_x, z_y, z_w, z_h = rects[max_w_id]

                z_x += z_w // 2
                z_y += z_h // 2

                z_x += np.random.normal(0, noise['x'])
                z_y += np.random.normal(0, noise['y'])

        elif sensor == "matching":
            corr_map = cv2.matchTemplate(frame, template, cv2.TM_SQDIFF)
            z_y, z_x = np.unravel_index(np.argmin(corr_map), corr_map.shape)

            z_w = template_loc['w']
            z_h = template_loc['h']

            z_x += z_w // 2 + np.random.normal(0, noise['x'])
            z_y += z_h // 2 + np.random.normal(0, noise['y'])

        x, y = kf.process(z_x, z_y)

        if False:  # For debugging, it displays every frame
            out_frame = frame.copy()
            cv2.circle(out_frame, (int(z_x), int(z_y)), 20, (0, 0, 255), 2)
            cv2.circle(out_frame, (int(x), int(y)), 10, (255, 0, 0), 2)
            cv2.rectangle(out_frame, (int(z_x) - z_w // 2, int(z_y) - z_h // 2),
                          (int(z_x) + z_w // 2, int(z_y) + z_h // 2),
                          (0, 0, 255), 2)

            cv2.imshow('Tracking', out_frame)
            cv2.waitKey(1)

        # Render and save output, if indicated
        if frame_num in save_frames:
            frame_out = frame.copy()
            cv2.circle(frame_out, (int(x), int(y)), 10, (255, 0, 0), 2)
            cv2.imwrite(save_frames[frame_num], frame_out)

        # Update frame number
        frame_num += 1
        if frame_num % 20 == 0:
            print('Working on frame %d' % frame_num)
Code Example #6
def detect(input, output, skip_frames, win_stride, scale, resize, visualize):
    if input is None:
        vs = cv2.VideoCapture(0)
        time.sleep(2.0)
    else:
        vs = cv2.VideoCapture(input)

    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    writer_pos = None
    writer_neg = None
    prev = False
    count = 0

    while True:
        ret, img = vs.read()

        if input is not None and img is None:
            break

        # the writer frame size must match the resized frames written below
        frame_width = int(vs.get(cv2.CAP_PROP_FRAME_WIDTH) * resize)
        frame_height = int(vs.get(cv2.CAP_PROP_FRAME_HEIGHT) * resize)

        if output is not None and writer_pos is None:
            fourcc = cv2.VideoWriter_fourcc(*"XVID")
            writer_pos = cv2.VideoWriter(output + "det.avi", fourcc,
                                         vs.get(cv2.CAP_PROP_FPS),
                                         (frame_width, frame_height), True)

        if output is not None and writer_neg is None:
            fourcc = cv2.VideoWriter_fourcc(*"XVID")
            writer_neg = cv2.VideoWriter(output + "ndet.avi", fourcc,
                                         vs.get(cv2.CAP_PROP_FPS),
                                         (frame_width, frame_height), True)

        if count % skip_frames == 0:
            frame = cv2.resize(img, (0, 0), fx=resize, fy=resize)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            (rects, weights) = hog.detectMultiScale(frame,
                                                    winStride=win_stride,
                                                    padding=(8, 8),
                                                    scale=scale)

            for (x, y, w, h) in rects:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

            if not len(rects) == 0 and writer_pos is not None:
                writer_pos.write(frame)
                prev = True
            elif len(rects) == 0 and writer_neg is not None:
                writer_neg.write(frame)
                prev = False
        else:
            frame = cv2.resize(img, (0, 0), fx=resize, fy=resize)

            if prev and writer_pos is not None:
                writer_pos.write(frame)
            elif writer_neg is not None:
                writer_neg.write(frame)

        count += 1
        if visualize:
            cv2.imshow("frame", frame)
            cv2.waitKey(10)

    if writer_pos is not None:
        writer_pos.release()
    if writer_neg is not None:
        writer_neg.release()
    # vs is a cv2.VideoCapture in both cases, so release it either way
    vs.release()

    cv2.destroyAllWindows()
Code Example #7
def main():
    winH = 128
    winW = 64

    # load the SVM model
    model = cv2.ml.SVM_load(".//model_linear_with_hard_neg_mining_128_64.xml")

    # Make an HOG Descriptor
    # HOG descriptor
    winSize = (64, 128)
    blockSize = (16, 16)
    blockStride = (8, 8)
    cellSize = (8, 8)
    nbins = 9
    hog = cv2.HOGDescriptor(winSize, blockSize, blockStride, cellSize, nbins)
    hog2 = cv2.HOGDescriptor(winSize, blockSize, blockStride, cellSize, nbins)
    hog2.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

    print("Beginning testing pedestrian detection algorithm")
    while True:
        #name_of_test_image = input("Enter the name of test image with extension:")
        root = tk.Tk()
        root.withdraw()
        file_path = filedialog.askopenfilename()
        img = cv2.imread(file_path)
        #img = utils.resize(img, width=min(350, img.shape[1]))
        img = utils.resize(img, width=350)
        if img is None:
            continue

        # Variable for box
        boxes = np.empty((1, 4), dtype=int)

        start = datetime.datetime.now()
        for (resized_img, overall_scale) in utils.pyramid(img, 1.2):
            for (x, y, window) in utils.sliding_window(resized_img, (4, 4),
                                                       (winW, winH)):
                #if window.shape[0] != winH or window.shape[1] != winW:
                #    continue
                # compute hog feature
                h = hog.compute(window)

                label = model.predict(h.reshape(-1, h.shape[0]))
                if label[1] == 1:
                    temp = overall_scale * np.array(
                        [[x, y, x + winW, y + winH]], dtype=int)
                    boxes = np.concatenate((boxes, temp))

        boxes = boxes[1:, :]

        filtered_boxes = utils.non_max_suppression(boxes,
                                                   probs=None,
                                                   overlapThresh=0.60)
        print(filtered_boxes)
        print("[INFO] custom detector took: {}s".format(
            (datetime.datetime.now() - start).total_seconds()))

        clone = img.copy()
        utils.draw_detections_using_corners(img, filtered_boxes, 3)

        cv2.imshow('img using my detector', img)

        start = datetime.datetime.now()
        found, w = hog2.detectMultiScale(clone,
                                         winStride=(4, 4),
                                         padding=(16, 16),
                                         scale=1.2)
        print(found)
        utils.draw_detections(clone, found)
        cv2.imshow('img using default detector', clone)
        print("[INFO] default detector took: {}s".format(
            (datetime.datetime.now() - start).total_seconds()))

        ch = cv2.waitKey()

        cv2.imwrite('my_custom_detector.jpg', img)
        cv2.imwrite('openCV_detector.jpg', clone)
Code Example #8
def getROI_Array(img):
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    hogParams = {'winStride': (8, 8), 'padding': (16, 16), 'scale': 1.05}
    (rect, weights) = hog.detectMultiScale(img, **hogParams)  
    return rect
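As a usage sketch (not part of the original snippet), the rectangles returned by getROI_Array are typically filtered with non-maxima suppression and drawn, mirroring the other examples on this page; the draw_people helper name and 0.65 threshold are assumptions for illustration:

import cv2
import numpy as np
from imutils.object_detection import non_max_suppression

def draw_people(img):
    # detect people, merge overlapping boxes, and draw the survivors
    rects = getROI_Array(img)
    boxes = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    pick = non_max_suppression(boxes, probs=None, overlapThresh=0.65)
    for (xA, yA, xB, yB) in pick:
        cv2.rectangle(img, (xA, yA), (xB, yB), (0, 255, 0), 2)
    return img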
Code Example #9
 def __init__(self, preview):
     self.hog = cv2.HOGDescriptor()
     self.hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
Code Example #10
def main():
    # uncomment only one
    #
    logging.basicConfig(format='%(message)s', level=logging.INFO)
    #logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)

    # load camera configuration
    try:
        settings = config.camera
    except AttributeError:
        settings = {}

    # send counters to Unix Domain Socket
    import uds

    # use Raspberry Pi Camera
    #
    camera = PiCamera()
    camera.resolution = (320, 240)
    rawCapture = PiRGBArray(camera, size=(320, 240))
    time.sleep(0.1)

    camera.capture(rawCapture, format="bgr")
    image = rawCapture.array
    prev = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    np.zeros_like(prev)
    rawCapture.truncate(0)

    # initialise model for human faces
    #
    face_cascade = cv2.CascadeClassifier('models/haarcascade_frontalface_alt.xml')

    # initialise model for standing humans
    #
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

    # compute counters every 2 seconds
    #
    flagone = datetime.datetime.now()
    while True:

        flagtwo = datetime.datetime.now()
        elapsed = (flagtwo - flagone).total_seconds()

        if elapsed < 2:
            time.sleep(0.1)
            continue
        flagone = datetime.datetime.now()

        people_count = 0
        people_moves = 0
        people_faces = 0

        # capture an image
        #
        mask = np.zeros(image.shape[:2], dtype='uint8')
        camera.capture(rawCapture, format='bgr')
        image = rawCapture.array
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        rawCapture.truncate(0)

        # detect human faces
        #
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        people_faces = len(faces)

        # detect human beings
        #
        (rects, _) = hog.detectMultiScale(image,
                                          winStride=(4, 4),
                                          padding=(8, 8),
                                          scale=1.07)
        rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
        pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)
        people_count = len(pick)

        for(xA, yA, xB, yB) in pick:
            cv2.rectangle(mask, (xA, yA), (xB, yB), 255, -1)

        # count moving people
        #
        nextim = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        nextim = cv2.bitwise_and(nextim, nextim, mask=mask)
        flow = cv2.calcOpticalFlowFarneback(prev, nextim, None, 0.5, 1, 3, 15, 3, 5, 1)
        mag, _ = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        mag = mag * 0.8
        prev = nextim

        for(xA, yA, xB, yB) in pick:
            try:
                m = np.median(mag[yA:yB, xA:xB])
    #            print m
                if m > 1.6:
                    people_moves += 1
            except RuntimeWarning:
                pass

        # push counters via Unix Domain Socket
        #
        message = '%s %d %d %d' % (settings.get('id', 'myCamera'),
                                   people_count,
                                   people_moves,
                                   people_faces)
        uds.push(message)
Code Example #11
def main():
    # initialize the HOG descriptor/person detector
    camera = cv2.VideoCapture(0)
    time.sleep(0.25)
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

    Threshold = 0
    features_number = 0

    while True:  # main loop

        tracked_features = None

        while True:  # detection loop, loop over the images

            unchangedPointsMap = dict()

            # load the image and resize it to (1) reduce detection time
            # and (2) improve detection accuracy
            (grabbed, current_frame) = camera.read()
            current_frame = imutils.resize(current_frame, width=300)
            current_frame_copy = current_frame.copy()
            current_frame = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)

            # detect people in the image
            (rects, weights) = hog.detectMultiScale(current_frame,
                                                    winStride=(4, 4),
                                                    padding=(8, 8),
                                                    scale=1.5)

            # draw the original bounding boxes

            for i in range(len(rects)):
                x, y, w, h = rects[i]
                rects[i][0] = x + 20
                rects[i][1] = y + 10
                rects[i][2] = w - 30
                rects[i][3] = h - 10

            for (x, y, w, h) in rects:
                cv2.rectangle(current_frame_copy, (x, y), (x + w, y + h),
                              (0, 0, 255), 2)

            # apply non-maxima suppression to the bounding boxes using a
            # fairly large overlap threshold to try to maintain overlapping
            # boxes that are still people
            rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
            pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)

            # draw the final bounding boxes
            for (xA, yA, xB, yB) in pick:
                cv2.rectangle(current_frame, (xA, yA), (xB, yB), (0, 255, 0),
                              2)

            print("{} original boxes, {} after suppression".format(
                len(rects), len(pick)))

            if len(rects) > 0:
                features = find_features(current_frame, rects[0], 0)
                print("NUM" + str(features_number))
                break

            # cv2.imshow("HOG", current_frame_copy)
            # key = cv2.waitKey(1) & 0xFF

            # if key == ord("w"):
            # 	break

        features_number = len(features)
        Threshold = features_number * threshold_percent

        while True:  # Tracking loop

            #print ("Threshold" + str(Threshold))
            if features_number < Threshold:
                print("Features less than threshold")
                break
            else:
                (grabbed, next_frame) = camera.read()
                if not grabbed:
                    print("Camera read failed")
                    return
                next_frame = imutils.resize(next_frame, width=300)
                current_frame_copy = next_frame.copy()
                next_frame = cv2.cvtColor(next_frame, cv2.COLOR_BGR2GRAY)

                #-----------Tracking using LK ---------------------------

                try:
                    features = np.array(features)
                    (tracked_features, status,
                     feature_errors) = cv2.calcOpticalFlowPyrLK(
                         current_frame, next_frame, features, None,
                         **lk_params)

                    arr_x = []
                    arr_y = []

                    for i in range(len(tracked_features)):
                        f = tracked_features[i]
                        x = f[0][0]
                        y = f[0][1]

                        arr_x.append(x)
                        arr_y.append(y)

                    arr_x = sorted(arr_x)
                    arr_y = sorted(arr_y)

                    mid = len(arr_x) // 2
                    X = arr_x[mid]
                    mid = len(arr_y) // 2
                    Y = arr_y[mid]
                    print("X Value " + str(X))
                    print("Y Value " + str(Y))
                    bus.write_byte(address, int(X))
                    bus.write_byte(address, int(Y))

                    new_feature_number = 0
                    temp_set_number = []
                    temp_distance = []
                    j = 0
                    for i in range(features_number):
                        if status[i] == 1:
                            new_feature_number += 1
                            j += 1

                    features_number = new_feature_number
                    features = []

                    for i in range(features_number):
                        features.append(tracked_features[i])

                    features = np.array(features)
                    tracked_features = []
                    current_frame = next_frame.copy()
                except Exception as e:
                    # bus.write_byte(address, 0)
                    # bus.write_byte(address, 0)
                    raise e

                #-------Showing Points ------------------------
                for i in range(features_number):
                    #print ("features " + str(features[i]))
                    cv2.circle(current_frame_copy, tuple(features[i][0]), 3,
                               255, -1)

                cv2.circle(current_frame_copy, (X, Y), 5, (0, 0, 255), -1)

            # show the output images
            cv2.imshow("HOG", current_frame_copy)
            key = cv2.waitKey(1) & 0xFF

            if key == ord("w"):
                break
Code Example #12
def svmdetectperson(img):
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    person, w = hog.detectMultiScale(img)
    return person
Code Example #13
def main():
    # initialize the HOG descriptor/person detector
    camera = cv2.VideoCapture(0)
    time.sleep(0.25)
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

    Threshold = 0
    features_number = 0

    while True:  # main loop

        tracked_features = None

        while True:  # detection loop, loop over the images

            unchangedPointsMap = dict()

            # load the image and resize it to (1) reduce detection time
            # and (2) improve detection accuracy
            (grabbed, current_frame) = camera.read()
            current_frame = imutils.resize(current_frame, width=300)
            current_frame_copy = current_frame.copy()
            current_frame = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)

            # detect people in the image
            (rects, weights) = hog.detectMultiScale(current_frame,
                                                    winStride=(4, 4),
                                                    padding=(8, 8),
                                                    scale=1.5)

            # draw the original bounding boxes

            for i in range(len(rects)):
                x, y, w, h = rects[i]
                rects[i][0] = x + 15
                rects[i][1] = y + 40
                rects[i][2] = w - 30
                rects[i][3] = h - 20

            for (x, y, w, h) in rects:
                cv2.rectangle(current_frame_copy, (x, y), (x + w, y + h),
                              (0, 0, 255), 2)

            # apply non-maxima suppression to the bounding boxes using a
            # fairly large overlap threshold to try to maintain overlapping
            # boxes that are still people
            rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
            pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)

            # draw the final bounding boxes
            for (xA, yA, xB, yB) in pick:
                cv2.rectangle(current_frame, (xA, yA), (xB, yB), (0, 255, 0),
                              2)

            print("{} original boxes, {} after suppression".format(
                len(rects), len(pick)))

            # 	if len(rects) > 0:
            # 		features = find_features(current_frame, rects[0], 0)
            # 		print("NUM" + str(features_number))
            # 		break

            # 	# cv2.imshow("HOG", current_frame_copy)
            # 	# key = cv2.waitKey(1) & 0xFF

            # 	# if key == ord("w"):
            # 	# 	break

            # features_number = len(features)
            # Threshold = features_number * threshold_percent

            # while True: # Tracking loop

            # 	#print ("Threshold" + str(Threshold))
            # 	if features_number < Threshold:
            # 		print ("Features less than threshold")
            # 		break
            # 	else:
            # 		(grabbed, next_frame) = camera.read()
            # 		next_frame = imutils.resize(next_frame, width = 300)
            # 		if not grabbed:
            # 			print ("Camera read failed")
            # 			return
            # 		current_frame_copy = next_frame.copy()
            # 		next_frame = cv2.cvtColor(next_frame, cv2.COLOR_BGR2GRAY)

            # 			#-----------Tracking using LK ---------------------------

            # 			try:
            # 				features = np.array(features)
            # 				#print("Features" + str(features))
            # 				(tracked_features, status, feature_errors) = cv2.calcOpticalFlowPyrLK(current_frame, next_frame, features, None, **lk_params)
            # 			#print("TEST")
            # 			# print("KEYS" + str(unchangedPointsMap.keys()))
            # 			# for i in range(len(tracked_features[0])):
            # 			# 	f = tracked_features[0][i]
            # 			# 	x = round(f[0])
            # 			# 	y = round(f[1])
            # 			# 	print("x and y" + str((x,y)))
            # 			# 	if (x,y) in unchangedPointsMap.keys():
            # 			# 		unchangedPointsMap[(x,y)] += 1
            # 			# 		print("ADDED" + str(unchangedPointsMap[(x,y)]))
            # 			# 		if unchangedPointsMap[(x,y)] == 30:
            # 			# 			print ("BEFORE" + str(tracked_features[0]))
            # 			# 			tracked_features = np.delete(tracked_features,i,0)
            # 			# 			unchangedPointsMap.pop((x,y))
            # 			# 			print ("AFTER" + str(tracked_features[0]))
            # 			# 	else:
            # 			# 		unchangedPointsMap[(x,y)] = 0

            # 			# print("BEFORE" + str(tracked_features))
            # 			# tracked_features[tracked_features[:,0].argsort()]
            # 			# print("AFTER" + str(tracked_features))

            # 			arr_x = []
            # 			arr_y = []

            # 			for i in range(len(tracked_features)):
            # 				f = tracked_features[i]
            # 				x = f[0][0]
            # 				y = f[0][1]

            # 				arr_x.append(x)
            # 				arr_y.append(y)

            # 			print("X_arr" + str(arr_x))
            # 			print("Y_arr" + str(arr_y))
            # 			print ("X SORTED " + str(sorted(arr_x)))
            # 			print ("Y SORTED " + str(sorted(arr_y)))

            # 			arr_x = sorted(arr_x)
            # 			arr_y = sorted(arr_y)

            # 			mid = len(arr_x)/2
            # 			X = arr_x[mid]
            # 			mid = len(arr_y)/2
            # 			Y = arr_y[mid]

            # 			new_feature_number = 0
            # 			temp_set_number = []
            # 			temp_distance = []
            # 			j = 0
            # 			for i in range(features_number):
            # 				if status[i] == 1:
            # 					new_feature_number += 1
            # 					#temp_set_number.append()
            # 					#temp_distance.append(height_from_floor[i])
            # 					j += 1

            # 			#height_from_floor = temp_distance
            # 			features_number = new_feature_number
            # 			#print("Features_num" + str(features_number))
            # 			features = []

            # 			for i in range(features_number):
            # 				features.append(tracked_features[i])

            # 			features = np.array(features)
            # 			tracked_features = []
            # 			current_frame = next_frame.copy()
            # 			except Exception, e:
            # 				raise e

            # 			#-------Showing Points ------------------------
            # 			for i in range(features_number):
            # 				# print ("features " + str(features))
            # 				# print ("features0 " + str(features[0]))
            # 				# print ("features00 " + str(features[0][0]))
            # 				# print ("features000 " + str(features[0][0][0]))

            # 				#print ("features " + str(features[i]))
            # 				cv2.circle(current_frame_copy,
            # 						   tuple(features[i][0]),
            # 						   3,
            # 						   255,
            # 						   -1)

            # 			cv2.circle(current_frame_copy,
            # 						(X,Y),
            # 						5,
            # 						(0,0,255),
            # 						-1)

            # show the output images
            cv2.imshow("HOG", current_frame_copy)
            key = cv2.waitKey(1) & 0xFF

            if key == ord("w"):
                break

    camera.release()
    cv2.destroyAllWindows()
Code Example #14
File: main.py    Project: waggle-sensor/summer2017
def main():
    # ------------------------------ Ped Detection Initialization ------------------------------------------------
    # HOG descriptor
    win_W = 64
    win_H = 128
    winSize = (win_W, win_H)
    blockSize = (16, 16)
    blockStride = (8, 8)
    cellSize = (8, 8)
    nbins = 9
    hog1 = cv2.HOGDescriptor(winSize, blockSize, blockStride, cellSize, nbins)
    hog1.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

    winSize = (int(win_W/2), int(win_H/2))
    blockSize = (8, 8)
    blockStride = (4, 4)
    cellSize = (4, 4)
    nbins = 9
    hog2 = cv2.HOGDescriptor(winSize, blockSize, blockStride, cellSize, nbins)
    hog2.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

    hog = [hog1, hog2]
    # -------------------------------------------------------------------------------------------------------------

    # Keep history of deleted pedestrians
    listOfDeletedPedestrians = list()
    totalPedestriansToRemember = 20

    # ------------------------------------------------- Parameters ------------------------------------------------
    # scale factor for reduction of box size for finding initial histogram
    scaleFac = 0.6
    # scale factor for detection window size
    detectScaleUp = 1.6
    # Maximum speed of each window object (in units of pixels per frame)
    maxSpeed = 5
    # height to width ratio to purge windows
    h_to_w = 1.3
    # Kernels for morphological operations
    kSize = 4
    kernelO = np.ones((kSize, kSize), np.uint8)
    kernelC = np.ones((kSize, kSize), np.uint8)
    # Weight of overlap vs weight of correlation
    overlapWeightage = 0.9
    # Overlap threshold
    overlapThresh = 0.50
    # Appearance Model Threshold
    corrThresh = 0.10
    # Correlation threshold for matching with old windows
    # corrThreshForOldWins = 0.70
    # Threshold for discarding new boxes
    threshForDiscarding = 0.40
    # Detection Rate in units of frames per detection
    detectionRate = 10
    # pixel movement threshold
    pixelMovThresh = 50

    # Window deletion factor
    win_padding = 10

    # List of window objects
    list_of_windows = list()
    counter = 0

    # Capture the video and set the frame size
    cap = cv2.VideoCapture("C:\\Users\\Zeeshan Nadir\\Documents\\Argonne\\Pedestrian_Detection_Tracking\\data\\towncenter.avi")
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    frame_width = 800
    frame_height = 450
    frame_area = frame_height * frame_width

    # KLT Parameters
    lk_params = dict(winSize=(15, 15),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

    feature_params = dict(maxCorners=100,
                          qualityLevel=0.001,
                          minDistance=3,
                          blockSize=5)

    #frame_height = 360
    #frame_width = 640
    #cap.set(cv2.CAP_PROP_FRAME_WIDTH, frame_width)
    #cap.set(cv2.CAP_PROP_FRAME_HEIGHT, frame_height)
    #frame_width =  int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    #frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # Video Writer object
    out = cv2.VideoWriter(".\\data\\results12.avi", -1, fps, (frame_width, frame_height))

    # create background subtraction object
    fgbg = cv2.createBackgroundSubtractorMOG2()
    list_of_new_boxes = list()
    while True:
        counter += 1
        print("Frame = ", counter, "Windows =", len(list_of_windows))
        # -------------------------- grab the current frame ----------------------------------------------------
        ret, frame = cap.read()

        if frame is None:
            break

        if counter == 1000:
            break

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        frame = utils.resize(frame, frame_width)
        frame_HSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # --------------------------------------------------------------------------------------------------------

        # ------------------------------------------- background subtraction ------------------------------------
        fgmask = fgbg.apply(frame)
        ret, fgmask = cv2.threshold(fgmask, 0, 255, 0)
        opening = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernelO, iterations=2)
        sure_fg = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernelC, iterations=2)
        (_, contours, _) = cv2.findContours(sure_fg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # ------------------------------------------------------------------------------------------------------

        # ---------------------------------- Get the list of new boxes -------------------------------------------
        list_of_new_boxes = track.giveBoundingBoxes(contours, frame_area, frame_width, frame_height, h_to_w, win_padding,
                                         0.003/100, 20/100)


        for i, box in enumerate(list_of_new_boxes):
            cx = box[0]
            cy = box[1]
            w  = box[2]
            h  = box[3]
            if win_padding<=cx<=frame_width-win_padding \
                    and win_padding<=cy<=frame_height-win_padding:
                x = int(cx - w/2)
                y = int(cy-h/2)
                cv2.rectangle(sure_fg, (x, y), (x + w, y + h), (127,127,127), 2)
        # -------------------------------------------- Get new Kalman Windows ----------------------------------
        if len(list_of_new_boxes)>0:
            list_of_new_boxes = track.purgeNewWindowsForPedDetection(list_of_new_boxes, win_W, win_H, frame, hog, detectScaleUp)
        # ------------------------------------------------------------------------------------------------------------

        for i, box in enumerate(list_of_new_boxes):
            cx = box[0]
            cy = box[1]
            w  = box[2]
            h  = box[3]
            if win_padding<=cx<=frame_width-win_padding \
                    and win_padding<=cy<=frame_height-win_padding:
                x = int(cx - w/2)
                y = int(cy-h/2)
                cv2.rectangle(sure_fg, (x, y), (x + w, y + h), (255,255,255), 2)
        cv2.imshow('Sure Foreground', sure_fg)
        # ------------------------------------------------------------------------------------------------------
        # ----------------------------- Predict the kalman windows ------------------------------------------------
        for win in list_of_windows:
            win.predict()
        # ------------------------------------------------------------------------------------------------------

        # ----------------------------- Correct Kalman Windows for moving objects ----------------------------------
        new_boxes_to_discard = track.mapMotionToCurrentWindows(list_of_windows, list_of_new_boxes, pixelMovThresh, overlapWeightage,
                                                      threshForDiscarding, overlapThresh, corrThresh, scaleFac, frame_HSV)

        for i, win in enumerate(list_of_windows):
            list_of_windows[i].correct(pixelMovThresh, frame_HSV, corrThresh,
                                       scaleFac, overlapThresh, win_W, win_H, frame, hog, detectScaleUp, overlapWeightage,
                                       prev_gray, frame_gray, lk_params, feature_params, frame_width, frame_height, win_padding)


        # ------------------------------------ Delete all the mapped new boxes ----------------------------------
        if len(list_of_new_boxes) > 0:
            list_of_new_boxes = [i for j, i in enumerate(list_of_new_boxes) if j not in new_boxes_to_discard]
        # ------------------------------------------------------------------------------------------------------

        # ------------------------ Add all the new windows in the current list --------------------
        for new_box in list_of_new_boxes:
            list_of_windows.append(Window(new_box, scaleFac, detectionRate, maxSpeed, frame, frame_HSV, frame_gray,
                                          feature_params))
        # ------------------------------------------------------------------------------------------------------


        # ----------------------------------------- Delete Purged Kalman windows -------------------------------------------
        for win in list_of_windows:
            if win.markForDel is True:
                list_of_windows.remove(win)
                # listOfDeletedPedestrians.append((win.id, win.color, win.appearanceHist))
                # if len(listOfDeletedPedestrians) > totalPedestriansToRemember:
                # del listOfDeletedPedestrians[0]

        # ---------------------------------------------------------------------------------------------------------

        # -------------------------------- Plot the Windows -----------------------------------
        for i, win in enumerate(list_of_windows):
            cx = win.kalman.statePost[0]
            cy = win.kalman.statePost[1]
            #cv2.putText(frame, str(win.id), (int(cx - win.w / 2), int(cy - win.h / 2)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
            if win_padding <= cx <= frame_width - win_padding \
                    and win_padding <= cy <= frame_height - win_padding:
                x = int(cx - win.w / 2)
                y = int(cy - win.h / 2)
                cv2.rectangle(frame, (x, y), (x + win.w, y + win.h), win.color, 2)
                if win.measurementType == 0:
                    cv2.circle(frame, (int(cx), int(cy)), 2, (255, 0, 0), thickness=2)
                elif win.measurementType == 1:
                    cv2.circle(frame, (int(cx), int(cy)), 2, (0, 255, 0), thickness=2)
                elif win.measurementType == 2:
                    cv2.circle(frame, (int(cx), int(cy)), 2, (0, 0, 255), thickness=2)
        # ------------------------------------------------------------------------------------------------------
        prev_gray = frame_gray
        out.write(frame)
        cv2.imshow('Video', frame)
        cv2.waitKey(1)
        # ---------------------------------------------------------------------------------------------------------

    cap.release()
    cv2.destroyAllWindows()
Code Example #15
# -*-coding:utf-8-*-
# @Author: Damon0626
# @Time  : 19-2-21 下午9:01
# @Email : [email protected]
# @Software: PyCharm

import cv2
import numpy as np
import path
import imutils
from imutils.object_detection import non_max_suppression

hog = cv2.HOGDescriptor()
hog.setSVMDetector(
    cv2.HOGDescriptor_getDefaultPeopleDetector())  # returns the default SVM coefficients used to configure the detector

for image in ['p1.jpg', 'p2.jpg', 'p3.jpg', 'p4.JPG']:
    img = cv2.imread(image)
    img = imutils.resize(img, min(400, img.shape[1]))
    orig = img.copy()

    # Detects objects of different sizes in the input image. The detected objects are returned as a list of rectangles.
    rects, weights = hog.detectMultiScale(img,
                                          winStride=(4, 4),
                                          padding=(8, 8),
                                          scale=1.05)

    for (x, y, w, h) in rects:
        cv2.rectangle(orig, (x, y), (x + w, y + h), (0, 0, 255), 2)

    rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
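    # Assumed continuation (the snippet ends here in the source): the imported
    # non_max_suppression and the saved copy suggest the usual suppress-and-display steps.
    pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)
    for (xA, yA, xB, yB) in pick:
        cv2.rectangle(img, (xA, yA), (xB, yB), (0, 255, 0), 2)

    cv2.imshow("Before NMS", orig)
    cv2.imshow("After NMS", img)
    cv2.waitKey(0)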
Code Example #16
    def calcDwellTime(self, parameters):

        # segregate different parameters from list
        #print("pppppppp    ",len(parameters))
        videostream = parameters[0]
        config = parameters[1]
        if not parameters[2]:
            livestream = False
        else:
            livestream = parameters[2]
        if not parameters[3]:
            isnovideo = False
        else:
            isnovideo = parameters[3]
        if not parameters[4]:
            starttime = None
        else:
            starttime = parameters[4]
        if not parameters[5]:
            frameskip = 0
        else:
            frameskip = parameters[5]
        if not parameters[6]:
            flushtime = 1000
        else:
            flushtime = parameters[6]
        if not parameters[7]:
            output = None
        else:
            output = parameters[7]
        if not parameters[8]:
            debug = False
        else:
            debug = parameters[8]

        video = videostream

        cleartime = 100
        updatetime = 1000

        if cleartime <= 0:
            cleartime = 1000

        verbose = debug
        novideo = isnovideo

        totalframes = 0
        fps = 0
        videolength = 0
        currentframetime = 0
        remainingtime = 0

        prevcentroids = []
        centroids = []
        currentflow = []
        lk_params = dict(winSize=(15, 15),
                         maxLevel=2,
                         criteria=(cv2.TERM_CRITERIA_EPS
                                   | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

        database = []

        first = True
        cap = None

        clearthreadtimer = 0  # used as a timer to start the thread that clears the in-memory database
        updatethreadtimer = 0

        incount = 0

        font = cv2.FONT_HERSHEY_SIMPLEX

        framenumber = 0
        names = []
        boxes = []
        result = [None]
        dwelldatabase = {}

        clearthread = Thread(target=self.garbageClear,
                             args=(database, 30, dwelldatabase, starttime),
                             name="clear_thread")  # in database clear thread
        #updatethread=Thread(target=self.updateInMDB,args=(self.db,"dwell",dwelldatabase,result),name="update_thread")

        try:
            conf = []
            conf.append(
                config
            )  # bind the configuration dict obtained from function parameter into a List

            width = 250

            if len(conf) > 0:

                for seg in conf:

                    names.append(seg['name'])  # segment name
                    boxes.append(
                        seg['coordinates']
                    )  # segment box coordinates (diagonally opposite vertices)

            else:
                print("No segment defined in the configuration file")
                return -1

            for n in names:
                dwelldatabase[n] = []

            # initialise HOG descriptor for people detecting
            hog = cv2.HOGDescriptor()
            hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

            if not livestream:
                cap = cv2.VideoCapture(video)
                fps = cap.get(cv2.CAP_PROP_FPS)
                if first == True:
                    r, prevframe = cap.read()
                    if not r:
                        print("video finished ")
                        raise Exception("Video Finished")

                    first = False

                    currentframetime = cap.get(cv2.CAP_PROP_POS_MSEC)
                    remainingtime = videolength - currentframetime
                    framenumber = cap.get(cv2.CAP_PROP_POS_FRAMES)

                totalframes = cap.get(cv2.CAP_PROP_FRAME_COUNT)
                print(totalframes)
                videolength = (totalframes / fps) * 1000  # in milliseconds

            elif livestream:
                # read the stream from IP camera
                import urllib.request
                ip_stream = urllib.request.urlopen(
                    videostream)  # videostream corresponds to http URI
                print(ip_stream.headers['Content-Length'])
                if ip_stream.status != 200:
                    raise (Exception("Unable to fetch video from the IP"))
                totalframes = 0
                videolength = 0
                stream_bytes = b''  # MJPEG byte buffer (avoids shadowing the built-in bytes)
                #print(ip_stream.read())

    # ========================================================================
            while True:
                ret = True
                if livestream:
                    # read individual frame bytes from ip_stream
                    stream_bytes += ip_stream.read(65536)

                    a = stream_bytes.find(b'\xff\xd8')
                    b = stream_bytes.find(b'\xff\xd9')
                    print(b)
                    if a != -1 and b != -1:
                        jpg = stream_bytes[a:b + 2]
                        stream_bytes = stream_bytes[b + 2:]
                        frame = cv2.imdecode(
                            np.frombuffer(jpg, dtype=np.uint8),
                            cv2.IMREAD_COLOR)
                        if first:
                            prevframe = frame.copy()
                            first = False
                    else:
                        raise (Exception("Cannot load stream from IP"))
                else:
                    (ret, frame) = cap.read()
                    framenumber = cap.get(cv2.CAP_PROP_POS_FRAMES)
                    currentframetime = cap.get(cv2.CAP_PROP_POS_MSEC)

                if not ret:
                    framenumber = cap.get(cv2.CAP_PROP_POS_FRAMES)
                    print("video finished")
                    cv2.destroyAllWindows()
                    cap.release()
                    raise (Exception("Video Finished"))

                orig = frame.copy()  # make a copy of the current frame

                frame = imutils.resize(frame, width=min(
                    width, frame.shape[1]))  #resize the frame

                prevframe = imutils.resize(
                    prevframe,
                    width=min(width,
                              prevframe.shape[1]))  #resize previous frame

                (prevrects, prevweights) = hog.detectMultiScale(
                    prevframe, winStride=(4, 4), padding=(4, 4),
                    scale=1.05)  #detect human in previous frame
                print(prevrects)
                print(prevweights)

                i = 0
                if len(prevrects) > 0 and len(prevweights) > 0:

                    prevweights = prevweights.tolist()
                    prevrects = prevrects.tolist()

                    while (i < len(prevweights)):

                        if prevweights[i][0] < float(1):

                            prevweights.pop(i)
                            prevrects.pop(i)
                            i -= 1
                        i += 1

                    prevrects = np.array([[x, y, x + w, y + h]
                                          for (x, y, w, h) in prevrects])
                    prevweights = np.array(prevweights)

                prevpick = non_max_suppression(prevrects,
                                               probs=None,
                                               overlapThresh=0.65)

                if len(prevpick) > 0:  #and len(pick) > 0:
                    if verbose:
                        print("seg 1")

                    for (xa, ya, xb, yb) in prevpick:

                        cen = cu.getCentroid([(xa, ya), (xb, yb)])
                        centroids.append(cen)

                    if verbose:
                        print("seg 2")

                    i = -1

                    for c in centroids:

                        i += 1
                        cx = int(c[0])
                        cy = int(c[1])
                        p0 = np.array([[cx, cy]], np.float32)

                        p0x = int(p0[0][0])
                        p0y = int(p0[0][1])
                        p0 = (p0x, p0y)

                        prevbox = [[prevpick[i][0], prevpick[i][1]],
                                   [prevpick[i][2], prevpick[i][3]]]

                        index = -1

                        index = cu.pointInWhichBox(p0, boxes)

                        if index != -1:  # if the person is in any of the segments

                            name = names[index]
                            print(name)
                            print(dwelldatabase)
                            print(
                                self.insertPerson(
                                    prevbox, cen, database, name,
                                    cap.get(cv2.CAP_PROP_POS_MSEC) / 1000,
                                    len(dwelldatabase[name])))  # insert person

                        elif index == -1:

                            status, data = self.removePerson(
                                prevbox, cen, database)  #remove person

                            if status:

                                print(data)

                                segname = data[3][0]

                                if starttime != None and starttime != 0:

                                    print(starttime)

                                    personin = starttime + datetime.timedelta(
                                        seconds=data[4][0])
                                    personin = personin.__str__()

                                    personout = starttime + datetime.timedelta(
                                        seconds=data[5][0])
                                    personout = personout.__str__()

                                else:

                                    personin = str(data[4][0])

                                    personout = str(data[5][0])

                                personincount = data[7][0]
                                personoutcount = data[8][0]

                                partialdata = data[9][0]
                                totalsec = data[6][0]
                                #d=[segname,personin,personout,totalsec,personincount,personoutcount,partialdata]
                                d = {
                                    "segname": segname,
                                    "personin": personin,
                                    "personout": personout,
                                    "totalsec": totalsec,
                                    "personincount": personincount,
                                    "personoutcount": personoutcount,
                                    "partialdata": partialdata
                                }

                                dwelldatabase[segname].append(d)

                        cv2.circle(frame, p0, 5, (0, 0, 255), -1)

                    for (xa, ya, xb, yb) in prevpick:

                        cv2.rectangle(frame, (xa, ya), (xb, yb), (0, 255, 0),
                                      2)
                        cen = cu.getCentroid([(xa, ya), (xb, yb)])
                        cv2.circle(frame, cen, 5, (0, 255, 0), -1)

                for b in boxes:
                    cv2.rectangle(frame, (b[0][0], b[0][1]),
                                  (b[1][0], b[1][1]), (0, 255, 0), 2)

                prevframe = orig.copy()

                prevcentroids = []
                centroids = []
                #print("dwelldatabase 1 : ",dwelldatabase)
                #input()
                print(clearthreadtimer, " / ", cleartime)
                #print("database : ",dwelldatabase)
                #input()
                if (clearthreadtimer >= cleartime):
                    if (len(database) > 0):
                        print(database)
                        #input()
                        clearthread = Thread(
                            target=self.garbageClear,
                            args=(database, 30, dwelldatabase, starttime),
                            name="clear_thread")  #  database clear thread
                        if verbose:
                            print("clear thread starting ")
                        clearthread.start()
                        clearthread.join()
                        if verbose:
                            print("clear thread finished")
                        del (clearthread)
                        clearthread = None
                    clearthreadtimer = 0

                #print("dwelldatabase 2 : ",dwelldatabase)
                #print(type(dwelldatabase))
                #input()
                print(updatethreadtimer, " / ", updatetime)
                if (updatethreadtimer >= updatetime):
                    if len(dwelldatabase) > 0:
                        #updatethread=Thread(target=self.updateInMDB,args=(self.db,"dwell",dwelldatabase,result),name="update_thread")
                        if verbose:
                            print("update thread starting ")
                        res = self.updateInMDB(self.db, "dwell", dwelldatabase)
                        #dwelldatabase.clear()
                        for k in dwelldatabase:
                            dwelldatabase[k] = []
                        #dwelldatabase.pop('_id')

                        #print (res)
                        #dwelldatabase.clear()
                        if verbose:
                            print("update thread finished")
                    updatethreadtimer = 0
                print("dwelldatabase 3 : ", dwelldatabase)
                #input()
                #print(type(dwelldatabase))

                clearthreadtimer += 1  #use this variable as a timer to start the thread for clearing the database
                updatethreadtimer += 1
                if not novideo:
                    cv2.imshow(str(video), frame)
                    k = cv2.waitKey(1) & 0xFF
                    if k == ord("q"):
                        framenumber = cap.get(cv2.CAP_PROP_POS_FRAMES)
                        cv2.destroyAllWindows()
                        cap.release()

                        raise (Exception("Video Finished"))

        except Exception as e:
            print("Fatal Error in dwellTime()")
            print("Error Name : ", e)
            print("Error in Details :")

            if verbose:
                err = sys.exc_info()
                print("Error Type : ", err[0])
                print("file name : ", err[-1].tb_frame.f_code.co_filename)
                print("Line Number : ", err[-1].tb_lineno)

        finally:
            cv2.destroyAllWindows()
            if cap:
                if framenumber <= 0:
                    framenumber = cap.get(cv2.CAP_PROP_POS_FRAMES)
                print("exiting ", framenumber)
                cap.release()
            return dwelldatabase, framenumber, fps
def object_detection():
    """
        Will load the pre-trained weight file and the cfg file which has knowledge of 80 different objects 
        Using the arg_parse function it will compare the confidence and threshold value of every object in a given frame

    """

    cfgfile = "cfg/yolov3.cfg"
    weightsfile = "yolov3.weights"
    args = arg_parse()
    confidence = float(args.confidence)
    nms_thesh = float(args.nms_thresh)
    start = 0
    num_classes = 80

    CUDA = torch.cuda.is_available()

    bbox_attrs = 5 + num_classes

    print("Loading network.....")
    model = Darknet(cfgfile)
    model.load_weights(weightsfile)
    print("Network successfully loaded")

    model.net_info["height"] = args.reso
    inp_dim = int(model.net_info["height"])
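    # YOLOv3 downsamples its input by a factor of 32, so the network resolution
    # must be a multiple of 32 (and larger than a single 32-px cell)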
    assert inp_dim % 32 == 0
    assert inp_dim > 32

    if CUDA:
        model.cuda()

    #### Test the performance of the model on a Static Image
    # model(get_test_input(inp_dim, CUDA), CUDA)
    # model.eval()
    ####

    #### Test the performance of the model on any video file
    videofile = 'video3.avi'
    ####

    #### If you are using any third-party camera accessed via an IP address, you can use this part of the code
    # address = ConnectionServer.connect()
    # address = 'http://' + address[0] + ':8000/stream.mjpg'
    # print("Fetching Video from", address)
    ####
    cap = cv2.VideoCapture(
        0
    )  #### If you are using your default webcam then use 0 as a source and for usbcam use 1

    assert cap.isOpened(
    ), 'Cannot capture source'  #### If camera is not found assert this message
    count = 0
    frames = 0
    start = time.time()

    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            img, orig_im, dim = prep_image(
                frame, inp_dim
            )  #### Pre-processing part of every frame that came from the source
            im_dim = torch.FloatTensor(dim).repeat(1, 2)

            if CUDA:  #### If you have a gpu properly installed then it will run on the gpu
                im_dim = im_dim.cuda()
                img = img.cuda()

            with torch.no_grad():  #### Run the forward pass without tracking gradients
                output = model(Variable(img), CUDA)
            output = write_results(
                output, confidence, num_classes, nms=True,
                nms_conf=nms_thesh)  #### Localize the objects in a frame

            if type(output) == int:
                frames += 1
                print("FPS of the video is {:5.2f}".format(
                    frames / (time.time() - start)))
                cv2.imshow("Object Detection Window", orig_im)
                key = cv2.waitKey(1)
                if key & 0xFF == ord('q'):
                    break
                continue

            #im_dim = im_dim.repeat(output.size(0), 1)
            #scaling_factor = torch.min(inp_dim/im_dim,1)[0].view(-1,1)

            output[:, 1:5] = torch.clamp(output[:, 1:5], 0.0,
                                         float(inp_dim)) / inp_dim
            im_dim = im_dim.repeat(output.size(0), 1)
            output[:, [1, 3]] *= frame.shape[1]
            output[:, [2, 4]] *= frame.shape[0]

            #output[:,1:5] /= scaling_factor

            # for i in range(output.shape[0]):
            #     output[i, [1,3]] = torch.clamp(output[i, [1,3]], 0.0, im_dim[i,0])
            #     output[i, [2,4]] = torch.clamp(output[i, [2,4]], 0.0, im_dim[i,1])

            classes = load_classes('data/coco.names')
            colors = pkl.load(open("pallete", "rb"))

            list(map(lambda x: write(x, orig_im, classes, colors), output))

            cv2.imshow("Object Detection Window",
                       orig_im)  #### Generating the window
            key = cv2.waitKey(1)
            if key & 0xFF == ord('q'):
                break
            frames += 1

            # print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))
            l = print_labels()[0]
            print(l)
            hog = cv2.HOGDescriptor()
            hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
            # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            found, w = hog.detectMultiScale(frame,
                                            winStride=(8, 8),
                                            padding=(32, 32),
                                            scale=1.05)
            # time.sleep(2)
            # print(found)
            # print(len(found))
            # draw_detections(frame, found)
            get_number_of_object, get_distance = draw_detections(frame, found)
            if get_number_of_object >= 1 and get_distance != 0:
                feedback = ("{}".format(get_number_of_object) + " " + l +
                            " at {}".format(round(get_distance)) + "Inches")
                speak.Speak(feedback)
                print(feedback)
            else:
                feedback = ("{}".format("1") + " " + l)
                speak.Speak(feedback)
                print(feedback)
    # Stop the capture
    cap.release()
    # Destroy the windows
    cv2.destroyAllWindows()
コード例 #18
0
import numpy as np
import cv2
import time
import imutils
import matplotlib.pyplot as plt
#from laneDetection.py import canny_edge_detector

car_cascade = 'cascades/haarcascade_car.xml'
car_classifier = cv2.CascadeClassifier(car_cascade)

pedestrain = cv2.HOGDescriptor()
pedestrain.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

capture = cv2.VideoCapture('files/test3.mp4')

while capture.isOpened():

    response, frame = capture.read()
    if response:
        frame = imutils.resize(frame, width=min(700, frame.shape[1]))
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        (regions, _) = pedestrain.detectMultiScale(frame,
                                                   winStride=(4, 4),
                                                   padding=(4, 4),
                                                   scale=1.05)
        cars = car_classifier.detectMultiScale(gray, 1.2, 3)

        for (x, y, w, h) in cars:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 3)
            # (the snippet is truncated here in the source; the remaining putText
            #  arguments are filled in with typical font/scale/colour values)
            cv2.putText(frame, "Car Detected", (x, y - 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
コード例 #19
0
def main():
    winSize = (64, 128)
    blockSize = (16, 16)
    blockStride = (8, 8)
    cellSize = (8, 8)
    nbins = 9
    derivAperture = 2
    winSigma = -1.
    histogramNormType = 0
    L2HysThreshold = 0.2
    gammaCorrection = 1
    nlevels = 64
    signedGradients = False
    # Initialization of the HOG descriptor
    #hog = cv2.HOGDescriptor()
    hog = cv2.HOGDescriptor(winSize, blockSize, blockStride, cellSize, nbins,
                            derivAperture, winSigma, histogramNormType,
                            L2HysThreshold, gammaCorrection, nlevels,
                            signedGradients)
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

    # Load the image, convert it to grayscale and rescale it
    image = cv2.imread(settings.img_path)
    imageGray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # note: the resize below is applied to the colour image, so despite its name
    # imageGray ends up holding the resized 3-channel image (hence the unpacking
    # of height, width, channels further down)
    imageGray = cv2.resize(image,
                           None,
                           fx=settings.resize,
                           fy=settings.resize,
                           interpolation=cv2.INTER_CUBIC)
    height, width, channels = imageGray.shape
    """# Start the timer to measure how long it takes
    print("Time started ...")
    start = time.time()"""

    # Compute the HOG features and run the SVM detection, returning the bounding boxes of the matches
    g = HogDescriptor(imageGray, hog)
    """# Compute the elapsed time
    end = time.time()
    print("Time Finished ...")
    parcial = end - start
    print(parcial)"""

    l = g.copy()

    recorte = [width, height, 0, 0]

    for item in l:
        item[0] = item[0] - item[2]
        item[2] = item[2] * 3
        item[1] = item[1] - (0.1 * item[3])
        item[3] = 1.1 * item[3]
        if (item[0] < 0):
            item[0] = 0
        if (item[1] < 0):
            item[1] = 0
        if (item[3] > height - item[1]):
            item[3] = height - item[1] - 1
        if (item[2] > width - item[0]):
            item[2] = width - item[0] - 1
        print(item)
        if (recorte[0] > item[0]):
            recorte[0] = item[0]
        if (recorte[1] > item[1]):
            recorte[1] = item[1]

    for item in l:
        if (recorte[2] < item[2] + item[0] - recorte[0]):
            recorte[2] = item[2] + item[0] - recorte[0]
        if (recorte[3] < item[3] + item[1] - recorte[1]):
            recorte[3] = item[3] + item[1] - recorte[1]

    print(recorte)

    # Draw the rectangles for the detections on screen
    for (x, y, w, h) in g:
        cv2.rectangle(
            image, (int(x // settings.resize), int(y // settings.resize)),
            (int((x + w) // settings.resize), int(
                (y + h) // settings.resize)), (0, 255, 0), 2)
    #cv2.rectangle(image, (int(recorte[0]//settings.resize), int(recorte[1]//settings.resize)), (int((recorte[0] + recorte[2])//settings.resize), int((recorte[1] + recorte[3])//settings.resize)), (0, 0, 255), 2)

    cv2.imshow("Detections", image)
    cv2.waitKey(0)
    cv2.destroyWindow("Detections")

    croppedimageGray = imageGray[recorte[1]:recorte[1] + recorte[3],
                                 recorte[0]:recorte[0] + recorte[2]]

    g2 = HogDescriptor(croppedimageGray, hog)

    for (x, y, w, h) in g2:
        cv2.rectangle(image, (int((x + recorte[0]) // settings.resize),
                              int((y + recorte[1]) // settings.resize)),
                      (int(((x + recorte[0]) + w) // settings.resize),
                       int(((y + recorte[1]) + h) // settings.resize)),
                      (0, 0, 255), 2)

    cv2.imshow("Detections", image)
    cv2.waitKey(0)
    cv2.destroyWindow("Detections")
コード例 #20
0
# python 3.7
import os

import cv2
from imutils.object_detection import non_max_suppression
import numpy as np

# Camera capture
commands = 'raspistill -v -o ' + 'capture' + '.jpg'
os.system(commands)
img = cv2.imread("./capture.jpg")
orig = img.copy()

# Detect persons and resize their images
defaultHog = cv2.HOGDescriptor()  # Define HOG target
defaultHog.setSVMDetector(
    cv2.HOGDescriptor_getDefaultPeopleDetector())  # Set the SVM people classifier
(rects, weights) = defaultHog.detectMultiScale(img,
                                               winStride=(4, 4),
                                               padding=(8, 8),
                                               scale=1.05)
rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)
for (xA, yA, xB, yB) in pick:
    cv2.rectangle(img, (xA, yA), (xB, yB), (0, 255, 0), 2)
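# note: (xA, yA, xB, yB) still hold the last box from the loop above,
# so only the last detected person is cropped and saved below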
cropImg = img[yA:yB, xA:xB]
cv2.imwrite("./data/reid_robot/000.jpg", cropImg)
print('Image Captured')
コード例 #21
0
# note: this snippet starts mid-file; the import and the winSize/blockSize/blockStride
# values below are assumptions (the standard people-detector settings) added so that
# the cv2.HOGDescriptor(...) call further down is runnable
import cv2

winSize = (64, 128)
blockSize = (16, 16)
blockStride = (8, 8)
cellSize = (8, 8)
nbins = 9
derivAperture = 1
winSigma = -1
histogramNormType = 0
L2HysThreshold = 0.2
gammaCorrection = True
nlevels = 64
signedGradient = False

# OpenCV's HOG based Pedestrian Detector
hogDefault = cv2.HOGDescriptor(winSize, blockSize, blockStride, cellSize,
                               nbins, derivAperture, winSigma,
                               histogramNormType, L2HysThreshold,
                               gammaCorrection, nlevels, signedGradient)
svmDetectorDefault = cv2.HOGDescriptor_getDefaultPeopleDetector()
hogDefault.setSVMDetector(svmDetectorDefault)

# read images from pedestrians directory
imagePath = 'images/race.jpg'

# We will run pedestrian detector at an fixed height image
finalHeight = 800.0

# read image
im = cv2.imread(imagePath, cv2.IMREAD_COLOR)

# resize image to height finalHeight
scale = finalHeight / im.shape[0]
im = cv2.resize(im, None, fx=scale, fy=scale)
def rearCarandPeopleDetector(gray, frame):
    #SETUP OBJECT DETECTORS: initialize detectors for pedestrians and cars
    #-----------------------------------------------------------------------
    database = 'C:\\Users\\estod_000\\Box\\EcoCAR 3\\Electrical\\ADAS\\Current Projects\\Object Detection\\Object Detectors'
    pedDet = cv2.HOGDescriptor()
    pedDet.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    rearDet = cv2.CascadeClassifier('rearVehicleDetectorHaar_700sample.xml')
    carDet = cv2.CascadeClassifier('cars.xml')

    #VARIABLES AND OBJECTS
    #-----------------------
    i = 1
    validCarsFound = 0  #TODO limit Cars found to just 5? or allow as many as possible
    AreafilteredCars = []

    #1. REGION OF INTEREST(ROI): create ROI for middle row of screen
    #----------------------------------------------------------------
    height, width = gray.shape
    ROI2end = (int)(5 * height / 6)
    ROIstart = (0.5225 * ROI2end)
    ROI1end = (int)(0.7 * ROI2end)
    gray2 = gray[(int)(ROIstart):(int)(ROI2end), 0:(int)(width)]
    gray1 = gray[(int)(ROIstart):(int)(ROI1end), 0:(int)(width)]
    cv2.rectangle(
        frame, (0, (int)(ROIstart)), ((int)(width), (int)(ROI2end)),
        (255, 255, 0),
        2)  # can return "frame" depending on version. Used to view ROI
    cv2.rectangle(
        frame, (0, (int)(ROIstart)), ((int)(width), (int)(ROI1end)),
        (200, 200, 0),
        2)  # can return "frame" depending on version. Used to view ROI

    #2. CAR OBJECT DETECTION:  detect cars
    #-----------------------------------
    rearOnline = carDet.detectMultiScale(gray1, 1.1, 2)
    rearFound = rearDet.detectMultiScale(gray2,
                                         scaleFactor=1.05,
                                         minNeighbors=8,
                                         minSize=(1, 1),
                                         flags=cv2.CASCADE_SCALE_IMAGE)

    #3. PEDESTRIAN DETECTION: detect pedestrians
    #----------------------------------------
    pedFound, pedW = pedDet.detectMultiScale(gray,
                                             winStride=(8, 8),
                                             padding=(32, 32),
                                             scale=1.05)
    for (pedX, pedY, pedW, pedH) in pedFound:
        pedY += (int)(ROIstart)  #adjust ROI offset for displaying on "frame"
        if (pedY <= 266):
            cv2.rectangle(frame, (pedX, pedY), (pedX + pedW, pedY + pedH),
                          (255, 255, 255), 2)

    #4. AREA FILTER: filter cars based on the size of the object and its location on the screen
    #------------------------------------------------------------------------------------------
    for (rx, ry, rw, rh) in rearOnline:
        ry += (int)((ROIstart) + 1 / 3.0 * rh)
        rx += (int)(1 / 4.0 * rw)
        rw = (int)(rw / 2.0)
        rh = (int)(rh / 3.0)
        area = rw * rh
        #print "area: " +str(area) + "  with y: " + str(ry) #TODO
        far = (ry <= 240) and area <= 31000
        farther = (ry >= 241 and ry <= 258)
        # note: "area > 2800 and area <= 1000" can never both hold, so this
        # condition is always False (likely a typo in the source thresholds)
        close = (ry >= 259 and ry <= 266) and area > 2800 and area <= 1000
        if not (farther or far or close):
            #    AreafilteredCars.append([(int)(rx), (int)(ry), (int)(rw), (int)(rh)])
            fileToSave = frame[(int)(ry):(int)(ry + rh),
                               (int)(rx):(int)(rx + rw)]
            #    name = "images/isCar/image_" + (str)(time.time()) + ".jpeg"
            #    cv2.imwrite(name, fileToSave)
            #cv2.rectangle(frame, (rx, ry), ((rx+rw), (ry+rh)), (0, 255, 255), 2)  # can return "frame" depending on version
            fileToSave = frame[(int)(ry):(int)(ry + rh),
                               (int)(rx):(int)(rx + rw)]
            name = "images/trash-isNotCar/image_" + (str)(
                time.time()) + ".jpeg"
            cv2.imwrite(name, fileToSave)

    for (rearX, rearY, rearW, rearH) in rearFound:
        rearY += (int)(ROIstart)  #adjust ROI offset for displaying on "frame"
        area = rearW * rearH
        close = (rearY >= 210
                 and rearY <= 226) and area > 2000 and area <= 12000
        far = (rearY >= 227 and rearY <= 260) and area > 2000 and area <= 31000
        farther = (rearY >= 261
                   and rearY <= 266) and area > 2800 and area <= 10000
        if not (farther or far or close):
            #AreafilteredCars.append([rearX,rearY,rearW,rearH])
            #fileToSave  = frame[(int)(rearY):(int)(rearY+rearH), (int)(rearX):(int)(rearX+rearW)]
            #name = "images/isCar/image_" + (str)(time.time()) + ".jpeg"
            #cv2.imwrite(name, fileToSave)
            fileToSave = frame[(int)(rearY):(int)(rearY + rearH),
                               (int)(rearX):(int)(rearX + rearW)]
            name = "images/trash-isNotCar/image_" + (str)(
                time.time()) + ".jpeg"
            cv2.imwrite(name, fileToSave)

    #5. OVERLAP FILTER: filter objects that overlap. only display the first object of overlapping objects.
    #--------------------------------------------------------------------------------------------------
    #frame = overlapFilter(AreafilteredCars, frame, 0, 255, 255)

    #6: RETURN: return frame with bounding boxes
    #----------------------------------------
    return frame
コード例 #23
0
    def TrackPeoples(self,parameters):
        
        
       # segregate different parameters from list
        videostream = parameters[0]
        width = parameters[1]
        if not parameters[2]:
            islivestream=False
        else:
            islivestream = parameters[2]
        if not parameters[3]:
            isnovideo=False
        else:
            isnovideo = parameters[3]
        if not parameters[4]:
            starttime=None
        else:
            starttime = parameters[4]
        if not parameters[5]:
            frameskip=0
        else:
            frameskip = parameters[5]
        if not parameters[6]:
            flushtime=1000
        else:
            flushtime=parameters[6]
        if not parameters[7]:
            output=None
        else:
            output = parameters[7]
        if not parameters[8]:
            debug=False
        else:
            debug=parameters[8]
        
        
        video=videostream
        livestream=islivestream

        if livestream:
            try :
                video = int(video)
                if video < 0:
                    video = 0
            except:
                pass
        
        cleartime=int(flushtime)
        
        if cleartime <= 0 :
            cleartime = 1000
            
        verbose=debug
        novideo=isnovideo
        
        totalframes=0
        fps=0
        videolength=0
        currentframetime=0
        remainingtime=0
        fwidth=0   # frame dimensions; set properly once the first frame is read
        fheight=0


        first = True
        cap=None

        centroids=[]
        pick=[]

        database=[]
        finaldatabase=[]
        fulldatabase=[]

        locked=[False]
        updatethreadtimer = 0
        
        
        #updatethread=Thread(target=updateData,args=(database,db,locked),name="update_thread")  # thread for updating the data
        storethread=Thread(target=self.storeData,args=(database,finaldatabase,30,locked),name="store_thread")  # thread for storing the data
        errorstatus=0

        try:


            hog=cv2.HOGDescriptor()
            hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

            cap=cv2.VideoCapture(video)

            if not cap.isOpened():
                cap.open(video)  # open() needs the source; a bare open() raises a TypeError
            if not cap.isOpened():
                raise(Exception("can not open the video stream"))



            #if first==True:
            #    
            #    r,prevframe=cap.read()
            #    if not r:

            #        print("video finished ")
            #        raise(Exception("Video Finished"))

            #    first=False

            
            while(cap.isOpened()):

                (ret,frame)=cap.read()
                print("read frame status : ",ret)
                framenumber=cap.get(cv2.CAP_PROP_POS_FRAMES)

                if not ret:
                    
                    framenumber=cap.get(cv2.CAP_PROP_POS_FRAMES)
                    print ("video finished")
                    cv2.destroyAllWindows()
                    cap.release()

                    raise(Exception("Video Finished"))

                currentframetime=cap.get(cv2.CAP_PROP_POS_MSEC)
                remainingtime=videolength-currentframetime

                frame=imutils.resize(frame,width=min(width,frame.shape[1])) #resize the frame
                if first==True:

                    fwidth=frame.shape[1]
                    fheight=frame.shape[0]
                    first=False
                


                (rects,weights)=hog.detectMultiScale(frame,winStride=(4,4),padding=(4,4),scale=1.05)  # detect humans in the current frame
                print("rects : ",rects)
                print("weights : ",weights)
                
                i=0
                if len(rects)>0 and len(weights)>0:

                    weights=weights.tolist()
                    rects=rects.tolist()

                    while(i<len(weights)):
                        
                        if weights[i][0]<float(1):
                            
                            weights.pop(i)
                            rects.pop(i)
                            i-=1
                        i+=1

                    rects=np.array([[x,y,x+w,y+h] for (x,y,w,h) in rects])
                    weights=np.array(weights)


                i=0

                pick=non_max_suppression(rects,probs=None,overlapThresh=0.65)

                if len(pick) > 0 :

                    for (xa,ya,xb,yb) in pick :

                        cen=cu.getCentroid([(xa,ya),(xb,yb)])
                        centroids.append(cen)

                print("pick : ",pick)

                if len(centroids)>0:

                    i=-1

                    for c in centroids:

                        i+=1

                        cx=int(c[0])
                        cy=int(c[1])

                        p0=np.array([[cx,cy]],np.float32)
                        print("len pick : ",len(pick))
                        print("i : ",i)
                        print("pick [i] : ",pick[i])
                        crntbox=[[pick[i][0],pick[i][1]],[pick[i][2],pick[i][3]]]
                        print("crnt box : ",crntbox)

                        insertstatus=self.insertPerson(crntbox,c,database)
                        print("insert status : ",insertstatus)


                centroids=[]
                pick=[]


                if not locked[0]:

                    print(" updatethreadtimer : ",updatethreadtimer)
                    print(" cleartime : ",cleartime)

                    if updatethreadtimer>=cleartime :

                        locked=[True]
                        updatethreadtimer=0

                        storethread=Thread(target=self.storeData,args=(database,finaldatabase,30,locked),name="store_thread")  # thread for storing the data
                        storethread.start()
                        storethread.join()
                        if len(finaldatabase)>0:
                            if(self.ismongodb):
                                self.updateData(self.db,"track",finaldatabase)
                            fulldatabase.extend(finaldatabase)
                            finaldatabase.clear()
                        print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
                        #input()

                    else:
                        
                        updatethreadtimer += 1

        except Exception as e:

            errorstatus=1
            
            print()
            print("Fatal Error in TrackPeoples()")
            print()
            print("Error Name : ",e)
            print()
            print("Error in Details :")
            print()

            err=sys.exc_info()
            print("Error Type : ",err[0])
            print("file name : ",err[-1].tb_frame.f_code.co_filename)
            print("Line Number : ",err[-1].tb_lineno)

        finally:

            return (errorstatus,fulldatabase,(fwidth,fheight),video)
コード例 #24
0
    def get_frame(self, countif):
        global previous_frame
        global min_area
        global center
        global count
        global boxcolor
        global hog
        global frame_processed
        global timeout
        global font
        global center_fix
        global flag_track2
        global prev_x_pixel
        global prev_y_pixel
        global counttrack2
        global tetaperpixel
        global nucleo
        global distance
        global distance2
        global teta
        global i
        global b
        global flag
        global pick2
        global tracker
        global prev_distance2

        def detect_people(frame, center, frame_out, bboxcolor=(0, 255, 0)):
            """
			detect humans using HOG descriptor
			Args:
				frame:
			Returns:
				processed frame, center of every bb box
			"""
            centerxd = []
            (rects, weights) = hog.detectMultiScale(frame,
                                                    winStride=(8, 8),
                                                    padding=(16, 16),
                                                    scale=1.06)
            rects = non_max_suppression(rects, probs=None, overlapThresh=0.65)
            for (x, y, w, h) in rects:
                cv2.rectangle(frame_out, (x, y), (x + w, y + h), (0, 0, 255),
                              2)

            # apply non-maxima suppression to the bounding boxes using a
            # fairly large overlap threshold to try to maintain overlapping
            # boxes that are still people
            rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
            pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)

            idx = 0
            # draw the final bounding boxes
            for (xA, yA, xB, yB) in pick:
                cv2.rectangle(frame_out, (xA, yA), (xB, yB), bboxcolor, 2)
                cv2.putText(frame_out, 'Person ' + str(idx), (xA, yA - 10), 0,
                            0.3, bboxcolor)
                idx = idx + 1
                # calculate the center of the object
                centerxd.append([(xA + xB) / 2, (yA + yB) / 2])

            return (frame, centerxd, pick)

        def background_subtraction(previous_frame, frame_resized_grayscale,
                                   min_area):
            """
            This function returns 1 for the frames in which the area
            after subtraction with the previous frame is greater than the
            minimum area defined.
            Thus the expensive computation of human detection, face detection
            and face recognition is not done on all the frames.
            Only the frames undergoing a significant amount of change (which is
            controlled by min_area) are processed for detection and recognition.
            """
            frameDelta = cv2.absdiff(previous_frame, frame_resized_grayscale)
            thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
            thresh = cv2.dilate(thresh, None, iterations=2)
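            # note: the three-value return of cv2.findContours below is the OpenCV 3.x
            # API; OpenCV 4.x returns only (contours, hierarchy)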
            im2, cnts, hierarchy = cv2.findContours(thresh.copy(),
                                                    cv2.RETR_EXTERNAL,
                                                    cv2.CHAIN_APPROX_SIMPLE)
            temp = 0
            for c in cnts:
                # if the contour is too small, ignore it
                if cv2.contourArea(c) > min_area:
                    temp = 1
            return temp

        def encodex(x_pixel):
            if (x_pixel < 130):
                x_norm = 1
            elif (x_pixel > 230):
                x_norm = 2
            else:
                x_norm = 0

            return x_norm

        if (countif < 1):
            # setup the serial port
            nucleo = serial.Serial()
            nucleo.port = '/dev/ttyACM0'
            nucleo.baudrate = 115200  # pyserial expects 'baudrate', not 'baud'
            nucleo.close()
            nucleo.open()
            nucleo.flush()
            time.sleep(2)
            print("connected to: " + nucleo.portstr)
            print("Running...")
            subject_label = 1
            font = cv2.FONT_HERSHEY_SIMPLEX
            tracker = KCF.kcftracker(
                True, False, True, False)  # hog, fixed_window, multiscale, lab
            # initialize the HOG descriptor/person detector
            hog = cv2.HOGDescriptor()
            hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
            time.sleep(1)

            frame = np.zeros((480, 640, 3), np.uint8)
            flag_track2 = 0
            count = 0
            counttrack2 = 0
            prev_y_pixel = 0
            prev_x_pixel = 0
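            # ~0.9948 rad (~57 deg, the commonly quoted Kinect horizontal field of view)
            # spread over the 400-px-wide frame gives the angle subtended per pixel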
            tetaperpixel = 0.994837 / 400.0
            prev_distance2 = 0
            # grab one frame at first to compare for background substraction
            frame, timestamp = freenect.sync_get_video()
            #time.sleep(5)
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            frame_resized = imutils.resize(frame,
                                           width=min(400, frame.shape[1]))
            frame_resized_grayscale = cv2.cvtColor(frame_resized,
                                                   cv2.COLOR_BGR2GRAY)

            # initialize centroid
            center = [[frame_resized.shape[1] / 2, frame_resized.shape[0] / 2]]
            center_fix = []
            # defining min cuoff area
            #min_area=(480/400)*frame_resized.shape[1]
            min_area = (0.01) * frame_resized.shape[1]
            boxcolor = (0, 255, 0)
            timeout = 0
            #variable for counting time elapsed
            temp = 1

            previous_frame = frame_resized_grayscale
            # retrieve new RGB frame image
            # Frame generation for Browser streaming with Flask...
            self.outframe = open("stream.jpg", 'wb+')
            cv2.imwrite("stream.jpg", frame)  # Save image...

            return self.outframe.read()
        else:
            # start timer
            timer = cv2.getTickCount()
            starttime = time.time()
            frame, timestamp = freenect.sync_get_video()
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            frame_resized = imutils.resize(frame,
                                           width=min(400, frame.shape[1]))
            frame_resized_grayscale = cv2.cvtColor(frame_resized,
                                                   cv2.COLOR_BGR2GRAY)
            temp = background_subtraction(previous_frame,
                                          frame_resized_grayscale, min_area)

            # retrieve depth map
            depth, timestamp = freenect.sync_get_depth()
            depth = imutils.resize(depth, width=min(400, depth.shape[1]))
            depth2 = np.copy(depth)
            # orig = image.copy()
            if temp == 1:
                if (flag_track2 == 0):
                    frame_processed, center_fix, pick2 = detect_people(
                        frame_resized_grayscale, center, frame_resized,
                        boxcolor)
                    if (len(center_fix) > 0):
                        i = 0
                        for b in center_fix:

                            #print(b)
                            #print("Point "+str(i)+": "+str(b[0])+" "+str(b[1]))

                            x_pixel = b[1]
                            y_pixel = b[0]
                            rawDisparity = depth[(int)(x_pixel),
                                                 (int)(y_pixel)]
                            print("raw:" + str(rawDisparity))
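                            # commonly used OpenKinect approximation: depth in metres
                            # ~= 1 / (-0.00307 * raw_disparity + 3.33)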
                            distance = 1 / (-0.00307 * rawDisparity + 3.33)
                            if (distance < 0):
                                distance = 0.5
                            print("Distance : " + str(distance))
                            cv2.putText(frame_resized,
                                        "distance: {:.2f}".format(distance),
                                        (10, (frame_resized.shape[0] -
                                              (i + 1) * 25) - 50), font, 0.65,
                                        (0, 0, 255), 3)
                            cv2.putText(
                                frame_resized, "Point " + str(i) + ": " +
                                str(b[0]) + " " + str(b[1]),
                                (10, frame_resized.shape[0] - (i + 1) * 25),
                                font, 0.65, (0, 0, 255), 3)
                            i = i + 1
                        y_pix, x_pix = center_fix[0]

                        endtime = time.time()
                        #nucleo.write(("8,"+str(x_person)+","+str(y_person)).encode()) # send x_person and y_person
                        if ((abs(prev_x_pixel - x_pix)) < 50
                                and (abs(prev_y_pixel - y_pix)) < 50):
                            timeout = timeout + (endtime - starttime)
                            if (timeout > 5):
                                flag_track2 = 1
                                boxcolor = (255, 0, 0)
                        else:
                            nucleo.flush()
                            nucleo.write("8,,,,,,,,,,,,".encode())
                            timeout = 0
                            boxcolor = (0, 255, 0)

                        prev_y_pixel, prev_x_pixel = y_pix, x_pix
                        # DEBUGGING #
                        #print("Teta: " + str(teta) + "Distance: " + str(distance))
                        print("Timeout: " + str(timeout))
                        #print ("Distance : " + str(distance))
                    elif (len(center_fix) <= 0):
                        timeout = 0
                        boxcolor = (0, 255, 0)
                        nucleo.flush()
                        nucleo.write("8,,,,,,,,,,,,".encode())

                elif (flag_track2 == 1):
                    if (counttrack2 == 0):
                        iA, iB, iC, iD = pick2[0]

                        tracker.init([iA, iB, iC - iA, iD - iB], frame_resized)
                        counttrack2 = counttrack2 + 1
                    elif (counttrack2 == 1):
                        print(pick2[0])
                        #print("iA:"+str(iA)+"iB:"+str(iB)+"iC:"+str(iC)+"iD:"+str(iD))
                        boundingbox = tracker.update(
                            frame_resized)  #frame had better be contiguous
                        boundingbox = list(map(int, boundingbox))
                        cv2.rectangle(frame_resized,
                                      (boundingbox[0], boundingbox[1]),
                                      (boundingbox[0] + boundingbox[2],
                                       boundingbox[1] + boundingbox[3]),
                                      (255, 0, 0), 3)
                        #GENERAL ASSUMPTION SINGLE PERSON TRACKING
                        # start tracking...

                        x_track = ((boundingbox[2]) / 2.0) + boundingbox[0]
                        y_track = ((boundingbox[3]) / 2.0) + boundingbox[1]
                        print("x:" + str(x_track) + "y:" + str(y_track))
                        x_center = (frame_resized.shape[1] + 1) / 2
                        y_center = (frame_resized.shape[0] + 1) / 2
                        print(x_center, y_center)
                        # compute teta, assuming the distance is the straight-line distance

                        rawDisparity2 = depth2[(int)(y_track), (int)(x_track)]
                        print("raw2:" + str(rawDisparity2))
                        distance2 = 1 / (-0.00307 * rawDisparity2 + 3.33)
                        if (distance2 < 0):
                            distance2 = prev_distance2
                        prev_distance2 = distance2

                        #realx = (x_track-x_center)+(distance/30.0)
                        #teta = math.atan(realx/distance) # if distance is tangensial
                        #teta = math.asin((0.026458333*(x_track-x_center)/distance)) # if distance is euclidean
                        teta = (x_track - x_center) * tetaperpixel
                        print("teta2: " + str(teta))
                        print("Distance2 : " + str(distance2))
                        cv2.putText(frame_resized,
                                    "distance: {:.2f}".format(distance2),
                                    (10, (frame_resized.shape[0] -
                                          (i + 1) * 25) - 50), font, 0.65,
                                    (0, 0, 255), 3)
                        cv2.putText(
                            frame_resized, "Point " + str(0) + ": " +
                            str(x_track) + " " + str(y_track),
                            (10, frame_resized.shape[0] - (i + 1) * 25), font,
                            0.65, (0, 0, 255), 3)
                        # send the teta and distance
                        nucleo.flush()
                        if (teta < 0.0):
                            flag = nucleo.write(
                                ("7," + format(teta, '1.2f') + "," +
                                 format(distance2, '1.3f')).encode())
                        elif (teta > 0.0):
                            flag = nucleo.write(
                                ("7," + format(teta, '1.3f') + "," +
                                 format(distance2, '1.3f')).encode())
                        print("WRITEIN1" + str(flag))

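                        # a low KCF peak response means the tracker has likely lost
                        # the target, so fall back to HOG detection mode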
                        if (tracker.getpeakvalue() < 0.4):
                            counttrack2 = 0
                            flag_track2 = 0
                            nucleo.flush()
                            nucleo.write("8,,,,,,,,,,,,".encode())
                            print("WRITEOUT")

                #frame_resized = cv2.flip(frame_resized, 0)
                #cv2.imshow("Detected Human", frame_resized)
                #cv2.imshow("Depth", depth)
                # cv2.imshow("Original", frame)
            else:
                count = count + 1
                #print("Number of frame skipped in the video= " + str(count))

            # compute the fps
            fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

            #print("FPS: " + str(fps))

            # Frame generation for Browser streaming with Flask...
            self.outframe = open("stream.jpg", 'wb+')
            cv2.imwrite("stream.jpg", frame_resized)  # Save image...

            return self.outframe.read()
コード例 #25
0
 def init_detector(self):
     self.hog = cv2.HOGDescriptor()
     self.hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
コード例 #26
0
def track(x, y, w, h, f):
    trackerVar = 0
    varia = str(trackerVar)
    #creates tracker
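    # cv2.Tracker_create("KCF") is the legacy opencv-contrib 3.0/3.1 API;
    # newer OpenCV builds expose cv2.TrackerKCF_create() instead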
    varia = cv2.Tracker_create("KCF")
    x = x
    y = y
    #creates bounding box
    xC = (int)(x - 1.5 * w)
    yC = (int)(y - 0.75 * w)
    height = h * 5
    width = (int)(w * 3.5)
    c, r, w, h = xC, yC, width, height
    bbox = (c, r, w, h)
    #intialized tracker with the created bounding box
    ok = varia.init(f, bbox)
    ratioX, ratioY = initDist(f, width, height)
    num = 0  #once we get to looking for distance every 5 frames
    var = 0
    turn = 0
    forward = 0
    nonLocate = 0
    while True:
        msg = rospy.wait_for_message("/ardrone/image_raw", imageX)
        frame = ros_numpy.numpify(msg)
        ok = True
        #updates position of bounding box using the KCF tracker
        ok, bbox = varia.update(frame)
        if ok:
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            #draws bounding  box on the current frame
            cv2.rectangle(frame, p1, p2, (0, 0, 255))
        if (num == 3):
            #check initDistance for descr of pedestrian detector
            position = 0
            hog = cv2.HOGDescriptor()
            hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
            image = imutils.resize(frame, width=min(400, frame.shape[1]))
            orig = image.copy()
            (rects, weights) = hog.detectMultiScale(image,
                                                    winStride=(4, 4),
                                                    padding=(8, 8),
                                                    scale=1.05)
            for (x, y, w, h) in rects:
                cv2.rectangle(orig, (x, y), (x + w, y + h), (0, 0, 255), 2)
            rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
            pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)
            par = 0
            for (xA, yA, xB, yB) in pick:
                par += 1
                cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)
            if (par > 0):
                #checks if bounding box is close enough using pedestrian detector
                if (abs(((xB + xA) / 2) - (p1[0] + p2[0]) / 2) > 120
                        or abs(((yB + yA) / 2) - ((p1[1] + p2[1]) / 2)) > 40):
                    cv2.circle(image, (((xB + xA) / 2), ((yB + yA) / 2)), 2,
                               (0, 0, 255), 3)
                    cv2.circle(image, (p1[0], p1[1]), 2, (0, 255, 0), 3)
                    print("fixing bbox")
                    xC = xB
                    yC = yA
                    #height=h*5
                    #width= (int) (w*3.5)
                    #creates new bounding box and creates/initializes a new tracker
                    c, r, w, h = xC + width / 2, yC + height / 2, width, height
                    bbox = (c, r, w, h)
                    trackerVar += 1
                    varia = str(trackerVar)
                    varia = cv2.Tracker_create("KCF")
                    ok = varia.init(frame, bbox)
#sets whether it should be moving forward or backward or neither
                if ((float)(yB - yA) / ratioY > 1.3):
                    position = 2
                    print(float(yB - yA) / ratioY)
                elif ((float)(yB - yA) / ratioY < 0.95):
                    position = 1
                else:
                    position = 0
            else:
                print("couldn't find person")
                #logic for setting commands for drone- could be accomplished much easier with global variables
                # but not sure if possible with current framework
                if (turn == 0):
                    SendCommand(0, 0, 0, 0)
                    rospy.sleep(0.3)
                    forward = 0
                elif (turn == 1):
                    SendCommand(0, 0, 0.3, 0)
                    forward = 0
                elif (turn == -1):
                    SendCommand(0, 0, -0.3, 0)
                    forward = 0
                if (nonLocate > 5):
                    SendCommand(0, 0, 0, 0)
                    nonLocate = 0
                else:
                    nonLocate += 1
            if (position == 1):
                if (turn == 0):
                    SendCommand(0, 0.1, 0, 0)
                    forward = 1
                elif (turn == 1):
                    SendCommand(0, 0.1, 0.3, 0)
                    forward = 1
                elif (turn == -1):
                    SendCommand(0, 0.1, -0.3, 0)
                    forward = 1
                print("forward")
                #rospy.sleep(1)
            elif (position == 2):
                if (turn == 0):
                    SendCommand(0, -0.1, 0, 0)
                    forward = 2
                elif (turn == 1):
                    SendCommand(0, -0.1, 0.3, 0)
                    forward = 2
                elif (turn == -1):
                    SendCommand(0, -0.1, -0.3, 0)
                    forward = 2
                print("backward")
            else:
                print("neutral")
                if (turn == 0):
                    SendCommand(0, 0, 0, 0)
                    forward = 0
                elif (turn == 1):
                    SendCommand(0, 0, 0.3, 0)
                    forward = 0
                elif (turn == -1):
                    SendCommand(0, 0, -0.3, 0)
                    forward = 0
            num = 0
        else:
            num += 1

#displays current frame with bounding box drawn on
        cv2.imshow("Tracking", frame)
        #finds angle of center of bbox
        angle = findAngle((p1[0] + p2[0]) / 2)
        #checks angle and logic for setting drone commands
        if (angle <= 5 and angle >= -5 and var > 0):
            if (forward == 0):
                SendCommand(0, 0, 0, 0)
            elif (forward == 1):
                SendCommand(0, 0.1, 0, 0)
            elif (forward == -1):
                SendCommand(0, -0.1, 0, 0)
            turn = 0
            var = 0
        elif (angle > 5 and var == 0):
            if (forward == 0):
                SendCommand(0, 0, -0.3, 0)
            elif (forward == 1):
                SendCommand(0, 0.1, -0.3, 0)
            elif (forward == -1):
                SendCommand(0, -0.1, -0.3, 0)

            turn = -1
            var += 1
        elif (angle < -5 and var == 0):
            if (forward == 0):
                SendCommand(0, 0, 0.3, 0)
            elif (forward == 1):
                SendCommand(0, 0.1, 0.3, 0)
            elif (forward == -1):
                SendCommand(0, -0.1, 0.3, 0)
            turn = 1
            var += 1
        #update angle
        k = cv2.waitKey(1) & 0xff
        if k == 27: break
コード例 #27
0
def mainfunc():
    import numpy as np
    import cv2
    import nms
    import os
    import vehicle_tracking
    import datetime, time
    # import test_database
    # import prediction_regression
    # import sklearn_test
    # import PyQt4

    update = False


    xc=10
    yc=20
    line1 = [(10,380),(180,20)]
    line2 = [(465,20),(750,295)]
    drawLines = False

    cars_count=0
    last_cars_count=0

    car_cascade = cv2.CascadeClassifier('haarcascade_nepalese_vehicles.xml')
    cap = cv2.VideoCapture('cctv1.mp4')
    ret, img = cap.read()
    scaling_factor = 0.4
    height=int(np.size(img,0) * scaling_factor)
    width=int(np.size(img,1) * scaling_factor)
    detection_region = [(0, int(0.3*height)), (width, int(0.95 * height))]
    carspresent = []


    bikes_count=0
    last_bikes_count=0
    bikespresent = []
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector( cv2.HOGDescriptor_getDefaultPeopleDetector() )


    def draw_detections(img, rects, thickness = 1):
        for x, y, w, h in rects:
            # the HOG detector returns slightly larger rectangles than the real objects.
            # so we slightly shrink the rectangles to get a nicer output.
            # pad_w, pad_h = int(0.15*w), int(0.05*h)
            # cv2.rectangle(img, (x+pad_w, y+pad_h), (x+w-pad_w, y+h-pad_h), (0, 255, 0), thickness)
            pad_w, pad_h = int(0.25*w), int(0.25*h)
            cv2.rectangle(img, (int(x+1.5*pad_w), int(y+1.5*pad_h)), (x+w-pad_w, y+h-pad_h), (255, 0, 0), thickness)




    def compareObjects(cars, carspresent, cars_count, CENTROID_DIFFERENCE_THRESH, THRESHOLD_OF_HISTOGRAM):

        # if there are no cars present in the frame, then there is no point in executing any of these statements
        if len(cars) != 0:
            # roi_cars is the array of regions of interest of all the cars present in the current frame
            roi_cars = []
            roi_carspresent = []

            for (x, y, w, h) in cars:
                roi_cars.append(img[y:y + h, x:x + w])

            if len(carspresent) == 0:
                carspresent = cars
                roi_carspresent = roi_cars
            else:
                for (x, y, w, h) in carspresent:
                    roi_carspresent.append(img[y:y + h, x:x + w])

            # declaration of some temporary variables to make it easier for executing the for loops
            carspresent_temp = carspresent
            cars_temp = cars
            roi_carspresent_temp = roi_carspresent
            roi_cars_temp = roi_cars


            # for every car 'A' present in the 'carspresent' array, i.e. of the previous frame,
            # 'A' is compared with every other car 'B' in the 'cars' array, i.e. of the current frame.
            # If there is some degree of similarity between A and B, car A is replaced by B;
            # if there is no similarity at all, B must be a new car, so it is added to the
            # carspresent array and the count is incremented.
            for i in range(0, len(cars_temp)):
                newcar = cars_temp[i]
                roi_newcar = roi_cars_temp[i]
                update = False
                for j in range(0, len(carspresent_temp)):
                    roi_presentcar = roi_carspresent_temp[j]
                    presentcar = carspresent_temp[j]
                    if vehicle_tracking.isCentroidNear(newcar,presentcar, CENTROID_DIFFERENCE_THRESH = CENTROID_DIFFERENCE_THRESH):
                         # and vehicle_tracking.compareHist(roi_newcar, roi_presentcar, THRESHOLD_OF_HISTOGRAM = THRESHOLD_OF_HISTOGRAM):
                        update = True
                        carspresent[j] = cars[i]
                        roi_carspresent[j] = roi_cars[i]
                if update == False:
                    carspresent = np.vstack((carspresent, cars[i]))
                    cars_count = cars_count+1
                    roi_carspresent.append(roi_cars[i])

        return carspresent, cars_count
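
    # The vehicle_tracking helper module used above is not included in this example.
    # As an illustrative sketch only (an assumption, not the project's actual code),
    # a centroid test of the kind the comments above describe could look like this:
    def is_centroid_near_sketch(box_a, box_b, CENTROID_DIFFERENCE_THRESH):
        ax, ay, aw, ah = box_a
        bx, by, bw, bh = box_b
        # compare the Euclidean distance between the box centres against the threshold
        ca = (ax + aw / 2.0, ay + ah / 2.0)
        cb = (bx + bw / 2.0, by + bh / 2.0)
        distance = ((ca[0] - cb[0]) ** 2 + (ca[1] - cb[1]) ** 2) ** 0.5
        return distance < CENTROID_DIFFERENCE_THRESH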




    while 1:
        date = time.localtime()
        ret, img = cap.read()
        img = cv2.resize(img, None, fx=scaling_factor, fy=scaling_factor, interpolation=cv2.INTER_AREA)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        #***********************************************************************************************************************************************************
        # cars_without_nms may contain overlapping bounding boxes;
        # non-maximum suppression removes the overlapping boxes.
        # overlapThresh is the threshold; for more info visit pyimagesearch.com
        # (an illustrative sketch of such a routine appears after this example).
        cars_without_nms = car_cascade.detectMultiScale(gray, 6, 5)
        cars = nms.non_max_suppression_fast(cars_without_nms, overlapThresh=0.1)

        #---------------------------------------------------------------------------------------------------------------------
        # removal of false positives and filtering out those cars outside of the detection region
        cars_inside_box = []
        if len(cars)!=0:
            # cars, which is a numpy array, is converted to a list before removing false positives;
            # after removing the false positives, the result is converted back to a numpy array
            cars = vehicle_tracking.removeFalsePositives(cars.tolist(), LOWER_LIMIT=41, UPPER_LIMIT=200)
            cars = np.array(cars)


            i = 0
            for (x, y, w, h) in cars:
                if y > detection_region[0][1] and y + h < detection_region[1][1]:
                    cars_inside_box.append(cars[i].tolist())
                i = i + 1
        cars_inside_box = np.array(cars_inside_box)

        #---------------------------------------------------------------------------------------------------------------------


        # The compareObjects function compares all the objects detected in this frame with the cars present in the previous frame;
        # if new objects have arrived in the frame, it increments the count by the number of new objects.
        carspresent, cars_count = compareObjects(cars_inside_box,carspresent,cars_count, CENTROID_DIFFERENCE_THRESH=50, THRESHOLD_OF_HISTOGRAM=0.6)
        carspresent = nms.non_max_suppression_fast(carspresent, 0.9999999)
        # The emptyVehiclesOutOfWindow function removes the bounding boxes
        # of those vehicles that have possibly left the window (detection region).
        carspresent = vehicle_tracking.emptyVehiclesOutOfWindow(carspresent, cars_count, last_cars_count)
        last_cars_count = cars_count


        for (x, y, w, h) in cars_inside_box:
            cv2.rectangle(gray, (x, y), (x + w, y + h), (255, 0, 0), 1)

        cv2.putText(gray, 'Four Wheelers= ' + str(cars_count), (0, height), cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 255), thickness=2)






        #***********************************************************************************************************************************************************

        # The default HOG people detector is used here to pick up riders, which are counted as two-wheelers.
        bikes_without_nms, w = hog.detectMultiScale(gray, winStride=(8, 8), padding=(16, 16), scale=1.25)

        # bikes_without_nms may contain overlapping bounding boxes;
        # non-maximum suppression removes the overlapping boxes.
        # overlapThresh is the threshold; for more info visit pyimagesearch.com
        bikes = nms.non_max_suppression_fast(bikes_without_nms, overlapThresh=0.8)

        # ---------------------------------------------------------------------------------------------------------------------
        # removal of false positives and filtering out those bikes outside of the detection region
        bikes_inside_box = []
        if len(bikes) != 0:
            # bikes, which is a numpy array, is converted to a list before removing false positives;
            # after removing the false positives, the result is converted back to a numpy array
            print("bikes before: ", bikes)
            bikes = vehicle_tracking.removeFalsePositives(bikes.tolist(), UPPER_LIMIT=400)
            bikes = np.array(bikes)
            print("bikes after: ", bikes)

            i = 0
            for (x, y, w, h) in bikes:
                if y > detection_region[0][1] and y + h < detection_region[1][1]:
                    bikes_inside_box.append(bikes[i].tolist())
                i = i + 1
        bikes_inside_box = np.array(bikes_inside_box)

        # ---------------------------------------------------------------------------------------------------------------------


        # The compareObjects function compares all the objects detected in this frame with the bikes present in the previous frame;
        # if new objects have arrived in the frame, it increments the count by the number of new objects.
        bikespresent, bikes_count = compareObjects(bikes_inside_box, bikespresent, bikes_count,
                                                   CENTROID_DIFFERENCE_THRESH=45, THRESHOLD_OF_HISTOGRAM=0.5)
        bikespresent = nms.non_max_suppression_fast(bikespresent, 0.9999999)
        # The emptyVehiclesOutOfWindow function removes the bounding boxes
        # of those vehicles that have possibly left the window (detection region).
        bikespresent = vehicle_tracking.emptyVehiclesOutOfWindow(bikespresent, bikes_count, last_bikes_count)
        last_bikes_count = bikes_count

        # print "bikes_inside_box: \n", bikes_inside_box
        # print "bikes_present: \n", bikespresent


        # draw_detections(gray,bikes_inside_box)
        draw_detections(gray, bikes_inside_box)

        cv2.putText(gray, 'Two Wheelers= ' + str(bikes_count), (int(width/2), height), cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 255), thickness=2)
        cv2.rectangle(gray, detection_region[0], detection_region[1], (255, 255, 0), thickness=2)



        #***********************************************************************************************************************************************************








        cv2.rectangle(gray, detection_region[0], detection_region[1], (0, 0, 255))

        cv2.circle(gray, (xc, yc), 1, (255, 0, 0), thickness=3)
        if drawLines:
            cv2.line(gray, line1[0], line1[1], (255, 0, 0))
            cv2.line(gray, line2[0], line2[1], (255, 0, 0))
        cv2.imshow("main window", gray)

        # print(xc, yc)
        k = cv2.waitKey(15) & 0xff
        if k == 27:        # Esc: quit
            break
        elif k == 119:     # 'w': move the cursor up
            yc = yc - 5
        elif k == 97:      # 'a': move the cursor left
            xc = xc - 5
        elif k == 115:     # 's': move the cursor down
            yc = yc + 5
        elif k == 100:     # 'd': move the cursor right
            xc = xc + 5
        elif k == 49:      # '1': set the starting point of line 1
            line1[0] = (xc, yc)
            print('Line 1, starting point set to: ', line1[0])
        elif k == 50:      # '2': set the ending point of line 1
            line1[1] = (xc, yc)
            print('Line 1, ending point set to: ', line1[1])
        elif k == 51:      # '3': set the starting point of line 2
            line2[0] = (xc, yc)
            print('Line 2, starting point set to: ', line2[0])
        elif k == 52:      # '4': set the ending point of line 2
            line2[1] = (xc, yc)
            print('Line 2, ending point set to: ', line2[1])
        elif k == 32:      # space: draw the reference lines
            drawLines = True

        # print date.tm_min
        # if (date.tm_min == 15 or date.tm_min == 30 or date.tm_min == 45 or date.tm_min == 00) and update==False:
        #     update = True
        #     test_database.test_func(cars_count+bikes_count)
        #     sklearn_test.sklearn_func()
        #     # Choice = prediction_regression.compare_forecast()
        #     cars_count=0
        #     bikes_count=0


        # cv2.waitKey(0)
        print"-----------------------------------------------------------------------------"


    cap.release()
    cv2.destroyAllWindows()
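
The nms.non_max_suppression_fast helper imported by this example is not shown above. As a rough sketch only (an assumption about its interface, not the project's actual module), a fast non-maximum suppression routine that accepts the (x, y, w, h) boxes produced by detectMultiScale and keeps only weakly overlapping ones could look like this:

import numpy as np

def non_max_suppression_fast(boxes, overlapThresh):
    # boxes: array of (x, y, w, h) rows; returns the surviving boxes in the same format
    if len(boxes) == 0:
        return np.array([])
    boxes = np.asarray(boxes, dtype="float")
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 0] + boxes[:, 2]
    y2 = boxes[:, 1] + boxes[:, 3]
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    idxs = np.argsort(y2)
    pick = []
    while len(idxs) > 0:
        # keep the box with the largest bottom edge, then drop every remaining box
        # that overlaps it by more than overlapThresh
        last = len(idxs) - 1
        i = idxs[last]
        pick.append(i)
        xx1 = np.maximum(x1[i], x1[idxs[:last]])
        yy1 = np.maximum(y1[i], y1[idxs[:last]])
        xx2 = np.minimum(x2[i], x2[idxs[:last]])
        yy2 = np.minimum(y2[i], y2[idxs[:last]])
        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)
        overlap = (w * h) / area[idxs[:last]]
        idxs = np.delete(idxs, np.concatenate(([last], np.where(overlap > overlapThresh)[0])))
    return boxes[pick].astype("int")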
コード例 #28
0
    def run(self, path, name, mode, frames, set):
        filename = os.path.basename(path)  # get the file name
        print("filename", filename)
        # get the middle directory name, e.g. nm-01
        catalog = filename.split("-")[1] + "-" + filename.split("-")[2]
        print("catalog", catalog)
        angle = filename.split('-')[3].split(".")[0]  # get the angle
        print("angle", angle)

        id = self.getid(name)
        print("id=", id)
        hog = cv2.HOGDescriptor()
        hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
        if mode == "knn":
            fgbg = cv2.createBackgroundSubtractorKNN(history=frames,
                                                     detectShadows=1)
        else:
            fgbg = cv2.createBackgroundSubtractorMOG2(history=frames,
                                                      detectShadows=1)
        #count = 1
        video = cv2.VideoCapture(path)
        #total = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
        #video.set(cv2.CAP_PROP_POS_FRAMES, 2)
        # print(video.isOpened())
        framerate = video.get(cv2.CAP_PROP_FPS)
        # print("framerate",framerate)
        # print("path", path)
        try:
            id2 = "{0:03}".format(id)
            # id3 = "{0:02}".format(id)
            # print("id",id)
            # print("id2",id2)
            # print("id3",id3)
            os.makedirs("dataset/test_gallery/" + str(id2))

        except:
            print("diractory maked")
        existing = os.listdir("dataset/test_gallery/" + id2)
        seq = len(existing) + 1
        try:
            os.makedirs("dataset/test_gallery/" + str(id2) + "/" +
                        catalog.format(seq) + "/" + angle)
            # os.makedirs("dataset/test_probe/"+str(id2) + "/nm-{0:02}".format(seq) + "/090")

        except:
            print("/nm diractory maked")
        i = 1
        mark = 0
        markf = 0
        mark5 = 0
        images = None
        while (video.isOpened()):
            #frameId = video.get(1)
            success, image = video.read()

            if success != 1:
                break
            image = cv2.resize(image, (400, 300), interpolation=cv2.INTER_AREA)
            image_copy = image.copy()
            fgmask = fgbg.apply(image_copy)
            # remove noise from the foreground mask by dilation followed by erosion (see the sketch after this example)
            th = cv2.threshold(fgmask, 244, 255, cv2.THRESH_BINARY)[1]
            thresh = cv2.dilate(th,
                                cv2.getStructuringElement(
                                    cv2.MORPH_ELLIPSE, (set[0], set[1])),
                                iterations=2)
            th = cv2.erode(thresh,
                           cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                     (set[2], set[2])),
                           iterations=2)
            if image is not None:

                # frame = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # convert to grayscale
                rects, weights = hog.detectMultiScale(image)
                print("person:", len(rects))

                print(i)
                mark5 = mark5 + 1
                if mark5 > 60:
                    mark5 = 1
                    mark = 0
                    markf = 0

                if len(rects) > 0:
                    mark = mark + 1
                    #print(mark)
                    if mark > 10:
                        images = 1
                else:
                    markf = markf + 1
                    if markf > 30:
                        images = None
                if (images is not None):
                    #cv2.imshow("BGR2GRAY", frame)
                    #cv2.imshow("fgmask", fgmask)
                    # ret, thresh = cv2.threshold(fgmask, 244, 255, cv2.THRESH_BINARY)  # convert to a binary image
                    #image=thresh
                    #image= self.noise(thresh)
                    print('sssssssssssssssssssssss')
                    print(
                        cv2.findContours(th, cv2.RETR_LIST,
                                         cv2.CHAIN_APPROX_NONE)[0])
                    contours, _ = cv2.findContours(th, cv2.RETR_LIST,
                                                   cv2.CHAIN_APPROX_NONE)
                    #cv2.imshow("thresh", thresh)
                    cimg = np.zeros_like(th)
                    #cv2.imshow("thresh", thresh)
                    print("contours:", len(contours))
                    for j in range(len(contours)):
                        # x, y, w, h = cv2.boundingRect(contours[j])  # draw a bounding rectangle
                        #cimg = cv2.rectangle(cimg,(x,y),(x+w,y+h),(255,255,255),2)
                        cv2.drawContours(cimg,
                                         contours,
                                         j,
                                         color=255,
                                         thickness=-1)
                        #cimg = self.noise(cimg)
                        #cv2.imshow("drawContours", cimg)
                    if i > 1:  # skip the first n frames
                        filename = ("dataset/test_gallery/{0:03}".format(id) + "/" + catalog + "/" +
                                    angle + "/{0:03}-".format(id) + catalog + "-" + angle + "-" +
                                    "{0:03}".format(i - 4) + ".png")
                        # filename = "dataset/test_probe/{0:03}".format(id) + "/nm-{0:02}".format(seq) + "/090" + "/{0:03}-nm-".format(id) + "{0:02}-".format(seq) + "090-" + "{0:03}".format(i-4) + ".png"

                        print(filename)
                        cv2.imwrite(filename, cimg)
            i = i + 1
            if (success != True):
                break
            if i > 999:
                break
        video.release()
        sql = "INSERT INTO person (ID,Name) \
                   VALUES( '{}', '{}')".format(str(id), str(name))
        dbmain(sql)
        # write the name and id and save them to a JSON file
        # self.data["user1"].update({str(id):str(name)})
        # fw = open('directory/user_info.json', 'w', encoding='utf-8')
        # json.dump(self.data,fw,ensure_ascii=False,indent=4)
        # fw.close()
        # delete blank frames
        listd = os.listdir("dataset/test_gallery/" + str(id2) + "/" + catalog +
                           "/" + angle)
        # listd = os.listdir("dataset/test_probe/" + str(id2) + "/nm-{0:02}".format(seq) + "/090")
        print(listd)
        # for l in listd[-1:]:  # delete the last 50 frames
        #     os.remove("vsil/"+str(id2)+"/nm-{0:03}/".format(seq)+l+"/090")
        #     print("deleting",l)
        # for lh in listd[0:3]:
        #     os.remove("vsil/"+str(id2)+"/nm-{0:03}/".format(seq)+lh)
        #     print("deleting",lh)
        # print('done')
        #count+=1

        print("done")
コード例 #29
0
        # so we slightly shrink the rectangles to get a nicer output.
        pad_w, pad_h = int(0.15 * w), int(0.05 * h)
        cv2.rectangle(img, (x + pad_w, y + pad_h),
                      (x + w - pad_w, y + h - pad_h), (0, 255, 0), thickness)


if __name__ == '__main__':
    import sys
    from glob import glob
    import itertools as it
    from Background_Subtraction import sendImg

    print(__doc__)

    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    img3 = sendImg()
    #replace the image source
    default = ['000001.jpg'] if len(sys.argv[1:]) == 0 else []

    for fn in it.chain(*map(glob, default + sys.argv[1:])):
        print(fn, ' - ')
        try:
            img = cv2.imread(fn)
            if img is None:
                print('Failed to load image file:', fn)
                continue
        except:
コード例 #30
0
ファイル: prove.py プロジェクト: dkrammer/cmput412-contest1
    def camera_callback(self, data):
        try:
            # We select bgr8 because it is the default OpenCV encoding
            cv_image = self.bridge_object.imgmsg_to_cv2(
                data, desired_encoding="bgr8")
        except CvBridgeError as e:
            print(e)

        #image_1 = cv2.imread('/home/user/catkin_ws/src/project/images/wanted.jpg',1)
        image_1 = cv2.imread(
            '/home/user/catkin_ws/src/cmput412-contest1/contest/src/test_cube.jpg',
            1)
        image_2 = cv_image
        hog = cv2.HOGDescriptor()
        hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

        #Size for the image
        imX = 700
        imY = 500

        #img_2 = cv2.resize(cv_image,(imX,imY))
        img_2 = cv2.resize(cv_image, (imX, imY))
        image_2 = img_2

        gray_2 = cv2.cvtColor(img_2, cv2.COLOR_RGB2GRAY)

        boxes_2, weights_2 = hog.detectMultiScale(gray_2, winStride=(8, 6))
        boxes_2 = np.array([[x, y, x + w, y + h] for (x, y, w, h) in boxes_2])

        for (xA, yA, xB, yB) in boxes_2:

            #Center in X
            medX = xB - xA
            xC = int(xA + (medX / 2))

            #Center in Y
            medY = yB - yA
            yC = int(yA + (medY / 2))

            #Draw a circle in the center of the box
            cv2.circle(img_2, (xC, yC), 1, (0, 255, 255), -1)

            # display the detected boxes in the colour picture
            cv2.rectangle(img_2, (xA, yA), (xB, yB), (255, 255, 0), 2)

        gray_1 = cv2.cvtColor(image_1, cv2.COLOR_RGB2GRAY)
        gray_2 = cv2.cvtColor(image_2, cv2.COLOR_RGB2GRAY)

        #Initialize the ORB Feature detector
        orb = cv2.ORB_create(nfeatures=1000)

        # Make a copy of the original image to display the keypoints found by ORB.
        # This copy is just for visualisation.
        preview_1 = np.copy(image_1)
        preview_2 = np.copy(image_2)

        #Create another copy to display points only
        dots = np.copy(image_1)

        #Extract the keypoints from both images
        train_keypoints, train_descriptor = orb.detectAndCompute(gray_1, None)
        test_keypoints, test_descriptor = orb.detectAndCompute(gray_2, None)

        #Draw the found Keypoints of the main image
        cv2.drawKeypoints(image_1,
                          train_keypoints,
                          preview_1,
                          flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        cv2.drawKeypoints(image_1, train_keypoints, dots, flags=2)

        #############################################
        ################## MATCHER ##################
        #############################################

        #Initialize the BruteForce Matcher
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

        #Match the feature points from both images
        matches = bf.match(train_descriptor, test_descriptor)

        # The matches with the shortest distances are the ones we want.
        matches = sorted(matches, key=lambda x: x.distance)
        #Catch some of the matching points to draw

        good_matches = matches[:self.x]

        #Parse the feature points
        train_points = np.float32([
            train_keypoints[m.queryIdx].pt for m in good_matches
        ]).reshape(-1, 1, 2)
        test_points = np.float32([
            test_keypoints[m.trainIdx].pt for m in good_matches
        ]).reshape(-1, 1, 2)

        # Create a mask that marks the matching points (inliers).
        # With the homography we are trying to find the perspective transform between two planes,
        # using the non-deterministic RANSAC method.
        M, mask = cv2.findHomography(train_points, test_points, cv2.RANSAC,
                                     5.0)
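        # 'mask' flags the RANSAC inliers; as an illustrative check (not in the original
        # project), count them to gauge how trustworthy the homography is:
        inlier_count = int(mask.ravel().sum()) if mask is not None else 0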

        #Catch the width and height from the main image
        h, w = gray_1.shape[:2]

        #Create a floating matrix for the new perspective
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)

        #Create the perspective in the result
        dst = cv2.perspectiveTransform(pts, M)

        #Draw the matching lines
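        # The drawing code for the match lines is not included in this example; as an
        # illustrative sketch (an assumption), the good matches collected above could be
        # visualised with cv2.drawMatches:
        # matches_img = cv2.drawMatches(image_1, train_keypoints, image_2, test_keypoints,
        #                               good_matches, None, flags=2)
        # cv2.imshow('Matches', matches_img)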

        # Draw the points of the new perspective in the result image (This is considered the bounding box)
        result = cv2.polylines(image_2, [np.int32(dst)], True, (50, 0, 255), 3,
                               cv2.LINE_AA)

        #addition = cv2.add(img_2,image_2)
        cv2.imshow('image', img_2)
        cv2.imshow('Points', preview_1)

        cv2.imshow('Detection', image_2)
        #cv2.imshow('Detection',addition)

        cv2.waitKey(1)
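
The subscriber setup for this callback is not part of the excerpt. A minimal sketch of how a callback like camera_callback is typically wired up with rospy and cv_bridge (the class, node, and topic names here are assumptions, not taken from the project):

import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge

class PersonDetectorNode(object):
    def __init__(self):
        self.bridge_object = CvBridge()
        self.x = 20  # number of best ORB matches to keep (illustrative value)
        # subscribe to the camera topic; the callback above is invoked once per frame
        self.image_sub = rospy.Subscriber('/camera/rgb/image_raw', Image, self.camera_callback)

    def camera_callback(self, data):
        pass  # body as shown above

if __name__ == '__main__':
    rospy.init_node('person_detector_node', anonymous=True)
    PersonDetectorNode()
    rospy.spin()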