def main():
    global imgBuf, label
    label = open('voc_labels.txt').readlines()

    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    #cap = cv2.VideoCapture('../sample-videos/people-detection.mp4')

    ie = iewrap.ieWrapper('public/mobilenet-ssd/FP16/mobilenet-ssd.xml', 'CPU',
                          10)
    ie.setCallback(callback)

    while True:
        ret, img = cap.read()
        if not ret:
            break
        refId = ie.asyncInfer(img)  # Inference
        imgBuf[refId] = img
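
The `callback` function and `imgBuf` buffer referenced above are not part of this snippet. A minimal sketch of what they might look like, assuming the wrapper invokes the callback with the request ID and the raw SSD output ([1,1,100,7]); displaying directly from a callback is a simplification:

imgBuf = {}

def callback(refId, output):
    # Hypothetical completion handler: draw the detections on the buffered frame
    img = imgBuf.pop(refId)
    for obj in output.reshape((-1, 7)):  # obj = [image_id, label, conf, xmin, ymin, xmax, ymax]
        if obj[2] > 0.75:
            x1, y1 = int(obj[3] * img.shape[1]), int(obj[4] * img.shape[0])
            x2, y2 = int(obj[5] * img.shape[1]), int(obj[6] * img.shape[0])
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 255), 2)
            cv2.putText(img, label[int(obj[1])].rstrip(), (x1, y1),
                        cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 255), 2)
    cv2.imshow('output', img)
    cv2.waitKey(1)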
Example #2

def main():
    global imgBuf, im_back

    cap = cv2.VideoCapture(0)
    #   cap = cv2.VideoCapture(r'.\rsc\mov\UseCase3.mp4')

    ie = iewrap.ieWrapper(
        r'.\intel\face-detection-adas-0001\FP16\face-detection-adas-0001.xml',
        'CPU', 10)
    ie.setCallback(callback)

    while True:
        ret, img = cap.read()
        if not ret:
            break

        img = cv2.resize(img, (1600, 900))

        refId = ie.asyncInfer(img)  # Inference
        imgBuf[refId] = img
Example #3

def main():
    ie_detect = iewrap.ieWrapper(model_det,  'CPU')

    # Open USB webcams
    cam = cv2.VideoCapture(0)
    #cam = cv2.VideoCapture(input_movie)
    cam.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    hm = heatmap(10, 10, 30)

    n = 0
    while cv2.waitKey(1) != 27:  # 27 == ESC
        ret, img = cam.read()
        if not ret:
            break

        img_out = img.copy()
        det = ie_detect.blockInfer(img).reshape((200,7))      # Detect objects

        img_out = img_out >> 1  # Darken the frame (right-shift halves every pixel value)
        for obj in det:         # obj = [ image_id, label, conf, xmin, ymin, xmax, ymax ]
            if obj[2] > 0.75:   # Confidence > 75% 
                xmin = abs(int(obj[3] * img_out.shape[1]))
                ymin = abs(int(obj[4] * img_out.shape[0]))
                xmax = abs(int(obj[5] * img_out.shape[1]))
                ymax = abs(int(obj[6] * img_out.shape[0]))
                cv2.rectangle(img_out, (xmin, ymin), (xmax, ymax), (  0,255,255), 2)
                x = int((obj[3]+obj[5])/2*hm.num_x)
                y = int((obj[4]+obj[6])/2*hm.num_y)
                hm.addValue(x, y, 4)

        hm.generateFrame()
        n = (n+1) % 30
        if n==0:
            hm.incrementTime()
        frame = cv2.resize(hm.frame, dsize=(img_out.shape[1], img_out.shape[0]), 
                        interpolation = cv2.INTER_CUBIC)   # INTER_AREA, INTER_LINEAR, INTER_CUBIC
        frame = frame | img_out
        cv2.imshow('output', frame)
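
The `heatmap` class used above is not included in this snippet. A minimal interface-compatible sketch, assuming heatmap(num_x, num_y, max_val) builds a num_x x num_y accumulation grid, incrementTime() decays old detections, and frame holds the rendered BGR image (cv2 and numpy as np imported):

class heatmap:
    # Hypothetical stand-in for the heatmap helper; the real implementation is not shown
    def __init__(self, num_x, num_y, max_val):
        self.num_x, self.num_y, self.max_val = num_x, num_y, max_val
        self.grid = np.zeros((num_y, num_x), dtype=np.float32)
        self.frame = np.zeros((num_y, num_x, 3), dtype=np.uint8)

    def addValue(self, x, y, val):
        if 0 <= x < self.num_x and 0 <= y < self.num_y:
            self.grid[y, x] = min(self.grid[y, x] + val, self.max_val)

    def incrementTime(self):
        self.grid *= 0.9  # decay accumulated heat over time

    def generateFrame(self):
        norm = (self.grid * (255.0 / self.max_val)).astype(np.uint8)
        self.frame = cv2.applyColorMap(norm, cv2.COLORMAP_JET)  # render as a color map
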
Example #4

import iewrap

import cv2
import numpy as np

label = open('voc_labels.txt').readlines()
img = cv2.imread('car_1.bmp')

ie = iewrap.ieWrapper('public/mobilenet-ssd/FP16/mobilenet-ssd.xml', 'CPU')

output = ie.blockInfer(img)[0]  # Inference

# Draw bounding boxes and labels onto the image
output = output.reshape((100, 7))
img_h, img_w, _ = img.shape
for obj in output:
    imgid, clsid, confidence, x1, y1, x2, y2 = obj
    if confidence > 0.8:  # Draw a bounding box and label when confidence>0.8
        x1 = int(x1 * img_w)
        y1 = int(y1 * img_h)
        x2 = int(x2 * img_w)
        y2 = int(y2 * img_h)
        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 255), thickness=4)
        cv2.putText(img,
                    label[int(clsid)][:-1], (x1, y1),
                    cv2.FONT_HERSHEY_PLAIN,
                    fontScale=4,
                    color=(0, 255, 255),
                    thickness=4)

# Displaying the result image
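# The display code is truncated here. A minimal completion, assuming matplotlib
# is available (the same display pattern appears in Example #7 below):
import matplotlib.pyplot as plt
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))  # OpenCV uses BGR; matplotlib expects RGB
plt.show()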
Example #5

import math

import iewrap
import numpy as np
import cv2

# Open a USB webcam
cam = cv2.VideoCapture(0)
cam.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

ie_faceDet  = iewrap.ieWrapper('./intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml',             'CPU')
ie_headPose = iewrap.ieWrapper('./intel/head-pose-estimation-adas-0001/FP16/head-pose-estimation-adas-0001.xml', 'CPU')
ie_faceLM   = iewrap.ieWrapper('./intel/facial-landmarks-35-adas-0002/FP16/facial-landmarks-35-adas-0002.xml',   'CPU')
ie_gaze     = iewrap.ieWrapper('./intel/gaze-estimation-adas-0002/FP16/gaze-estimation-adas-0002.xml',           'CPU')

print('Please connect a USB web cam. Hit ESC to exit.')
while cv2.waitKey(1) != 27:         # 27 == ESC
    ret, img = cam.read()
    if not ret:
        break
    img = cv2.flip(img, 1)          # mirror the image horizontally

    # Detect faces in the image
    det = ie_faceDet.blockInfer(img).reshape((200,7))   # [1,1,200,7]
    for obj in det:                                     # obj = [ image_id, label, conf, xmin, ymin, xmax, ymax ]
        if obj[2] > 0.75:                               # Confidence > 75% 
            xmin = abs(int(obj[3] * img.shape[1]))
            ymin = abs(int(obj[4] * img.shape[0]))
            xmax = abs(int(obj[5] * img.shape[1]))
            ymax = abs(int(obj[6] * img.shape[0]))
            face = img[ymin:ymax,xmin:xmax].copy()      # Crop the found face
            cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (255,255,0), 2)
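            # The snippet is truncated here. A hypothetical sketch of the next stage
            # (not the original code): run the head-pose model on the cropped face.
            # How iewrap exposes the model's yaw/pitch/roll outputs is an assumption.
            pose = ie_headPose.blockInfer(face)
            cv2.putText(img, 'pose: {}'.format(pose), (xmin, ymax + 20),
                        cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 0), 1)

    cv2.imshow('gaze estimation', img)  # show the annotated frame each iteration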
Example #6

def main():
    ie_detect = iewrap.ieWrapper(model_det, 'CPU')
    ie_reid = iewrap.ieWrapper(model_reid, 'CPU')

    # Open USB webcams
    cap = cv2.VideoCapture(0)
    #cap = cv2.VideoCapture("../sample-videos/people-detection.mp4")
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    objid = 0
    time_out = 3  # how long to retain a feature vector (seconds)
    feature_db = []

    while cv2.waitKey(1) != 27:  # 27 == ESC
        ret, image = cap.read()
        if not ret:
            break
        #image = cv2.flip(image, 1)

        detObj = ie_detect.blockInfer(image).reshape((200, 7))  # [1,1,200,7]
        curr_feature = []
        for obj in detObj:  # obj = [ image_id, label, conf, xmin, ymin, xmax, ymax ]
            if obj[2] > 0.75:  # Confidence > 75%
                xmin = abs(int(obj[3] * image.shape[1]))
                ymin = abs(int(obj[4] * image.shape[0]))
                xmax = abs(int(obj[5] * image.shape[1]))
                ymax = abs(int(obj[6] * image.shape[0]))
                class_id = int(obj[1])

                obj_img = image[ymin:ymax,
                                xmin:xmax].copy()  # Crop the found object

                # Obtain feature vector of the detected object using re-identification model
                featVec = ie_reid.blockInfer(obj_img).reshape(
                    (256, ))  # 256-element feature vector
                curr_feature.append({
                    'pos': [xmin, ymin, xmax, ymax],
                    'feature': featVec,
                    'id': -1
                })

        # Discard timed-out objects
        now = time.monotonic()
        for feature in list(feature_db):  # iterate over a copy so removal doesn't skip items
            if feature['time'] + time_out < now:
                feature_db.remove(feature)  # discard the feature vector from the DB
                #print("Discarded  : id {}".format(feature['id']))

        # Draw boundary line
        outimg = image.copy()
        cv2.line(outimg, boundaryLine[0], boundaryLine[1], (0, 255, 255), 8)
        cv2.putText(outimg, str(crossCount[0]), boundaryLine[0],
                    cv2.FONT_HERSHEY_PLAIN, 4, (0, 255, 255), 2)
        cv2.putText(outimg, str(crossCount[1]), boundaryLine[1],
                    cv2.FONT_HERSHEY_PLAIN, 4, (0, 255, 255), 2)

        # if no object found, skip the rest of processing
        if len(curr_feature) == 0:  # no objects detected in this frame
            cv2.imshow('image', outimg)
            continue

        # If any objects are registered in the DB, assign the registered IDs to the most similar objects in the current image
        if len(feature_db) > 0:
            # Create a matrix of cosine distances
            cos_sim_matrix = [[
                distance.cosine(curr_feature[j]["feature"],
                                feature_db[i]["feature"])
                for j in range(len(curr_feature))
            ] for i in range(len(feature_db))]
            # Solve the feature-matching problem with the Hungarian assignment algorithm
            hungarian = Munkres()
            combination = hungarian.compute(cos_sim_matrix)

            # Assign IDs to the object pairs based on the assignment result
            for dbIdx, currIdx in combination:
                curr_feature[currIdx]['id'] = feature_db[dbIdx]['id']  # assign an ID
                feature_db[dbIdx]['feature'] = curr_feature[currIdx]['feature']  # update the DB with the latest feature vector
                feature_db[dbIdx]['time'] = now  # update the last-seen time
                xmin, ymin, xmax, ymax = curr_feature[currIdx]['pos']
                feature_db[dbIdx]['history'].append(
                    [(xmin + xmax) // 2, (ymin + ymax) // 2])  # position history for the trajectory line
                curr_feature[currIdx]['history'] = feature_db[dbIdx]['history']

        # Register new objects that have no ID yet
        for feature in curr_feature:
            if feature['id'] == -1:  # no similar object is registered in feature_db
                feature['id'] = objid
                feature_db.append(feature)  # register the new feature in the DB
                feature_db[-1]['time'] = now
                xmin, ymin, xmax, ymax = feature['pos']
                feature_db[-1]['history'] = [
                    [(xmin + xmax) // 2, (ymin + ymax) // 2]
                ]  # position history for the trajectory line
                feature['history'] = feature_db[-1]['history']
                objid += 1

        # Draw bounding boxes and IDs
        for obj in curr_feature:
            obj_id = obj['id']  # avoid shadowing the built-in id()
            # Derive a pseudo-random BGR color from the low bits of the ID
            color = ((((~obj_id) << 6) & 0x100) - 1, (((~obj_id) << 7) & 0x100) - 1,
                     (((~obj_id) << 8) & 0x100) - 1)
            xmin, ymin, xmax, ymax = obj['pos']
            cv2.rectangle(outimg, (xmin, ymin), (xmax, ymax), color, 2)
            cv2.putText(outimg, 'ID=' + str(obj_id), (xmin, ymin - 7),
                        cv2.FONT_HERSHEY_COMPLEX, 1.0, color, 1)
            hist = obj['history']
            if len(hist) > 1:
                cv2.polylines(outimg, np.array([hist], np.int32), False, color,
                              4)

            # Detect boundary line crossing
            if len(hist) > 1:
                traj_t0 = (hist[-2][0], hist[-2][1])  # Trajectory of an object
                traj_t1 = (hist[-1][0], hist[-1][1])
                bLine_0 = boundaryLine[0]  # Boundary line
                bLine_1 = boundaryLine[1]
                intersect = checkIntersect(traj_t0, traj_t1, bLine_0,
                                           bLine_1)  # Check whether the two segments intersect
                if intersect:
                    angle = calc_vector_angle(
                        traj_t0, traj_t1, bLine_0, bLine_1
                    )  # Calculate angle between trajectory and boundary line
                    if angle < 180:
                        crossCount[0] += 1
                    else:
                        crossCount[1] += 1
                    cx, cy = calcIntersectPoint(traj_t0, traj_t1, bLine_0,
                                                bLine_1)  # Calculate the intersection point
                    cv2.circle(outimg, (int(cx), int(cy)), 20,
                               (255, 0, 255 if angle < 180 else 0), -1)

        cv2.imshow('image', outimg)

    cv2.destroyAllWindows()
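
The helpers `boundaryLine`, `crossCount`, `checkIntersect`, `calc_vector_angle`, and `calcIntersectPoint` are defined outside this snippet, as are the `time`, `scipy.spatial.distance`, and `munkres.Munkres` imports. A minimal sketch matching the calls above, assuming boundaryLine is a pair of (x, y) endpoints and crossCount a two-element counter list; the endpoint values here are placeholders:

import math

boundaryLine = [(300, 40), (20, 400)]  # placeholder endpoints
crossCount = [0, 0]                    # crossings in each direction

def checkIntersect(p1, p2, p3, p4):
    # True if segment p1-p2 crosses segment p3-p4 (sign test on cross products)
    tc1 = (p1[0] - p2[0]) * (p3[1] - p1[1]) + (p1[1] - p2[1]) * (p1[0] - p3[0])
    tc2 = (p1[0] - p2[0]) * (p4[1] - p1[1]) + (p1[1] - p2[1]) * (p1[0] - p4[0])
    td1 = (p3[0] - p4[0]) * (p1[1] - p3[1]) + (p3[1] - p4[1]) * (p3[0] - p1[0])
    td2 = (p3[0] - p4[0]) * (p2[1] - p3[1]) + (p3[1] - p4[1]) * (p3[0] - p2[0])
    return tc1 * tc2 < 0 and td1 * td2 < 0

def calc_vector_angle(t0, t1, b0, b1):
    # Angle (0-360 deg) of the trajectory vector relative to the boundary vector
    a = math.degrees(math.atan2(t1[1] - t0[1], t1[0] - t0[0]) -
                     math.atan2(b1[1] - b0[1], b1[0] - b0[0]))
    return a % 360

def calcIntersectPoint(p1, p2, p3, p4):
    # Intersection of the lines through p1-p2 and p3-p4 (segments already known to cross)
    d = (p2[0] - p1[0]) * (p4[1] - p3[1]) - (p2[1] - p1[1]) * (p4[0] - p3[0])
    t = ((p4[1] - p3[1]) * (p4[0] - p1[0]) + (p3[0] - p4[0]) * (p4[1] - p1[1])) / d
    return (p1[0] + t * (p2[0] - p1[0]), p1[1] + t * (p2[1] - p1[1]))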
Example #7

import iewrap

import cv2
import numpy as np

label = open('synset_words.txt').readlines()
img = cv2.imread('car.png')

ie = iewrap.ieWrapper('public/googlenet-v1/FP16/googlenet-v1.xml', 'CPU')

output = ie.blockInfer(img).reshape(
    (1000, ))  # Inference, and reshape the result

# Sort class probabilities and display top 5 classes
idx = np.argsort(output)[::-1]
for i in range(5):
    print(idx[i] + 1, output[idx[i]], label[idx[i]][:-1])  # 1-based class index, probability, label

# Displaying the input image
import matplotlib.pyplot as plt
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(img)
plt.show()