Code Example #1
def gen():
    # 'vid' is assumed to be a cv2.VideoCapture opened at module level
    while True:
        ret, frame = vid.read()
        img = cv2.imencode('.jpg', frame)[1].tobytes()
        detected_faces = utils.detect_face_stream(endpoint=ENDPOINT,
                                                  key=KEY,
                                                  image=img)

        print('Faces detected: {}'.format(detected_faces))
        color = (255, 0, 0)
        thickness = 2
        for face in detected_faces:
            print(face)
            frame = cv2.rectangle(frame, *utils.get_rectangle(face), color,
                                  thickness)
        img_send = cv2.imencode('.jpg', frame)[1].tobytes()
        time.sleep(1)  # throttle requests to the Face API
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + img_send + b'\r\n')
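
The OpenCV examples in this listing unpack utils.get_rectangle(face) into the two corner points that cv2.rectangle expects. The helper itself is not shown; a minimal sketch of what it presumably does, assuming face is a JSON dict returned by the Azure Face detect REST endpoint:

def get_rectangle(face):
    """Convert an Azure Face 'faceRectangle' into the corner points
    ((left, top), (right, bottom)) expected by cv2.rectangle.
    A sketch; the actual utils.get_rectangle is not shown in this listing.
    """
    rect = face['faceRectangle']
    left, top = rect['left'], rect['top']
    return (left, top), (left + rect['width'], top + rect['height'])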
Code Example #2
def identify_from_video(endpoint, key, group_id, frame):
    """ Main function to detect faces in 'frame' and detect if its are contained in the 'group_id'

    @param endpoint: Endpoint Azure Service
    @type endpoint: str
    @param key: key connection of Azure Service
    @type key: str
    @param group_id: unique identify of the group or database
    @type group_id: str
    @param frame: Frame Image
    @type frame: numpy.ndarray
    @return: (frame image, string name)
    @rtype: (numpy.ndarray, str or None)
    """
    start_measure = time.time()
    thickness = 2
    detected_faces, sleep_time = detect_face(endpoint, key, frame)
    print(detected_faces)
    # detect_face signals a rate-limit back-off via sleep_time
    if sleep_time == 10:
        time.sleep(sleep_time)

    if detected_faces is not None:
        faces_info = identify_and_process(detected_faces, endpoint, key,
                                          group_id)
        print('Detected faces: {} with info: {}'.format(detected_faces, faces_info))
        for face, info in zip(detected_faces, faces_info):
            if info['confidence'] > 0.5:
                color = (0, 255, 0)
            else:
                color = (0, 0, 255)
            frame = cv2.rectangle(frame, *utils.get_rectangle(face), color,
                                  thickness)

        print('Total time required:', time.time() - start_measure)
        return frame, faces_info

    print('Total time required:', time.time() - start_measure)
    return frame, None
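
A minimal sketch of how identify_from_video might be driven from a capture loop, assuming the same ENDPOINT, KEY, and GROUP_ID configuration used in the other examples:

vid = cv2.VideoCapture(0)
while True:
    ret, frame = vid.read()
    if not ret:
        break
    # annotate the frame and report who was identified, if anyone
    frame, faces_info = identify_from_video(ENDPOINT, KEY, GROUP_ID, frame)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
vid.release()
cv2.destroyAllWindows()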
Code Example #3
    def __create_castle(self):
        """
        Randomly place a castle on the map.

        Private method.
        """
        # find a random origin point which doesn't sit on a river
        start_x = random.randint(0, self.GRID_X-1)
        start_y = random.randint(0, self.GRID_Y-1)
        # retry with a fresh origin if this one sits on a river
        if self.map[start_x][start_y]["river"]:
            self.__create_castle()
            return

        # create endpoint.
        end_x = start_x + self.CASTLE_SIZE - 1
        end_y = start_y + self.CASTLE_SIZE - 1

        # test if castle sits in the field and not on river
        if start_x > self.GRID_X-1 or start_y > self.GRID_Y-1:
            self.__create_castle()
            return
        if end_x > self.GRID_X-1 or end_y > self.GRID_Y-1:
            self.__create_castle()
            return
        if self.map[end_x][end_y]["river"]:
            self.__create_castle()
            return

        # get points of the castle and put them on map
        castle_points = utils.get_rectangle((start_x, start_y), (end_x, end_y))
        for point in castle_points:
            # start again if river inside castle
            if self.map[point[0]][point[1]]["river"]:
                self.__create_castle()
                return

        for point in castle_points:
            self.map[point[0]][point[1]]["castle"] = True
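
Unlike the face examples, get_rectangle here takes two corner cells and, judging from how castle_points is iterated and written back to the map, returns every grid cell the castle covers. A sketch under that assumption:

def get_rectangle(start, end):
    """Return every (x, y) grid cell in the axis-aligned rectangle
    spanned by the inclusive corner cells 'start' and 'end'.
    A sketch; the project's utils.get_rectangle is not shown here.
    """
    return [(x, y)
            for x in range(start[0], end[0] + 1)
            for y in range(start[1], end[1] + 1)]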
Code Example #4
def gen():
    global start_time, fps_time, fps

    camera = cv2.VideoCapture(0)
    camera.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)

    count = 0
    detected_faces = []  # initialize so the first iterations don't raise NameError
    while True:
        if (time.perf_counter() - fps_time) >= 1 / fps:
            ret, frame = camera.read()
            count += 1
            fps_time = time.perf_counter()

            if ret:
                img = cv2.imencode('.jpg', frame)[1].tobytes()

                if (time.perf_counter() - start_time) >= 3:
                    detected_faces = utils.detect_face_stream(
                        endpoint=ENDPOINT, key=KEY, image=img)
                    start_time = time.perf_counter()

                print('Image {}: faces detected {}'.format(
                    count, detected_faces))

                color = (255, 0, 0)
                thickness = 2
                for face in detected_faces:
                    print(face)
                    frame = cv2.rectangle(frame, *utils.get_rectangle(face),
                                          color, thickness)

                img = cv2.imencode('.jpg', frame)[1].tobytes()
                yield (b'--frame\r\n'
                       b'Content-Type: image/jpeg\r\n\r\n' + img + b'\r\n')
            else:
                break
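
Both gen() generators yield JPEG frames framed with the multipart/x-mixed-replace boundary convention, which suggests they feed a streaming HTTP response. A minimal sketch of the consuming route, assuming Flask (the route name is hypothetical):

from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_feed')  # hypothetical route name
def video_feed():
    # each yielded part replaces the previous frame in the browser
    return Response(gen(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')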
Code Example #5
    xy_lim[3] + padding
])

# scatter points
xy = controller.points
xs = [i[0] for i in xy]
ys = [i[1] for i in xy]
ax1.scatter(xs, ys, color='red', s=1)

# calculate intersections to plot
intersections = controller.get_intersections()
for points in intersections:
    if points is not None:
        plt.plot([i[0] for i in points], [i[1] for i in points],
                 color='pink',
                 linewidth=3)

# draw rectangle for isolated parts
rects = [get_rectangle(p.points) for p in controller.parts if p.isolated]
for rect in rects:
    for edge in rect:
        plt.plot([i[0] for i in edge], [i[1] for i in edge],
                 color='pink',
                 linewidth=3)

# formatting the plot
# ax1.text(-0.9, 3.85, 'Variance: {:8.4f}'.format(variance(regressor.points, regressor.parameters, regressor.segments)), fontsize=10)

# ax1.set_title("Segmented Linear Regression(segments = {})".format(segments))
# plt.savefig("Segemented_Linear_Regression_segments_{}.png".format(str(segments)), dpi=300)
plt.show()
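
In this plotting example get_rectangle takes a part's point set and, judging from the nested loop over rect and edge, returns the edges of a bounding box, each edge a pair of (x, y) endpoints that plt.plot can draw. A sketch under that assumption:

def get_rectangle(points):
    """Return the four edges of the axis-aligned bounding box of
    'points', each edge as a pair of (x, y) endpoints.
    A sketch; the actual helper is not shown in this listing.
    """
    xs = [p[0] for p in points]
    ys = [p[1] for p in points]
    x0, x1, y0, y1 = min(xs), max(xs), min(ys), max(ys)
    return [[(x0, y0), (x1, y0)],
            [(x1, y0), (x1, y1)],
            [(x1, y1), (x0, y1)],
            [(x0, y1), (x0, y0)]]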
Code Example #6
    # create a video object and configure size of the output image
    vid = cv2.VideoCapture(0)
    vid.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    vid.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)

    count = 0
    while True:
        ret, frame = vid.read()
        img = cv2.imencode('.jpg', frame)[1].tobytes()
        detected_faces = utils.detect_face_stream(endpoint=ENDPOINT, key=KEY, image=img)
        print('Image {}: faces detected {}'.format(count, detected_faces))
        count += 1
        color = (255, 0, 0)
        thickness = 2
        for face in detected_faces:
            print(face)
            frame = cv2.rectangle(frame, *utils.get_rectangle(face), color, thickness)
        # Display the resulting frame
        cv2.imshow('frame', frame)
        # press 'q' to quit
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        # wait 3 seconds to stay within the Azure free tier rate limit
        time.sleep(3)

    # After the loop, release the capture object
    vid.release()
    # Destroy all the windows
    cv2.destroyAllWindows()
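
utils.detect_face_stream is used throughout this listing but never defined. It presumably wraps the Azure Face detect REST call with a binary body; a sketch of such a wrapper using requests (parameter names mirror the calls above, error handling omitted):

import requests

def detect_face_stream(endpoint, key, image, face_attributes=''):
    """POST raw JPEG bytes to the Azure Face detect endpoint and
    return the parsed JSON list of detected faces.
    A sketch of the assumed helper, not the actual utils module.
    """
    params = {'returnFaceAttributes': face_attributes} if face_attributes else {}
    response = requests.post(
        endpoint + 'face/v1.0/detect',
        params=params,
        headers={'Ocp-Apim-Subscription-Key': key,
                 'Content-Type': 'application/octet-stream'},
        data=image)
    return response.json()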
Code Example #7
    # configure the face client
    KEY = args['KEY']
    ENDPOINT = 'https://{0}.cognitiveservices.azure.com/'.format(
        args['SERVICE_NAME'])
    face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))

    # make prediction from URL
    multi_face_image_url = "http://www.historyplace.com/kennedy/president-family-portrait-closeup.jpg"
    multi_image_name = os.path.basename(multi_face_image_url)
    detected_faces = face_client.face.detect_with_url(url=multi_face_image_url)

    # make prediction from a local image
    local_image_path = os.path.join(os.getcwd(), 'data',
                                    'president-family.jpg')
    with open(local_image_path, 'rb') as image:
        detected_faces = face_client.face.detect_with_stream(image)

    # download the image from web source
    response = requests.get(multi_face_image_url)
    img = Image.open(BytesIO(response.content))

    # For each face returned use the face rectangle and draw a red box.
    draw = ImageDraw.Draw(img)
    for face in detected_faces:
        print(face)
        draw.rectangle(utils.get_rectangle(face), outline='red')

    # Display the image in the users default image browser.
    img.show()
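
Here detected_faces comes from the azure-cognitiveservices-vision-face SDK, so each face is a DetectedFace object with attribute access rather than a JSON dict, and draw.rectangle takes the helper's return value directly. The listing's utils.get_rectangle would therefore need to read attributes in this case; a sketch of such a variant, following the pattern in Azure's own quickstart:

def get_rectangle(face):
    """Convert a DetectedFace SDK object into the ((left, top),
    (right, bottom)) pair accepted by PIL's ImageDraw.rectangle.
    A sketch; the actual utils helper is not shown here.
    """
    rect = face.face_rectangle
    left, top = rect.left, rect.top
    return ((left, top), (left + rect.width, top + rect.height))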
Code Example #8
    local_image = cv2.imread(image_path)
    img = cv2.imencode('.jpg', local_image)[1].tobytes()

    # Call the REST API with the local image
    attributes = ''
    detected_faces = utils.detect_face_stream(endpoint=ENDPOINT,
                                              key=KEY,
                                              image=img,
                                              face_attributes=attributes)

    # Call the REST API with a web image (this overwrites the local result above)
    image_url = "http://www.historyplace.com/kennedy/president-family-portrait-closeup.jpg"
    detected_faces = utils.detect_face_url(endpoint=ENDPOINT,
                                           key=KEY,
                                           image_url=image_url,
                                           face_attributes=attributes)

    # Display the resulting frame
    color = (255, 0, 0)
    thickness = 2
    for face in detected_faces:
        print(face)
        local_image = cv2.rectangle(local_image, *utils.get_rectangle(face),
                                    color, thickness)

    cv2.imshow('frame', local_image)
    cv2.waitKey()

    # Destroy all the windows
    cv2.destroyAllWindows()
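
utils.detect_face_url mirrors detect_face_stream but, per the Face detect REST contract, sends a JSON body carrying the image URL instead of raw bytes. A sketch under the same assumptions:

import requests

def detect_face_url(endpoint, key, image_url, face_attributes=''):
    """POST an image URL to the Azure Face detect endpoint and
    return the parsed JSON list of detected faces.
    A sketch of the assumed helper, not the actual utils module.
    """
    params = {'returnFaceAttributes': face_attributes} if face_attributes else {}
    response = requests.post(
        endpoint + 'face/v1.0/detect',
        params=params,
        headers={'Ocp-Apim-Subscription-Key': key,
                 'Content-Type': 'application/json'},
        json={'url': image_url})
    return response.json()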
Code Example #9
        print('Result for face {0}.'.format(person['faceId']))
        face = [
            face for face in detected_faces
            if face['faceId'] == person['faceId']
        ][0]
        if person['candidates']:
            candidate = person['candidates'][0]
            print('Identified face {} with confidence {}.'.format(
                person['faceId'], candidate['confidence']))
            person_info = utils.get_person_info(ENDPOINT, KEY, GROUP_ID,
                                                candidate['personId'])
            print('Identified person name: {0}'.format(
                person_info['name']))
            color = (0, 255, 0)
            local_image = cv2.rectangle(local_image,
                                        *utils.get_rectangle(face), color,
                                        thickness)
            x, y = face['faceRectangle'][
                'left'], face['faceRectangle']['top'] - 5
            cv2.putText(local_image, person_info['name'], (x, y),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 1)
        else:
            print('No match found.')
            color = (255, 0, 0)
            local_image = cv2.rectangle(local_image,
                                        *utils.get_rectangle(face), color,
                                        thickness)

    cv2.imshow('frame', local_image)
    cv2.waitKey()
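
utils.get_person_info presumably wraps the PersonGroup Person "Get" REST call, which returns the person's name and metadata. A sketch using requests (again an assumption, not the actual utils module):

import requests

def get_person_info(endpoint, key, group_id, person_id):
    """Fetch a person record (including 'name') from an Azure Face
    person group. A sketch of the assumed helper.
    """
    url = '{}face/v1.0/persongroups/{}/persons/{}'.format(
        endpoint, group_id, person_id)
    response = requests.get(url,
                            headers={'Ocp-Apim-Subscription-Key': key})
    return response.json()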