Example 1
def main():
    """
    Main function.
    """
    print("OpenCV version:" + cv2.getVersionString())
    print("Waiting for input of driver...")

    keyboard.wait('a', True)  # block until the driver presses 'a'

    print("Desert Bot started, enjoy the ride!")

    while not keyboard.is_pressed('q'):
        # Grab the screen and convert the RGB screenshot to OpenCV's BGR order.
        frame = pyautogui.screenshot()
        frame = cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR)

        lane = detect_lane(frame)

        if lane is not None:
            keyboard.press('a')

            for x1, y1, x2, y2 in lane:
                # Tap 'j' briefly when a lane segment's left end falls within the left 31% of the frame.
                if min([x1, x2]) <= int(frame.shape[1] * 0.31):
                    keyboard.press('j')
                    time.sleep(0.05)
                    keyboard.release('j')
        else:
            print('No lane detected!')

    cv2.destroyAllWindows()
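
detect_lane() is not part of the excerpt above; a minimal sketch of what such a helper could look like, built from a Canny edge map and a probabilistic Hough transform (all thresholds are assumptions, not the original author's values):

import cv2
import numpy as np


def detect_lane(frame):
    # Edge map from a blurred grayscale copy of the frame.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(cv2.GaussianBlur(gray, (5, 5), 0), 50, 150)

    # Probabilistic Hough transform; returns None when no segments are found,
    # which matches the `if lane is not None` check in the example.
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, threshold=50,
                            minLineLength=40, maxLineGap=20)
    if lines is None:
        return None
    return [tuple(line[0]) for line in lines]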
Example 2
    def __init__(self):
        self.__DEBUG = True
        self.__AVOID_LOOP = False
        self.__cv_version = cv2.getVersionString()
        self.__img_counter = 0
        self.__img_optimizer = ImageOptimizer()
        self.__match_filed_finder = MatchFieldFinder()
        self.__red_range_lower = np.array((17, 15, 60), dtype="uint8")
        self.__red_range_upper = np.array((50, 56, 255), dtype="uint8")
        self.__blue_range_lower = np.array((86, 31, 4), dtype="uint8")
        self.__blue_range_upper = np.array((220, 88, 50), dtype="uint8")
        # Red wraps around the hue axis, so it needs two lower/upper HSV pairs.
        self.__red_range_hsv_lower_1 = np.array([0, 100, 50])
        self.__red_range_hsv_lower_2 = np.array([150, 100, 50])
        self.__red_range_hsv_upper_1 = np.array([10, 255, 255])
        self.__red_range_hsv_upper_2 = np.array([190, 255, 255])
        self.__blue_range_hsv_lower = np.array([100, 120, 30])
        self.__blue_range_hsv_upper = np.array([120, 255, 255])

        self.__ip = os.getenv('PEPPER_IP')
        self.__pw = os.getenv('PEPPER_PW')
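
OpenCV stores hue on a 0-179 scale, so red sits at both ends of the range, which is why the example keeps two lower/upper HSV pairs for red. A sketch of how such ranges are typically combined into a single mask (the input image here is an assumption):

import cv2
import numpy as np

frame = cv2.imread('sample.jpg')  # any BGR image
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

# Build one mask per red band and OR them together.
mask_low = cv2.inRange(hsv, np.array([0, 100, 50]), np.array([10, 255, 255]))
mask_high = cv2.inRange(hsv, np.array([150, 100, 50]), np.array([179, 255, 255]))
red_mask = cv2.bitwise_or(mask_low, mask_high)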
Example 3
    def __init__(self,
                 resolution=(800, 600),
                 use_pi_camera=False,
                 fullscreen=False):
        self._ESC = 27  # key code returned by cv2.waitKey for the ESC key

        self._facerecognizer = FaceRecognizer()
        self._smilerecognizer = SmileRecognizer()

        self._trigger = SnapshotTrigger("images")
        self._display = StreamDisplay("Photobooth", fullscreen)

        self._camstream = CameraStream(use_pi_camera=use_pi_camera,
                                       resolution=resolution).start()
        self._stopped = False
        self._smile_cascade = cv2.CascadeClassifier(
            'recognition/haarcascades/smile.xml')

        print("Using OpenCV: " + cv2.getVersionString())
        print("Using Python: " + sys.version)
        sleep(2)
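
The smile cascade loaded in __init__ is typically applied per frame with CascadeClassifier.detectMultiScale; a minimal sketch of that step (the image path, scale factor and neighbour count are assumptions, not the project's actual values):

import cv2

smile_cascade = cv2.CascadeClassifier('recognition/haarcascades/smile.xml')
frame = cv2.imread('sample.jpg')  # stand-in for a frame from CameraStream
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

# Returns a list of (x, y, w, h) rectangles around detected smiles.
smiles = smile_cascade.detectMultiScale(gray, scaleFactor=1.7, minNeighbors=20)
for (x, y, w, h) in smiles:
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)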
Example 4
def create_genesis_block():
    # Manually construct the first block: index 0 and an arbitrary previous hash.
    return Block(0, date.datetime.now(), "Genesis Block", "0")


def next_block(last_block):
    this_index = last_block.index + 1
    this_timestamp = date.datetime.now()
    this_data = "Hey! I'm block " + str(this_index)
    previous_hash = last_block.hash
    return Block(this_index, this_timestamp, this_data, previous_hash)


# Create the blockchain and add the genesis block
blockchain = [create_genesis_block()]
previous_block = blockchain[0]

# How many blocks should we add to the chain
# after the genesis block
num_of_blocks_to_add = 20

# Add blocks to the chain
for i in range(0, num_of_blocks_to_add):
    block_to_add = next_block(previous_block)
    blockchain.append(block_to_add)
    previous_block = block_to_add
    # Tell everyone about it!
    print("Block #{} has been added to the blockchain!".format(
        block_to_add.index))
    print(block_to_add)
    print("Hash: {}\n".format(block_to_add.hash))
print(cv.getVersionString())
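
The Block class itself is not shown in the excerpt; a minimal sketch of the shape the code above assumes (index, timestamp, data, previous_hash and a hash attribute), with SHA-256 used purely as an illustration of how the hash might be computed:

import hashlib


class Block:
    def __init__(self, index, timestamp, data, previous_hash):
        self.index = index
        self.timestamp = timestamp
        self.data = data
        self.previous_hash = previous_hash
        self.hash = self.hash_block()

    def hash_block(self):
        # SHA-256 over the concatenated block fields.
        payload = (str(self.index) + str(self.timestamp) +
                   str(self.data) + str(self.previous_hash))
        return hashlib.sha256(payload.encode()).hexdigest()

    def __str__(self):
        return 'Block #{}: {}'.format(self.index, self.data)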
Example 5
import cv2 as cv
import os
from matplotlib import pyplot as plt

os.chdir(r'.\Module\OpenCV')
print('Current OpenCV version:', cv.getVersionString())  # print the version info
img = cv.imread('tower.jpg', 1)  # read the image
"""
img = cv.imread(filename[, flags])
The second argument is a flag that specifies how the image is read:
cv.IMREAD_COLOR: load a colour image; any transparency is ignored (this is the default when no flag is passed).
cv.IMREAD_GRAYSCALE: load the image in grayscale mode.
cv.IMREAD_UNCHANGED: load the image as-is, including the alpha channel.
Note: these three flags can be abbreviated as 1, 0 and -1 respectively.
"""
"""
print(img[20, 30])  # 读取像素可以通过行坐标和列坐标来进行访问,灰度图像直接返回灰度值,彩色图像则返回B、G、R三个分量
blue = img[20, 30, 0]  # 在获取彩色图片像素时的第二个参数 0|1|2 的含义是获取 BGR 三个通道的像素。
green = img[20, 30, 1]
red = img[20, 30, 2]
print(blue, green, red)
# 像素依次赋值
img[20, 30, 0] = 255
img[20, 30, 1] = 255
img[20, 30, 2] = 255
print(img[20, 30])
# 也可以通过数组直接对像素点一次赋值:
img[20, 30] = [0, 0, 0]
print(img[20, 30])
img[0:200, 50:100] = [255, 255, 255]  # 对一个区域的像素进行赋值,全都赋值成为白色
"""
Example 6
import cv2
import struct
import time
import numpy as np

if __name__ == '__main__':
    print("version: {0}".format(cv2.getVersionString()))

    temp = cv2.imread(filename="WeChat.jpg")
    target = cv2.resize(src=temp, dsize=(390, 209))

    start = time.time()
    with open(file="image.bin", mode="rb") as f:
        buff = f.read(209 * 390 * 4)
        for i in range(209):
            for j in range(390):
                target[i][j] = np.array([
                    buff[i * 390 * 4 + j * 4 + 0],
                    buff[i * 390 * 4 + j * 4 + 1],
                    buff[i * 390 * 4 + j * 4 + 2]
                ],
                                        dtype=np.uint8)

    end = time.time()
    print("assignment cost: %.3f s" % (end - start))

    while True:
        cv2.imshow("target", target)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
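
The nested per-pixel loop above is easy to read but slow; a vectorized sketch of the same copy with NumPy, assuming, as the loop does, 4 bytes per pixel in row-major order with the first three bytes being the colour channels:

import numpy as np

with open("image.bin", mode="rb") as f:
    buff = f.read(209 * 390 * 4)

# View the buffer as (rows, cols, 4) and keep the first three channels.
pixels = np.frombuffer(buff, dtype=np.uint8).reshape(209, 390, 4)
target = pixels[:, :, :3].copy()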
Example 7
# -*- coding: utf-8 -*-
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import cv2

print('OpenCV version:', cv2.getVersionString())
print('OpenVX:', cv2.haveOpenVX())
print('CPUs:', cv2.getNumberOfCPUs())
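
A few related build-introspection helpers, for reference; the output depends on how the local OpenCV build was configured:

import cv2

print('Threads:', cv2.getNumThreads())
print('Optimizations enabled:', cv2.useOptimized())
print(cv2.getBuildInformation())  # full build summary as a multi-line string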
Example 8
import cv2
import time

argument = {"fps": 0, "backup_time": 0, "message": "loading. . ."}
if __name__ == '__main__':
    camera = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    print("OpenCV:{0}".format(cv2.getVersionString()))
    camera.set(propId=3, value=800)  # 3 == cv2.CAP_PROP_FRAME_WIDTH
    camera.set(propId=4, value=480)  # 4 == cv2.CAP_PROP_FRAME_HEIGHT
    print(camera.get(propId=3), camera.get(propId=4))

    while True:
        moment = time.gmtime().tm_sec
        if moment != argument["backup_time"]:
            argument["backup_time"] = moment
            argument["message"] = "\rfps:{0}".format(argument["fps"])
            print(argument["message"], end="")
            argument["fps"] = 0

        argument["fps"] += 1
        ret, frame = camera.read()
        frame = frame[0:480, 0:800]
        frame[0:480, 800:848] = [23, 56, 255]
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        result = frame
        result[0:240, 0:400] = cv2.resize(frame, (400, 240))
        result[0:240, 400:800] = cv2.resize(hsv, (400, 240))
        cv2.imshow(winname="frame width:800 height:480", mat=result)

        if cv2.waitKey(delay=10) & 0xFF == ord('q'):
            break
Example 9
                self._work_path + '\\Controller\\images\\temp\\contours.png',
                self._img_obj)

        print('--------------------------------------------')
        paragraphs = self.find_paragraph(region, iterations=3)

        if self._DEBUG:
            cv2.waitKey(0)
            cv2.destroyAllWindows()

        return paragraphs


if __name__ == '__main__':
    print('--------------------------------------------')
    print('OpenCV Ver:{}'.format(cv2.getVersionString()))

    # image_path = 'test_image/text_line/news{}.png'.format(14)  # qq bug 14 18 17
    # image_path = 'test_image/text_line/qq{}.png'.format(6)  # bug: 6, resolved after the morphological opening 6
    # image_path = 'test_image/text_line/baidu{}.png'.format(2)  # bug: 6
    # image_path = 'test_image/text_line/shouhu{}.png'.format(4)
    # image_path = 'test_image/text_line/sina{}.png'.format(4)  # bug:4
    # image_path = 'test_image/text_line/163_{}.png'.format(6)  # bug: 6 7
    image_path = 'test_image/text_line/ifeng{}.png'.format(
        8)  # bug:3 7 8 font_size=15

    start = datetime.now()
    d = NewsTitleList(font_size=18)
    d._DEBUG = True
    d.find_title(image_path)
    end = datetime.now()
Example 10
import cv2

CONTOURS_INDEX = 1 if cv2.getVersionString()[0] == '3' else 0
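
CONTOURS_INDEX compensates for the changed return value of cv2.findContours: OpenCV 3 returns (image, contours, hierarchy), while OpenCV 2 and 4 return (contours, hierarchy). A minimal sketch of how such a constant is typically used (the synthetic test image below is an assumption, not part of the original project):

import cv2
import numpy as np

CONTOURS_INDEX = 1 if cv2.getVersionString()[0] == '3' else 0

img = np.zeros((100, 100), dtype=np.uint8)
cv2.rectangle(img, (20, 20), (80, 80), 255, -1)

# Pick the element of the result tuple that holds the contour list,
# regardless of the installed OpenCV major version.
result = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = result[CONTOURS_INDEX]
print(len(contours))  # 1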
Example 11
    def __init__(self):
        self.__DEBUG = True
        self.__cv_version = cv2.getVersionString()
Example 12
import sys
from math import sqrt, floor

import cv2

# This code combines face detection with OpenCV's object-tracking code to detect and track a face automatically.

(major_ver, minor_ver, subminor_ver) = (cv2.getVersionString()).split('.')


draw_debug_elements = True
faces_for_debug = []


def draw_face_debug_objects(frame):
    centre = get_centre(frame)

    for (x, y, w, h) in faces_for_debug:
        # Bounding box
        p1 = (x, y)
        p2 = (x + w, y + h)
        cv2.rectangle(frame, p1, p2, (0, 255, 0), 2)

        # Line to centre
        line_p1 = (int(x + (w / 2)), int(y + (h / 2)))
        cv2.line(frame, line_p1, centre, (0, 127, 255))


def save_face_for_debug(face_bounding_box):
    faces_for_debug.append(face_bounding_box)
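
get_centre() is referenced above but not included in the excerpt; judging by how its result is used as a cv2.line endpoint, it presumably returns the frame's centre as integer (x, y) coordinates. A minimal sketch under that assumption:

def get_centre(frame):
    # frame.shape is (height, width, channels); return the centre as (x, y).
    height, width = frame.shape[:2]
    return (width // 2, height // 2)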
Example 13
import cv2

print('cv2 version is ' + cv2.getVersionString())


def capture_config(camera_port=0):
    frame_height = 480
    frame_width = 640

    cap = cv2.VideoCapture(camera_port)
    cap.set(3, frame_width)   # 3 == cv2.CAP_PROP_FRAME_WIDTH
    cap.set(4, frame_height)  # 4 == cv2.CAP_PROP_FRAME_HEIGHT
    if not cap.isOpened():
        print('Unable to read camera feed')
        return False
    return cap


cap = capture_config()
while cap:
    ret, frame = cap.read()
    if not ret:
        print('Unable to read a frame from the camera')
        cap.release()
        break

    cv2.imshow('captured frame', frame)

    # waitKey(0) blocks until a key is pressed, so the stream advances one frame per keypress.
    if cv2.waitKey(0) & 0xff == ord('q'):
        cap.release()
        cv2.destroyAllWindows()
        break
Example 14
                    offset) >= x >= (lastDimension[0] - offset) and (
                        lastDimension[1] + offset) >= y >= (lastDimension[1] -
                                                            offset):
                return True
        return False

    def rotate_clockwise(self, matrix):
        # Swap the first and last entry of each 3-element row in place.
        for x in range(3):
            temp = matrix[x][0]
            matrix[x][0] = matrix[x][2]
            matrix[x][2] = temp
        return matrix


if __name__ == '__main__':
    detect_board = DetectBoard()
    cv2_version = cv2.getVersionString()
    if "3.4." in cv2_version:
        print("Using opencv version ", cv2_version)

    connection_url = os.getenv('PEPPER_IP') + ":9559"
    app = qi.Application(["--qi-url=" + connection_url])
    app.start()
    session = app.session

    # local testing pipeline
    # board = detect_board.get_picture_board()
    # detect board on pepper
    board = detect_board.get_board(session)
    print(board)