Example No. 1
def find_car_num_brod():
    watch_cascade = cv2.CascadeClassifier('./cascade.xml')
    image = cv2.imread('./car_image/su.jpg')
    cv2.imshow('image', image)
    print('111111111111')
    resize_h = 1000
    height = image.shape[0]
    scale = image.shape[1] / float(height)
    image = cv2.resize(image, (int(scale * resize_h), resize_h))
    image_gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    watches = watch_cascade.detectMultiScale(image_gray, 1.2, minNeighbors=4, minSize=(36, 9), maxSize=(106 * 40, 59 * 40))

    print('number of license plates detected:', len(watches))
    if len(watches) == 0:
        return False
    for (x, y, w, h) in watches:
        print(x, y, w, h)
        # not entirely clear why the rectangle starts at x - h rather than x
        cv2.rectangle(image, (x - h, y), (x + w, y + h), (0, 0, 255), 1)
        cut_img = image[y + 5 : y - 5 + h, x + 8 : x - 8 + w]
        cut_gray = cv2.cvtColor(cut_img, cv2.COLOR_RGB2GRAY)
        cv2.imshow('rectangle', cut_gray)
        cv2.waitKey(0)

        cv2.imwrite('./num_for_car.jpg', cut_gray)
        im = Image.open('./num_for_car.jpg')
        size = 720, 180
        mmm = im.resize(size, Image.ANTIALIAS)
        mmm.save('./num_for_car.jpg', 'JPEG', quality=95)
        #break
    return True
Example No. 2
	def display(self, name="Output", image=None):

		""" Display an image; defaults to self.img (a default argument cannot reference self) """

		if image is None:
			image = self.img

		cv2.imshow(name, image)

		# Temporary part - waitKey:
		if cv2.waitKey(0):
			cv2.destroyAllWindows()
Example No. 3
    def capture_training_images(self):  # build a training dataset from faces captured through the camera
        video_capture = cv2.VideoCapture(0)  # open the camera
        while True:
            self.count_timer += 1
            ret, frame = video_capture.read()  # read a frame
            inImg = np.array(frame)  # convert the frame to an array
            outImg = self.process_image(inImg)
            cv2.imshow('Video', outImg)

            # release the capture on pressing 'q'
            if cv2.waitKey(1) & 0xFF == ord('q'):
                video_capture.release()  # free the capture
                cv2.destroyAllWindows()  # close the windows and free the frames
                return
Example No. 4
    def display_hough_results(self):
        """Given an image and an array lines (resulting from a call to cv2.HoughLines),
        add the lines to the image for visualization purposes.
        """
        color_image = self.ImageScore.get_image()
        for rho, theta in self.lines[0]:
            a = np.cos(theta)
            b = np.sin(theta)
            x0 = a * rho
            y0 = b * rho
            x1 = int(x0 + self.img_width * (-b))
            y1 = int(y0 + self.img_width * (a))
            x2 = int(x0 - self.img_width * (-b))
            y2 = int(y0 - self.img_width * (a))
            cv2.line(color_image, (x1, y1), (x2, y2), (0, 0, 255), 2)  # draw on the image that is displayed below

        cv2.imShow("Hough transform lines", color_image)
Example No. 5
import numpy
import cv2 as cv

face = cv.CascadeClassifier('haarcascade_frontalface_default.xml')
img = cv.imread('bppt2.jpg')
img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)

detection_face = face.detectMultiScale(img_gray, 1.1, 5)
font = cv.FONT_HERSHEY_SIMPLEX
total = 0

for (x, y, w, h) in detection_face:
    total += 1
    cv.putText(img, "Face", (x, y - 10), font, 0.75, (0, 0, 255), 2,
               cv.LINE_AA)
    cv.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
    roi_gray = img_gray[y:y + h, x:x + w]
    roi_color = img[y:y + h, x:x + w]

cv.putText(img, "Total face is : " + str(total) + " face", (10, 30), font, 1,
           (0, 0, 0), 2, cv.LINE_AA)
cv.imshow('img', img)
cv.waitKey(0)  # wait for a key press so the window stays visible
cv.destroyAllWindows()
Example No. 6
def processFrame(frame, thresh):
    cv2.imshow('image', thresh)
    (x, y) = findFingerXY(frame)
    if x > 0:
        cv2.circle(thresh, (x, y), 4, (0, 127, 255), -1)
        cv2.imshow('image', thresh)
Example No. 7
import cv2

cv2.CascadeClassifier("1.2 haarcascade_frontalface_default.xml")
ipm_img = cv2.VideoCapture("elon.jpg")

res, img = ipm_img.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

faces = detect.detectMultiscale(gray, 1.3, 5)

cv2.imShow("Elon Image", img)
Example No. 8
def cut_car_num_for_chart():
    # 1. Read the image, convert it to grayscale and display it
    img = cv2.imread('./num_for_car.jpg')
    img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    cv2.imshow('gray', img_gray)

    # 2. Binarize the grayscale image (threshold around 100):
    #    white background with black text ---> black background with white text
    img_thre = img_gray
    # gray / white points, thickened
    # cv2.threshold(img_gray, 130, 255, cv2.THRESH_BINARY_INV, img_thre)
    # adaptive thresholding -- the results were not satisfactory
    # th3 = cv2.adaptiveThreshold(img_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)

    # Gaussian denoising followed by Otsu binarization
    blur = cv2.GaussianBlur(img_gray, (5, 5), 0)
    ret3, th3 = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    cv2.imshow('threshold', th3)
    cv2.imwrite('./wb_img.jpg', th3)  # imwrite needs a filename and an image; the path here is assumed
    # src = cv2.imread("D:\PyCharm\Test213\py_car_num_tensor\wb_img.jpg")
    # height, width, channels = src.shape
    # print("width:%s,height:%s,channels:%s" % (width, height, channels))
    # for row in range(height):
    #     for list in range(width):
    #         for c in range(channels):
    #             pv = src[row, list, c]
    #             src[row, list, c] = 255 - pv
    # cv2.imshow("AfterDeal", src)
    # cv2.waitKey(0)
    #
    # # 3. Save the black-and-white image
    # cv2.imwrite('D:\PyCharm\Test213\py_car_num_tensor\wb_img.jpg', src)
    # img = cv2.imread("D:\PyCharm\Test213\py_car_num_tensor\wb_img.jpg")  # read the image
    # src_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    # src_img = src_gray

    # 4. Segment the characters
    # Record the total number of white and black pixels in each column
    white = []
    black = []
    height = th3.shape[0]
    width = th3.shape[1]
    white_max = 0
    black_max = 0
    # Count the white and black pixels in every column
    for i in range(width):
        s = 0
        t = 0
        for j in range(height):
            if th3[j][i] == 255:
                s += 1
            if th3[j][i] == 0:
                t += 1
        white_max = max(white_max, s)
        black_max = max(black_max, t)
        white.append(s)
        black.append(t)
        print('blackmax ---> ' + str(black_max) + '---whitemax ---> ' + str(white_max))

    # False means black text on a white background; True means white text on a black background
    arg = False
    if black_max > white_max:
        arg = True

    n = 1
    start = 1
    end = 2
    temp = 1
    while n < width - 2:
        n += 1
        if (white[n] if arg else black[n]) > (0.05 * white_max if arg else 0.05 * black_max):
            # The check above tells black-on-white apart from white-on-black.
            # Tune the 0.05 parameter as needed; it corresponds to the 0.95 used in find_end.
            start = n
            end = find_end(start, white, black, arg, white_max, black_max, width)
            n = end
            # After binarization the left and right borders of the plate frame are clearly visible.
            # Since the source images come from open web resources, the plate localization angle is
            # inaccurate, so the correction is applied here while cutting out individual characters.
            # The idea: scan characters from the left; if the width (end - start) is less than 20,
            # treat it as the white strip on the left border, skip it and keep scanning to the right.
            # Otherwise it is the province abbreviation: crop, resize and save it. When one of the
            # last five characters is the digit 1, its width is also very narrow, so it is taken to
            # be a 1 without running the classifier (otherwise the narrow 1 would be stretched when
            # cropped and resized). shutil.copy() copies a sample image of 1 from the dataset into
            # the slot for the current temp index.
            # Remove the white strip on the left edge of the plate
            if end - start > 5:
                print('end - start' + str(end - start))
                if temp == 1 and end - start < 20:
                    pass
                elif temp > 3 and end - start < 20:
                    # Treat this character as the digit 1: copy a 32x40 sample of 1 as temp.bmp
                    shutil.copy(
                        # 111.bmp is a sample image of the digit 1
                        os.path.join('./tf_car_license_dataset/train_images/training-set/1/', '111.bmp'),
                        os.path.join('./img_cut/', str(temp) + '.bmp')
                    )
                    pass
                else:
                    cj = th3[1:height, start:end]
                    cv2.imwrite('./img_cut_not_3240/' + str(temp) + '.jpg', cj)
                    im = Image.open('./img_cut_not_3240/' + str(temp) + '.jpg')
                    size = 32, 40
                    mmm = im.resize(size, Image.ANTIALIAS)
                    mmm.save('./img_cut/' + str(temp) + '.bmp', quality=95)
                    cv2.imshow('cropped', np.array(mmm))  # a PIL image must be converted to a numpy array for imshow
                    # cv2.imwrite('./py_car_num_tensor/img_cut/' + str(temp) + '.bmp', cj)
                    temp = temp + 1
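
The loop above calls find_end, which is not included in this snippet. A minimal sketch consistent with the 0.05 / 0.95 thresholds mentioned in the comments (an assumption, not necessarily the author's exact helper):

def find_end(start, white, black, arg, white_max, black_max, width):
    # Scan to the right of `start` until a column is almost entirely background,
    # i.e. its background-pixel count exceeds 95% of the column maximum.
    end = start + 1
    for m in range(start + 1, width - 1):
        # arg == True means white text on a black background, so background columns are black
        if (black[m] if arg else white[m]) > (0.95 * black_max if arg else 0.95 * white_max):
            end = m
            break
    return end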
Example No. 9
    cv2.IMREAD_REDUCED_COLOR_4: if set, always convert the image to a 3-channel BGR color image and reduce its size to 1/4.
    cv2.IMREAD_REDUCED_GRAYSCALE_8: if set, always convert the image to a single-channel grayscale image and reduce its size to 1/8.
    cv2.IMREAD_REDUCED_COLOR_8: if set, always convert the image to a 3-channel BGR color image and reduce its size to 1/8.

    The first three are the most commonly used. Since flags is an integer, a plain number also works:
    flags > 0: same as IMREAD_COLOR.
    flags = 0: same as IMREAD_GRAYSCALE.
    flags < 0: same as IMREAD_UNCHANGED.
    Usually 1, 0 or -1 is passed, but other integers are accepted as well.

'''

# img = cv2.imread('../images/cv2_img2.jpg',cv2.IMREAD_GRAYSCALE)
# print(img.shape)
# print(img.size)
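
# A short illustration of the reduced-size flags described above (same test image as the lines above):
# img_quarter = cv2.imread('../images/cv2_img2.jpg', cv2.IMREAD_REDUCED_COLOR_4)     # 3-channel BGR at 1/4 size
# img_eighth = cv2.imread('../images/cv2_img2.jpg', cv2.IMREAD_REDUCED_GRAYSCALE_8)  # single channel at 1/8 size
# print(img_quarter.shape, img_eighth.shape)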
'''
cv2.imshow():
    Displays an image in a window. The window automatically adapts to the original size of the image.
cv2.waitKey():
    cv2.waitKey() is a keyboard-binding function. Note that its time unit is the millisecond.
    The function waits the given number of milliseconds for keyboard input. If any key is pressed
    within that time, it returns the ASCII code of the key and the program continues. If there is no
    keyboard input, it returns -1. If the argument is set to 0, it waits indefinitely for a key press.
    It can also be made to respond to a specific key.
cv2.destroyAllWindows():
    Destroys all the windows we have created.
    To destroy a specific window, use cv2.destroyWindow() and pass the exact window name as the argument
    (this should be the name used when the window was created, as a string).
Note:
We can also load an image another way: first create a window, then load the image into it when needed.
In that case, cv2.namedWindow() lets us specify whether the window is resizable.
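
A minimal sketch pulling the calls above together; the file name 'example.jpg' and the window name 'demo' are placeholders:

import cv2

img = cv2.imread('example.jpg', cv2.IMREAD_GRAYSCALE)  # passing 0 as the flags value is equivalent
cv2.namedWindow('demo', cv2.WINDOW_NORMAL)  # create a resizable window up front
cv2.imshow('demo', img)                     # then load the image into that window
key = cv2.waitKey(0)                        # 0 = wait indefinitely; returns the key code, or -1 on timeout
if key == ord('q'):                         # reacting to one specific key
    cv2.destroyWindow('demo')               # close just this window by name
cv2.destroyAllWindows()                     # close any remaining windows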
Example No. 10
    for f in faces:
        (x, y, w, h) = [v * size for v in f]
        # coordinates x, y, w, h: the outline around the face
        # v * size: scale the 1/4-sized detection back up to the original shape

        cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 4)
        # draw a rectangle around the face

        sub_face = im[y:y + h, x:x + w]
        # keep one frame (out of all the live recorded frames)

        FaceFileName = "test.jpg"
        # saving the current image from the webcam

        cv2.imwrite(FaceFileName, sub_face)
        # write the cropped face to FaceFileName

        text = label_image.main(FaceFileName)
        # get the classification result from the label_image module
        text = text.title()
        font = cv2.FONT_HERSHEY_TRIPLEX
        cv2.putText(im, text, (x + w, y), font, 1, (0, 0, 255), 2)

    cv2.imshow('Capture', im)
    key = cv2.waitKey(10)

    if key == 27:
        break  # break out if the Esc key is pressed
 
 
Example No. 11
#!/usr/local/bin/python3

import cv2
import numpy as np
import math
import time
import boto3
import os
import PIL
import glob
import subprocess
from IPython import embed
import sys
from pprint import pprint
import botocore
from shutil import copyfile


img = cv2.imread('/Desktop/CAPSTONE_R/chess-irs/pictures/processed_states/2019-03-25-01:34:12.041180:raw_state.jpg')


# note: the plain MOG subtractor lives in opencv-contrib as cv2.bgsegm.createBackgroundSubtractorMOG();
# mainline OpenCV ships cv2.createBackgroundSubtractorMOG2()
fgbg = cv2.createBackgroundSubtractorMOG(128, cv2.THRESH_BINARY, 1)
masked_image = fgbg.apply(img)
masked_image[masked_image == 127] = 0
cv2.imshow('masked_image', masked_image)  # imshow needs a window name as its first argument
cv2.waitKey(0)




Example No. 12
    locations = face_recognition.face_locations(image, model=MODEL)
    encodings = face_recognition.face_encodings(image, locations)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    for faceEncoding, faceLocation in zip(encodings, locations):
        results = face_recognition.compare_faces(labeledImages, faceEncoding,
                                                 TOLERANCE)
        match = None
        print(f"Face locations: {faceLocation}")
        if True in results:
            match = labels[results.index(True)]
            print(f"Match found: {match}")

            #Draw rectangle around face
            topLeft = (faceLocation[3], faceLocation[0])
            bottomRight = (faceLocation[1], faceLocation[2])
            color = [0, 255, 0]
            cv2.rectangle(image, topLeft, bottomRight, color, FRAME_THICKNESS)

            #Draw label
            topLeft = (faceLocation[3], faceLocation[2])
            bottomRight = (faceLocation[1], faceLocation[2] + 22)
            cv2.rectangle(image, topLeft, bottomRight, color, cv2.FILLED)
            cv2.putText(image, match,
                        (faceLocation[3] + 10, faceLocation[2] + 15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (200, 200, 200),
                        FONT_THICKNESS)

    cv2.imshow(filename, image)
    cv2.waitKey(0)
    cv2.destroyWindow(filename)
Example No. 13
                # we could write the ip in a text file and update accordingly...
                update_rover_moves(speed, direction)
                

            # show image on screen
            cv2.imshow("datass", img)

        else:
            print("Lost Connection")
            print("Trying to reconnect...")
            time.sleep(2)
            result,img = cap.read()
            if result:
                print("Connection success")
                time.sleep(1)
                cv2.imshow("datass", img)  # reuse the display window from above
                cv2.waitKey(2000)
            break

    else:
        pass  # the body of this branch is not included in the excerpt

    # force break python code
    if cv2.waitKey(1) & 0xFF == ord('q'):
        update_rover_moves("stop", "straight")
        break

print("ADIOS")

cap.release()
cv2.destroyAllWindows()