Example #1
	def update(self, dt):
		ret, frame = self.capture.read()
		if ret:
			gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
			# loading the cascade once (e.g. in __init__) would avoid
			# re-reading the XML file on every frame
			faceCascade = cv2.CascadeClassifier("lbpcascade_frontalface.xml")
			faces = faceCascade.detectMultiScale(
				gray,
				scaleFactor=1.1,
				minNeighbors=5,
				minSize=(30, 30),
				flags=cv2.CASCADE_SCALE_IMAGE
			)

			# draw a bounding box around each detected face
			for (x, y, w, h) in faces:
				cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)

			# flip vertically for OpenGL texture coordinates and upload the buffer
			buf = cv2.flip(frame, 0).tobytes()
			image_texture = Texture.create(
				size=(frame.shape[1], frame.shape[0]), colorfmt='bgr')
			image_texture.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')
			cv2.imshow('Frame', frame)
			cv2.waitKey(1)

			# display image from the texture
			self.texture = image_texture
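
This update method assumes it lives on a Kivy Image widget that owns self.capture. A minimal sketch of the enclosing widget, assuming Kivy and the default webcam (the class name CameraView is an invention for illustration, not the original code):

import cv2
from kivy.uix.image import Image
from kivy.clock import Clock
from kivy.graphics.texture import Texture

class CameraView(Image):  # hypothetical widget name
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.capture = cv2.VideoCapture(0)
        # drive update() at roughly 30 frames per second
        Clock.schedule_interval(self.update, 1.0 / 30.0)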
Example #2
import argparse
import cv2
import numpy as np

def main():
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", help="path to the image")
    args = vars(ap.parse_args())

    # load the image
    image = cv2.imread(args["image"])

    # define the list of color boundaries
    # OpenCV stores pixels as BGR, so each bound reads (blue, green, red);
    # e.g. the first pair selects reds (red channel between 100 and 200)
    # of course, we can adjust these boundaries as we like
    boundaries = [([17, 15, 100], [50, 56, 200]), ([86, 31, 4], [220, 88, 50]),
                  ([25, 146, 190], [62, 174, 250]),
                  ([103, 86, 65], [145, 133, 128])]

    for (lower, upper) in boundaries:
        # create np arrays from the boundaries
        lower = np.array(lower, dtype="uint8")
        upper = np.array(upper, dtype="uint8")

        # find the colors within the specified boundaries and apply the mask
        mask = cv2.inRange(image, lower, upper)
        output = cv2.bitwise_and(image, image, mask=mask)

        # show the original and masked images side by side
        cv2.imshow("images", np.hstack([image, output]))
        cv2.waitKey(0)
Example #3
def colorDetection(imgPath):

    # Define color limits to identify, adjustable via trackbars
    cv2.namedWindow("TrackBars")
    cv2.resizeWindow("TrackBars", 640, 240)
    cv2.createTrackbar("Hue Min", "TrackBars", 0, 179, empty)
    cv2.createTrackbar("Hue Max", "TrackBars", 179, 179, empty)
    cv2.createTrackbar("Sat Min", "TrackBars", 0, 255, empty)
    cv2.createTrackbar("Sat Max", "TrackBars", 255, 255, empty)
    cv2.createTrackbar("Val Min", "TrackBars", 0, 255, empty)
    cv2.createTrackbar("Val Max", "TrackBars", 255, 255, empty)

    while True:
        img = cv2.imread(imgPath)
        imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

        hue_min = cv2.getTrackbarPos("Hue Min", "TrackBars")
        hue_max = cv2.getTrackbarPos("Hue Max", "TrackBars")
        sat_min = cv2.getTrackbarPos("Sat Min", "TrackBars")
        sat_max = cv2.getTrackbarPos("Sat Max", "TrackBars")
        val_min = cv2.getTrackbarPos("Val Min", "TrackBars")
        val_max = cv2.getTrackbarPos("Val Max", "TrackBars")

        # build the HSV range: in-range pixels become white in the mask,
        # everything else black
        lower = np.array([hue_min, sat_min, val_min])
        upper = np.array([hue_max, sat_max, val_max])
        mask = cv2.inRange(imgHSV, lower, upper)
        imgResults = cv2.bitwise_and(img, img, mask=mask)

        cv2.imshow("Orig Output", img)
        cv2.imshow("HSV Output", imgHSV)
        cv2.imshow("Mask Output", mask)
        cv2.imshow("Results", imgResults)
        cv2.waitKey(1)
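
The trackbars reference an empty callback that isn't shown; cv2.createTrackbar requires one even when unused, so a no-op stub like this suffices:

def empty(val):
    # trackbar callback required by cv2.createTrackbar; intentionally a no-op
    pass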
Example #4
import os
import pdb
import cv2
from matplotlib import pyplot as plt

def main():

    path = "/home/silp150/shreyashi/100307/unprocessed/3T/tfMRI_EMOTION_RL/"

    img = cv2.imread(os.path.join(path, 'image.png'), 1)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # Laplacian edge detection; ddepth=-1 keeps the source depth
    edges = cv2.Laplacian(img,
                          -1,
                          ksize=31,
                          scale=1,
                          delta=0,
                          borderType=cv2.BORDER_DEFAULT)
    output = [img, edges]
    titles = ['Original', 'Edges']

    for i in range(2):
        plt.subplot(1, 2, i + 1)
        plt.imshow(output[i], cmap='gray')
        plt.title(titles[i])
        plt.xticks([])
        plt.yticks([])

    plt.show()
    pdb.set_trace()
    cv2.waitKey(0)
Example #5
def global_threshold():
	imgfile = 'document.jpg'
	img = cv2.imread(imgfile, cv2.IMREAD_GRAYSCALE)

	# resize to 600 px tall, preserving the aspect ratio
	r = 600.0 / img.shape[0]
	dim = (int(img.shape[1] * r), 600)
	img = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)

	WindowName = "Window"
	TrackbarName = "Threshold"

	cv2.namedWindow(WindowName)
	cv2.createTrackbar(TrackbarName, WindowName, 50, 255, nothing)

	Threshold = np.zeros(img.shape, np.uint8)

	while True:
		TrackbarPos = cv2.getTrackbarPos(TrackbarName, WindowName)
		cv2.threshold(img, TrackbarPos, 255, cv2.THRESH_BINARY, Threshold)
		cv2.imshow(WindowName, Threshold)

		k = cv2.waitKey(0)
		if k == 27:  # Esc quits
			cv2.destroyAllWindows()
			cv2.waitKey(1)
			break
	return
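
As in Example #3, the trackbar needs a callback; the original nothing helper isn't shown, but a no-op stub works:

def nothing(x):
    # no-op trackbar callback
    pass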
Example #6
def webcam():

    face_cascade = cv2.CascadeClassifier(
        'C:\\Users\\Aditya chauhan\\AppData\\Local\\Programs\\Python\\Python37\\Lib\\site-packages\\cv2\\data\\haarcascade_frontalface_default.xml'
    )
    eye_cascade = cv2.CascadeClassifier(
        'C:\\Users\\Aditya chauhan\\AppData\\Local\\Programs\\Python\\Python37\\Lib\\site-packages\\cv2\\data\\haarcascade_eye.xml'
    )
    cap = cv2.VideoCapture(0)
    while True:
        ret, img = cap.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0), 2)
            # search for eyes only inside each detected face region
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = img[y:y + h, x:x + w]
            eyes = eye_cascade.detectMultiScale(roi_gray)
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh),
                              (0, 127, 255), 2)

        cv2.imshow('img', img)
        k = cv2.waitKey(30) & 0xff
        if k == 27:  # Esc quits
            break

    cap.release()
    cv2.destroyAllWindows()
Example #7
def createBlueImg():
    width = 512
    height = 512
    numChannels = 3
    img = np.zeros((width, height, numChannels), np.uint8)
    img[:] = 255, 0, 0  #blue
    cv2.imshow("Output", img)
    cv2.waitKey(0)
Example #8
def createRedCircle():
    width = 512
    height = 512
    numChannels = 3
    img = np.zeros((width, height, numChannels), np.uint8)
    cv2.circle(img, (256, 256), 50, (0, 0, 255), 2)
    cv2.imshow("Output", img)
    cv2.waitKey(0)
Example #9
    def show_result(self, color=(255, 0, 0)):
        '''Draws a rectangle around each detected face and shows the result image.

        Parameters:
            color: the rectangle border color
        '''
        for (x, y, w, h) in self.faces:
            cv2.rectangle(self.image, (x, y), (x + w, y + h), color, 2)
        cv2.imshow("detected", self.image)
        cv2.waitKey(0)
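
show_result relies on self.image and self.faces being set elsewhere; a minimal sketch of the surrounding detector class (the class name, attribute names, and cascade file are assumptions, not the original code):

import cv2

class FaceDetector:  # hypothetical class name
    def __init__(self, image_path, cascade_path='haarcascade_frontalface_default.xml'):
        self.image = cv2.imread(image_path)
        gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
        cascade = cv2.CascadeClassifier(cascade_path)
        self.faces = cascade.detectMultiScale(gray, 1.3, 5)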
Example #10
def main():
    imgpath = "/home/sunil/opencv/imgdataset/lena_color_256.tif"
    imgoutput = "/home/sunil/opencv/out.jpg"
    img = cv2.imread(imgpath,0)
    cv2.imwrite(imgoutput,img)
    cv2.imshow("Img",img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #11
def createRedRectangle():
    width = 512
    height = 512
    numChannels = 3
    img = np.zeros((width, height, numChannels), np.uint8)
    cv2.rectangle(img, (0, 0), (250, 350), (0, 0, 255),
                  cv2.FILLED)  # filled red rectangle from (0,0) to (250,350)
    cv2.imshow("Output", img)
    cv2.waitKey(0)
Example #12
def createGreenLine():
    width = 512
    height = 512
    numChannels = 3
    img = np.zeros((width, height, numChannels), np.uint8)
    cv2.line(img, (0, 0), (img.shape[1], img.shape[0]), (0, 255, 0),
             2)  #From 0,0 to 512,512 w/ thickness=2
    cv2.imshow("Output", img)
    cv2.waitKey(0)
Example #13
def wave_process(img):
    # Haar wavelet decomposition; pywt.dwtn returns a dict of subbands,
    # pywt.dwt2 returns (cA, (cH, cV, cD)), so flatten both into a list
    if img.shape[2] == 3:
        coeffs = list(pywt.dwtn(img, 'haar').values())
    else:
        cA, (cH, cV, cD) = pywt.dwt2(img, 'haar')
        coeffs = [cA, cH, cV, cD]
    for i in range(len(coeffs)):
        cv2.imshow('subband %d' % i, coeffs[i][:, :, 0])
        cv2.waitKey(0)
    return len(coeffs)
Example #14
def cropImg(imgPath, newWidth, newHeight):
    '''Unit of measure: pixels.'''
    img = cv2.imread(imgPath)
    height, width, numChannels = img.shape

    # numpy slicing is [rows, cols], i.e. [y, x]
    imgCropped = img[0:newHeight, 0:newWidth]

    cv2.imshow("Cropped Output", imgCropped)
    cv2.waitKey(0)
Example #15
def smartCombineImages(imgPath1, imgPath2):

    img = cv2.imread(imgPath1)
    img2 = cv2.imread(imgPath2)

    scale = 0.5
    # each row must have the same number of images
    imgStack = stackImages(scale, ([img, img2], [img2, img]))

    cv2.imshow("Vert Output", imgStack)
    cv2.waitKey(0)
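
stackImages is a helper that isn't shown here. A simplified sketch under the assumption that every image in the grid already has the same size and three channels (the original tutorial helper also handles mixed sizes and grayscale):

import cv2
import numpy as np

def stackImages(scale, imgGrid):
    rows = [np.hstack(row) for row in imgGrid]  # join each row horizontally
    grid = np.vstack(rows)                      # then stack the rows vertically
    return cv2.resize(grid, (0, 0), fx=scale, fy=scale)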
Example #16
def dilateImg(imgPath):
    # Increase edge thickness
    img = cv2.imread(imgPath)
    imgCanny = cv2.Canny(img, 100, 100)

    kernel = np.ones((5, 5), np.uint8)
    imgDilated = cv2.dilate(imgCanny, kernel, iterations=1)

    cv2.imshow("Dilated Output", imgDilated)
    cv2.waitKey(0)
Example #17
def addText2Img():
    width = 512
    height = 512
    numChannels = 3
    img = np.zeros((width, height, numChannels), np.uint8)

    cv2.putText(img, "HELLO WORLD", (300, 100), cv2.FONT_HERSHEY_COMPLEX, 1,
                (0, 255, 0), 3)  #scale = 1
    cv2.imshow("Output", img)
    cv2.waitKey(0)
Example #18
def mplot(img, img2=None):

    cv2.namedWindow('img', cv2.WINDOW_NORMAL)
    cv2.moveWindow('img', 600, 300)
    cv2.imshow('img', img)
    if img2 is not None:
        cv2.namedWindow('img2', cv2.WINDOW_NORMAL)
        cv2.moveWindow('img2', 600, 600)
        cv2.imshow('img2', img2)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #19
def getphoto(cap):
    ret, frame = cap.read()
    tmp = 0
    # retry for up to ~100 reads until a frame is captured
    while not ret:
        ret, frame = cap.read()
        tmp += 1
        cv2.waitKey(10)
        if tmp > 100:
            print("Can't get photo")
            return None
    return frame
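
A brief usage sketch (the camera index 0 is an assumption):

cap = cv2.VideoCapture(0)
frame = getphoto(cap)
if frame is not None:
    cv2.imshow('photo', frame)
    cv2.waitKey(0)
cap.release()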
Example #20
def erodeImg(imgPath):
    # Decrease edge thickness
    img = cv2.imread(imgPath)
    imgCanny = cv2.Canny(img, 100, 100)

    kernel = np.ones((5, 5), np.uint8)
    imgDilated = cv2.dilate(imgCanny, kernel, iterations=1)
    imgEroded = cv2.erode(imgDilated, kernel, iterations=1)

    cv2.imshow("Eroded Output", imgEroded)
    cv2.waitKey(0)
Example #21
def resizeImg(imgPath, newWidth, newHeight):
    '''Unit of measure: pixels.'''
    img = cv2.imread(imgPath)
    height, width, numChannels = img.shape

    imgResize = cv2.resize(img, (newWidth, newHeight))

    cv2.imshow("Resized Output", imgResize)
    print("Change in height is:", height - newHeight, ". Change in width is:",
          width - newWidth)
    cv2.waitKey(0)
Example #22
def image_verification(image_path):

    # read in the image (OpenCV loads it as BGR)
    image = cv.imread(image_path)

    # convert to grayscale and apply a Gaussian blur to remove noise
    gray_image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    blur = cv.GaussianBlur(gray_image, (5, 5), 0)

    # display the image that has had noise removed
    cv.imshow('result', blur)
    cv.waitKey(0)
Example #23
def feature12():

    # DeepDream-style optimization that maximizes the features of layer 12 only
    image = load_image(filename='image.jpg')
    plot_image(image)
    layer_tensor = model.layer_tensors[12]
    img_result = recursive_optimize(layer_tensor=layer_tensor, image=image,
                                    num_iterations=10, step_size=3.0,
                                    rescale_factor=0.7, num_repeats=4,
                                    blend=0.2)
    save_image(img_result, filename='transformed.png')
    image = cv2.imread("transformed.png", 1)
    cv2.imshow("Wavy", image)
    cv2.waitKey(0)
Example #24
def warpPerspectiveImg(imgPath):
    # Takes in a set of cards photographed at a slant and returns one card top-down

    img = cv2.imread(imgPath)

    # Define the four corners of the selected card (source) and the
    # corresponding corners of the output rectangle (destination)
    width, height = 250, 350
    pts1 = np.float32([[111, 219], [287, 188], [154, 482], [352, 440]])
    pts2 = np.float32([[0, 0], [width, 0], [0, height], [width, height]])

    matrix = cv2.getPerspectiveTransform(pts1, pts2)
    imgOut = cv2.warpPerspective(img, matrix, (width, height))
    cv2.imshow("Warped Output", imgOut)
    cv2.waitKey(0)
Example #25
def getImages(path):
    ImagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    FaceList = []
    IDs = []
    # Import images from the dataset, store each as a numpy array in a list;
    # the IDs are stored in another list
    for ImagePath in ImagePaths:
        faceImage = Image.open(ImagePath).convert('L')  # open a single image and convert it to grayscale
        faceImage = faceImage.resize((110, 110))  # resize so the Eigenface recognizer can use it
        faceNP = np.array(faceImage, 'uint8')  # convert the image into a numpy array
        ID = int(os.path.split(ImagePath)[-1].split('.')[1])  # ID parsed from the filename
        IDs.append(ID)  # add the ID to the list of IDs
        FaceList.append(faceNP)  # add the array to the list
        cv2.imshow('Trainingset', faceNP)  # show each image in the list
        cv2.waitKey(1)
    return np.array(IDs), FaceList  # IDs converted into a numpy array
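
The returned labels and face list can feed the Eigenface recognizer the comments mention. A hedged usage sketch (requires the opencv-contrib-python package for cv2.face; the 'dataSet' path and output filename are assumptions):

IDs, faces = getImages('dataSet')
recognizer = cv2.face.EigenFaceRecognizer_create()
recognizer.train(faces, IDs)          # Eigenfaces needs same-size images, hence the 110x110 resize
recognizer.write('trainer.yml')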
Example #26
def combineImages(imgPath1, imgPath2):

    # Stack the images next to each other
    ## They must share the same number of channels
    img = cv2.imread(imgPath1)
    img2 = cv2.imread(imgPath2)
    imgHor = np.hstack((img, img2))
    cv2.imshow("Horiz Output", imgHor)

    # Stack the images on top of each other
    imgVert = np.vstack((img, img2))
    cv2.imshow("Vert Output", imgVert)

    cv2.waitKey(0)
Example #27
File: new.py  Project: KwakNW/POSCO_B3_AI
    def Detection(self, img):
        height, width, _ = img.shape

        # build a 416x416 blob, scaling pixels to [0,1] and swapping BGR->RGB
        blob = cv2.dnn.blobFromImage(img,
                                     1 / 255, (416, 416), (0, 0, 0),
                                     swapRB=True,
                                     crop=False)
        self.net.setInput(blob)

        output_layers_names = self.net.getUnconnectedOutLayersNames()
        layerOutputs = self.net.forward(output_layers_names)

        boxes = []
        confidences = []
        class_ids = []

        for output in layerOutputs:
            for detection in output:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                if confidence > 0.5:
                    # YOLO outputs box centers and sizes relative to the image
                    center_x = int(detection[0] * width)
                    center_y = int(detection[1] * height)
                    w = int(detection[2] * width)
                    h = int(detection[3] * height)

                    x = int(center_x - w / 2)
                    y = int(center_y - h / 2)

                    boxes.append([x, y, w, h])
                    confidences.append(float(confidence))
                    class_ids.append(class_id)

        # non-maximum suppression removes overlapping boxes
        indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
        font = cv2.FONT_HERSHEY_PLAIN
        colors = np.random.uniform(0, 255, size=(len(boxes), 3))

        for i in indexes.flatten():
            x, y, w, h = boxes[i]
            label = str(self.classes[class_ids[i]])
            confidence = str(round(confidences[i], 2))
            color = colors[i]
            cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
            cv2.putText(img, label + ' ' + confidence, (x, y + 20), font, 2,
                        (255, 255, 255), 2)

        cv2.imshow('Image', img)
        cv2.waitKey(0)
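
Detection relies on self.net and self.classes from the constructor. A hedged sketch of how these are typically initialized for YOLO (the class name and the weight/config/name files are placeholders, not taken from the original project):

import cv2

class Detector:  # hypothetical class name
    def __init__(self, cfg='yolov3.cfg', weights='yolov3.weights',
                 names='coco.names'):
        self.net = cv2.dnn.readNet(weights, cfg)
        with open(names) as f:
            self.classes = [line.strip() for line in f]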
Example #28
def test_gamma():
    img = cv2.imread('faces/青木.jpg')
    image_m = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    image_lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)

    cv2.imshow('', img)
    cv2.waitKey(1000)
    g = gamma()
    print(type(img), img.shape)
    # show the original and the gamma-enhanced image side by side
    img2 = g.enhance(np.array(img))
    img3 = np.hstack((img, img2))
    cv2.imshow('', img3)
    cv2.waitKey(1000)

    exit(0)
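
The gamma class isn't shown. A plausible sketch of a lookup-table gamma enhancer matching the call sites above (the class shape and the default gamma of 1.5 are assumptions; only the cv2.LUT call is standard OpenCV):

import cv2
import numpy as np

class gamma:
    def __init__(self, g=1.5):  # gamma value is an arbitrary assumption
        # precompute the 256-entry table for (i/255)^(1/g) * 255
        inv = 1.0 / g
        self.table = np.array([((i / 255.0) ** inv) * 255
                               for i in range(256)]).astype('uint8')

    def enhance(self, img):
        return cv2.LUT(img, self.table)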
Example #29
def detectShapes(imgPath):
    img = cv2.imread(imgPath)

    # Pre-process: grayscale, blur, then Canny edges
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_blur = cv2.GaussianBlur(img_gray, ksize=(7, 7), sigmaX=1)
    img_canny = cv2.Canny(img_blur, threshold1=50, threshold2=50)

    detectContours(img_canny)

    cv2.imshow("Output", img)
    cv2.imshow("Canny Output", img_canny)
    cv2.imshow("Gray Output", img_gray)
    cv2.imshow("Blur Output", img_blur)
    cv2.waitKey(0)
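
detectContours isn't included in this example. A minimal sketch of what such a helper usually does (contour extraction plus polygon approximation, using the OpenCV 4 findContours signature; the original's exact behavior is unknown):

import cv2

def detectContours(img_canny):
    contours, _ = cv2.findContours(img_canny, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_NONE)
    for cnt in contours:
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)
        print(len(approx), 'corners, area', cv2.contourArea(cnt))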
Example #30
def screenRec():

    fourcc = cv2.VideoWriter_fourcc(*'XVID')

    fps = 8.0

    out = cv2.VideoWriter('output.avi', fourcc, fps, (1366, 768))

    while (True):

        img = ImageGrab.grab()

        img_np = np.array(img)

        frame = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)  # ImageGrab returns RGB; VideoWriter expects BGR

        win_title = "Screen Recorder"
        cv2.imshow(win_title, frame)

        out.write(frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    out.release()

    cv2.destroyAllWindows()
Example #31
def test_video():
    video = cv2.VideoCapture("../tests/test.h264")
    assert video is not None
    height, width = 300, 300
    choices = ["L", "R", "L", "R"]
    state = BifurcationState(height, width, choices)
    while True:
        ret, img = video.read()
        assert ret
        assert img is not None
        img = cv2.resize(img, (width, height), interpolation=cv2.INTER_CUBIC)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.GaussianBlur(img, (11, 11), 0)
        img, pts = get_tracking_data(
            img, img, state, pt_count=20, skip=True, debug=True, choose_thin=False, split_detection=True
        )
        print(pts[0])
        plt.imshow(img, cmap="gray", interpolation="bicubic")
        plt.xticks([]), plt.yticks([])  # hide tick values on the X and Y axes
        plt.show()
        cv2.waitKey()
Example #32
import numpy as np 
import cv2

cap = cv2.VideoCapture(1) #laptop webcam == 1
fourcc = cv2.VideoWriter_fourcc(*'MJPG')  # cv2.cv.CV_FOURCC was the OpenCV 2 API
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640,480))

while(cap.isOpened()):
	ret, frame = cap.read()
	if ret == True: 
		frame = cv2.flip(frame,0)
		out.write(frame)
		cv2.imshow('frame', frame)

		if cv2.waitKey(1) & 0xFF == ord('q'):
			break
	else:
		break

cap.release()
out.release()
cv2.destroyAllWindows()
Example #33
import cv2
import glob
import numpy as np

# termination criteria for the sub-pixel corner refinement
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# prepare object points for a 7x6 chessboard: (0,0,0), (1,0,0), ..., (6,5,0)
objp = np.zeros((6*7, 3), np.float32)
objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2)

objpoints = []
imgpoints = []

images = glob.glob('*.jpg')

for frame in images:
  img = cv2.imread(frame)
  gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

  ret, corners = cv2.findChessboardCorners(gray, (7, 6), None)

  if ret == True:
    objpoints.append(objp)

    corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
    imgpoints.append(corners2)

    cv2.drawChessboardCorners(img, (7, 6), corners2, ret)
    cv2.imshow('img', img)
    cv2.waitKey(500)

cv2.destroyAllWindows()
Example #34
import numpy as np
import cv2
img=cv2.imread('kukki.png',1)
cv2.imshow('image',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #35
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2

camera = PiCamera()
camera.resolution = (320, 240)
camera.framerate = 32
# the capture buffer size must match the camera resolution
rawCapture = PiRGBArray(camera, size=(320, 240))

time.sleep(0.1)  # give the camera time to warm up
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
	image = frame.array

	cv2.imshow("Frame", image)
	key = cv2.waitKey(1) & 0xFF

	# clear the stream for the next frame
	rawCapture.truncate(0)

	if key == ord("q"):
		break
Example #36
import cv2
import numpy as np

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')

cap = cv2.VideoCapture(0)

while True:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        # look for eyes only within each detected face region
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)

    cv2.imshow('img', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # Esc quits
        break

cap.release()
cv2.destroyAllWindows()