Example #1
import cv2 as cv
import matplotlib.pyplot as plt


def LaminaTransCorImage(image, color_space, flagprint):

    if color_space == 'gray':
        image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)

    elif color_space == 'ycrb' or color_space == 'ycc':
        image = cv.cvtColor(image, cv.COLOR_BGR2YCrCb)

    elif color_space == 'lab':
        image = cv.cvtColor(image, cv.COLOR_BGR2LAB)

    elif color_space == 'xyz':
        image = cv.cvtColor(image, cv.COLOR_BGR2XYZ)

    elif color_space == 'hls':
        image = cv.cvtColor(image, cv.COLOR_BGR2HLS)

    else:
        image = cv.cvtColor(image, cv.COLOR_RGB2RGBA)

    if flagprint == 1:
        plt.figure(figsize=(20, 10))
        # grayscale images have no channels to re-order, so only convert 3-channel images
        if len(image.shape) == 3:
            plt.imshow(cv.cvtColor(image, cv.COLOR_BGR2RGB))
        else:
            plt.imshow(image, cmap='gray')

    # keep only the requested channels ('012' keeps all three)
    channel = '012'
    if channel != 'all' and len(image.shape) == 3:
        channel_indices = [int(char) for char in channel]
        image = image[:, :, channel_indices]
        if len(image.shape) == 2:
            image = image.reshape(image.shape[0], image.shape[1], 1)

    return image
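
A minimal usage sketch, assuming the image is loaded with cv.imread (so it arrives in BGR order); the file name leaf.png is hypothetical:

# hypothetical input file; cv.imread returns a BGR image
img = cv.imread('leaf.png')

# convert to Lab; flagprint=1 also plots the result
lab = LaminaTransCorImage(img, 'lab', 1)
print(lab.shape)
plt.show()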
Example #2
import cv2
import numpy as np


def abs_sobel_thresh(img, orient='x', thresh_min=0, thresh_max=255):

    # Apply the following steps to img
    # 1) Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # 2) Take the derivative in x or y given orient = 'x' or 'y'
    if orient == 'x':
        sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0)
    elif orient == 'y':
        sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1)
    # 3) Take the absolute value of the derivative or gradient
    abs_sobel = np.absolute(sobel)
    # 4) Scale to 8-bit (0 - 255) then convert to type = np.uint8
    scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
    # 5) Create a mask of 1's where the scaled gradient magnitude
    #    is >= thresh_min and <= thresh_max
    sbinary = np.zeros_like(scaled_sobel)
    sbinary[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
    # 6) Return this mask as your binary_output image
    return sbinary
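
A minimal usage sketch, assuming an image loaded in RGB order (test_image.jpg is a hypothetical file name): keep pixels whose scaled x-gradient falls between 20 and 100.

import matplotlib.pyplot as plt

# hypothetical input file, converted from OpenCV's BGR to RGB
img = cv2.cvtColor(cv2.imread('test_image.jpg'), cv2.COLOR_BGR2RGB)
grad_binary = abs_sobel_thresh(img, orient='x', thresh_min=20, thresh_max=100)

plt.imshow(grad_binary, cmap='gray')
plt.show()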
import cv2
# face classifier
face_detector = cv2.CascadeClassifier(
    'C:/Users/Aditya/Desktop/python/MachineLearning/extras/haarcascade_frontalface_default.xml'
)
smile_detector = cv2.CascadeClassifier(
    'C:/Users/Aditya/Desktop/python/MachineLearning/extras/haarcascade_smile.xml'
)
# open the webcam
webcam = cv2.VideoCapture(0)
while True:
    # read a single frame from the webcam each iteration
    successful_frame_read, frame = webcam.read()

    # convert to grayscale for the Haar classifiers
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    #detect faces
    faces = face_detector.detectMultiScale(frame_gray)
    smiles = smile_detector.detectMultiScale(
        frame_gray, scaleFactor=1.7, minNeighbors=20
    )  # scaleFactor controls how much the image is shrunk between detection passes
    # minNeighbors sets how many neighbouring rectangles a detection needs to be kept

    # faces is just an array of bounding boxes
    print(faces)  # print the coordinates of the detected faces
    # use `for (x, y, w, h) in faces:` to draw the face boxes instead
    for (x, y, w, h) in smiles:  # (x, y) is the top-left corner, (w, h) the size
        cv2.rectangle(frame, (x, y), (x + w, y + h), (100, 200, 123), 2)

    cv2.imshow("why so serious", frame)  #display the window

# fragment from the main capture loop of a drawing/recognition script:
# cnts (detected contours), pts (a collections.deque of drawn points), img,
# blackboard, model1 and keras_predict come from the surrounding program
if len(cnts) > 0:
    contour = max(cnts, key=cv2.contourArea)
    if cv2.contourArea(contour) > 250:
        ((x, y), radius) = cv2.minEnclosingCircle(contour)
        # compute the centroid before drawing, so `center` is defined when used
        M = cv2.moments(contour)
        center = (int(M['m10'] / M['m00']), int(M['m01'] / M['m00']))
        cv2.circle(img, (int(x), int(y)), int(radius), (0, 255, 255), 2)
        cv2.circle(img, center, 5, (0, 0, 255), -1)
        pts.appendleft(center)
        for i in range(1, len(pts)):
            if pts[i - 1] is None or pts[i] is None:
                continue
            cv2.line(blackboard, pts[i - 1], pts[i], (255, 255, 255), 10)
            cv2.line(img, pts[i - 1], pts[i], (0, 0, 255), 5)
elif len(cnts) == 0:
    if len(pts) != 0:
        blackboard_gray = cv2.cvtColor(blackboard, cv2.COLOR_BGR2GRAY)
        blur1 = cv2.medianBlur(blackboard_gray, 15)
        blur1 = cv2.GaussianBlur(blur1, (5, 5), 0)
        thresh1 = cv2.threshold(blur1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
        # OpenCV 3 returns (image, contours, hierarchy); use index [0] on OpenCV 4
        blackboard_cnts = cv2.findContours(thresh1.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[1]
        if len(blackboard_cnts) >= 1:
            cnt = max(blackboard_cnts, key=cv2.contourArea)
            print(cv2.contourArea(cnt))
            if cv2.contourArea(cnt) > 2000:
                x, y, w, h = cv2.boundingRect(cnt)
                digit = blackboard_gray[y:y + h, x:x + w]
                # newImage = process_letter(digit)
                pred_probab, pred_class = keras_predict(model1, digit)
                print(pred_class, pred_probab)
    # reset the drawing state
    pts = deque(maxlen=512)
    blackboard = np.zeros((480, 640, 3), dtype=np.uint8)
import cv2
import numpy as np

cap = cv2.VideoCapture(0)

while True:
    # cap.read() returns a (success, frame) tuple
    ret, frame = cap.read()

    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # note: hue 0-10 actually selects red tones, despite the variable names
    lower_blue = np.array([0, 50, 50])
    upper_blue = np.array([10, 255, 255])

    mask = cv2.inRange(hsv, lower_blue, upper_blue)

    res = cv2.bitwise_and(frame, frame, mask=mask)

    cv2.imshow("res", res)
    k = cv2.waitKey(5)
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
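
The inRange window above (hue 0-10) only covers the lower red band; in OpenCV's 0-179 hue scale, red also wraps around near 179. A sketch of a mask that covers both bands, under the same assumptions as the loop above:

import cv2
import numpy as np

def red_mask(frame_bgr):
    # combine the low (0-10) and high (170-179) red hue bands
    hsv = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2HSV)
    low = cv2.inRange(hsv, np.array([0, 50, 50]), np.array([10, 255, 255]))
    high = cv2.inRange(hsv, np.array([170, 50, 50]), np.array([179, 255, 255]))
    return cv2.bitwise_or(low, high)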
def convert_to_rgb(img):
    # OpenCV loads images in BGR order; swap to RGB for display libraries
    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
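
A minimal usage sketch, assuming matplotlib for display (photo.jpg is a hypothetical file): without the conversion, the red and blue channels would appear swapped.

import cv2
import matplotlib.pyplot as plt

img = cv2.imread('photo.jpg')    # hypothetical file, loaded as BGR
plt.imshow(convert_to_rgb(img))  # matplotlib expects RGB
plt.show()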
        print "Chosen Autonomous:", message.payload


def on_connect(client, userdata, flags, rc):
    print "connected with result code", rc
    client.subscribe("Vision/Frame")


def on_disconnect(client, userdata, rc):
    print "disconnected with code", rc


mqttclient = client.Client()

mqttclient.on_message = on_message
mqttclient.on_connect = on_connect
mqttclient.on_disconnect = on_disconnect
# a call like mqttclient.connect(<broker-host>) is assumed to happen before the loop below

while True:
    # sock is assumed to be a UDP socket, set up and bound elsewhere
    data, addr = sock.recvfrom(1024000)
    # print("received frame")
    data2 = numpy.frombuffer(data, dtype='uint8')
    decimg = cv2.imdecode(data2, 1)
    # draw a rectangle over the centre third of the frame
    height, width = decimg.shape[:2]
    cv2.rectangle(decimg, (int(width / 3.0), int(height / 3.0)),
                  (int(width * 2.0 / 3.0), int(height * 2.0 / 3.0)),
                  (255, 0, 0), 3)
    cv2.imshow('frame', decimg)
    cv2.waitKey(1)
    #print "finished frame"
    mqttclient.loop()