        if confidence > 0.5:
            # Object detected
            center_x = int(detection[0] * width)
            center_y = int(detection[1] * height)
            w = int(detection[2] * width)
            h = int(detection[3] * height)

            # Rectangle coordinates
            x = int(center_x - w / 2)
            y = int(center_y - h / 2)

            boxes.append([x, y, w, h])
            confidences.append(float(confidence))
            class_ids.append(class_id)

indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
print(indexes)
font = cv2.FONT_HERSHEY_PLAIN
for i in range(len(boxes)):
    if i in indexes:
        x, y, w, h = boxes[i]
        label = str(classes[class_ids[i]])
        color = colors[i]
        cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
        cv2.putText(img, label, (x, y + 30), font, 3, color, 3)
        


cv2.imshow("Image", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
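
This snippet starts mid-loop, so detection, confidence, and class_id come from an earlier YOLO forward pass that is not shown. A minimal sketch of that assumed upstream context (net, output_layers, and the 416x416 blob size are assumptions, not part of the original):

import cv2
import numpy as np

# hypothetical setup: net = cv2.dnn.readNet("yolov3.weights", "yolov3.cfg")
blob = cv2.dnn.blobFromImage(img, 1 / 255.0, (416, 416), swapRB=True, crop=False)
net.setInput(blob)
outs = net.forward(output_layers)

boxes, confidences, class_ids = [], [], []
for out in outs:
    for detection in out:
        scores = detection[5:]  # class scores follow the 4 box values + objectness
        class_id = np.argmax(scores)
        confidence = scores[class_id]
        # ...Example #1 continues from here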
Example #2
        tiles.append(
            cv2.resize(image[x_start:x_end, y_start:y_end],
                       (IMG_WIDTH, IMG_HEIGHT)))

        print("{}:{}x{}:{}".format(x_start, x_end, y_start, y_end))

# predict for every tile and append to result
num = 1
for tile in tiles:
    show = cv2.resize(tile.copy(), (640, 360))
    tile = np.expand_dims(img_to_array(tile.astype("float") / 255.0), axis=0)

    (nm, m) = model.predict(tile)[0]
    probability = max(nm, m) * 100
    has_marker = m > nm

    cv2.putText(show, "{}, {:.1f}% sure".format(has_marker, probability),
                (200, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (200, 255, 40), 2)
    cv2.imshow("Tile #{}".format(num), show)
    cv2.waitKey(2500)
    results.append([has_marker, probability])
    num += 1

# predict whole image for debugging purposes
# (the original re-used the last tile here; resize and normalise the full image instead)
whole = cv2.resize(image, (IMG_WIDTH, IMG_HEIGHT))
whole = np.expand_dims(img_to_array(whole.astype("float") / 255.0), axis=0)
(nm, m) = model.predict(whole)[0]
probability = max(nm, m) * 100
has_marker = m > nm

image = cv2.resize(image, (640, 480))
cv2.putText(image, "{}, {:.1f}% sure".format(has_marker, probability), (200, 25),
            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (200, 255, 40), 2)
cv2.imshow("Image", image)
cv2.waitKey(2500)
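
The top of this example is truncated; given the tiles.append(...) fragment and the x_start/x_end prints, tiles is presumably built by a nested loop sliding a window over image. A hedged sketch of that assumed loop (the 2x2 grid is an arbitrary choice; note numpy slices are [rows, cols], so "x" here is the row axis, matching the original naming):

tiles = []
tile_h, tile_w = image.shape[0] // 2, image.shape[1] // 2
for x_start in range(0, image.shape[0], tile_h):
    for y_start in range(0, image.shape[1], tile_w):
        x_end, y_end = x_start + tile_h, y_start + tile_w
        tiles.append(
            cv2.resize(image[x_start:x_end, y_start:y_end],
                       (IMG_WIDTH, IMG_HEIGHT)))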
Example #3
cap = cv2.VideoCapture(0)
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files (x86)\Tesseract-OCR\tesseract.exe"

while (True):
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Our operations on the frame come here
    # tesseract expects RGB ordering; the original converted to RGBA but named it "gray"
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    boxes = pytesseract.image_to_data(rgb)
    for i, b in enumerate(boxes.splitlines()):
        if i != 0:  # skip the TSV header row
            b = b.split()
            print(b)
            if len(b) == 12:  # rows with recognised text have 12 columns
                x, y, w, h = int(b[6]), int(b[7]), int(b[8]), int(b[9])
                cv2.rectangle(rgb, (x, y), (w + x, h + y), (0, 0, 255), 3)
                cv2.putText(rgb, b[11], (x, y), cv2.FONT_HERSHEY_COMPLEX, 1,
                            (50, 50, 255), 2)

    # Display the resulting frame
    cv2.imshow('frame', rgb)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
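
Splitting the raw TSV string from image_to_data is fragile: rows without recognised text have only 11 columns, which is what the len(b) == 12 guard works around. pytesseract can instead return a dict, which avoids the string parsing; a sketch:

data = pytesseract.image_to_data(rgb, output_type=pytesseract.Output.DICT)
for i in range(len(data["text"])):
    if data["text"][i].strip():  # skip empty detections
        x, y = data["left"][i], data["top"][i]
        w, h = data["width"][i], data["height"][i]
        cv2.rectangle(rgb, (x, y), (x + w, y + h), (0, 0, 255), 3)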
Example #4
    # grab the frame from the threaded video stream, resize it to
    # a width of 1024 pixels, and convert it to grayscale
    # (imutils.resize keeps the aspect ratio, so only width is needed)
    frame = vs.read()
    frame = imutils.resize(frame, width=1024)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    size = gray.shape

    # detect faces in the grayscale frame
    rects = detector(gray, 0)

    # check to see if a face was detected, and if so, draw the total
    # number of faces on the frame
    if len(rects) > 0:
        text = "{} face(s) found".format(len(rects))
        cv2.putText(frame, text, (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

    # loop over the face detections
    for rect in rects:
        # compute the bounding box of the face and draw it on the
        # frame
        (bX, bY, bW, bH) = face_utils.rect_to_bb(rect)
        cv2.rectangle(frame, (bX, bY), (bX + bW, bY + bH), (0, 255, 0), 1)
        # determine the facial landmarks for the face region, then
        # convert the facial landmark (x, y)-coordinates to a NumPy
        # array
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)

        # extract the left and right eye coordinates, then use the
        # coordinates to compute the eye aspect ratio for both eyes
        # (fragment truncated here; the usual continuation slices leftEye/rightEye
        # out of shape and averages their aspect ratios)
Example #5
if area <= 662000:
    cv2.namedWindow('image')
else:
    cv2.namedWindow('image', cv2.WINDOW_NORMAL)

cv2.setMouseCallback('image', draw_function)

while True:
    cv2.imshow('image', img)
    if clicked:
        recEnd = (round(width * .735), round(height * .1))
        textStart = (round(width * .05), round(height * .08))
        cv2.rectangle(img, (20, 20), recEnd, (b, g, r), -1)
        text = getColorName(
            r, g, b) + ' R=' + str(r) + ' G=' + str(g) + ' B=' + str(b)
        if r + g + b >= 600:
            cv2.putText(img, text, textStart, cv2.FONT_HERSHEY_TRIPLEX, 1,
                        (0, 0, 0), 1, cv2.LINE_AA)
        else:
            cv2.putText(img, text, textStart, cv2.FONT_HERSHEY_TRIPLEX, 1,
                        (255, 255, 255), 1, cv2.LINE_AA)
        clicked = False

    # break loop when user hits esc key
    if cv2.waitKey(20) & 0xFF == 27:
        break

cv2.destroyAllWindows()
Example #6
import numpy as np
import cv2

image = cv2.imread('lena.jpg', 1)
image = cv2.line(image, (0, 0), (255, 255), (0, 0, 255), 2)
image = cv2.arrowedLine(image, (0, 0), (255, 255), (0, 255, 255), 2)
image = cv2.rectangle(image, (25, 70), (200, 170), (0, 255, 0), 5)
image = cv2.circle(image, (447, 63), 63, (255, 0, 0), 5)
font = cv2.FONT_HERSHEY_SIMPLEX
image = cv2.putText(image, "OpenCV", (10, 500), font, 5, (255, 255, 255), 5,
                    cv2.LINE_AA)

cv2.imshow('image', image)

cv2.waitKey(0)
cv2.destroyAllWindows()
Example #7
import numpy as np
import cv2

img = cv2.imread('shapes.png')
imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(imgray, 240, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

for contour in contours:
    approx = cv2.approxPolyDP(contour, 0.01*cv2.arcLength(contour, True), True)
    cv2.drawContours(img, [approx], 0, (0, 0, 0), 5)
    x = approx.ravel()[0]
    y = approx.ravel()[1]
    if len(approx) == 3:
        cv2.putText(img, "Triangle", (x, y),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0))
    elif len(approx) == 4:
        x, y, w, h = cv2.boundingRect(approx)
        aspectRatio = float(w)/h
        print(aspectRatio)
        if aspectRatio >= 0.95 and aspectRatio <= 1.05:
            cv2.putText(img, "Square", (x, y),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0))
        else:
            cv2.putText(img, "Rectangle", (x, y),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0))
    elif len(approx) == 5:
        cv2.putText(img, "Pentagon", (x, y),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0))
    elif len(approx) == 10:
        cv2.putText(img, "Star", (x, y),
Example #8
colors[:] = c  # tail of a truncated colour-initialisation block (context lost above)

# Start video streaming
vid = cv2.VideoCapture(0)
time.sleep(2)

while (True):
    ret, frame = vid.read()
    (boxes, predictions) = detect_predict(frame, face_detector, mask_detector)

    for (box, pred) in zip(
            boxes,
            predictions):  # loop over predictions with their respective boxes
        (startX, startY, endX, endY) = box  # box coordinates

        index = np.argmax(pred)  # index of predicted class
        confidence = pred[index] * 100
        classified = classes[index]
        color = colors[index]

        # Format label and place it on the frame above the bounding box
        label = "{}: {:.2f}%".format(classified, confidence)
        cv2.putText(frame, label, (startX, startY - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
        cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)

    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # press q to quit
        break

vid.release()
cv2.destroyAllWindows()
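
detect_predict is not shown in this fragment; from the unpacking above it must return index-aligned lists of box corners and class-probability vectors. A hypothetical stub of the expected interface:

def detect_predict(frame, face_detector, mask_detector):
    """Return (boxes, predictions): boxes as (startX, startY, endX, endY)
    tuples, predictions as per-class probability arrays, index-aligned."""
    boxes, predictions = [], []
    # ...run face_detector over frame, crop each face, classify with mask_detector
    return boxes, predictions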
Example #9
            # cv2.circle(img, (center_x, center_y), 10, (0, 255, 0), 2)
            # circle args(center, radius, color/green, thickness)
            # rectangle coordinates
            x = int(center_x - w / 2)
            y = int(center_y - h / 2)
            # cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)

            boxes.append([x, y, w, h])
            confidences.append(float(confidence))
            class_ids.append(class_id)

font = cv2.FONT_HERSHEY_PLAIN
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
# boxes, scores, score threshold, nms threshold

num_of_Obj_detected = len(boxes)
for i in range(num_of_Obj_detected):
    if i in indexes:
        x, y, w, h = boxes[i]
        labels = str(classes[class_ids[i]])
        # print(labels)
        color = colors[i]
        cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
        cv2.putText(img, labels, (x, y + 35), font, 2, color, 2)
        # image, text, origination, fontface, fontscale, color, thickness

cv2.imshow('Image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #10
def read_plate(frame):
    path = "src/alpr/alpr_data/ocr/ocr-net.names"
    labelsPath = path
    LABELS = open(labelsPath).read().strip().split("\n")

    np.random.seed(42)
    COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype="uint8")

    weightsPath = "src/alpr/alpr_data/ocr/ocr-net.data"
    configPath = "src/alpr/alpr_data/ocr/ocr-net.cfg"
    print("Running ocr.py")
    net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)

    #This is not the path of the image
    #it is after cv2 has read the image

    image = frame
    (H, W) = image.shape[:2]

    ln = net.getLayerNames()
    # note: OpenCV < 4.5.4 returns Nx1 index arrays here; newer builds return a
    # flat array, in which case this becomes [ln[i - 1] for i in ...]
    ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]

    blob = cv2.dnn.blobFromImage(image,
                                 1 / 255.0, (128, 128),
                                 swapRB=True,
                                 crop=False)
    net.setInput(blob)
    layerOutputs = net.forward(ln)

    boxes = []
    confidences = []
    classIDs = []
    keylist = LABELS
    dict_boundingbox = {key: [] for key in keylist}
    dict_confidence = {key: [] for key in keylist}

    for output in layerOutputs:
        for detection in output:
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]
            if confidence > 0.4:
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))
                if LABELS[classID] in keylist:
                    dict_boundingbox[LABELS[classID]].append(
                        [x, y, int(width), int(height)])
                    dict_confidence[LABELS[classID]].append(float(confidence))
                confidences.append(float(confidence))
                boxes.append([x, y, int(width), int(height)])
                classIDs.append(classID)
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.3)

    if len(idxs) > 0:
        for i in idxs.flatten():
            (x, y) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])
            color = [int(c) for c in COLORS[classIDs[i]]]
            cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
            text = "{}: {:.4f}".format(LABELS[classIDs[i]], confidences[i])
            cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        color, 2)

    print(dict_boundingbox)
    print(dict_confidence)
    cropped_images = []
    for key in keylist:
        for bbox in dict_boundingbox[key]:
            x = bbox[0]
            y = bbox[1]
            w = bbox[2]
            h = bbox[3]
            cropped_image = image[y:y + h, x:x + w]
            cropped_images.append(cropped_image)

    return cropped_images
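
A hypothetical usage sketch for read_plate (the file name is a placeholder):

plate = cv2.imread("plate_crop.jpg")  # an already-cropped licence plate
chars = read_plate(plate)             # per-character crops, grouped by label
for i, c in enumerate(chars):
    cv2.imwrite("char_{}.png".format(i), c)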
Example #11
        # extract the face ROI, convert it from BGR to RGB channel
        # ordering, resize it to 224x224, and preprocess it
        face = image[startY:endY, startX:endX]
        face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
        face = cv2.resize(face, (224, 224))
        face = img_to_array(face)
        face = preprocess_input(face)
        face = np.expand_dims(face, axis=0)

        # pass the face through the model to determine if the face
        # has a mask or not
        (mask, withoutMask) = model.predict(face)[0]

        # determine the class label and color we'll use to draw
        # the bounding box and text
        label = "Mask" if mask > withoutMask else "No Mask"
        color = (0, 255, 0) if label == "Mask" else (0, 0, 255)

        # include the probability in the label
        label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)

        # display the label and bounding box rectangle on the output
        # frame
        cv2.putText(image, label, (startX, startY - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 1)
        cv2.rectangle(image, (startX, startY), (endX, endY), color, 1)

# show the output image
cv2.imshow("Output", image)
cv2.waitKey(0)
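
This fragment matches the common Keras/MobileNetV2 mask-detector pipeline; if so, the preprocessing helpers above would come from imports like these (an assumption, not confirmed by the snippet):

from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array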
Example #12
    encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)

    for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):
        matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
        faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
        matchIndex = np.argmin(faceDis)

        # Known Face is detected
        if faceDis[matchIndex] < 0.50:
            name = className[matchIndex].upper()
            y1, x2, y2, x1 = faceLoc
            y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4  # Resize back to original size
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 255, 0),
                          cv2.FILLED)
            cv2.putText(img, name, (x1 + 3, y2 - 6), cv2.FONT_HERSHEY_COMPLEX,
                        1, (255, 255, 255), 1)
            time.sleep(10)
            markAttendance(name)

        else:  # If unknown Face is detected
            name = 'Unknown'
            y1, x2, y2, x1 = faceLoc
            y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 255, 0),
                          cv2.FILLED)
            cv2.putText(img, name, (x1 + 3, y2 - 6), cv2.FONT_HERSHEY_COMPLEX,
                        1, (255, 255, 255), 1)
    cv2.imshow('face', img)
    if cv2.waitKey(1) == ord('q'):  # Exit by pressing 'q'
        break
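
The * 4 rescaling of faceLoc implies the frame was shrunk to a quarter of its size before encoding, as in the usual face_recognition attendance tutorials. A sketch of that assumed preprocessing earlier in the loop (hypothetical, inferred from the scaling factor):

imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)  # quarter-size for speed
imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)      # face_recognition expects RGB
facesCurFrame = face_recognition.face_locations(imgS)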
Example #13
    def Fillattendances():
        sub = tx.get()
        now = time.time()  # timestamp used to bound the 20-second capture window
        future = now + 20
        if time.time() < future:
            if sub == '':
                err_screen1()
            else:
                recognizer = cv2.face.LBPHFaceRecognizer_create()  # cv2.createLBPHFaceRecognizer()
                try:
                    recognizer.read(r"TrainingImageLabel\Trainner.yml")
                except Exception:
                    e = 'Model not found, please train the model'
                    Notifica.configure(text=e, bg="red", fg="black", width=33, font=('times', 15, 'bold'))
                    Notifica.place(x=20, y=250)

                harcascadePath = "haarcascade_frontalface_default.xml"
                faceCascade = cv2.CascadeClassifier(harcascadePath)
                df = pd.read_csv(r"StudentDetails\StudentDetails.csv")
                cam = cv2.VideoCapture(0)
                font = cv2.FONT_HERSHEY_SIMPLEX
                col_names = ['Enrollment', 'Name', 'Date', 'Time']
                attendance = pd.DataFrame(columns=col_names)
                while True:
                    ret, im = cam.read()
                    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
                    faces = faceCascade.detectMultiScale(gray, 1.2, 5)
                    for (x, y, w, h) in faces:
                        global Id

                        Id, conf = recognizer.predict(gray[y:y + h, x:x + w])
                        if conf < 70:
                            print(conf)
                            global Subject
                            global aa
                            global date
                            global timeStamp
                            Subject = tx.get()
                            ts = time.time()
                            date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
                            timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
                            aa = df.loc[df['Enrollment'] == Id]['Name'].values
                            global tt
                            tt = str(Id) + "-" + aa
                            En = '15624031' + str(Id)
                            attendance.loc[len(attendance)] = [Id, aa, date, timeStamp]
                            cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 7)
                            cv2.putText(im, str(tt), (x + h, y), font, 1, (255, 255, 0), 4)

                        else:
                            Id = 'Unknown'
                            tt = str(Id)
                            cv2.rectangle(im, (x, y), (x + w, y + h), (0, 25, 255), 7)
                            cv2.putText(im, str(tt), (x + h, y), font, 1, (0, 25, 255), 4)
                    if time.time() > future:
                        break

                    attendance = attendance.drop_duplicates(['Enrollment'], keep='first')
                    cv2.imshow('Filling attendance..', im)
                    key = cv2.waitKey(30) & 0xff
                    if key == 27:
                        break

                ts = time.time()
                date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
                timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
                Hour, Minute, Second = timeStamp.split(":")
                fileName = "Attendance/" + Subject + "_" + date + "_" + Hour + "-" + Minute + "-" + Second + ".csv"
                attendance = attendance.drop_duplicates(['Enrollment'], keep='first')
                print(attendance)
                attendance.to_csv(fileName, index=False)

                ##Create table for Attendance
                date_for_DB = datetime.datetime.fromtimestamp(ts).strftime('%Y_%m_%d')
                DB_Table_name = str(Subject + "_" + date_for_DB + "_Time_" + Hour + "_" + Minute + "_" + Second)
                import pymysql.connections

                ###Connect to the database
                try:
                    global cursor
                    connection = pymysql.connect(host='localhost', user='******', password='', db='Face_reco_fill')
                    cursor = connection.cursor()
                except Exception as e:
                    print(e)

                sql = "CREATE TABLE " + DB_Table_name + """
                (ID INT NOT NULL AUTO_INCREMENT,
                 ENROLLMENT varchar(100) NOT NULL,
                 NAME VARCHAR(50) NOT NULL,
                 DATE VARCHAR(20) NOT NULL,
                 TIME VARCHAR(20) NOT NULL,
                     PRIMARY KEY (ID)
                     );
                """
                ####Now enter attendance in Database
                insert_data =  "INSERT INTO " + DB_Table_name + " (ID,ENROLLMENT,NAME,DATE,TIME) VALUES (0, %s, %s, %s,%s)"
                VALUES = (str(Id), str(aa), str(date), str(timeStamp))
                try:
                    cursor.execute(sql)  # create the attendance table
                    cursor.execute(insert_data, VALUES)  # insert the attendance row
                    connection.commit()  # PyMySQL does not autocommit by default
                except Exception as ex:
                    print(ex)

                M = 'Attendance filled Successfully'
                Notifica.configure(text=M, bg="Green", fg="white", width=33, font=('times', 15, 'bold'))
                Notifica.place(x=20, y=250)

                cam.release()
                cv2.destroyAllWindows()

                import csv
                import tkinter
                root = tkinter.Tk()
                root.title("Attendance of " + Subject)
                root.configure(background='snow')
                cs = 'C:/Users/kusha/PycharmProjects/Attendace managemnt system/' + fileName
                with open(cs, newline="") as file:
                    reader = csv.reader(file)
                    r = 0

                    for col in reader:
                        c = 0
                        for row in col:
                            # some styling added to the cells
                            label = tkinter.Label(root, width=8, height=1, fg="black", font=('times', 15, ' bold '),
                                                  bg="lawn green", text=row, relief=tkinter.RIDGE)
                            label.grid(row=r, column=c)
                            c += 1
                        r += 1
                root.mainloop()
                print(attendance)
Example #14
#!/usr/bin/env python

import numpy as np
import cv2 as cv

def clock():
    return cv.getTickCount() / cv.getTickFrequency()

def draw_str(dst, target, s):
    # Python 3 removed tuple parameters from function signatures; unpack explicitly
    x, y = target
    cv.putText(dst, s, (x+1, y+1), cv.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness=2, lineType=cv.LINE_AA)
    cv.putText(dst, s, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv.LINE_AA)

def detect(img, cascade):
    #rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30), flags=cv.CASCADE_SCALE_IMAGE)
    #rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(80, 80), flags=cv.CASCADE_SCALE_IMAGE)
    rects = cascade.detectMultiScale(img, scaleFactor=1.1, minNeighbors=3, minSize=(80, 80), flags=cv.CASCADE_SCALE_IMAGE)
    if len(rects) == 0:
        return []
    rects[:,2:] += rects[:,:2]
    return rects

def draw_rects(img, rects, color):
    for x1, y1, x2, y2 in rects:
        cv.rectangle(img, (x1, y1), (x2, y2), color, 2)

if __name__ == '__main__':

    #cascade_fn = "/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml"
    cascade_fn = "/usr/local/share/OpenCV/lbpcascades/lbpcascade_frontalface.xml"
    #nested_fn  = "/usr/local/share/OpenCV//haarcascades/haarcascade_eye.xml"
Example #15
def escreve(img, texto, cor=(255, 0, 0)):
    # "escreve" (Portuguese: "write") draws a status line in the top-left corner
    fonte = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(img, texto, (10, 20), fonte, 0.5, cor, 1, cv2.LINE_AA)  # thickness must be >= 1
Example #16
        for (markerCorner, markerID) in zip(corners, ids):
            corners = markerCorner.reshape((4, 2))
            (topLeft, topRight, bottomRight, bottomLeft) = corners

            topRight = (int(topRight[0]), int(topRight[1]))
            bottomRight = (int(bottomRight[0]), int(bottomRight[1]))
            bottomLeft = (int(bottomLeft[0]), int(bottomLeft[1]))
            topLeft = (int(topLeft[0]), int(topLeft[1]))

            cX = int((topLeft[0] + bottomRight[0]) / 2.0)
            cY = int((topLeft[1] + bottomRight[1]) / 2.0)

            frame = cv2.flip(frame, 1)

            if cY < LINE:
                cv2.putText(frame, "Up", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1,
                            (0, 255, 0), 3)
            else:
                cv2.putText(frame, "Down" + str(current_time), (10, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 3)
                current_time += time.time()

            if current_time > TIME and markerID == 5:
                keyboard.press_and_release("enter")
                current_time = 0

    cv2.namedWindow('Frame', cv2.WINDOW_AUTOSIZE)
    #cv2.rectangle(frame, (W // 4, H // 4), ((W // 4) * 3, (H // 4) * 3), (0, 0, 255), 3)
    cv2.line(frame, (0, LINE), (W, LINE), (0, 255, 0), 2)
    cv2.imshow("Frame", frame)

    key = cv2.waitKey(1) & 0xFF
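
corners and ids in this fragment would come from an ArUco detection step earlier in the loop; a hedged sketch using the older opencv-contrib API (the dictionary choice is an assumption; OpenCV 4.7+ uses cv2.aruco.getPredefinedDictionary and cv2.aruco.ArucoDetector instead):

arucoDict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_4X4_50)
arucoParams = cv2.aruco.DetectorParameters_create()
(corners, ids, rejected) = cv2.aruco.detectMarkers(frame, arucoDict,
                                                   parameters=arucoParams)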
Example #17
black_img = cv2.circle(black_img, (302, 256), 40, (255, 0, 0), -1)
# bottom-right inner circle, filled black
black_img = cv2.circle(black_img, (302, 256), 16, (0, 0, 0), -1)

# cover it with a filled four-cornered polygon
pts = np.array([[256, 176], [210, 256], [250, 256], [276, 210]], np.int32)
pts = pts.reshape((-1, 1, 2))
black_img = cv2.fillPoly(black_img, [pts], (0, 0, 0))

# second cover, using a triangle
pts2 = np.array([[276, 210], [302, 256], [322, 210]], np.int32)
pts2 = pts2.reshape((-1, 1, 2))
black_img = cv2.fillPoly(black_img, [pts2], (0, 0, 0))

# OpenCV
font = cv2.FONT_HERSHEY_SIMPLEX
black_img = cv2.putText(black_img, 'OpenCV', (140, 355), font, 2,
                        (255, 255, 200), 4, cv2.LINE_AA)

# white_img = 255-black_img
lower_red = np.array([1, 1, 1])
upper_red = np.array([255, 255, 255])
# mask -> 1 channel (the 4th argument was being passed as the unused dst parameter)
white_img = cv2.inRange(black_img, lower_red, upper_red)

cv2.imshow('draw exercise', black_img)

cv2.imshow("white", white_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #18
# (the start of this call was lost in truncation; reconstructed here, with
#  face_cascade and gray as assumed names)
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5,
                                      minSize=(30, 30),
                                      flags=cv2.CASCADE_SCALE_IMAGE)

if len(faces) > 0:

    token = 0
    for f in faces:
        face = sorted(faces,
                      reverse=True,
                      key=lambda x: (x[2] - x[0]) * (x[3] - x[1]))[token]
        (fX, fY, fW, fH) = face
        token += 1
        roi = frame[fY:fY + fH, fX:fX + fW]
        roi = cv2.resize(roi, (48, 48))
        roi = roi.astype("float") / 255.0
        roi = img_to_array(roi)
        roi = np.expand_dims(roi, axis=0)
        preds = emotion_classifier.predict(roi)[0]
        emotion_probability = np.max(preds)
        label = EMOTIONS[preds.argmax()]
        cv2.putText(orig_frame, label, (fX, fY - 10), cv2.FONT_HERSHEY_SIMPLEX,
                    0.45, (0, 0, 255), 2)
        cv2.rectangle(orig_frame, (fX, fY), (fX + fW, fY + fH), (0, 0, 255), 2)

cv2.imshow('test_face', orig_frame)
cv2.imwrite('test2_output/' + img_path.split('/')[-1], orig_frame)
if (cv2.waitKey(2000) & 0xFF == ord('q')):
    sys.exit("Thanks")

time.sleep(10)
cv2.destroyAllWindows()
Example #19
def button_check():
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(1, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    while True:
        input_state = GPIO.input(1)
        if not input_state:
            print('f**k')
            settings.IS_FRIEND = True
            time_start = time.time()
            recognizer = cv2.face.LBPHFaceRecognizer_create()
            recognizer.read('/home/pi/python/MicroprocessorCourseWork/micro/core/trainer/trainer.yml')
            cascadePath = '/home/pi/python/recognition/haarcascade_frontalface_default2.xml'
            faceCascade = cv2.CascadeClassifier(cascadePath)

            font = cv2.FONT_HERSHEY_SIMPLEX

            # iniciate id counter
            id = 0

            # names related to ids: example ==> Marcelo: id=1,  etc
            names = ['None', 'Marcelo', 'Paula', 'Ilza', 'Z', 'W']

            # Initialize and start realtime video capture
            cam = cv2.VideoCapture(0)
            cam.set(3, 640)  # set video width
            cam.set(4, 480)  # set video height

            # Define min window size to be recognized as a face
            minW = 0.1 * cam.get(3)
            minH = 0.1 * cam.get(4)

            sleep(2)

            while True:
                ret, img = cam.read()
                #img = cv2.flip(img, -1)  # Flip vertically
                gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

                faces = faceCascade.detectMultiScale(
                    gray,
                    scaleFactor=1.2,
                    minNeighbors=5,
                    minSize=(int(minW), int(minH)),
                )
                
                print(faces)

                for (x, y, w, h) in faces:
                    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
                    id, confidence = recognizer.predict(gray[y:y + h, x:x + w])

                    # Check if confidence is less than 100 ==> "0" is a perfect match
                    if confidence < 100:
                        user = User.objects.get(pk=id)
                        print(user.name)
                        confidence = "  {0}%".format(round(100 - confidence))
                    else:
                        id = "unknown"
                        confidence = "  {0}%".format(round(100 - confidence))

                    cv2.putText(img, str(id), (x + 5, y - 5), font, 1, (255, 255, 255), 2)
                    cv2.putText(img, str(confidence), (x + 5, y + h - 5), font, 1, (255, 255, 0), 1)

                    cv2.imwrite('image.jpg', img)

                    invoke_telegram('sendMessage', chat_id=settings.CHAT_ID,
                                    text=f'Someone is trying to enter your home; it may be {user.name} with confidence {confidence}. Open the door? Yes/No')

                    req = requests.post(
                        'https://api.telegram.org/bot%s/sendPhoto' % settings.TELEGRAM_BOT_TOKEN,
                        params={'chat_id': settings.CHAT_ID},
                        files={'photo': open('image.jpg', 'rb')},
                        timeout=30,
                    )

                cv2.imshow('camera', img)

                k = cv2.waitKey(10) & 0xff  # Press 'ESC' for exiting video
                if k == 27:
                    break

            # Do a bit of cleanup
            print("\n [INFO] Exiting Program and cleanup stuff")
            cam.release()
            cv2.destroyAllWindows()
        sleep(1)
    button_check.delay()
Example #20
        # if TOTAL == 1:
        #         pass
        # elif TOTAL == 2 and phone_call_done == 0:
        #         os.system("python /home/apoorv/Desktop/blinkNdo/phone_call_updated.py")
        #         phone_call_done = 1
        # elif TOTAL == 3 and flashlight_done == 0:
        #         os.system("adb shell input tap 540 960")
        #         flashlight_done = 1
        # elif TOTAL == 4 and flashlight_done == 1:
        #         os.system("adb shell input tap 540 960")
        #         flashlight_done = 0

        # draw the total number of blinks on the frame along with
        # the computed eye aspect ratio for the frame
        cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        # show the left eye
        cv2.imshow("Left Eye", left_eye_image_resized)
        # cv2.imwrite("1.jpg",left_eye_image)
        # cv2.imwrite("2.jpg",equ_left)
        # show the right eye
        cv2.imshow("Right Eye", right_eye_image_resized)
        # show the thresholded left
        equ_left_resized = imutils.resize(equ_left, width=200)
        cv2.imshow("Equalized Left Eye", equ_left_resized)
        # show the thresholded right eye
        equ_right_resized = imutils.resize(equ_right, width=200)
        cv2.imshow("Equalized Right Eye", equ_right_resized)
Example #21
        # draw the bounding box
        (startx, starty, endx, endy) = bbox
        cv2.rectangle(clone, (startx, starty), (endx, endy), (0, 0, 255), 2)

    # show the results before applying non-max suppression
    cv2.imshow("Before Non-Max", clone)
    cv2.waitKey(0)

    # extract the bounding boxes and class probabilities and apply non-max suppression
    boxes = np.array([p[0] for p in labels[label]])
    probs = np.array([p[1] for p in labels[label]])
    boxes = non_max_suppression(boxes, probs)

    # clone the original image again and loop over all the remaining boxes
    clone = orig.copy()
    for (startx, starty, endx, endy) in boxes:
        # draw the bounding box
        cv2.rectangle(clone, (startx, starty), (endx, endy), (0, 0, 255), 2)

        # write the class label
        y = starty - 10 if starty - 10 > 10 else starty + 10
        cv2.putText(clone, label, (startx, y), cv2.FONT_HERSHEY_SIMPLEX, 0.45,
                    (0, 255, 0), 1)

    # show the results after non-max suppression
    cv2.imshow("After Non-Max", clone)
    cv2.waitKey(0)

# close all the windows
cv2.destroyAllWindows()
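
non_max_suppression here is most likely the Malisiewicz-style helper shipped with imutils (an assumption based on the call shape, which takes boxes plus per-box probabilities):

from imutils.object_detection import non_max_suppression
picked = non_max_suppression(boxes, probs)  # boxes as (startX, startY, endX, endY) rows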
Example #22
def boxDetection2411(rgbaImg, width, height):
    """
    Takes Image frame along with width and height and returns positions of cube with respect to reference frame
    """
    rgba = bytes(rgbaImg)
    # Make a new image object from the bytes
    img = Image.frombytes('RGBA', (width, height), rgba)
    opencv_img = np.array(img)

    # Converting RGBA image to RGB(dropping alpha channel)
    rgbImage = cv2.cvtColor(opencv_img, cv2.COLOR_RGBA2RGB)
    # Converting RGB image to HSV(For Color detection)
    hsvFrame = cv2.cvtColor(rgbImage, cv2.COLOR_RGB2HSV)
    # Converting RGB image to BGR
    imageFrame = cv2.cvtColor(rgbImage, cv2.COLOR_RGB2BGR)

    # Set range for red color and define mask
    red_lower = np.array([0, 70, 50], np.uint8)
    red_upper = np.array([10, 255, 255], np.uint8)
    red_mask = cv2.inRange(hsvFrame, red_lower, red_upper)

    # Set range for green color and define mask
    green_lower = np.array([40, 52, 72], np.uint8)
    green_upper = np.array([70, 255, 255], np.uint8)
    green_mask = cv2.inRange(hsvFrame, green_lower, green_upper)

    # Set range for blue color and define mask
    blue_lower = np.array([110, 80, 2], np.uint8)
    blue_upper = np.array([120, 255, 255], np.uint8)
    blue_mask = cv2.inRange(hsvFrame, blue_lower, blue_upper)

    # Set range for orange color and define mask
    orange_lower = np.array([10, 70, 50], np.uint8)
    orange_upper = np.array([20, 255, 255], np.uint8)
    orange_mask = cv2.inRange(hsvFrame, orange_lower, orange_upper)

    # Set range for yellow color and define mask
    yellow_lower = np.array([30, 200, 200], np.uint8)
    yellow_upper = np.array([40, 255, 255], np.uint8)
    yellow_mask = cv2.inRange(hsvFrame, yellow_lower, yellow_upper)

    # Set range for purple color and define mask
    purple_lower = np.array([150, 230, 200], np.uint8)
    purple_upper = np.array([220, 255, 255], np.uint8)
    purple_mask = cv2.inRange(hsvFrame, purple_lower, purple_upper)

    # initializing kernel used to dilate the masks
    kernel = np.ones((5, 5), "uint8")

    # For red color
    red_mask = cv2.dilate(red_mask, kernel)
    res_red = cv2.bitwise_and(imageFrame, imageFrame, mask=red_mask)

    # For green color
    green_mask = cv2.dilate(green_mask, kernel)
    res_green = cv2.bitwise_and(imageFrame, imageFrame, mask=green_mask)

    # For blue color
    blue_mask = cv2.dilate(blue_mask, kernel)
    res_blue = cv2.bitwise_and(imageFrame, imageFrame, mask=blue_mask)

    # For orange color
    orange_mask = cv2.dilate(orange_mask, kernel)
    res_orange = cv2.bitwise_and(imageFrame, imageFrame, mask=orange_mask)

    # For yellow color
    yellow_mask = cv2.dilate(yellow_mask, kernel)
    res_yellow = cv2.bitwise_and(imageFrame, imageFrame, mask=yellow_mask)

    # For purple color
    purple_mask = cv2.dilate(purple_mask, kernel)
    res_purple = cv2.bitwise_and(imageFrame, imageFrame, mask=purple_mask)

    positionsCube = []
    positionsPatch = []
    a = 120
    # Creating contour to track red color
    contours, hierarchy = cv2.findContours(red_mask, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    for pic, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if (area < 300):
            x, y, w, h = cv2.boundingRect(contour)
            positionsCube.append([(y + h / 2 - 1500) / a,
                                  (x + w / 2 - 1500) / a])
            cv2.putText(imageFrame, "Reality Stone", (x, y),
                        cv2.FONT_HERSHEY_TRIPLEX, 2, (0, 0, 200))
    for pic, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if (area < 5000 and area > 300):
            x, y, w, h = cv2.boundingRect(contour)
            positionsPatch.append([(y + h / 2 - 1500) / a,
                                   (x + w / 2 - 1500) / a])
            cv2.putText(imageFrame, "Reality Stone Holder", (x, y),
                        cv2.FONT_HERSHEY_TRIPLEX, 2, (0, 0, 200))

    # Creating contour to track green color
    contours, hierarchy = cv2.findContours(green_mask, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    for pic, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if (area < 300):
            x, y, w, h = cv2.boundingRect(contour)
            positionsCube.append([(y + h / 2 - 1500) / a,
                                  (x + w / 2 - 1500) / a])
            cv2.putText(imageFrame, "Time Stone", (x, y),
                        cv2.FONT_HERSHEY_TRIPLEX, 2, (0, 130, 18))
        elif (area < 1000):
            x, y, w, h = cv2.boundingRect(contour)
            positionsPatch.append([(y + h / 2 - 1500) / a,
                                   (x + w / 2 - 1500) / a])
            cv2.putText(imageFrame, "Time Stone Holder", (x, y),
                        cv2.FONT_HERSHEY_TRIPLEX, 2, (0, 130, 18))

    # Creating contour to track blue color
    contours, hierarchy = cv2.findContours(blue_mask, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    for pic, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if (area < 300):
            x, y, w, h = cv2.boundingRect(contour)
            positionsCube.append([(y + h / 2 - 1500) / a,
                                  (x + w / 2 - 1500) / a])
            cv2.putText(imageFrame, "Space Stone", (x, y),
                        cv2.FONT_HERSHEY_TRIPLEX, 2, (180, 0, 0))
        elif (area < 5000):
            x, y, w, h = cv2.boundingRect(contour)
            positionsPatch.append([(y + h / 2 - 1500) / a,
                                   (x + w / 2 - 1500) / a])
            cv2.putText(imageFrame, "Space Stone Holder", (x, y),
                        cv2.FONT_HERSHEY_TRIPLEX, 2, (180, 0, 0))

    # Creating contour to track orange color
    contours, hierarchy = cv2.findContours(orange_mask, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    for pic, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if (area < 300):
            x, y, w, h = cv2.boundingRect(contour)
            positionsCube.append([(y + h / 2 - 1500) / a,
                                  (x + w / 2 - 1500) / a])
            cv2.putText(imageFrame, "Soul Stone", (x, y),
                        cv2.FONT_HERSHEY_TRIPLEX, 2, (0, 106, 225))
        elif (area < 5000):
            x, y, w, h = cv2.boundingRect(contour)
            positionsPatch.append([(y + h / 2 - 1500) / a,
                                   (x + w / 2 - 1500) / a])
            cv2.putText(imageFrame, "Soul Stone Holder", (x, y),
                        cv2.FONT_HERSHEY_TRIPLEX, 2, (0, 106, 225))

    # Creating contour to track yellow color
    contours, hierarchy = cv2.findContours(yellow_mask, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    for pic, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if (area < 300):
            x, y, w, h = cv2.boundingRect(contour)
            positionsCube.append([(y + h / 2 - 1500) / a,
                                  (x + w / 2 - 1500) / a])
            cv2.putText(imageFrame, "Mind Stone", (x, y),
                        cv2.FONT_HERSHEY_TRIPLEX, 2, (0, 200, 130))
        elif (area < 2000):
            x, y, w, h = cv2.boundingRect(contour)
            positionsPatch.append([(y + h / 2 - 1500) / a,
                                   (x + w / 2 - 1500) / a])
            cv2.putText(imageFrame, "Mind Stone Holder", (x, y),
                        cv2.FONT_HERSHEY_TRIPLEX, 2, (0, 200, 130))

    # Creating contour to track purple color
    contours, hierarchy = cv2.findContours(purple_mask, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    for pic, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if (area < 300):
            x, y, w, h = cv2.boundingRect(contour)
            positionsCube.append([(y + h / 2 - 1500) / a,
                                  (x + w / 2 - 1500) / a])
            cv2.putText(imageFrame, "Power Stone", (x, y),
                        cv2.FONT_HERSHEY_TRIPLEX, 2, (255, 0, 225))
        elif (area < 2000):
            x, y, w, h = cv2.boundingRect(contour)
            positionsPatch.append([(y + h / 2 - 1500) / a,
                                   (x + w / 2 - 1500) / a])
            cv2.putText(imageFrame, "Power Stone Holder", (x, y),
                        cv2.FONT_HERSHEY_TRIPLEX, 2, (255, 0, 225))

    return imageFrame, positionsCube, positionsPatch
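
The six per-colour blocks above are identical except for the mask, label, and text colour. A sketch of how they could collapse into one helper, pasted inside boxDetection2411 so it closes over imageFrame, positionsCube, positionsPatch, and a (names are suggestions, not from the original):

    def annotate_colour(mask, label, text_colour, cube_max=300, patch_max=5000):
        contours, _ = cv2.findContours(mask, cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)
        for contour in contours:
            area = cv2.contourArea(contour)
            x, y, w, h = cv2.boundingRect(contour)
            pos = [(y + h / 2 - 1500) / a, (x + w / 2 - 1500) / a]
            if area < cube_max:
                positionsCube.append(pos)
                cv2.putText(imageFrame, label, (x, y),
                            cv2.FONT_HERSHEY_TRIPLEX, 2, text_colour)
            elif area < patch_max:
                positionsPatch.append(pos)
                cv2.putText(imageFrame, label + " Holder", (x, y),
                            cv2.FONT_HERSHEY_TRIPLEX, 2, text_colour)

    annotate_colour(red_mask, "Reality Stone", (0, 0, 200))
    annotate_colour(green_mask, "Time Stone", (0, 130, 18), patch_max=1000)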
Example #23
    for faceBox in faceBoxes:
        face = frame[max(0, faceBox[1] - padding):
                     min(faceBox[3] + padding, frame.shape[0] - 1),
                     max(0, faceBox[0] - padding):
                     min(faceBox[2] + padding, frame.shape[1] - 1)]
        # Blob function facilitates image processing for deep learning classification
        blob = cv2.dnn.blobFromImage(face,
                                     1.0, (227, 227),
                                     MODEL_MEAN_VALUES,
                                     swapRB=False)

        # Set the input to our pre-trained gender network
        genderNet.setInput(blob)
        genderPreds = genderNet.forward()
        gender = genderList[genderPreds[0].argmax()]
        print(genderPreds[0])
        print("Gender: " + gender)

        # Set the input to our pre-trained age network
        ageNet.setInput(blob)
        agePreds = ageNet.forward()
        age = ageList[agePreds[0].argmax()]
        print("Age: " + age[1:-1])

        cv2.putText(resultImg, f'{gender}, {age}',
                    (faceBox[0], faceBox[1] - 10), cv2.FONT_HERSHEY_SIMPLEX,
                    0.8, (0, 255, 255), 2, cv2.LINE_AA)
        cv2.imshow("Gender & Age Detector", resultImg)
Example #24
def draw_rectangles_show_points_show_buttons_reset_counters(
    rgb_colours_list_,
    champions_list_for_ocr_,
    origin_champs_counters_to_buy_,
    reader_,
    champions_list_,
    tk_window,
    origin_champs_counters_,
    df_,
    origin_list_,
    origin_counters_,
    class_list_,
    class_counters_,
    mode="points",
    CARDS_TO_BUY_AMOUNT_=CARDS_TO_BUY_AMOUNT,
    LINE_TYPE_=LINE_TYPE,
    MARKER_TYPE_=MARKER_TYPE,
):
    """
    This function is making OCR detection on champion cards, and then draws by
    input mode like default points on screenshot.

    Parameters
    ----------
    rgb_colours_list_ : ["worst", "medium3", "medium2", "medium1", "best"]. list of RGB tuples.
    The default is rgb_colours_list.
    mode :  The default is "points". Also there are cross and rectangle.

    Returns
    -------
    None.

    """
    logging.debug(
        "Function draw_rectangles_show_points_show_buttons_reset_counters() called"
    )
    reset_counters_in_list(origin_champs_counters_to_buy_)
    (
        list_of_champs_to_buy_this_turn,
        index_list,
    ) = update_champions_to_buy_from_ocr_detection(
        champions_list_for_ocr_, origin_champs_counters_to_buy_, reader_)

    champions_to_buy_in_order_as_in_screen = list_of_champs_to_buy_this_turn
    champions_to_buy_points_and_position = show_nonzero_counters_with_points_from_ocr(
        tk_window,
        origin_champs_counters_,
        origin_champs_counters_to_buy_,
        champions_list_,
        df_,
        index_list,
        origin_list_,
        origin_counters_,
        class_list_,
        class_counters_,
    )

    champions_position_to_buy_ordered_by_screen = [
        champions_list_for_ocr_.index(i)
        for i in champions_to_buy_in_order_as_in_screen
    ]
    logging.info(
        "champions_position_to_buy_ordered_by_screen: %s",
        champions_position_to_buy_ordered_by_screen,
    )

    champions_to_buy_points = list(
        zip(*champions_to_buy_points_and_position))[0]
    champions_to_buy_position = list(
        zip(*champions_to_buy_points_and_position))[1]
    logging.info("Points (in alphabetical by champ name order?): %s",
                 champions_to_buy_points)
    logging.info(
        "Champions position (in alphabetical by champ name order?): %s",
        champions_to_buy_position,
    )
    sorted_champions_to_buy_points_and_position = sorted(
        champions_to_buy_points_and_position)
    logging.info(
        "Points and Champions position (in alphabetical by champ name order?): %s",
        sorted_champions_to_buy_points_and_position,
    )
    sorted_champions_to_buy_position = list(
        zip(*sorted_champions_to_buy_points_and_position))[1]
    logging.info(
        "sorted_champions_to_buy_position in alphabetical order?: %s",
        sorted_champions_to_buy_position,
    )
    values_by_points_indexes_order_by_position_on_screen = [
        sorted_champions_to_buy_position.index(i)
        for i in champions_position_to_buy_ordered_by_screen
    ]
    logging.info(
        "values_by_points_indexes_order_by_position_on_screen 0 worst 4 best card: %s",
        values_by_points_indexes_order_by_position_on_screen,
    )
    cards_rectangles = build_list_of_champion_cards_rectangles()
    screenshot = make_cropped_ss()[1]

    # at the end
    # values_by_points_indexes_order_by_position_on_screen contains champions
    # sorted by points from lowest(0) to highest(4)
    # and indexes represents champion placement on the screen

    if mode == "rectangle":
        for i in range(0, CARDS_TO_BUY_AMOUNT_):
            cv.rectangle(
                screenshot,
                cards_rectangles[i][0],
                cards_rectangles[i][1],
                color=rgb_colours_list_[
                    values_by_points_indexes_order_by_position_on_screen[i]],
                lineType=LINE_TYPE_,
                thickness=2,
            )
        cv.imshow("draw_rectangles_show_points_show_buttons_reset_counters()",
                  screenshot)
    elif mode == "cross":
        for i in range(0, CARDS_TO_BUY_AMOUNT_):
            # Draw the center point
            cv.drawMarker(
                screenshot,
                cards_rectangles[i][2],
                color=rgb_colours_list_[
                    values_by_points_indexes_order_by_position_on_screen[i]],
                markerType=MARKER_TYPE_,
                markerSize=40,
                thickness=2,
            )
        cv.imshow("draw_rectangles_show_points_show_buttons_reset_counters()",
                  screenshot)
    elif mode == "points":
        for i in range(0, CARDS_TO_BUY_AMOUNT_):
            # Draw the center point
            cv.putText(
                screenshot,
                "{:.3f}".format(sorted_champions_to_buy_points_and_position[
                    values_by_points_indexes_order_by_position_on_screen[i]]
                                [0]),
                cards_rectangles[i][2],
                cv.FONT_HERSHEY_SIMPLEX,
                0.6,
                rgb_colours_list_[
                    values_by_points_indexes_order_by_position_on_screen[i]],
                2,
            )
        cv.imshow("draw_rectangles_show_points_show_buttons_reset_counters()",
                  screenshot)

    logging.debug(
        "Function draw_rectangles_show_points_show_buttons_reset_counters() end"
    )
Example #25
            # Each location contains positions in order: top, right, bottom, left
            top_left = (face_location[3], face_location[0])
            bottom_right = (face_location[1], face_location[2])

            # Get color by name using our fancy function
            color = [0, 255, 0]

            # Paint frame
            cv2.rectangle(image, top_left, bottom_right, color,
                          FRAME_THICKNESS)

            # Now we need a smaller, filled frame below for the name
            # This time we use bottom in both corners - starting at the bottom and extending 22 pixels down
            top_left = (face_location[3], face_location[2])
            bottom_right = (face_location[1], face_location[2] + 22)

            # Paint frame
            cv2.rectangle(image, top_left, bottom_right, color, cv2.FILLED)

            # Write a name
            cv2.putText(image, match,
                        (face_location[3] + 10, face_location[2] + 15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (200, 200, 200),
                        FONT_THICKNESS)

    # Show image
    cv2.imshow(filename, image)

    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
Example #26
import numpy as np
import cv2

# Create a black image
img = np.zeros((512, 512, 3), np.uint8)

img = cv2.line(img, (0, 0), (511, 511), (0, 0, 255), 1, cv2.LINE_AA)

img = cv2.rectangle(img, (384, 0), (510, 128), (0, 255, 0), 3)

img = cv2.circle(img, (447, 63), 63, (0, 0, 255), -1)

img = cv2.ellipse(img, (256, 256), (100, 50), 0, 0, 180, 255, -1)

pts = np.array([[10, 5], [20, 30], [70, 20], [50, 10]], np.int32)
pts = pts.reshape((-1, 1, 2))
img = cv2.polylines(img, [pts], True, (0, 255, 255))

font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, 'OpenCV', (10, 500), font, 4, (255, 255, 255), 2, cv2.LINE_AA)

# using cv show
cv2.imshow('hello', img)
# hold window
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #27
import cv2
from pyzbar import pyzbar
vid = cv2.VideoCapture(0)

while True:
    ret, frame = vid.read()

    barcodes = pyzbar.decode(frame)
    for barcode in barcodes:
        barcodeData = barcode.data.decode("utf-8")
        barcodeType = barcode.type
        text = "{} {}".format(barcodeData, barcodeType)
        (x,y,w,h) = barcode.rect
        cv2.rectangle(frame, (x,y), (x+w, y+h), (0, 0, 255), 3)
        cv2.putText(frame, text, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,255,0), 2)


    cv2.imshow('frame', frame)

    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
    

vid.release()
cv2.destroyAllWindows()
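
As written, a barcode that stays in view is decoded and drawn on every frame. Tracking payloads already seen avoids repeating side effects such as logging (a sketch, not part of the original):

found = set()
while True:
    ret, frame = vid.read()
    for barcode in pyzbar.decode(frame):
        data = barcode.data.decode("utf-8")
        if data not in found:
            found.add(data)
            print("new barcode:", data, barcode.type)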
Example #28
    return crop_face


cap = cv2.VideoCapture(0)
count = 0

while True:
    ret, frame = cap.read()
    face = face_extrator(frame)  # call once instead of twice per frame
    if face is not None:
        count += 1
        face = cv2.resize(face, (200, 200))
        face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)

        file_name_path = 'face_samples\\user'+str(count)+'.jpg'
        cv2.imwrite(file_name_path,face)

        cv2.putText(face,str(count),(50,50),cv2.FONT_HERSHEY_COMPLEX,1,(0,255,45),2)

        cv2.imshow('face cropped',face)
    else:
        print('face not found')
        pass

    if cv2.waitKey(1) == 13 or count == 100:  # 13 is the Enter key
        break

cap.release()
cv2.destroyAllWindows()
print('collecting samples complete')

Example #29

#life video
def changeRes(width, height):
    capture.set(3, width)
    capture.set(4, height)


#Reading Videos
capture = cv.VideoCapture(0)

while True:
    isTrue, frame = capture.read()
    cv.putText(frame,
               'Hello World', (0, 400),
               cv.FONT_HERSHEY_TRIPLEX,
               1.0, (255, 255, 255),
               thickness=1)

    frameResized = rescaleFrame(frame, scale=0.5)  # rescaleFrame defined earlier, not shown in this fragment

    cv.imshow('Video', frame)
    cv.imshow('VideoResized', frameResized)

    #Let's find the contour
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    blurred = cv.GaussianBlur(gray, (3, 3), cv.BORDER_DEFAULT)
    cv.imshow('blurred', blurred)
    canny = cv.Canny(blurred, 50, 150)
    cv.imshow('canny', canny)

    # without a waitKey the imshow windows never refresh; 'q' quits
    if cv.waitKey(20) & 0xFF == ord('q'):
        break

capture.release()
cv.destroyAllWindows()
Example #30
            p2[i][j] = x + r * np.cos(separation * i * np.pi / 180)
            p_text[i][j] = x - text_offset_x + 1.2 * r * np.cos(
                (separation) * (i + 9) * np.pi / 180
            )  # point for text labels, i+9 rotates the labels by 90 degrees
        else:
            p2[i][j] = y + r * np.sin(separation * i * np.pi / 180)
            p_text[i][j] = y + text_offset_y + 1.2 * r * np.sin(
                (separation) * (i + 9) * np.pi / 180
            )  # point for text labels, i+9 rotates the labels by 90 degrees

# add the lines and labels to the image
for i in range(0, interval):
    cv2.line(output3, (int(p1[i][0]), int(p1[i][1])),
             (int(p2[i][0]), int(p2[i][1])), (0, 255, 0), 2)
    cv2.putText(output3, '%s' % (int(i * separation)),
                (int(p_text[i][0]), int(p_text[i][1])),
                cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0), 1, cv2.LINE_AA)

cv2.imshow('output', output3)
cv2.waitKey(0)
cv2.destroyAllWindows()
# cv2.imwrite('gauge-%s-calibration.%s' % (gauge_number, file_type), img)

separation = 10  #in degrees
interval = int(360 / separation)
p3 = np.zeros((interval, 2))  #set empty arrays
p4 = np.zeros((interval, 2))

for i in range(0, interval):
    for j in range(0, 2):
        if (j % 2 == 0):