img = cv2.imread("img/josue.jfif")
# Instead of a still image, we could read from a video source (left commented out below)
# webcam = cv2.VideoCapture(0)  # 0 opens the default webcam; pass a file name such as "video.mp4" to read a video file

# Iterate forever over the frames
# while True:
#     # Read the current frame
#     successful_frame_read, frame = webcam.read()  # True if a frame was read; with a live webcam this is almost always True
#
#     # must convert to grayscale
#     grayscaled_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#     cv2.imshow("Clever Programmer Face Detector", grayscaled_frame)
#     cv2.waitKey(1)  # a 1 ms delay keeps the video playing instead of pausing on every frame
# Must convert the image to grayscale (the detector works on a single-channel image):
grayscaled_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Detect faces
face_coordinates = trained_face_data.detectMultiScale(grayscaled_img)
# Draw rectangles around the faces (randrange comes from Python's random module)
for (x, y, w, h) in face_coordinates:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, randrange(128, 256), 0), 5)
# print the coordinates
# print(face_coordinates)

# Display the image with the detected faces
cv2.imshow("Clever Programmer Face Detector", img)
# Wait until a key is pressed
cv2.waitKey()
print("Code Complete!")

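# If you do switch to the webcam, a minimal working loop (a sketch of the
# commented-out idea above, with a 1 ms waitKey delay and a 'q' exit key):
# webcam = cv2.VideoCapture(0)
# while True:
#     successful_frame_read, frame = webcam.read()
#     if not successful_frame_read:
#         break
#     grayscaled_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#     cv2.imshow("Clever Programmer Face Detector", grayscaled_frame)
#     if cv2.waitKey(1) == ord('q'):
#         break
# webcam.release()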
Example #2
        # compute both the starting and ending (x, y)-coordinates for
        # the text prediction bounding box
        endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))
        endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))
        startX = int(endX - w)
        startY = int(endY - h)

        # add the bounding box coordinates and probability score to
        # our respective lists
        rects.append((startX, startY, endX, endY))
        confidences.append(scoresData[x])

# apply non-maxima suppression (imutils' non_max_suppression) to collapse
# overlapping detections, keeping the highest-probability boxes
boxes = non_max_suppression(np.array(rects), probs=confidences)

# loop over the bounding boxes
for (startX, startY, endX, endY) in boxes:
    # scale the bounding box coordinates based on the respective
    # ratios
    startX = int(startX * rW)
    startY = int(startY * rH)
    endX = int(endX * rW)
    endY = int(endY * rH)

    # draw the bounding box on the image
    cv2.rectangle(orig, (startX, startY), (endX, endY), (0, 255, 0), 2)

# show the output image
cv2.imshow("Text Detection", orig)
cv2.waitKey(0)
Example #3
    thresh_frame = cv2.threshold(delta_frame, 30, 255, cv2.THRESH_BINARY)[1]
    thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)

    # find the contours
    (cnts, _) = cv2.findContours(thresh_frame.copy(), cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)

    # check the contour area:
    # if the moving area is greater than 10000, draw a rectangle and set the status
    for contour in cnts:
        if cv2.contourArea(contour) < 10000:
            continue

        status = 1
        (x, y, w, h) = cv2.boundingRect(contour)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)

    # adding status of each frame to the list
    status_list.append(status)

    # status_list = [None, None, 0] frame 1 no >>>> movement
    #   [None, None, 0, 0] frame 2 >>>> no movement
    #   [None, None, 0, 0, 0] frame 3 >>>> no movement
    #   [None, None, 0, 0, 0, 1] frame 4 >>>> move detected <<<<< Start time
    #   [None, None, 0, 0, 0, 1, 1] frame 5 >>>> still moving
    #   [None, None, 0, 0, 0, 1, 1, 1] frame 6 >>>> still moving
    #   [None, None, 0, 0, 0, 1, 1, 1, 1] frame 7 >>>> still moving
    #   [None, None, 0, 0, 0, 1, 1, 1, 1, 0] frame 8 >>>> no movement <<<< End time
    #   [None, None, 0, 0, 0, 1, 1, 1, 1, 0, 0] frame 9 >>>> no movement
    #   [None, None, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0] frame 10 >>>> no movement
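
    # A sketch (not part of the original) of how the trace above becomes
    # timestamps, assuming a times list and a datetime import as in the usual
    # version of this motion-detector example:
    # if status_list[-1] == 1 and status_list[-2] == 0:
    #     times.append(datetime.now())  # movement started
    # if status_list[-1] == 0 and status_list[-2] == 1:
    #     times.append(datetime.now())  # movement ended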
Example #4
# imports needed by this snippet (assuming scikit-image and imutils are installed;
# older scikit-image versions exposed the same function as skimage.measure.compare_ssim)
import cv2
import imutils
from skimage.metrics import structural_similarity as compare_ssim

# convert the images to grayscale
grayA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)
grayB = cv2.cvtColor(imageB, cv2.COLOR_BGR2GRAY)
# compute the Structural Similarity Index (SSIM) between the two
# images, ensuring that the difference image is returned
(score, diff) = compare_ssim(grayA, grayB, full=True)
diff = (diff * 255).astype("uint8")
print("SSIM: {}".format(score))
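# (SSIM ranges from -1 to 1; a score of 1 means the two images are identical)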
# threshold the difference image, followed by finding contours to
# obtain the regions of the two input images that differ
thresh = cv2.threshold(diff, 0, 255,
                       cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
# loop over the contours
for c in cnts:
    # compute the bounding box of the contour and then draw the
    # bounding box on both input images to represent where the two
    # images differ
    (x, y, w, h) = cv2.boundingRect(c)
    if w < 50 or h < 50:
        continue
    cv2.rectangle(imageA, (x, y), (x + w, y + h), (0, 0, 255), 2)
    cv2.rectangle(imageB, (x, y), (x + w, y + h), (0, 0, 255), 2)
# show the output images
cv2.imshow("Original", imageA)
cv2.imshow("Modified", imageB)
cv2.imshow("Diff", diff)
cv2.imshow("Thresh", thresh)
cv2.waitKey(0)
Example #5
import pickle
import cv2


def load_dataset():
    dataset = pickle.loads(open("dataset/data/data.pickle", "rb").read())
    return dataset


dataset = load_dataset()
print(dataset[1])

image = cv2.imread("dataset/images/" + dataset[1]['image'], 0)

coord = dataset[1]['data']
print(coord)
cv2.rectangle(image, (coord['x1'], coord['y1']), (coord['x2'], coord['y2']),
              (255, 0, 0), 2)

cv2.imshow("Test", image)
cv2.waitKey(10000)
#print(image.shape)

# conv = net.Conv3x3(8)
# pool = net.MaxPool2()

# output = conv.forward(image)
# output = pool.forward(output)
# print(output.shape)
# network=net.Net()
# out, l, acc=network.forward(image,2)
Example #6
import cv2 as cv

cascade = cv.CascadeClassifier(
    "datas/haar_cascade_files/haarcascade_frontalface_default.xml")
img = cv.imread("datas/images/lena.png")
imgGray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
faces = cascade.detectMultiScale(imgGray, 1.1, 4)  # scaleFactor=1.1, minNeighbors=4
for (x, y, w, h) in faces:
    cv.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)

cv.imshow("Result", img)
cv.waitKey(0)
Example #7
def draw_bb(image: str, classes: list, destination: Path) -> None:
    img: np.ndarray = cv2.imread(image, cv2.IMREAD_COLOR)
    image_height, image_width, channels = img.shape

    font: int = cv2.FONT_HERSHEY_SIMPLEX
    font_scale: float = 1.0
    thickness: int = 3
    colors: list = [
        (255, 255, 0),
        (0, 255, 0),
        (255, 0, 255),
        (0, 255, 255),
        (0, 0, 255),
        (255, 255, 255),
    ]

    labelsName: list = classes

    detections_path: str = "{}.txt".format(str(Path(image).with_suffix("")))
    with open(detections_path, "r", encoding="utf-8") as detectionsReader:
        detections: list = detectionsReader.readlines()

        for detection in detections:
            labelId, x, y, w, h = map(float, detection.split(" ")[0:5])
            labelId: int = int(labelId)
            x *= image_width
            y *= image_height
            w *= image_width
            h *= image_height
            text_size: tuple = cv2.getTextSize(labelsName[labelId].rstrip(),
                                               font, font_scale, thickness)
            (text_width, text_height) = text_size[0]
            color: tuple = colors[labelId % len(colors)]
            cv2.rectangle(
                img,
                (int(x - (w / 2)), int(y - (h / 2))),
                (int(x + (w / 2)), int(y + (h / 2))),
                color,
                thickness,
            )

            if int(y - (h / 2) - text_height - 9) < 0:
                cv2.rectangle(
                    img,
                    (int(x - (w / 2) - 1), int(y - (h / 2))),
                    (int(x - (w / 2) + text_width),
                     int(y - (h / 2) + text_height + 9)),
                    color,
                    cv2.FILLED,
                )
                cv2.putText(
                    img,
                    labelsName[labelId].rstrip(),
                    (int(x - (w / 2)), int(y - (h / 2) + text_height + 3)),
                    font,
                    font_scale,
                    (0, 0, 0),
                )
            else:
                cv2.rectangle(
                    img,
                    (int(x - (w / 2) - 1), int(y - (h / 2) - text_height) - 9),
                    (int(x - (w / 2) + text_width), int(y - (h / 2))),
                    color,
                    cv2.FILLED,
                )
                cv2.putText(
                    img,
                    labelsName[labelId].rstrip(),
                    (int(x - (w / 2)), int(y - (h / 2) - 3)),
                    font,
                    font_scale,
                    (0, 0, 0),
                )

    cv2.imwrite(str(destination), img)
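

# Hypothetical usage (a sketch): "sample.jpg" must sit next to a YOLO-style
# "sample.txt" whose lines are "class_id x_center y_center width height",
# with coordinates normalized to [0, 1].
# draw_bb("sample.jpg", ["person", "car"], Path("out/sample_boxes.jpg"))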
Example #8
def scanImage():
    FaceRecognize = cv2.face.LBPHFaceRecognizer_create()
    FaceRecognize.read("AllData/TrainedData/DataTrained.yml")
    harcascadeFilePath = "AllData/haarcascade_frontalface_default.xml"
    faceCascade = cv2.CascadeClassifier(harcascadeFilePath)
    datafile = pd.read_csv("AllData/StudentDataRecord.csv")
    column_names = ['Id', 'Name', 'Date', 'Time']
    LoginStudent = pd.DataFrame(columns=column_names)

    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        messagebox.showerror(title="Error",
                             message="There is some problem with your camera")
        root.destroy()

    while True:
        success, img = cap.read()
        if not success:
            messagebox.showerror(
                title="Error",
                message=
                "You have to close video running on home page or select image checkbox"
            )
            root.destroy()
            break

        grayImg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        Id = 0
        Detectedfaces = faceCascade.detectMultiScale(grayImg, 1.2, 10)
        for (x, y, width, height) in Detectedfaces:
            cv2.rectangle(img, (x, y), (x + width, y + height), (225, 0, 0), 2)
            Id, confidence = FaceRecognize.predict(grayImg[y:y + height,
                                                           x:x + width])

            if (confidence <= 50):  # lower LBPH confidence means a closer match
                CurrentTime = time.time()
                CurrentDate = dt.datetime.fromtimestamp(CurrentTime).strftime(
                    '%Y-%m-%d')
                timeStamp = dt.datetime.fromtimestamp(CurrentTime).strftime(
                    '%H:%M:%S')
                name = datafile.loc[datafile['Id'] == Id]['Name'].values
                name = name[0] if len(name) else 'Unknown'  # take the matched name as a plain string
                key = str(Id) + "-" + name
                LoginStudent.loc[len(LoginStudent)] = [
                    Id, name, CurrentDate, timeStamp
                ]

            else:
                Id = 'Unknown'
                key = str(Id)

            cv2.putText(img, str(key), (x, y + height),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        LoginStudent = LoginStudent.drop_duplicates(subset=['Id'],
                                                    keep='first')

        cv2.imshow('Face Recognizing', img)

        if cv2.waitKey(1) & 0xFF == ord('q'):

            if Id != 'Unknown':
                my_conn = sqlite3.connect('SquareOne.db')
                # a parameterized query avoids formatting SQL by hand
                my_conn.execute('UPDATE IdName SET id = ?', (Id,))
                my_conn.commit()

                res = messagebox.askquestion(
                    title="Congratulation",
                    message="WELCOME TO SQUARE ONE \n Press Yes To Proceed")

                if res == "yes":
                    root.destroy()
                    Popen([sys.executable, "./mainpage.py"])
                else:
                    root.destroy()
                    Popen([sys.executable, "./gui.py"])

            break

    cap.release()
    cv2.destroyAllWindows()
Example #9
# Imports
import numpy as np
import cv2
import math

# Open Camera
capture = cv2.VideoCapture(0)

while capture.isOpened():

    # Capture frames from the camera
    ret, frame = capture.read()

    # Get hand data from the rectangle sub window
    cv2.rectangle(frame, (100, 100), (300, 300), (0, 255, 0), 0)
    crop_image = frame[100:300, 100:300]

    # Apply Gaussian blur
    blur = cv2.GaussianBlur(crop_image, (3, 3), 0)

    # Change color-space from BGR -> HSV
    hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)

    # Create a binary mask where skin-colored pixels are white and everything else is black
    mask2 = cv2.inRange(hsv, np.array([2, 0, 0]), np.array([20, 255, 255]))

    # Kernel for morphological transformation
    kernel = np.ones((5, 5))

    # Apply morphological transformations to filter out the background noise
    dilation = cv2.dilate(mask2, kernel, iterations=1)
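
    # The snippet ends here; a typical continuation (a sketch, not part of
    # the original) would clean the mask further and pick the largest skin
    # contour as the hand:
    # erosion = cv2.erode(dilation, kernel, iterations=1)
    # contours, _ = cv2.findContours(erosion, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # hand = max(contours, key=cv2.contourArea)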
Example #10
def click_event(event, x, y, flags, param):
    # Mouse click for left button
    global mouseX
    global mouseY
    global global_click_event

    global_click_event = event
    mouseX = x
    mouseY = y
    if event == cv2.EVENT_LBUTTONDOWN and image_selection.tagged_as == '' \
        and mouse_click.enable_draw_on_grid == True:
        mouse_click.last_mouse_button_clicked.append('left')
        # Gets row and column number on left mouse click
        col_number = x / grid.cell_width
        x1 = math.trunc(col_number)
        row_number = y / grid.cell_height
        y1 = math.trunc(row_number)

        # Get cell number based on coordinates x1, y1
        def coordinate_to_cell(x, y):
            # https://stackoverflow.com/questions/9816024/coordinates-to-grid-box-number
            cell_number = int(x + (y * grid.number_of_columns))

            # Temporary list for drawing rectangles
            image_selection.cell_numbers_selection_temporary.append(cell_number)
            image_selection.cell_numbers_selection_for_drawing_text.append(cell_number)
            create_text_file.cell_numbers_list_for_each_grid.append(cell_number)
            return cell_number
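
        # Worked example, assuming a 4-column grid: (x, y) = (2, 1) gives
        # cell_number = 2 + 1 * 4 = 6; draw_rectangles below inverts this
        # with x = 6 % 4 = 2 and y = 6 // 4 = 1.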

        # For use in drawing rectangle function.
        cell = coordinate_to_cell(x1, y1)

        # Making copy of image for undo function.
        image_selection.new_image = param[0].copy()
        image_selection.image_list.append(image_selection.new_image)

        # Uses the cell number converts to coordinates and draws rectangles
        def draw_rectangles(cell_number):
            # !SO 8669189/converting-numbers-within-grid-to-their-corresponding-x-y-coordinates
            x2 = cell_number % grid.number_of_columns
            y2 = cell_number // grid.number_of_columns
            cell_x_position = x2 * grid.cell_width
            cell_y_position = y2 * grid.cell_height
            draw_to_x = cell_x_position + grid.cell_width
            draw_to_y = cell_y_position + grid.cell_height

            cv2.rectangle(param[0], (int(cell_x_position), int(cell_y_position)), (int(draw_to_x), int(draw_to_y)),
                          (152,251,152), 2)
            cv2.imshow('image_selector_from_video', param[0])

        # draw rectangles between two grid images
        def draw_rectangles_span():
            # Draws rectangle in the first cell
            if len(image_selection.cell_numbers_selection_temporary) == 1:
                draw_rectangles(cell)
                image_selection.drawn_one_cell_or_span.append('one')

            if len(image_selection.cell_numbers_selection_temporary) >= 2:
                between_backwards = list(
                    range(int(image_selection.cell_numbers_selection_temporary[-1]), \
                        int(image_selection.cell_numbers_selection_temporary[-2] + 1)))  # backwards
                between_forwards = list(
                    range(int(image_selection.cell_numbers_selection_temporary[-2]), \
                        int(image_selection.cell_numbers_selection_temporary[-1] + 1)))  # forwards

                # Draw rectangles backwards
                if image_selection.cell_numbers_selection_temporary[-1] < \
                     image_selection.cell_numbers_selection_temporary[-2]:
                    for next_cell1 in between_backwards:
                        draw_rectangles(next_cell1)

                # Draw rectangles forwards
                if image_selection.cell_numbers_selection_temporary[-1] > \
                     image_selection.cell_numbers_selection_temporary[-2]:
                    for next_cell2 in between_forwards:
                        draw_rectangles(next_cell2)

                # Clear temporary list so another two squares can be selected
                image_selection.cell_numbers_selection_temporary.clear()

                image_selection.drawn_one_cell_or_span.append('span')

                if image_selection.tagged_as == '':
                    # Window_open = True
                    image_selection.tagged_as = 'window open'
                    image_selection.tagged_as = easygui.enterbox("What is tagged in the images?")
                    # replace
                    if image_selection.tagged_as is not None:
                        image_selection.tagged_as = image_selection.tagged_as.replace(" ","")
                        draw_label_on_selection_span_of_images()

                if image_selection.tagged_as != 'window open':
                    if image_selection.tagged_as is None:
                        image_selection.tagged_as = ''
                    image_selection.image_list_temporary.append(image_selection.tagged_as)

                    image_selection.tagged_as = ''

        def draw_label(cell_number):
            # !SO 8669189/converting-numbers-within-grid-to-their-corresponding-x-y-coordinates
            x2 = cell_number % grid.number_of_columns
            y2 = cell_number // grid.number_of_columns
            cell_x_position = x2 * grid.cell_width
            cell_y_position = y2 * grid.cell_height
            cv2.putText(img=param[0], text=image_selection.tagged_as, org=(cell_x_position+5, cell_y_position+20),
                        fontFace=cv2.FONT_ITALIC, fontScale=0.5, color=(255,105,180),
                        thickness=1, lineType=cv2.LINE_AA)
            cv2.imshow('image_selector_from_video', param[0])

        def draw_label_on_selection_span_of_images():
            if len(image_selection.cell_numbers_selection_for_drawing_text) >= 2:
                between_backwards = list(
                    range(int(image_selection.cell_numbers_selection_for_drawing_text[-1]),
                          int(image_selection.cell_numbers_selection_for_drawing_text[-2] + 1)))  # backwards
                between_forwards = list(
                    range(int(image_selection.cell_numbers_selection_for_drawing_text[-2]),
                          int(image_selection.cell_numbers_selection_for_drawing_text[-1] + 1)))  # forwards

                # Draw rectangles backwards
                if image_selection.cell_numbers_selection_for_drawing_text[-1] < \
                     image_selection.cell_numbers_selection_for_drawing_text[
                    -2]:
                    for backward_cell in between_backwards:
                        draw_label(backward_cell)

                # Draw rectangles forwards
                if image_selection.cell_numbers_selection_for_drawing_text[-1] > \
                     image_selection.cell_numbers_selection_for_drawing_text[
                    -2]:
                    for forward_cell in between_forwards:
                        draw_label(forward_cell)

        draw_rectangles_span()

    elif event == cv2.EVENT_MBUTTONUP and image_selection.tagged_as == '' \
        and mouse_click.enable_draw_on_grid == True and len(
            mouse_click.last_mouse_button_clicked) > 0 and \
                 mouse_click.last_mouse_button_clicked[-1] == 'left':


        # Allows undo if one image selected.
        if len(image_selection.image_list) >= 1 and image_selection.drawn_one_cell_or_span[-1] == 'one':
            param[0] = image_selection.image_list[-1]
            cv2.imshow('image_selector_from_video', image_selection.image_list[-1])
            cv2.waitKey(1)
            image_selection.image_list.pop()
            create_text_file.cell_numbers_list_for_each_grid.pop()
            mouse_click.last_mouse_button_clicked.pop()
            image_selection.drawn_one_cell_or_span.pop()
            image_selection.cell_numbers_selection_temporary.clear()

        # Allows undo if span of images selected.
        elif len(image_selection.image_list) >= 2 and image_selection.drawn_one_cell_or_span[-1] == 'span':
            param[0] = image_selection.image_list[-2]
            cv2.imshow('image_selector_from_video', image_selection.image_list[-2])
            cv2.waitKey(1)

            # Popping lists to undo selection span of images
            image_selection.image_list.pop()
            image_selection.image_list.pop()
            mouse_click.last_mouse_button_clicked.pop()
            mouse_click.last_mouse_button_clicked.pop()
            image_selection.drawn_one_cell_or_span.pop()
            image_selection.drawn_one_cell_or_span.pop()
            create_text_file.cell_numbers_list_for_each_grid.pop()
            create_text_file.cell_numbers_list_for_each_grid.pop()

            if len(image_selection.image_list_temporary) > 0:
                image_selection.image_list_temporary.pop()

    elif event == cv2.EVENT_MBUTTONUP and mouse_click.enable_draw_on_grid == True and \
        len(mouse_click.last_mouse_button_clicked) > 0 and \
             mouse_click.last_mouse_button_clicked[-1] == 'right':
        if len(image_selection.image_list) >= 1 and mouse_click.last_mouse_button_clicked[-1] == 'right':
            param[0] = image_selection.image_list[-1]
            cv2.imshow('image_selector_from_video', image_selection.image_list[-1])
            cv2.waitKey(1)

            image_selection.image_list.pop()
            mouse_click.last_mouse_button_clicked.pop()
            # Allows for the potential to have multiple boundary boxes in same cell
            if len(bounding_box.temp_dict_and_cell_number_bboxes[bounding_box. \
                temp_list_cells_with_bboxes[-1]]) > 4:

                # Removing four coordinates if more than one boundary box in cell.
                for i in range(4):
                    del bounding_box.temp_dict_and_cell_number_bboxes[bounding_box. \
                        temp_list_cells_with_bboxes[-1]][-1]

            # Removes boundary boxes if in a single cell
            elif len(bounding_box.temp_dict_and_cell_number_bboxes[bounding_box. \
                temp_list_cells_with_bboxes[-1]]) == 4:
                bounding_box.temp_dict_and_cell_number_bboxes.pop(bounding_box. \
                    temp_list_cells_with_bboxes[-1])


            # Removes cell number from temporary list
            bounding_box.temp_list_cells_with_bboxes.pop()

    # Allow bounding boxes to be placed over images
    elif global_click_event == cv2.EVENT_RBUTTONDOWN and len(image_selection.drawn_one_cell_or_span) > 0 and \
         image_selection.drawn_one_cell_or_span[-1] == 'span':

        def get_bounding_box_start_coordinates(x, y):
            x_start_boundary = x
            y_start_boundary = y
            # Calculates starting cell number
            col_number = x / grid.cell_width
            x1 = math.trunc(col_number)
            row_number = y / grid.cell_height
            y1 = math.trunc(row_number)
            cell_number_on_start_of_drawing = int(x1 + (y1 * grid.number_of_columns))
            return x_start_boundary, y_start_boundary, cell_number_on_start_of_drawing

        bounding_box.bounding_box_start_coordinates_x_y = get_bounding_box_start_coordinates(x, y)

        # Handle visual drawing of placing bounding box; to give user feedback of where they are placing bbox.
        global allow_draw_bbox
        allow_draw_bbox = True
        while allow_draw_bbox == True:
            draw_bbox_images = param[0].copy()
            cv2.rectangle(draw_bbox_images, (bounding_box.bounding_box_start_coordinates_x_y[0], \
                 bounding_box.bounding_box_start_coordinates_x_y[1]),(mouseX, mouseY), (32,178,170), 2)
            cv2.imshow('image_selector_from_video',draw_bbox_images)
            cv2.waitKey(10)


    elif event == cv2.EVENT_RBUTTONUP and len(image_selection.drawn_one_cell_or_span) > 0 and \
         image_selection.drawn_one_cell_or_span[-1] == 'span':
        # Get cell number at end of bounding box
        col_number = x / grid.cell_width
        cell_x = math.trunc(col_number)
        row_number = y / grid.cell_height
        cell_y = math.trunc(row_number)
        cell_number_on_end_of_drawing = int(cell_x + (cell_y * grid.number_of_columns))
        # Compute the cell's origin so positions can be made relative to it
        cell_x_position = cell_x * grid.cell_width
        cell_y_position = cell_y * grid.cell_height
        allow_draw_bbox = False

        def draw_boundary_box(x, y, start_boundary_x_and_y):
            # Making copy for un-drawing bounding box
            new_image_boundary = param[0].copy()
            image_selection.image_list.append(new_image_boundary)

            # Draws bounding box
            end_boundary_x_and_y = (x, y)
            cv2.rectangle(param[0], (start_boundary_x_and_y[0], start_boundary_x_and_y[1]),
                          (end_boundary_x_and_y[0], end_boundary_x_and_y[1]), (0,255,127), 2)
            cv2.imshow('image_selector_from_video', param[0])

            # Used for un-drawing bounding box logic
            mouse_click.last_mouse_button_clicked.append('right')

            # Makes the coordinates relative to within that cell rather than the whole window.
            cell_start_relative_position_x = start_boundary_x_and_y[0] - cell_x_position
            cell_start_relative_position_y = start_boundary_x_and_y[1] - cell_y_position
            cell_end_relative_position_x = end_boundary_x_and_y[0] - cell_x_position
            cell_end_relative_position_y = end_boundary_x_and_y[1] - cell_y_position

            # Resize coordinates back to those of the original-sized images, rounded to whole pixels.
            cell_start_relative_position_x_resized = round(cell_start_relative_position_x / grid.image_resize_x)
            cell_start_relative_position_y_resized = round(cell_start_relative_position_y / grid.image_resize_y)
            cell_end_relative_position_x_resized = round(cell_end_relative_position_x / grid.image_resize_x)
            cell_end_relative_position_y_resized = round(cell_end_relative_position_y / grid.image_resize_y)

            # Gets bounding box x,y start and x,y end, relative to that individual cell.
            list_bounding_box_coordinates = [cell_start_relative_position_x_resized, cell_start_relative_position_y_resized, \
                cell_end_relative_position_x_resized, cell_end_relative_position_y_resized]

            # Cells the user has selected that contain bounding boxes
            bounding_box.temp_list_cells_with_bboxes.append(bounding_box.bounding_box_start_coordinates_x_y[2])

            # If this cell already has a bounding box, extend its coordinate list
            if bounding_box.bounding_box_start_coordinates_x_y[2] in bounding_box.temp_dict_and_cell_number_bboxes:
                bounding_box.temp_dict_and_cell_number_bboxes[bounding_box.bounding_box_start_coordinates_x_y[2]]. \
                    extend(list_bounding_box_coordinates)

            else:
                bounding_box.temp_dict_and_cell_number_bboxes[bounding_box.bounding_box_start_coordinates_x_y[2]] = \
                     list_bounding_box_coordinates

        # Check it is still within the same cell and, if so, draw the bounding box
        if bounding_box.bounding_box_start_coordinates_x_y[2] == cell_number_on_end_of_drawing and \
           len(create_text_file.cell_numbers_list_for_each_grid) > 0:
            # Check if drawing boundary boxes in span of images that has been selected.
            if bounding_box.bounding_box_start_coordinates_x_y[2] in range(create_text_file.cell_numbers_list_for_each_grid[-2], \
                 create_text_file.cell_numbers_list_for_each_grid[-1]+1)  \
            or bounding_box.bounding_box_start_coordinates_x_y[2] in range(create_text_file.cell_numbers_list_for_each_grid[-1], \
                 create_text_file.cell_numbers_list_for_each_grid[-2]+1):
                draw_boundary_box(x, y, bounding_box.bounding_box_start_coordinates_x_y)
Example #11
        # Sort the final results correctly
        df = df.sort_values(by=['Start_col', 'Start_row'],
                            ascending=[True, True])

        # Load satellite image
        img_path = os.path.join(image_path, img_name)
        img = cv2.imread(img_path)

        # Display ships
        ships = 0
        for index, row in df.iterrows():
            s_row = row['Start_row']
            e_row = row['End_row']
            s_col = row['Start_col']
            e_col = row['End_col']
            # Display square
            img = cv2.rectangle(img, (s_row, s_col), (e_row, e_col),
                                (0, 0, 255), 2)
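            # NB: cv2.rectangle expects (x, y) points, so passing
            # (s_row, s_col) is only correct if the 'Start_row'/'Start_col'
            # columns actually hold x/y pixel values.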
            ships += 1
        print('NB of ships after filter = {}'.format(ships))
        img_name = os.path.splitext(img_name)[0]
        final_name = img_name + '__' + stride + '__' + str(
            epsilon) + '__cleaned.png'
        final_path = os.path.join(res_path_final, final_name)
        # Save satellite image with ships
        cv2.imwrite(final_path, img)
        print('----------------------------------------------\n')
sys.stdout = orig_stdout
f.close()
Example #12
    def recognize_for_sign(self):
        self.read_known_faces()
        # Create a cv2 camera object:
        # cv2.VideoCapture(0) uses the default camera of the PC;
        # pass a file name, cv2.VideoCapture(filename), to use a local video
        cap = cv2.VideoCapture(0)

        # cap.set(propId, value)
        # sets a video parameter: propId is the property to set, value is its new value
        cap.set(3, 480)

        # cap.isOpened() returns true/false to check whether initialization succeeded
        # loop while the camera is open
        while cap.isOpened():

            flag, img_rd = cap.read()
            kk = cv2.waitKey(1)

            # convert to grayscale (OpenCV frames are BGR, so use BGR2GRAY)
            img_gray = cv2.cvtColor(img_rd, cv2.COLOR_BGR2GRAY)

            # detected faces
            faces = self.detector(img_gray, 0)

            # font to write with later
            font = cv2.FONT_HERSHEY_COMPLEX

            # lists to save the positions and names of the faces currently captured
            pos_namelist = []
            name_namelist = []

            # press 'q' to exit
            if kk == ord('q'):
                break
            else:
                # when a face is detected
                if len(faces) != 0:
                    # get the features of all faces captured in the current
                    # image and save them into features_cap_arr
                    features_cap_arr = []
                    for i in range(len(faces)):
                        shape = self.predictor(img_rd, faces[i])
                        features_cap_arr.append(
                            self.facerec.compute_face_descriptor(
                                img_rd, shape))

                    # iterate over all the faces captured in the image
                    for k in range(len(faces)):
                        print("##### camera person", k + 1, "#####")
                        # the name will be placed below the rectangle;
                        # work out its coordinates, and default everyone to "unknown"
                        name_namelist.append("unknown")

                        # the name position for each captured face
                        pos_namelist.append(
                            tuple([
                                faces[k].left(),
                                int(faces[k].bottom() +
                                    (faces[k].bottom() - faces[k].top()) / 4)
                            ]))

                        # for each detected face, compare against every stored face feature
                        e_distance_list = []
                        for i in range(len(self.features_known_arr)):
                            # if the data for person_X is not empty
                            if str(self.features_known_arr[i][0]) != '0.0':
                                print("with person",
                                      str(i + 1),
                                      "the e distance: ",
                                      end='')
                                e_distance_tmp = return_euclidean_distance(
                                    features_cap_arr[k],
                                    self.features_known_arr[i])
                                print(e_distance_tmp)
                                e_distance_list.append(e_distance_tmp)
                            else:
                                # empty data for person_X
                                e_distance_list.append(999999999)
                        # find which stored face has the minimum e distance
                        similar_person_num = e_distance_list.index(
                            min(e_distance_list))
                        print("Minimum e distance with person",
                              int(similar_person_num) + 1)

                        # compute the Euclidean distance between the captured features
                        # and the dataset features; a distance below 0.4 marks the
                        # person as recognizable
                        if min(e_distance_list) < 0.4:
                            # here you can modify the names shown on the camera
                            # 1. walk the folder directory
                            folder_name = './People/person'
                            # the closest face
                            sum = similar_person_num + 1
                            key_id = 1  # start comparing from the first face-data folder
                            # get the file names in the folder: 1wang, 2zhou, 3...
                            file_names = os.listdir(folder_name)
                            for name in file_names:
                                # print(name+'->'+str(key_id))
                                if sum == key_id:
                                    #winsound.Beep(300,500)  # beep: frequency 300, duration 500 ms
                                    name_namelist[k] = name[
                                        1:]  # strip the leading digit from the name (used to label the video output)
                                key_id += 1
                            # play the welcome sound effect
                            #playsound('D:/myworkspace/JupyterNotebook/People/music/welcome.wav')
                            # print("May be person "+str(int(similar_person_num)+1))
                            # ---------- crop the face and save it to the visitor folder ----------
                            for i, d in enumerate(faces):
                                x1 = d.top() if d.top() > 0 else 0
                                y1 = d.bottom() if d.bottom() > 0 else 0
                                x2 = d.left() if d.left() > 0 else 0
                                y2 = d.right() if d.right() > 0 else 0
                                face = img_rd[x1:y1, x2:y2]
                                size = 64
                                face = cv2.resize(face, (size, size))
                                # path where the known-visitor face images are stored
                                path_visitors_save_dir = "./People/visitor/known"
                                # file-name format: 2019-06-24-14-33-40wang.jpg
                                now_time = time.strftime(
                                    "%Y-%m-%d-%H-%M-%S", time.localtime())
                                save_name = str(now_time) + str(
                                    name_namelist[k]) + '.jpg'
                                # print(save_name)
                                # full path where this image will be saved
                                save_path = path_visitors_save_dir + '/' + save_name
                                # iterate over all file names in the visitor folder
                                visitor_names = os.listdir(
                                    path_visitors_save_dir)
                                visitor_name = ''
                                for name in visitor_names:
                                    # slice the name down to the minute: 2019-06-26-11-33-00wangyu.jpg
                                    visitor_name = (name[0:16] + '-00' +
                                                    name[19:])
                                # print(visitor_name)
                                visitor_save = (save_name[0:16] + '-00' +
                                                save_name[19:])
                                # print(visitor_save)
                                # do not save the same name twice within one minute
                                if visitor_save != visitor_name:
                                    cv2.imwrite(save_path, face)
                                    print('Newly saved: ' + path_visitors_save_dir +
                                          '/' + str(now_time) +
                                          str(name_namelist[k]) + '.jpg')
                                else:
                                    print('Duplicate, not saved!')

                        else:
                            # play the "cannot recognize" sound effect
                            #playsound('D:/myworkspace/JupyterNotebook/People/music/sorry.wav')
                            print("Unknown person")
                            # ----- save the image -----
                            # ---------- crop the face and save it to the visitor folder ----------
                            for i, d in enumerate(faces):
                                x1 = d.top() if d.top() > 0 else 0
                                y1 = d.bottom() if d.bottom() > 0 else 0
                                x2 = d.left() if d.left() > 0 else 0
                                y2 = d.right() if d.right() > 0 else 0
                                face = img_rd[x1:y1, x2:y2]
                                size = 64
                                face = cv2.resize(face, (size, size))
                                # path where unknown-visitor face images are stored
                                path_visitors_save_dir = "./People/visitor/unknown"
                                # file-name format: 2019-06-24-14-33-40unknown.jpg
                                now_time = time.strftime(
                                    "%Y-%m-%d-%H-%M-%S", time.localtime())
                                # print(save_name)
                                # full path where this image will be saved
                                save_path = path_visitors_save_dir + '/' + str(
                                    now_time) + 'unknown.jpg'
                                cv2.imwrite(save_path, face)
                                print('Newly saved: ' + path_visitors_save_dir + '/' +
                                      str(now_time) + 'unknown.jpg')

                        # draw rectangles
                        for kk, d in enumerate(faces):
                            # draw the rectangle around this face
                            cv2.rectangle(img_rd, tuple([d.left(),
                                                         d.top()]),
                                          tuple([d.right(),
                                                 d.bottom()]), (0, 255, 255),
                                          2)
                        print('\n')

                    # write each name under its face rectangle
                    for i in range(len(faces)):
                        cv2.putText(img_rd, name_namelist[i], pos_namelist[i],
                                    font, 0.8, (0, 255, 255), 1, cv2.LINE_AA)

            print("Faces in camera now:", name_namelist, "\n")

            #cv2.putText(img_rd, "Press 'q': Quit", (20, 450), font, 0.8, (84, 255, 159), 1, cv2.LINE_AA)
            cv2.putText(img_rd, "Face Recognition", (20, 40), font, 1,
                        (0, 0, 255), 1, cv2.LINE_AA)
            cv2.putText(img_rd, "Visitors: " + str(len(faces)), (20, 100),
                        font, 1, (0, 0, 255), 1, cv2.LINE_AA)

            # show the window with OpenCV
            cv2.imshow("camera", img_rd)

        # release the camera
        cap.release()

        # destroy all the created windows
        cv2.destroyAllWindows()
Example #13
# because if the image is too large it will run off the screen
if lebar<=662000:
    cv2.namedWindow('image')
else:
    cv2.namedWindow('image', cv2.WINDOW_NORMAL)

# register draw_function as the mouse callback
cv2.setMouseCallback('image', draw_function)

# loop that shows the image from the given location (path) using OpenCV
while True:
    cv2.imshow("image", img)
    if clicked:
        recEnd = (round(lebar*.735),round(panjang*.1))
        textStart = (round(lebar*.05), round(panjang*.08))
        cv2.rectangle(img, (20,20), recEnd, (b, g, r), -1)
        # display the name of the color the mouse points at, together with its RGB values
        text = getColorName(r,g,b) + ' R=' + str(r) + ' G=' + str(g) + ' B=' + str(b)
        if (r+g+b>=600):
            # if the selected pixel is a light color (r+g+b >= 600), make sure the text stays readable:
            cv2.putText(img, text, textStart, cv2.FONT_HERSHEY_PLAIN, 1, (0,0,0), 1, cv2.LINE_AA)
            # font scale 1, text color black (0,0,0), HERSHEY PLAIN font
        else:
            cv2.putText(img, text, textStart, cv2.FONT_HERSHEY_PLAIN, 1, (255,255,255), 1, cv2.LINE_AA)
        clicked = False

    # exit the loop (and the program) when the user presses the ESC key:
    # cv2.waitKey means the loop ends when ESC (key code 27) is pressed
    if cv2.waitKey(20) & 0xff == 27:
        break
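
# For reference, a minimal draw_function compatible with the callback above
# might look like this (a sketch, not the original definition; getColorName
# is assumed to be defined elsewhere in the script):
# def draw_function(event, x, y, flags, param):
#     global b, g, r, clicked
#     if event == cv2.EVENT_LBUTTONDBLCLK:
#         clicked = True
#         b, g, r = (int(v) for v in img[y, x])  # pixels are BGR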
Example #14
    pred_array = model.predict(image)
    #print(pred_array)
    pred = np.argmax(sum(pred_array))
    print('Predicting: {} with confidence: {}%'.format(
        pred, int(pred_array[0][pred] * 100)))
    accuracy = int(pred_array[0][pred] * 100)
    return (pred, accuracy)


pred, accuracy = 0, 0

while True:
    success, img = cap.read()
    flipHorizontal = cv2.flip(img, 1)
    imgResult = flipHorizontal.copy()
    cv2.rectangle(imgResult, (300, 0), (640, 340), (255, 255, 255), cv2.FILLED)
    newPoints = findColor(flipHorizontal, myColors, myColorValues)
    if len(myPoints) != 0:
        Draw(myPoints, myColorValues)
    k = cv2.waitKey(1)
    if k == 27:
        break
    elif k == ord('d'):
        if len(newPoints) != 0:
            for newP in newPoints:
                myPoints.append(newP)
        else:
            myPoints.append(None)
    elif k == ord('r'):
        myPoints = []
    elif k == ord('p'):
        pass  # the body of this branch is truncated in the original snippet
Example #15
    def execBodyRecognition(self):
        cap = cv2.VideoCapture(self.path)

        # Boolean to store if it is the first frame
        firstFrame = True

        frameCount = 0
        length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

        # Loop through all the frames
        while (cap.isOpened()):
            ret, frame = cap.read()

            print(str(frameCount) + "/" + str(length))
            frameCount += 1

            # Drawing polygons over initial picture (if there are shapes)
            if self.shapes is not None:
                color = (0, 0, 255)
                thickness = 2
                for shape in self.shapes:
                    pts = np.asarray(shape.coordinates,
                                     np.int32).reshape(-1, 1, 2)
                    cv2.polylines(frame, [pts],
                                  True,
                                  color,
                                  thickness,
                                  lineType=cv2.LINE_AA)

            # ML model processing
            if ret:
                # Detecting bodies using haarcascades model
                # imgUMat = cv2.UMat(frame)
                imgGray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                bodies = self.cascade.detectMultiScale(imgGray,
                                                       scaleFactor=1.05,
                                                       maxSize=(70, 100))

                # Looping through all the squares
                for (x, y, w, h) in bodies:
                    # Store the people in a list of point objects
                    self.people.append(
                        Pt(
                            self.summarize(x, y, w, h)[0],
                            self.summarize(x, y, w, h)[1]))

                    # Draw the rectangle
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0),
                                  2)

                # On the first frame, seed the tracked list with everyone; afterwards, count people
                if not firstFrame:
                    self.countPeople(frame)
                else:
                    self.tracked = self.people
                    firstFrame = False

                # If there are already 5 frames stored, cut out the first one
                if len(self.frameBuffer) == 5:
                    self.frameBuffer = self.frameBuffer[1:]

                # Add all the people into the framebuffer and clear people
                self.frameBuffer.append(self.people)
                self.people = []

                # Scale the frame down to 720p for resolution compatibility
                frame = cv2.resize(frame, (1280, 720))
                cv2.imshow("Result", frame)

                # The escape key
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
            else:
                break
        cap.release()
        cv2.destroyAllWindows()

        # Write the end results to the csv file
        self.exportCSV()
Example #16
            print("Status: ", r.status_code)
        else:
            name = "unknown"
            if (timer > 50):
                timer = 0
            if (timer == 0):
                sendingName = name + str(count)
                json_data["name"] = sendingName
                json_data['hour'] = f'{time.localtime().tm_hour}:{time.localtime().tm_min}'
                json_data['date'] = f'{time.localtime().tm_year}-{time.localtime().tm_mon}-{time.localtime().tm_mday}'
                json_data['imgpath'] = f'unknown_images/unknown_{count}.jpg'

                cv2.imwrite("unknown_images/unknown_%d.jpg" % count, img)
                r = requests.post(url=sendingUrl, json=json_data)
                count += 1
                print("Status: ", r.status_code)
            timer += 1

        y1, x2, y2, x1 = location
        # y1, x2, y2, x1 = y1*4, x2*4, y2*4, x1*4
        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
        cv2.putText(img, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1,
                    (255, 255, 255), 1)

    cv2.imshow('webcam', img)
    if cv2.waitKey(10) == ord('q'):  # quit when the 'q' key is pressed
        webcam.release()
        cv2.destroyAllWindows()
Example #17
face_cascade = cv2.CascadeClassifier("xml/haarcascade_frontalface_default.xml")
# eye_cascade = cv2.CascadeClassifier("xml/haarcascade_eye.xml")

cap = cv2.VideoCapture(0)

while True:

    _, frame = cap.read()

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faces = face_cascade.detectMultiScale(gray, 1.3, 5)

    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 0), 2)
        
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = frame[y:y+h, x:x+w]

        # eyes = eye_cascade.detectMultiScale(roi_gray, 1.3, 5)
        # for (sx, sy, sw, sh) in eyes:
        #     cv2.rectangle(roi_color, (sx, sy), (sx+sw, sy+sh), (0, 255, 255), 2)

    cv2.imshow("Face Detector", frame)

    if cv2.waitKey(1) & 0xFF == ord("q"):

        break

cap.release()
Example #18
            if matches[best_match_index]:
                name = known_names[best_match_index]

            face_names.append(name)

    process_this_frame = not process_this_frame

    for (top, right, bottom, left), name in zip(face_locations, face_names):

        top *= 4
        bottom *= 4
        left *= 4
        right *= 4

        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 200), 2)

        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 200),
                      cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0,
                    (255, 255, 255), 1)

    cv2.imshow('Video', frame)

    out.write(cv2.resize(frame, (int(vidWidth), int(vidHeight))))

    if cv2.waitKey(1) & 0xFF == ord('Q'):  # note: uppercase 'Q' only
        print("Stopped!")

        break
Example #19
def face_recog(frame, currentTime=None):

    global known_face_encodings, known_face_metadata, thread_counter, debug, \
        log_document_template, db, bucket, devId, frequency, parser, intruder_collection, \
            model_options, model_mode

    temp_log_document = copy.deepcopy(log_document_template)

    permFileName = (devId + "_" +
                    str(currentTime).replace(" ", "_").replace(":", "_") +
                    ".jpg")

    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_small_frame = small_frame[:, :, ::-1]
    # Find all the face locations and face encodings in the current frame of video
    face_locations = face_recognition.face_locations(
        rgb_small_frame, model=model_options[model_mode])
    face_encodings = face_recognition.face_encodings(rgb_small_frame,
                                                     face_locations)
    # Loop through each detected face and see if it is one we have seen before
    # If so, we'll give it a label that we'll draw on top of the video.
    face_labels = []
    doc_id_path = ""
    for face_location, face_encoding in zip(face_locations, face_encodings):
        # See if this face is in our list of known faces.
        metadata = lookup_known_face(face_encoding)
        # If we found the face, label the face with some useful information.
        if metadata is not None:
            face_label = metadata["label"]
            doc_id_path = "/" + parser["CLOUD_CONFIG"].get(
                "FAD") + "/" + metadata["userId"]

        # If this is a brand new face, add it to our list of known faces
        else:
            face_label = "New visitor!"
            # Grab the image of the face from the current frame of video
            top, right, bottom, left = face_location
            face_image = small_frame[top:bottom, left:right]
            face_image = cv2.resize(face_image, (150, 150))
            # Add the new face to our known face data
            doc_id_path = ("/" + parser["CLOUD_CONFIG"].get("FAD") + "/" +
                           register_new_face(face_encoding, face_image))

        face_labels.append(face_label)

        temp_log_document["peopleDetected"].append(doc_id_path)
    # Draw a box around each face and label each face
    for (top, right, bottom, left), face_label in zip(face_locations,
                                                      face_labels):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255),
                      cv2.FILLED)
        cv2.putText(
            frame,
            face_label,
            (left + 6, bottom - 6),
            cv2.FONT_HERSHEY_DUPLEX,
            0.8,
            (255, 255, 255),
            1,
        )

    thread_counter += 1
    # force an upload soon after someone is detected instead of waiting out
    # the full logging period
    if len(temp_log_document["peopleDetected"]) and frequency // 2 < thread_counter < frequency:
        thread_counter = frequency

    if thread_counter % frequency == 0:
        if debug:
            print()
            print("Total threads completed :", thread_counter)
        thread_counter = 0
        if logging:
            cv2.imwrite(permFileName, frame)
            blob = bucket.blob(permFileName)
            blob.upload_from_filename(permFileName)

            tempIntruderLogDocument = intruder_collection.document(
                devId + " " + str(currentTime))
            temp_log_document["timestamp"] = datetime_helpers.utcnow()
            temp_log_document["imageUri"] = permFileName
            temp_log_document["location"] = "/" + parser["CLOUD_CONFIG"].get(
                "CAM") + "/" + devId
            tempIntruderLogDocument.create(temp_log_document)
            del temp_log_document
            del tempIntruderLogDocument
            os.remove(permFileName)

    return frame
Example #20
import cv2  # Importing the OpenCV library
from random import randrange  # randrange generates random colors for the squares

# Load pre-trained frontal-face data from OpenCV
trained_face_data = cv2.CascadeClassifier(
    'haarcascade_frontalface_default.xml')
# Open the webcam
webcam = cv2.VideoCapture(0)

while True:
    successful_frame_read, frame = webcam.read()
    grayscaled_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    Fc_Cor = trained_face_data.detectMultiScale(
        grayscaled_img, 1.4, 2)  # scan the whole image for faces (scaleFactor=1.4, minNeighbors=2)

    for (x, y, w, h) in Fc_Cor:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
    cv2.imshow('Face Detector', frame)
    key = cv2.waitKey(1)

    if key == 81 or key == 113:  # 'Q' or 'q'
        break

webcam.release()

print("Code Completed!")
Example #21
def main():
    # size of camera output
    cam_size = 600

    # label's config
    font_scale = 1.5
    font = cv2.FONT_HERSHEY_PLAIN
    text_background = (0, 0, 255)
    text_offset_x = 10
    text_offset_y = cam_size - 25

    # getting all class names
    with open('class_names.txt', 'r') as f:
        class_names = f.read().splitlines()

    # loading the model
    model = get_model('./models/doodle_model.pt', class_names)
    model.eval()

    # starting cv2 video capture
    cap = cv2.VideoCapture(0)
    while True:
        # half of the output size, used to crop around the frame's center
        crop_size = int(cam_size / 2)

        _, frame = cap.read()

        # set a white background for the lines to be drawn onto
        img = 255 * np.ones(shape=frame.shape, dtype=np.uint8)

        # line detection
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 75, 150)
        lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 15, maxLineGap=10)
        if lines is not None:  # HoughLinesP returns None when no lines are found
            for line in lines:
                x1, y1, x2, y2 = line[0]
                cv2.line(img, (x1, y1), (x2, y2), (0, 0, 0), 5)

        # crop the image around the center to the configured output size
        mid_h = int(img.shape[0] / 2)
        mid_w = int(img.shape[1] / 2)
        img = img[mid_h - crop_size:mid_h + crop_size,
                  mid_w - crop_size:mid_w + crop_size]

        # convert the cropped array to a PIL image for the model
        im = Image.fromarray(img, 'RGB')

        # classifying the doodle
        pred = get_prediction(model, im, class_names)

        # generating output text
        text = '{} {}%'.format(pred[0]['label'],
                               int(pred[0]['confidence'] * 100))

        # generating text box
        (text_width, text_height) = cv2.getTextSize(text,
                                                    font,
                                                    fontScale=font_scale,
                                                    thickness=1)[0]
        box_coords = ((text_offset_x, text_offset_y),
                      (text_offset_x + text_width - 2,
                       text_offset_y - text_height - 2))

        # drawing text box, text and showing the lines for better camera adjustment
        cv2.rectangle(img, box_coords[0], box_coords[1], text_background,
                      cv2.FILLED)
        cv2.putText(img,
                    text, (text_offset_x, text_offset_y),
                    font,
                    fontScale=font_scale,
                    color=(255, 255, 255),
                    thickness=1)
        cv2.imshow('CharAIdes', img)

        key = cv2.waitKey(1)
        if key == 27:
            break

    # ending cv2 cam capture
    cap.release()
    cv2.destroyAllWindows()
Example #22
    ret, frame = cap.read()
    avg = cv2.mean(frame)[:3]
    print("current frame index : " + str(i))
    avgs.append(avg)

#confirming the dimensions for barcode image

barcode_image = "colorcandy.png"
#output image

barcode_width = 1
#usually 1 (one pixel column per frame); increase it, e.g. to 10, for a wider sample

barcode_height = 300
#height of the output image, 300px

barcode = numpy.zeros((barcode_height, len(avgs) * barcode_width, 3),
                      dtype="uint8")
#allocate memory for the barcode visualization

#filling the barcodes with colors from averages
for (i, avg) in enumerate(avgs):
    cv2.rectangle(barcode, (i * barcode_width, 0),
                  ((i + 1) * barcode_width, barcode_height), avg, -1)

cv2.imwrite(barcode_image, barcode)
createdimage = cv2.imread(barcode_image, 1)
cv2.imshow("Barcode", createdimage)
cv2.waitKey(0)

#Video by Kelly Lacy from Pexels
Exemplo n.º 23
0
from cv2 import cv2
import numpy as np

# load the cascade once, outside the capture loop
face = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
while True:
    _, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face.detectMultiScale(gray, 1.5, 5)
    for (x, y, w, h) in faces:
        img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 1)
    cv2.imshow('final', img)
    if cv2.waitKey(10) & 0xFF == 27:
        cap.release()
        cv2.destroyAllWindows()
        break
'''img = cv2.imread('crowd.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
face = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
faces = face.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
    img = cv2.rectangle(img,(x,y),(x+w,y+h),(0,0,255),1)

cv2.imshow('final',img)
cv2.waitKey(0)'''
                confidences.append(float(confidence))
                class_ids.append(class_id)


    indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)

    font = cv2.FONT_HERSHEY_PLAIN
    for i in range(len(boxes)):
        if i in indexes:
            x, y, w, h = boxes[i]
            label = str(classes[class_ids[i]])
            if label == "car":
                ret,frame=cap.read()
                color = colors[i]
                a=cv2.putText(frame, label, (x, y + 30), font, 3, color, 3)
                b=cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
                key=cv2.waitKey(1)
                if key==115:
                    car_det=cv2.imwrite("DetectedCars/dusricar%d.jpg" %count,frame)
                    car_detect=cv2.imread("DetectedCars/dusricar%d.jpg" %count)
                    cv2.imshow("Detected car",car_detect)
                    count +=1
                    cv2.waitKey(3000)
                    cv2.destroyWindow("Detected car")
                    insertBLOB(1, "Detected car:1", "C:\Python37\objectDetection\DetectedCars\dusricar0.jpg")
                    break
                
    cv2.imshow("Image", frame)
    key = cv2.waitKey(1)
    if key == 27:
        break
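# `insertBLOB` is called above but not defined in this excerpt. A minimal
# sketch of what such a helper could look like, assuming a MySQL table with
# (id, name, photo) columns; the table name, schema, and connection details
# are assumptions, not the original implementation.
#
# import mysql.connector
#
# def insertBLOB(record_id, name, photo_path):
#     """Store an image file as a BLOB in MySQL (hypothetical schema)."""
#     with open(photo_path, 'rb') as f:
#         photo = f.read()
#     conn = mysql.connector.connect(host="localhost", user="root",
#                                    password="", database="detections")
#     cursor = conn.cursor()
#     cursor.execute(
#         "INSERT INTO detected_cars (id, name, photo) VALUES (%s, %s, %s)",
#         (record_id, name, photo))
#     conn.commit()
#     conn.close()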
font = cv2.FONT_HERSHEY_COMPLEX_SMALL

# loop that draws the label text and boxes on the detected face and eyes,
# plays the video until it finishes, and waits for the user to press Escape
while True:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # store the face detection results
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)

    for (x, y, w, h) in faces:
        # draw a label and a box around each detected face
        cv2.putText(img, "Face", (x, y - 10), font, 0.75, (100, 255, 51), 2,
                    cv2.LINE_AA)
        # draw_text(img, 'face', font, x, y-20, 1, 1, (255, 255, 255), (100, 255, 51))
        cv2.rectangle(img, (x, y), (x + w, y + h), (100, 255, 51), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]

        # detect the eyes within the face region and store the results
        mata = eye.detectMultiScale(roi_gray)
        # draw a box around each detected eye
        for (mx, my, mw, mh) in mata:
            cv2.rectangle(roi_color, (mx, my), (mx + mw, my + mh),
                          (255, 255, 0), 2)

    cv2.imshow('face detection', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
Exemplo n.º 26
0
from cv2 import cv2 as cv
import numpy as np

blank = np.zeros((400, 400), dtype='uint8')

rectangle = cv.rectangle(blank.copy(), (30, 30), (370, 370), 255, -1)
circle = cv.circle(blank.copy(), (200, 200), 200, 255, -1)

cv.imshow('Rectangle', rectangle)
cv.imshow('Circle', circle)

# bitwise AND ---> intersecting regions
bitwise_and = cv.bitwise_and(rectangle, circle)
cv.imshow('Bitwise AND', bitwise_and)

# bitwise OR ---> union: intersecting and non-intersecting regions
bitwise_or = cv.bitwise_or(rectangle, circle)
cv.imshow('Bitwise OR', bitwise_or)

# bitwise XOR ---> non-intersecting regions only
bitwise_xor = cv.bitwise_xor(rectangle, circle)
cv.imshow('Bitwise XOR', bitwise_xor)

# bitwise NOT ---> inverts the rectangle; note bitwise_not takes a single
# source image (a second positional argument would be treated as the destination)
bitwise_not = cv.bitwise_not(rectangle)
cv.imshow('Bitwise NOT', bitwise_not)

cv.waitKey(0)
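# A common use of these bitwise operations is masking: bitwise_and with a
# mask argument keeps only the pixels inside a shape. A short sketch along
# those lines (the image path is a placeholder):
#
# img = cv.imread('photo.jpg')  # placeholder path
# mask = np.zeros(img.shape[:2], dtype='uint8')
# cv.circle(mask, (img.shape[1] // 2, img.shape[0] // 2), 100, 255, -1)
#
# # keep only the pixels where the mask is white
# masked = cv.bitwise_and(img, img, mask=mask)
# cv.imshow('Masked', masked)
# cv.waitKey(0)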
    def generate_dataset(self):
        if self.var_emp_Department.get(
        ) == "Select Department" or self.var_emp_Name.get(
        ) == "" or self.va_Emp_Id.get() == "":
            messagebox.showerror("Error",
                                 "All Fields are required",
                                 parent=self.root)
        else:
            try:
                conn = mysql.connector.Connect(host="localhost",
                                               username="******",
                                               password="******",
                                               database="face_recognizer")
                my_cursor = conn.cursor()
                my_cursor.execute("select * from emp_table")
                myresult = my_cursor.fetchall()
                id = len(myresult)  # number of existing rows
                my_cursor.execute(
                    "update emp_table set Name=%s,Department=%s,DOB=%s,DOJ=%s,Gender=%s,Proof_type=%s,Proof_number=%s,Address=%s,Email=%s,Contact_no=%s,PhotoSample=%s where emp_id=%s",
                    (self.var_emp_Name.get(), self.var_emp_Department.get(),
                     self.var_emp_Dob.get(), self.var_emp_Doj.get(),
                     self.var_emp_Gender.get(), self.var_emp_Proof_type.get(),
                     self.var_emp_Proof_Number.get(),
                     self.var_emp_Address.get(), self.var_emp_Email.get(),
                     self.var_emp_Contact_No.get(), self.var_radio1.get(),
                     self.va_Emp_Id.get()))  # pass the id itself, not a boolean comparison
                conn.commit()
                self.fetch_data()
                self.reset_data()
                conn.close()

                # Load Predefined data on frontal face from opencv

                # Initializing the face and eye cascade classifiers from xml files
                face_cascade = cv2.CascadeClassifier(
                    'haarcascade_frontalface_default.xml')
                eye_cascade = cv2.CascadeClassifier(
                    'haarcascade_eye_tree_eyeglasses.xml')
                # the plain-eye cascade below replaces the eyeglasses variant
                eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
                recognizer = cv2.face.LBPHFaceRecognizer_create()
                # read() loads the model in place and returns None, so track
                # whether a trained model exists explicitly
                try:
                    recognizer.read('trainer/trainer.yml')
                    train_dataset = True
                except cv2.error:
                    train_dataset = False

                id = 2  #two persons
                names = [
                    '', 'unknownone', 'unknown', 'abc', 'xyz'
                ]  #names keyed by id; index 0 is left empty
                font = cv2.FONT_HERSHEY_SIMPLEX
                face_id = input('\n enter user id and press <return> ==>  ')

                # face_id = sys.argv[1]
                # Variable store execution state
                first_read = True

                # Starting the video capture
                cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
                ret, img = cap.read()
                count = 0
                while (ret):
                    ret, img = cap.read()
                    # Converting the recorded image to grayscale
                    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                    # Applying filter to remove impurities
                    gray = cv2.bilateralFilter(gray, 5, 1, 1)

                    # Detecting the face for region of image to be fed to eye classifier
                    faces = face_cascade.detectMultiScale(gray,
                                                          1.3,
                                                          5,
                                                          minSize=(200, 200))

                    if (len(faces) > 0):
                        for (x, y, w, h) in faces:
                            img = cv2.rectangle(img, (x, y), (x + w, y + h),
                                                (0, 255, 0), 2)
                            if train_dataset:
                                id, confidence = recognizer.predict(
                                    gray[y:y + h, x:x + w])
                            # roi_face is face which is input to eye classifier
                            roi_face = gray[y:y + h, x:x + w]
                            roi_face_clr = img[y:y + h, x:x + w]
                            eyes = eye_cascade.detectMultiScale(roi_face,
                                                                1.3,
                                                                5,
                                                                minSize=(50,
                                                                         50))

                            # (unused below) a new DB connection is opened
                            # here for every detected face
                            conn = mysql.connector.Connect(
                                host="localhost",
                                username="******",
                                password="******",
                                database="face_recognizer")
                            my_cursor = conn.cursor()

                            # Examining the length of eyes object for eyes
                            if (len(eyes) >= 2):
                                # Check if program is running for detection
                                if (first_read):
                                    cv2.putText(img, "Eye detected press s",
                                                (70, 70),
                                                cv2.FONT_HERSHEY_PLAIN, 3,
                                                (0, 255, 0), 2)

                                else:
                                    count += 1
                                    cv2.putText(img, "Eyes open!", (70, 70),
                                                cv2.FONT_HERSHEY_PLAIN, 2,
                                                (255, 255, 255), 2)
                                    # print('click ' + str(count) + ' photo' + ' new face')
                                    if train_dataset:
                                        id, confidence = recognizer.predict(
                                            gray[y:y + h, x:x + w])
                                        # LBPH returns a float distance as the
                                        # confidence; compare numerically, not
                                        # against a string like "30%"
                                        if confidence > 30:
                                            cv2.putText(
                                                img, 'Already in the dataset',
                                                (x + 50, y + w + 20), font, 1,
                                                (255, 255, 0), 2)
                                        else:
                                            print('click ' + str(count) +
                                                  ' photo ' + str(confidence) +
                                                  ' new face ' + str(id))
                                            print(confidence)
                                            cv2.imwrite(
                                                "data/User." + str(face_id) +
                                                '.' + str(count) + ".jpg",
                                                gray[y:y + h, x:x + w])
                                    else:
                                        cv2.putText(img,
                                                    'New face was detected',
                                                    (x + 50, y + w + 20), font,
                                                    1, (255, 255, 0), 1)
                                        print(
                                            'click ' + str(count) + ' photo' +
                                            ' new face', confidence, id)
                                        cv2.imwrite(
                                            "data/User." + str(face_id) + '.' +
                                            str(count) + ".jpg", gray[y:y + h,
                                                                      x:x + w])

                            else:
                                if (first_read):
                                    # To ensure if the eyes are present before starting
                                    cv2.putText(img, "No eyes detected",
                                                (70, 70),
                                                cv2.FONT_HERSHEY_PLAIN, 3,
                                                (0, 0, 255), 2)
                                else:
                                    cv2.waitKey(30)
                                    first_read = True

                    else:
                        cv2.putText(img, "No face detected", (100, 100),
                                    cv2.FONT_HERSHEY_PLAIN, 3, (0, 255, 0), 2)

                    # Controlling the algorithm with keys
                    cv2.imshow('img', img)
                    a = cv2.waitKey(1)
                    if (a == ord('q')):
                        break
                    elif (a == ord('s') and first_read):
                        # This will start the detection
                        first_read = False
                    elif count >= 100:  # take 100 face samples and stop the video
                        break
                cap.release()
                cv2.destroyAllWindows()
                messagebox.showinfo("Result",
                                    "Generating data set completed!")

            except Exception as es:
                messagebox.showerror("Error",
                                     f"Due to:{str(es)}",
                                     parent=self.root)
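# The recognizer above reads trainer/trainer.yml, but the training step is
# not part of this excerpt. A minimal sketch of how such a file is typically
# produced from the saved data/User.<id>.<count>.jpg samples (the folder
# layout follows the code above; everything else is an assumption):
#
# import os
# import cv2
# import numpy as np
#
# def train_recognizer(data_dir='data', out_path='trainer/trainer.yml'):
#     recognizer = cv2.face.LBPHFaceRecognizer_create()
#     samples, ids = [], []
#     for name in os.listdir(data_dir):
#         # filenames follow the "User.<id>.<count>.jpg" pattern used above
#         if not name.startswith('User.'):
#             continue
#         face_id = int(name.split('.')[1])
#         img = cv2.imread(os.path.join(data_dir, name), cv2.IMREAD_GRAYSCALE)
#         samples.append(img)
#         ids.append(face_id)
#     recognizer.train(samples, np.array(ids))
#     os.makedirs(os.path.dirname(out_path), exist_ok=True)
#     recognizer.write(out_path)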
Exemplo n.º 28
0
from cv2 import cv2

# Load the cascade
face_cascade = cv2.CascadeClassifier(
    "/home/lokesh/anaconda3/envs/my_env/Detect/haarcascade_frontalface_default.xml"
)
# Read the input image
img = cv2.imread(
    "/home/lokesh/anaconda3/envs/my_env/Detect/WhatsApp Image 2020-04-28 at 18.18.35 (copy).jpeg"
)
# Convert into grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Detect faces
faces = face_cascade.detectMultiScale(gray, 1.1, 4)
# Draw a rectangle around each face
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 3)
# Display the output
cv2.imshow('img', img)
cv2.waitKey()
Exemplo n.º 29
0
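# This fragment starts mid-loop: num_labels, labels, stats, and centroids
# come from a connected-components pass over a thresholded image, and
# display_img is presumably a small imshow helper. A minimal sketch of the
# setup the fragment implies, assuming th1 is a binary threshold image
# (the variable names follow the fragment):
#
# (num_labels, labels, stats, centroids) = cv2.connectedComponentsWithStats(
#     th1, 8, cv2.CV_32S)
# final_labels = []
#
# for i in range(num_labels):
#     if i == 0:
#         text = "examining component {}/{} (background)".format(i + 1, num_labels)
#     # ...the excerpt below continues from the matching else branch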
    else:
        text = "examining component {}/{}".format(i + 1, num_labels)

    print("[INFO] {}".format(text))
    x = stats[i, cv2.CC_STAT_LEFT]
    y = stats[i, cv2.CC_STAT_TOP]
    w = stats[i, cv2.CC_STAT_WIDTH]
    h = stats[i, cv2.CC_STAT_HEIGHT]
    area = stats[i, cv2.CC_STAT_AREA]
    (cX, cY) = centroids[i]
    output = th1.copy()
    if (
            1000 < area < 2000
    ):  #keep only the relevant detections (the ones big enough to be black keys)
        final_labels.append(i)
        cv2.rectangle(output, (x, y), (x + w, y + h), (0, 255, 0), 3)
        cv2.circle(output, (int(cX), int(cY)), 4, (255, 255, 0), -1)
        componentMask = (labels == i).astype("uint8") * 255
        display_img("Output", output)
        display_img("Connected Component", componentMask)
        cv2.waitKey(0)

#just for visualization lol
for i in range(len(final_labels)):
    xc, yc = centroids[final_labels[i]]
    #x1 = stats[final_labels[i], cv2.CC_STAT_LEFT]
    #del_x = stats[final_labels[i], cv2.CC_STAT_WIDTH]
    #lol = cv2.line(img,(int(x1),0),(int(x1),900),(0,255,0),1)
    #lol = cv2.line(img,(int(x1+del_x),0),(int(x1+del_x),900),(0,255,0),1)
    lol = cv2.line(img, (int(xc), 0), (int(xc), 900), (0, 0, 255), 1)
    cv2.imshow("lol", lol)
    # loop over the detected face locations and their corresponding
    # predictions
    for (box, pred) in zip(locs, preds):
        # unpack the bounding box and predictions
        (startX, startY, endX, endY) = box
        (mask, withoutMask) = pred

        # determine the class label and color we'll use to draw
        # the bounding box and text
        label = "Mask" if mask > withoutMask else "No Mask"
        color = (0, 255, 0) if label == "Mask" else (0, 0, 255)

        # include the probability in the label
        label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)

        # display the label and bounding box rectangle on the output
        # frame
        cv2.putText(frame, label, (startX, startY - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
        cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)

    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break

# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()