Example #1
def show_images(images):
    """ Shows all images in a window"""
    if images is None:
        logging.error(
            'Cannot Show Images (No image saved). Image-Type: %s (tools.py)' %
            str(type(images).__name__))
    elif type(images).__name__ == 'list':
        for i in range(len(images)):
            print type(images[i])
            if type(images[i]).__name__ == 'ndarray':
                tmpimage = array2cv(images[i])
                cv.ShowImage("Image", tmpimage)
                if cv.WaitKey() == 27:
                    cv.DestroyWindow("Image")
            else:
                cv.ShowImage("Image", images[i])
                if cv.WaitKey() == 27:
                    cv.DestroyWindow("Image")
    elif type(images).__name__ == 'cvmat':
        cv.ShowImage("Image", images)
        if cv.WaitKey() == 27:
            cv.DestroyWindow("Image")
    elif type(images).__name__ == 'iplimage':
        cv.ShowImage("Image", images)
        if cv.WaitKey() == 27:
            cv.DestroyWindow("Image")
    elif type(images).__name__ == 'ndarray':
        images = array2cv(images)
        cv.ShowImage("Image", images)
        if cv.WaitKey() == 27:
            cv.DestroyWindow("test")
    elif type(images).__name__ == 'str':
        logging.error(
            'TypeError: Cannot Show Images (No image saved?). Image-Type: %s (tools.py)'
            % str(type(images).__name__))
    else:
        logging.error(
            'TypeError: Cannot Show Images. Image-Type: %s (tools.py)' %
            str(type(images).__name__))
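For comparison, a minimal cv2-based sketch of the same display loop (assumptions: the images are NumPy arrays, and show_images_cv2 is an illustrative name rather than part of the original project):

import cv2
import numpy as np
import logging


def show_images_cv2(images):
    """Display one image or a list of images; ESC closes the window."""
    if images is None:
        logging.error('Cannot show images: nothing was passed in')
        return
    if isinstance(images, np.ndarray):
        images = [images]
    for img in images:
        cv2.imshow("Image", img)
        if cv2.waitKey(0) == 27:  # ESC closes the window early
            cv2.destroyWindow("Image")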
Example #2
    def close_camera(self):
        try:
            if self.showVideo:
                cv.DestroyWindow("video")

            if self.find_face_is_on():
                self.stop_find_face()

            del self.capture
            self.capture = None
        except Exception:
            print "Error in close_camera"
Example #3
def analyze_webcam(width, height):
    print("""
    ' ' : extract colors of detected face
    'b' : toggle onlyBlackCubes
    'd' : toggle dodetection
    'm' : shift right
    'n' : shift left
    'r' : reset everything
    'q' : print hsvs
    'p' : resolve colors
    'u' : toggle didassignments
    's' : save image
""")

    # 0 for laptop camera
    # 1 for usb camera
    capture = cv2.VideoCapture(0)

    # Set the capture resolution
    #capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    #capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

    # Create the window and set the size to match the capture resolution
    cv2.namedWindow("Fig", cv2.WINDOW_NORMAL)
    #cv2.resizeWindow("Fig", width, height)

    # Capture frame-by-frame
    ret, frame = capture.read()
    rf = RubiksFinder(width, height)

    while True:
        ret, frame = capture.read()

        if not ret:
            cv2.waitKey(0)
            break

        rf.analyze_frame(frame)

        if not rf.process_keyboard_input():
            break

    cv2.DestroyWindow("Fig")
Example #4
import serial  # import the pyserial module
import cv2

#Module -1: Image Processing
hc = cv2.CascadeClassifier(
    '/home/george/PycharmProjects/Embeded image processing system/haarcascade_frontalface_alt2.xml'
)
img = cv2.imread('/home/jayneil/beautiful-faces.jpg', 0)
faces = hc.detectMultiScale(img)
a = 1
print(faces)
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), 255)
cv2.imwrite("faces_detected.jpg", img)
dst = cv2.imread('faces_detected.jpg')
cv2.namedWindow('Face Detected', cv2.WINDOW_AUTOSIZE)
cv2.imshow('Face Detected', dst)
cv2.waitKey(5000)
cv2.destroyWindow('Face Detected')

#Module -2: Trigger Pyserial
if len(faces) == 0:

    ser = serial.Serial('/dev/ttyUSB0', 9600)
    print(ser)
    ser.write('N')
else:

    ser = serial.Serial('/dev/ttyUSB0', 9600)
    print(ser)
    ser.write('Y')
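If this script runs under Python 3, pyserial's write() expects bytes rather than str; a minimal adjustment under that assumption:

ser.write(b'N' if len(faces) == 0 else b'Y')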
Example #5
total = 0

for c in counter:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)

    if len(approx) == 4:
        cv2.drawContours(img, [approx], -1, (0, 255, 0), 4)
        total = total + 1
        pts1 = np.float32([approx])
        a = pts1[0][0][0][0]
        b = pts1[0][0][0][1]
        c = pts1[0][1][0][0]
        d = pts1[0][1][0][1]
        e = pts1[0][2][0][0]
        f = pts1[0][2][0][1]
        g = pts1[0][3][0][0]
        h = pts1[0][3][0][1]
        array = np.float32([[a, b], [c, d], [e, f], [g, h]])
        M = cv2.getPerspectiveTransform(array, pts2)
        new = cv2.warpPerspective(img, M, (120, 180))
        cv2.imshow("new", new)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.DestroyWindow("new")
        print("This is one contour")
        print(approx)

print('I found %d books in that image' % total)
cv2.imshow("Output", img)
cv2.waitKey(0)
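The eight scalar reads above can be collapsed with a single reshape; a minimal sketch of that step (assuming approx, pts2 and img exactly as in the snippet, with approx of shape (4, 1, 2) as returned by cv2.approxPolyDP):

src_quad = approx.reshape(4, 2).astype(np.float32)
M = cv2.getPerspectiveTransform(src_quad, pts2)
new = cv2.warpPerspective(img, M, (120, 180))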
Example #6
            break
        #
        ### check OS
        if (osName == "nt"):
            frame = cv2.flip(frame, 0)
        else:
            frame = cv2.flip(frame, 1)
        #
        ### detecting faces here
        detect_and_draw(frame, cascade)
        #
        ### handle key events
        k = cv2.waitKey(5)
        if k % 0x100 == 27:
            # user has press the ESC key, so exit
            cv2.destroyWindow('Camera')
            break

import numpy as np
import cv2

# multiple cascades: https://github.com/Itseez/opencv/tree/master/data/haarcascades

#https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_frontalface_default.xml
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
#https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_eye.xml

cap = cv2.VideoCapture(0)

while 1:
    ret, img = cap.read()
Example #7
from time import time as timer
import tensorflow as tf
import numpy as np
import sys
import cv2 as cv
import os
vidFile = cv.VideoCapture('Test_Avi')

nFrames = int(vidFile.get(cv.CAP_PROP_FRAME_COUNT))
fps = vidFile.get(cv.CAP_PROP_FPS)
waitPerFrameInMillisec = int(1 / fps * 1000 / 1)

print('Num. Frames = ', nFrames)
print('Frame Rate = ', fps, ' frames per sec')

for f in range(nFrames):
    ret, frameImg = vidFile.read()
    cv.imshow("My Video Window", frameImg)
    cv.waitKey(waitPerFrameInMillisec)

# When playing is done, delete the window
#  NOTE: this step is not strictly necessary,
#         when the script terminates it will close all windows it owns anyways
cv.DestroyWindow("My Video Window")
Example #8
def main():

    global current_image
    global current_img_file_name
    global has_roi
    global roi_x0
    global roi_y0
    global roi_x1
    global roi_y1

    iKey = 0

    files = glob.glob(image_file_glob)
    if len(files) == 0:
        print("No files match glob pattern")
        return

    files = [os.path.abspath(f) for f in files]
    files.sort()

    # init GUI
    cv.NamedWindow(window_name, 1)
    cv.SetMouseCallback(window_name, on_mouse, None)

    sys.stderr.write("Opening directory...")
    # init output of rectangles to the info file
    #os.chdir(input_directory)
    sys.stderr.write("done.\n")

    current_file_index = 0

    while True:

        current_img_file_name = files[current_file_index]

        num_of_rec = 0
        sys.stderr.write(
            "Loading current_image (%d/%d) %s...\n" %
            (current_file_index + 1, len(files), current_img_file_name))

        try:
            current_image = cv.LoadImage(current_img_file_name, 1)
        except IOError:
            sys.stderr.write("Failed to load current_image %s.\n" %
                             current_img_file_name)
            return -1

        #  Work on current current_image
        #cv.ShowImage(window_name, current_image)
        redraw()

        # Need to figure out waitkey returns.
        # <Space> =  32     add rectangle to current image
        # <left>  =  81     save & next
        # <right> =  83     save & prev
        # <a>     =  97     add rect to table
        # <b>     =  98     toggle file is background or not
        # <d>     = 100     remove old rect
        # <q>     = 113     exit program
        # <s>     = 115     save rect table
        # <x>     = 136     skip image
        iKey = cv.WaitKey(0) % 255
        # This is ugly, but is actually a simplification of the C++.
        #sys.stderr.write(str(iKey) + '\n')
        if draging:
            continue

        if iKey == 81:
            current_file_index -= 1
            if current_file_index == -1:
                current_file_index = len(files) - 1
            clear_roi()
        elif iKey == 83:
            current_file_index += 1
            if current_file_index == len(files):
                current_file_index = 0
            clear_roi()
        elif iKey == 113:
            cv.DestroyWindow(window_name)
            return 0
        elif iKey == 97:
            rect_table.setdefault(current_img_file_name, set()).add(
                (roi_x0, roi_y0, roi_x1 - roi_x0, roi_y1 - roi_y0))
            clear_roi()
            write_rect_table()
            redraw()
        elif iKey == 98:
            if current_img_file_name in background_files:
                background_files.remove(current_img_file_name)
            else:
                background_files.add(current_img_file_name)
        elif iKey == 100:
            remove_rect(cur_mouse_x, cur_mouse_y)
        elif iKey == 115:
            write_rect_table()
        elif iKey == 136:
            sys.stderr.write("Skipped %s.\n" % current_file_index)
Example #9
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 1280)
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 720)
frame = cv.QueryFrame(capture)
test = cv.CreateImage(cv.GetSize(frame), 8, 3)
cv.NamedWindow("output")
posx = 0
posy = 0
while (1):
    frame = cv.QueryFrame(capture)
    cv.Flip(frame, frame, 1)
    # we make all drawings on imdraw.
    imdraw = cv.CreateImage(cv.GetSize(frame), 8, 3)
    # we get coordinates from imgyellowthresh
    imgyellowthresh = getthresholdedimg(frame)
    # eroding removes small noises
    cv.Erode(imgyellowthresh, imgyellowthresh, None, 1)
    (leftmost, rightmost, topmost, bottommost) = getpositions(imgyellowthresh)
    if (leftmost - rightmost != 0) or (topmost - bottommost != 0):
        lastx = posx
        lasty = posy
        posx = cv.Round((rightmost + leftmost) / 2)
        posy = cv.Round((bottommost + topmost) / 2)
        if lastx != 0 and lasty != 0:
            win32api.SetCursorPos((posx, posy))

    cv.Add(test, imdraw, test)
    cv.ShowImage("output", test)
    if cv.WaitKey(10) >= 0:
        break
cv.DestroyWindow("output")
Example #10
                midFaceX = x1 + ((x2 - x1) / 2)
                midFaceY = y1 + ((y2 - y1) / 2)
                midFace = (midFaceX, midFaceY)

                offsetX = midFaceX / float(frame.shape[1] / 2)
                offsetY = midFaceY / float(frame.shape[0] / 2)
                offsetX -= 1
                offsetY -= 1

                cam_pan -= (offsetX * 5)
                cam_tilt += (offsetY * 5)
                cam_pan = max(0, min(180, cam_pan))
                cam_tilt = max(0, min(180, cam_tilt))

                print(offsetX, offsetY, midFace, cam_pan, cam_tilt,
                      frame.shape[1], frame.shape[0])

                pan(int(cam_pan - 90))
                tilt(int(cam_tilt - 90))
                break

    # Display the resulting frame
    cv2.imshow('Tracker', frame)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

# When everything done, release the capture
cv2.DestroyWindow("Tracker")
Example #11
def Dilation(pos):
    element = cv.CreateStructuringElementEx(pos * 2 + 1, pos * 2 + 1, pos, pos,
                                            element_shape)
    cv.Dilate(src, dest, element, 1)
    cv.ShowImage("Erosion & Dilation", dest)


if __name__ == "__main__":
    if len(sys.argv) > 1:
        src = cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
    else:
        url = 'https://code.ros.org/svn/opencv/trunk/opencv/samples/c/fruits.jpg'
        filedata = urllib2.urlopen(url).read()
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        src = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)

    image = cv.CloneImage(src)
    dest = cv.CloneImage(src)
    cv.NamedWindow("Opening & Closing", 1)
    cv.NamedWindow("Erosion & Dilation", 1)
    cv.ShowImage("Opening & Closing", src)
    cv.ShowImage("Erosion & Dilation", src)
    cv.CreateTrackbar("Open", "Opening & Closing", 0, 10, Opening)
    cv.CreateTrackbar("Close", "Opening & Closing", 0, 10, Closing)
    cv.CreateTrackbar("Dilate", "Erosion & Dilation", 0, 10, Dilation)
    cv.CreateTrackbar("Erode", "Erosion & Dilation", 0, 10, Erosion)
    cv.WaitKey(0)
    cv.DestroyWindow("Opening & Closing")
    cv.DestroyWindow("Erosion & Dilation")
Example #12
import cv2 as cv

cv.namedWindow('cap', 1)

## it's really important to check the source image size and set up the
## destination window
w = cv.VideoWriter('test.avi', cv.cv.CV_FOURCC('H', '2', '6', '4'), 25,
                   (640, 480))
cap = cv.VideoCapture(1)
cap.set(3, 640)
cap.set(4, 480)

videoCodec = cap.get(cv.cv.CV_CAP_PROP_FOURCC)
print videoCodec

success, frame = cap.read()

while success:
    cv.imshow('cap', frame)
    w.write(frame)
    success, frame = cap.read()

    if cv.waitKey(1) == 27:
        break

cv.destroyWindow('cap')
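On OpenCV 3+ the cv2.cv submodule is gone, so the FOURCC and capture-property calls above would fail; a minimal sketch of the same setup under that assumption ('XVID' is an illustrative codec choice standing in for the H264 request above):

import cv2 as cv

cap = cv.VideoCapture(1)
cap.set(cv.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv.CAP_PROP_FRAME_HEIGHT, 480)
fourcc = cv.VideoWriter_fourcc(*'XVID')
w = cv.VideoWriter('test.avi', fourcc, 25, (640, 480))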
Example #13
import time

if __name__ == '__main__':

    cv.namedWindow("camera", 1)
    # open the IP camera stream
    video = "http://*****:*****@172.16.200.214:8081/"
    capture = cv.VideoCapture(video)

    num = 0
    while True:
        ret, img = capture.read()
        cv.imshow("camera", img)

        # Key handling. Note: keyboard focus must be on the camera window,
        # not on the terminal command-line window.
        key = cv.waitKey(10)

        if key == 27:
            # ESC exits
            print('esc break...')
            break
        if key == ord(' '):
            # save one frame
            num = num + 1
            filename = "frames_%s.jpg" % num
            cv.imwrite(filename, img)

    capture.release()
    cv.destroyWindow("camera")
Example #14
    imag = diffImg(tzero, t, tmais)
    imag2 = np.fromstring(imag, np.uint8)

    d = cv.Scalar
    c = cv.Scalar

    for x in imag:
        imag3 = np.fromstring(imag2, np.uint8)
        d = cv.Get2D(imag3, i, j)

        if ((d.val(2) == 230) and (d.val(1) == 230) and (d.val(0) == 230)):
            c.val[2] = 0
            c.val[1] = 255
            c.val[0] = 0
            cv.Set2D(imag2, i, j, c)
        i = i + 1
        j = j + 1

    cv2.imshow(janela2, imag)
    cv2.imshow(janela3, imag2)

    a = cv2.waitKey(22)

    if a == ord('g'):
        cv2.destroyWindow(janela)
        break
    if a == ord('q'):
        cv2.destroyWindow('Teste')
        break
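The per-pixel loop above mixes legacy cv scalar types with NumPy arrays and will not run as written; a NumPy-idiomatic sketch of what it appears to attempt (assuming imag is a BGR uint8 image from diffImg and that pixels equal to (230, 230, 230) should be recoloured green):

recoloured = imag.copy()
mask = np.all(recoloured == 230, axis=2)   # pixels whose B, G and R are all 230
recoloured[mask] = (0, 255, 0)             # paint them green
cv2.imshow(janela3, recoloured)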