def main():
    windowName = "Live Video Feed"
    cv2.namedWindow(windowName)

    cap = cv2.VideoCapture(0)

    if cap.isOpened():
        ret, frame = cap.read()
        print(ret)
        print(frame)
    else:
        ret = False

    # capture and display the video stream
    while ret:
        ret, frame = cap.read()
        if not ret:
            break
        cv2.imshow(windowName, frame)
        # cv2.imshow returns None, so build the gray view explicitly
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        cv2.imshow("Gray", gray)

        if cv2.waitKey(1) == 27:  # Esc
            break

    cv2.destroyAllWindows()
    cap.release()
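OpenCV's HighGUI has no cv2.destroyWindows; the two real teardown calls are shown below as a quick reference sketch (windowName as above):

cv2.destroyWindow(windowName)  # close one named window
cv2.destroyAllWindows()        # close every open window; safe even if none remain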
Example #2
def main():
    imgpath = "E:\\OCV\\Images\\standard_test_images\\lena_color_256.tif"
    img = cv2.imread(imgpath)
    
    cv2.imshow('Lena', img)
    cv2.waitKey(0)
    cv2.destroyWindow('Lena')
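cv2.imread does not raise when the path is wrong; it returns None, and the failure only surfaces later in imshow. A minimal guard, assuming the same imgpath:

img = cv2.imread(imgpath)
if img is None:
    raise FileNotFoundError(f"cannot read image: {imgpath}")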
Example #3
def Read_video(video_path,thresh):
    videoCap = cv2.VideoCapture(video_path)
    frame_count = int(videoCap.get(cv2.CAP_PROP_FRAME_COUNT))
    print(frame_count)
    count = 0
    while count < frame_count:
        count += 3  # sample every third frame
        videoCap.set(cv2.CAP_PROP_POS_FRAMES, count)
        # note: frame width/height are generally read-only for file captures
        videoCap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        videoCap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        ret, frame = videoCap.read()
        if not ret:
            break
        src = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        cv2.imwrite('./temp.jpg',src)
        img_path = './temp.jpg'
        loc,conf = get_face_loc(img_path)
        if loc is None:
            print('no face')
            cv2.putText(frame,'NONE_FACE',(10,30),cv2.FONT_HERSHEY_TRIPLEX,0.6,(255,255,255))
        else:
            roi_ary = get_roi(loc,img_path)
            cv2.rectangle(frame,(int(roi_ary[0]),int(roi_ary[1])),(int(roi_ary[2]),int(roi_ary[3])),(0,0,255),1)
            # do predict
            try:
                loc = get_phone_loc(roi_ary,thresh)
                if sum(loc) !=0:
                    cv2.rectangle(frame,(int(loc[0]),int(loc[1])),(int(loc[2]),int(loc[3])),(0,255,0),1)
                    cv2.putText(frame,str(loc[-1]),(int(loc[0]),int(loc[1])),cv2.FONT_HERSHEY_TRIPLEX,0.6,(255,0,0))
            except Exception:
                print('predict_error')
        cv2.imshow('main',frame)
        if cv2.waitKey(10) & 0xFF == ord('q'):
            count = frame_count
    videoCap.release()
    cv2.destroyAllWindows()
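The cv2.cv.CV_CAP_PROP_* constants used above were OpenCV 2 names; since OpenCV 3 they live directly on cv2, and a quick property dump is an easy way to confirm what a capture actually reports (a sketch, assuming a video_path as above):

import cv2

cap = cv2.VideoCapture(video_path)
for name in ("CAP_PROP_FRAME_COUNT", "CAP_PROP_FPS",
             "CAP_PROP_FRAME_WIDTH", "CAP_PROP_FRAME_HEIGHT"):
    print(name, cap.get(getattr(cv2, name)))  # cap.get returns a float
cap.release()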
Example #4
def main():
    # load haar cascades model
    faces = cv2.CascadeClassifier(FACE_MODEL_FILE)
    eyes = cv2.CascadeClassifier(EYES_MODEL_FILE)
    plates = [cv2.CascadeClassifier(p) for p in PLATE_FILES]

    # connect to camera
    camera = cv2.VideoCapture(0)
    while not camera.isOpened():
        time.sleep(0.5)

    # read and show frames
    progress = tqdm()
    while True:

        ret, frame = camera.read()
        if not ret:
            continue
        frame = process(frame, [
            (faces, (255, 255, 0), dict(scaleFactor=1.1, minNeighbors=5, minSize=(30, 30)), face_name),
            (eyes, (0, 0, 255), dict(scaleFactor=1.1, minNeighbors=5, minSize=(20, 20)), eye_name),
        ])
        #        frame = process(frame, [
        #            (model, (0, 255, 0), dict(scaleFactor=1.1, minNeighbors=5, minSize=(20, 20)))
        #            for model in plates
        #            ])
        cv2.imshow('Objects', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        progress.update()

    # gracefully close
    camera.release()
    cv2.destroyAllWindows()
    progress.close()
Example #5
def main():
    imgpath = "C:\\Users\\Rahul\\Pictures\\photo\\me.jpg" #image path
    img = cv2.imread(imgpath) #image read
    cv2.namedWindow('me', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('me',img)#image show
    
    cv2.waitKey(0)
    cv2.destroyWindow('me')
Example #6
def main():
    # any((x1 <= x <= x + w <= x1 + w1) and (y1 <= y <= y + h <= y1 + h1) for (x1, y1, w1, h1) in faces)
    model = cv2.CascadeClassifier(CASCADESFILE)
    modeleyes = cv2.CascadeClassifier(EYESFILE)
    webcam = cv2.VideoCapture(0)

    # infinite image processing loop
    while True:

        if not webcam.isOpened():
            logging.warning('Unable to connect to camera.')
            time.sleep(5)
            continue

        # get image from camera
        ret, frame = webcam.read()

        # if frame is read correctly ret is True
        if not ret:
            print("Can't receive frame (stream end?). Exiting ...")
            break
        # convert image to grayscale
        grayframe = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # detect faces
        faces = model.detectMultiScale(grayframe,
                                       scaleFactor=3.1,
                                       minNeighbors=5,
                                       minSize=(30, 30))
        eyes = modeleyes.detectMultiScale(grayframe,
                                          scaleFactor=3.1,
                                          minNeighbors=20,
                                          minSize=(40, 40))
        logging.info(f'Detected faces: {len(faces)}')

        # add boxes
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
        for (x, y, w, h) in eyes:
            # keep an eye only if it lies inside at least one detected face
            if len(faces) and any(
                    (x1 <= x <= x + w <= x1 + w1) and
                    (y1 <= y <= y + h <= y1 + h1)
                    for (x1, y1, w1, h1) in faces):
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)

        # show image
        cv2.imshow('Video', frame)

        # stop if user presses 'q'
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # close everything
    webcam.release()
    cv2.destroyAllWindows()
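The eye-in-face containment test is easier to audit as a helper; a sketch (box_inside is a hypothetical name; boxes are the (x, y, w, h) tuples detectMultiScale returns):

def box_inside(inner, outer):
    # True when `inner` lies fully within `outer`
    x, y, w, h = inner
    x1, y1, w1, h1 = outer
    return x1 <= x and y1 <= y and x + w <= x1 + w1 and y + h <= y1 + h1

# usage inside the loop above:
#     if any(box_inside((x, y, w, h), face) for face in faces): ...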
Example #7
def readimg():
    img = cv2.imread('test.jpg')
    cv2.imshow("test", img)

    k = cv2.waitKey(1111)  # 0 waits for a key indefinitely; >0 waits that many milliseconds
    if k == 111:  # ord('o')
        cv2.destroyWindow("test")  # close the named window
        # cv2.destroyAllWindows() would close every window

    cv2.imwrite("testxxx.jpg", img)  # write the image to disk
Example #8
def main():
    imgpath = "C:\\Users\\Rahul\\Pictures\\photo\\me.jpg" #image path
    img = cv2.imread(imgpath, 0)  # image read: 0 = grayscale, 1 = color, -1 = unchanged (keeps alpha)
    cv2.namedWindow('me', cv2.WINDOW_AUTOSIZE) 
    
    anotherfolder = "C:\\Users\\Rahul\\Pictures\\Camera Roll\\me.png"
    
    cv2.imshow('me',img)#image show
    cv2.imwrite(anotherfolder, img)  
    cv2.waitKey(0)
    cv2.destroyWindow('me')
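cv2.imwrite signals failure (for example, a target folder that does not exist) by returning False rather than raising, so the write above is worth checking; a sketch using the same anotherfolder and img:

ok = cv2.imwrite(anotherfolder, img)
if not ok:
    print("write failed - does the target folder exist?")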
Example #9
def detect_from_image(url):
    img = url_to_image(url)
    cv2.imshow('qq',img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    print(img, img.shape)
    detections = detector.detectObjectsFromImage(input_image=img,
                                                 input_type='array',
                                                 output_image_path=output,
                                                 minimum_percentage_probability=60)
    """
    'detectObjectsFromImage()' function is used to detect objects observable in the given image:
                    * input_image , which can be a filepath or image numpy array in BGR
                    * output_image_path (only if output_type = file) , file path to the output image that will contain the detection boxes and label, if output_type="file"
                    * input_type (optional) , filepath/numpy array of the image. Acceptable values are "file" and "array"
                    * output_type (optional) , file path/numpy array/image file stream of the image. Acceptable values are "file" and "array"
                    * extract_detected_objects (optional) , option to save each object detected individually as an image and return an array of the objects' image path.
                    * minimum_percentage_probability (optional, 30 by default) , option to set the minimum percentage probability for nominating a detected object for output.
                    * nms_threshold (optional, o.45 by default) , option to set the Non-maximum suppression for the detection
                    * display_percentage_probability (optional, True by default), option to show or hide the percentage probability of each object in the saved/returned detected image
                    * display_display_object_name (optional, True by default), option to show or hide the name of each object in the saved/returned detected image
                    * thread_safe (optional, False by default), enforce the loaded detection model works across all threads if set to true, made possible by forcing all Keras inference to run on the default graph
    """
    height, width, channel = img.shape
    data = {
            "image": url[:8],
            "labels": {
                "fire": []
            }
        }
    
    for detection in detections:
        [x1,y1,x2, y2] = detection["box_points"]
        box_height = abs(y1-y2)
        box_width = abs(x1-x2)
        
        top_ratio = round(y1/height, 2)
        bot_ratio = round((height-y2)/height, 2)
        l_ratio = round(x1/width, 2)
        r_ratio = round((width-x2)/width, 2)
        
        data['labels']['fire'].append({
            "top": top_ratio,
            "bottom": bot_ratio,
            "left": l_ratio,
            "right": r_ratio
        })


    print(data)
    res = requests.post('https://apigateway.hwangsehyun.com/smartcity/sns', json=data)
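For the more common file-in, file-out mode the docstring describes, the call reduces to the sketch below; the filenames are hypothetical and `detector` is assumed to be an already-loaded ImageAI ObjectDetection instance:

detections = detector.detectObjectsFromImage(
    input_image="scene.jpg",                 # hypothetical input file
    output_image_path="scene_annotated.jpg", # boxes and labels drawn here
    minimum_percentage_probability=30)
for d in detections:
    print(d["name"], d["percentage_probability"], d["box_points"])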
Example #10
def thinning(img):
    size = np.size(img)
    skel = np.zeros(img.shape, np.uint8)
    ret, img = cv2.threshold(img, 127, 255, 0)
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    done = False

    while not done:
        eroded = cv2.erode(img, element)
        temp = cv2.dilate(eroded, element)
        temp = cv2.subtract(img, temp)
        skel = cv2.bitwise_or(skel, temp)
        img = eroded.copy()
        # stop once the image has been eroded away completely
        done = (size - cv2.countNonZero(img)) == size

    cv2.imwrite("thinning.png", skel)
    cv2.imshow("skel", skel)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
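If the opencv-contrib package is installed, the same skeletonization is available as a single call (a sketch; cv2.ximgproc ships with opencv-contrib-python, not the base wheel, and the filename is hypothetical):

import cv2

img = cv2.imread("shape.png", cv2.IMREAD_GRAYSCALE)
skel = cv2.ximgproc.thinning(img)  # Zhang-Suen thinning by default
cv2.imwrite("thinning.png", skel)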
Example #11
import numpy as np
import cv2

img = cv2.imread('emoji.png')
cv2.imshow('emoji',img)

cv2.waitKey(0)
cv2.destroyAllWindows()
Example #12
def rgb2hsv(img, dim=0):
    if torch.is_tensor(img):
        log(f'Image tensor size is {img.size()}')
    else:
        log("This function can only handle PyTorch tensors!")
        return img
    # img = img * 0.5 + 0.5
    # hue = torch.Tensor(img.shape[0], img.shape[2], img.shape[3]).to(img.device)

    # hue[ img[:,2]==img.max(1)[0] ] = 4.0 + ( (img[:,0]-img[:,1]) / ( img.max(1)[0] - img.min(1)[0] + 1e-7) ) [ img[:,2]==img.max(1)[0] ]
    # hue[ img[:,1]==img.max(1)[0] ] = 2.0 + ( (img[:,2]-img[:,0]) / ( img.max(1)[0] - img.min(1)[0] + 1e-7) ) [ img[:,1]==img.max(1)[0] ]
    # hue[ img[:,0]==img.max(1)[0] ] = (0.0 + ( (img[:,1]-img[:,2]) / ( img.max(1)[0] - img.min(1)[0] + 1e-7) ) [ img[:,0]==img.max(1)[0] ]) % 6

    # hue[img.min(1)[0]==img.max(1)[0]] = 0.0
    # hue = hue/6

    # saturation = ( img.max(1)[0] - img.min(1)[0] ) / ( img.max(1)[0] + 1e-7 )
    # saturation[ img.max(1)[0]==0 ] = 0

    # value = img.max(1)[0]
    # img_hsv = torch.cat([hue.view(1,1,400,600),saturation.view(1,1,400,600),value.view(1,1,400,600)], dim=dim)
    # return img_hsv
    r, g, b = img.split(1, dim=dim)
    gap = 1. / 6.
    H = torch.zeros_like(r)
    S = torch.zeros_like(r)
    tensor_max = torch.max(torch.max(r, g), b)
    tensor_min = torch.min(torch.min(r, g), b)
    delta = tensor_max - tensor_min + 1e-7  # epsilon keeps the divisions below finite
    V = tensor_max
    g_b = (g >= b)
    b_g = ~g_b
    H_r_g = ((g - b) / delta) * gap
    H_r_b = ((g - b) / delta) * gap + 1.
    H_b = ((b - r) / delta) * gap + 2 * gap
    H_g = ((r - g) / delta) * gap + 4 * gap
    H_test = H.numpy().transpose(1, 2, 0)  # debug: show the hue channel built so far
    cv2.imshow('hsv1', H_test)
    cv2.waitKey(0)
    cv2.destroyWindow('hsv1')
    log(tensor_max == r)
    log(tensor_max == g)
    log(tensor_max == b)
    log(g_b)
    log(b_g)
    H = torch.where((tensor_max == r) & g_b, H, H_r_g)
    H_test = H.numpy().transpose(1, 2, 0)
    cv2.imshow('hsv1', H_test)
    cv2.waitKey(0)
    cv2.destroyWindow('hsv1')
    H = torch.where((tensor_max == r) & b_g, H, H_r_b)
    H_test = H.numpy().transpose(1, 2, 0)
    cv2.imshow('hsv1', H_test)
    cv2.waitKey(0)
    cv2.destroyWindow('hsv1')
    H = torch.where(tensor_max == g, H_b, H)
    H_test = H.numpy().transpose(1, 2, 0)
    cv2.imshow('hsv1', H_test)
    cv2.waitKey(0)
    cv2.destroyWindow('hsv1')
    H = torch.where(tensor_max == b, H_g, H)
    H_test = H.numpy().transpose(1, 2, 0)
    cv2.imshow('hsv1', H_test)
    cv2.waitKey(0)
    cv2.destroyWindow('hsv1')
    S = torch.where(tensor_max != 0, delta / tensor_max, S)
    V = tensor_max
    img_hsv = torch.cat([H, S, V], dim=dim)
    return img_hsv
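The torch.where calls above rely on torch.where(cond, a, b) taking elements from a where cond holds and from b elsewhere, so each channel's formula has to be the second argument; a quick check:

import torch

cond = torch.tensor([True, False])
print(torch.where(cond, torch.tensor([1, 1]), torch.tensor([2, 2])))
# tensor([1, 2])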
Example #13
def ferme(o, titre='oeuil'):
    cv2.destroyWindow(titre)
Example #14
#!/usr/bin/env python3

import cv2
import numpy as np

#img=np.zeros([512,512,3])
#print (img)

img = cv2.imread('/home/rishabh/Downloads/superthumb.jpg', 1)

newimg = img[200:300, 230:430]

#display
#cv2.imwrite('/home/rishabh/Downloads/superthumb.jpg',newimg)

cv2.imshow('imageviewer', newimg)
cv2.waitKey(0)

#to destroy one named window
#cv2.destroyWindow('myownimage')
cv2.destroyWindow('imageviewer')
Example #15
def main():

    model = cv2.CascadeClassifier(CASCADESFILE)
    modelesmile = cv2.CascadeClassifier(SMILEFILE)

    webcam = cv2.VideoCapture(0)

    # infinite image processing loop
    while True:

        if not webcam.isOpened():
            logging.warning('Unable to connect to camera.')
            time.sleep(5)
            continue

        # get image from camera
        ret, frame = webcam.read()
        if not ret:
            break

        # convert image to grayscale
        grayframe = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # add blur
        grayframe = cv2.GaussianBlur(grayframe, (21, 21), 0)

        # detect faces
        faces = model.detectMultiScale(grayframe,
                                       scaleFactor=1.1,
                                       minNeighbors=5,
                                       minSize=(30, 30))
        smile = modelesmile.detectMultiScale(grayframe,
                                             scaleFactor=1.1,
                                             minNeighbors=5,
                                             minSize=(10, 10))

        # adding non_max_suppression to avoid overlapping
        faces = non_max_suppression(faces, probs=None, overlapThresh=0.65)
        smile = non_max_suppression(smile, probs=None, overlapThresh=0.65)

        logging.info(f'Detected faces: {len(faces)}')

        # add boxes
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        for (x, y, w, h) in smile:
            # keep a smile only if it lies inside at least one detected face
            if len(faces) and any(
                    (x1 <= x <= x + w <= x1 + w1) and
                    (y1 <= y <= y + h <= y1 + h1)
                    for (x1, y1, w1, h1) in faces):
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # show image
        cv2.imshow('Video', frame)

        # stop if user presses 'q'
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # close everything
    webcam.release()
    cv2.destroyAllWindows()
Example #16
import cv2 as cv

img = cv.imread('lenna.png', cv.IMREAD_COLOR)
if img is None:
    print("이미지 파일을 읽을 수 없습니다.")
    exit(1)

roi = img[10:100, 10:100]
cv.imshow("roi", roi)

img[10:100, 10:100] = 0
cv.imshow('img', img)

cv.waitKey(0)
cv.destroyAllWindows()
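Note that roi above is a view into img's memory, so the img[10:100, 10:100] = 0 write also zeroes a roi that is kept around for later use; slicing with .copy() decouples the two:

import cv2 as cv

img = cv.imread('lenna.png', cv.IMREAD_COLOR)
roi = img[10:100, 10:100].copy()  # independent pixels
img[10:100, 10:100] = 0           # roi keeps the original content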
Example #17
import cv2
import numpy as np

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')

cap = cv2.VideoCapture(0)

while True:
    ret, img = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        # look for eyes only inside the detected face region
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)

    cv2.imshow('img', img)
    k = cv2.waitKey(30) & 0xFF
    if k == 27:  # Esc
        break

cap.release()
cv2.destroyAllWindows()
Example #18
import cv2
from PIL import Image
casc_path = 'C:\\Users\\user\\Anaconda3\\Lib\\site-packages\\cv2\\data\\haarcascade_frontalface_default.xml'
faceCascade = cv2.CascadeClassifier(casc_path)
imagename = 'Nikola_Tesla.jpg'
image = cv2.imread(imagename)
faces = faceCascade.detectMultiScale(image, scaleFactor=1.1, minNeighbors=5,
                                     minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)
count = 1
for (x, y, w, h) in faces:
    cv2.rectangle(image, (x, y), (x + w, y + h), (128, 255, 0), 2)
    filename = 'face' + str(count) + '.jpg'
    image1 = Image.open(imagename)
    image2 = image1.crop((x, y, x + w, y + h))
    image3 = image2.resize((200, 200), Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10
    image3.save(filename)
    count += 1
cv2.namedWindow('facedetect')
cv2.imshow('facedetect', image)
cv2.waitKey(0)
cv2.destroyWindow('facedetect')
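Since image is already a NumPy array, the Pillow round-trip per face can be skipped; a sketch of the same crop-and-resize loop using cv2 alone (faces as returned by detectMultiScale above):

import cv2

image = cv2.imread('Nikola_Tesla.jpg')
count = 1
for (x, y, w, h) in faces:
    face = image[y:y + h, x:x + w]  # NumPy slicing crops the BGR array
    face = cv2.resize(face, (200, 200), interpolation=cv2.INTER_AREA)
    cv2.imwrite('face' + str(count) + '.jpg', face)
    count += 1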
Example #19
import cv2
from datetime import datetime


def farkImaj(t0, t1, t2):
    # motion shows up where two consecutive frame differences overlap
    fark1 = cv2.absdiff(t2, t1)
    fark2 = cv2.absdiff(t1, t0)
    return cv2.bitwise_and(fark1, fark2)


esik_deger = 140000
kamera = cv2.VideoCapture(0)

pencereIsmi = "Hareket Algilayici"
cv2.namedWindow(pencereIsmi)

t_eksi = cv2.cvtColor(kamera.read()[1], cv2.COLOR_BGR2GRAY)
t = cv2.cvtColor(kamera.read()[1], cv2.COLOR_BGR2GRAY)
t_arti = cv2.cvtColor(kamera.read()[1], cv2.COLOR_BGR2GRAY)

zamanKontrol = datetime.now().strftime('%Ss')

while True:
    cv2.imshow(pencereIsmi, kamera.read()[1])
    if cv2.countNonZero(farkImaj(
            t_eksi, t, t_arti)) > esik_deger and zamanKontrol != datetime.now(
            ).strftime('%Ss'):
        fark_resim = kamera.read()[1]
        cv2.imwrite(datetime.now().strftime('%Y%m%d_%Hh%Mm%Ss%f') +
                    '.jpg', fark_resim)
    zamanKontrol = datetime.now().strftime('%Ss')
    t_eksi = t
    t = t_arti
    t_arti = cv2.cvtColor(kamera.read()[1], cv2.COLOR_BGR2GRAY)
    key = cv2.waitKey(10)
    if key == 27:
        cv2.destroyWindow(pencereIsmi)
        break