Example no. 1
import glob
import os

import cv2


def images_to_video(vout, image_dir, clear_images=True):
    # Append every image in image_dir to the open VideoWriter, oldest first.
    image_list = glob.glob(f'{image_dir}/*.jpg')
    sorted_images = sorted(image_list, key=os.path.getmtime)
    for file in sorted_images:
        image_frame = cv2.imread(file)
        vout.write(image_frame)
    if clear_images:
        for file in image_list:
            os.remove(file)
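A minimal usage sketch for the function above; the codec, fps, frame size, and folder name are assumptions and must match the images being stitched:

fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # hypothetical codec choice
vout = cv2.VideoWriter('timelapse.mp4', fourcc, 30.0, (1920, 1080))
images_to_video(vout, 'timelapse_images')
vout.release()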
Example no. 2
import glob
import os

import cv2

# cm and preprocess are project-local helper modules


def load_imgs():
    global imgs
    global wheels

    for p in purposes:
        for epoch_id in epochs[p]:

            print(
                'processing and loading "{}" epoch {} into memory, current num of imgs is {}...'
                .format(p, epoch_id, len(imgs[p])))

            # vid_path = cm.jn(data_dir, 'epoch{:0>2}_front.mkv'.format(epoch_id))
            data_dir = os.path.abspath('training_images{}'.format(epoch_id))

            assert os.path.isdir(data_dir)

            print("upload images from:", data_dir)

            # csv_path = cm.jn(data_dir, 'epoch{:0>2}_steering.csv'.format(epoch_id))
            csv_path = os.path.abspath('out-key{}.csv'.format(epoch_id))
            assert os.path.isfile(csv_path)

            print("upload ground truth from:", csv_path)
            rows = cm.fetch_csv_data(csv_path)
            print(len(rows), frame_count)
            assert frame_count == len(rows)
            yy = [[float(row['wheel'])] for row in rows]

            for image in image_files:
                img = cv2.imread(image)
                img = preprocess.preprocess(img)
                imgs[p].append(img)

            wheels[p].extend(yy)
            assert len(imgs[p]) == len(wheels[p])

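The helper cm.fetch_csv_data is not shown; a plausible stand-in built on csv.DictReader (load_imgs indexes each row by a 'wheel' column), offered only as a sketch:

import csv

def fetch_csv_data(csv_path):
    # return the steering log as a list of dicts keyed by the CSV header,
    # matching the row['wheel'] access in load_imgs()
    with open(csv_path, newline='') as f:
        return list(csv.DictReader(f))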
Example no. 3
import cv2
import numpy as np
import math

img = cv2.read("image.jpg")  # take image as input
gray = cv2.cvtColor(img,COLOR_BGR2GRAY) # Convert image to grayscale


edges = cv2.Canny(gray,50,150, apertureSize = 3) #Apply Canny edge detector method on the grayscale image
lines = cv2.HoughLines(edges,1,np.pi/180,200)


print("Dip of the lines in the image")
sum=0
for i in range(0,len(lines)):
    for rho,thetha in lines[i]:
        sum+= np.pi/2- thetha
        print(np.pi/2-thetha)

print("Average value of dip is:" sum/len(lines))

num = len(lines)
x,y,dim = img.shape
dip = sum/num # average dip value in radian
m = num/(num+1)
'''
**** Persistence length ****
Persistence of discontinuities is one of the most important rock mass
parameters because, together with spacing, it defines the size of the blocks
that can slide from the face.
'''
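In symbols, the loop above computes the average dip from the Hough normal angles theta_i, and m is the ratio formed from the line count (a restatement of the code, not an addition to it):

\bar{\delta} = \frac{1}{N} \sum_{i=1}^{N} \left( \frac{\pi}{2} - \theta_i \right), \qquad m = \frac{N}{N+1}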
Example no. 4
def main():
    """main function"""

    def process_frame_pair(frames, color) -> bool:
        left = frames[0]
        right = frames[1]

        # make sure images are the same size
        assert left.shape == right.shape

        # get depth map with rectified images
        depth_map = get_depth_map(left, right, stereo)

        # plot image with depth
        if POINTCLOUD:
            plot_image(pc, np.rot90(color, -1), np.rot90(depth_map, -1) * (1.0 / POINT_CLOUD_SCALE), POINT_CLOUD_SCALE, step=1)

        grad_x, grad_y = np.gradient(cv2.GaussianBlur(depth_map, (25, 25), 25.0) * 33.3, 1)
        direction, points = get_direction(depth_map, grad_x, grad_y, 250, 1.0)

        debug = np.zeros(depth_map.shape)
        for point in points:
            cv2.circle(debug, (int(point[0]), int(point[1])), 1, (255, 255, 255), 1)

        if cv2.waitKey(1) & 0xFF != ord('q') and (not POINTCLOUD or pc.render()):
            cv2.imshow('left', rectified_left)
            cv2.imshow('depth', depth_map)
            cv2.imshow('debug', debug)

        return False

    # object to compute disparity map
    stereo: cv2.StereoBM = cv2.StereoBM_create(constants.NUM_DISPARITIES, constants.BLOCK_SIZE)

    # custom point cloud renderer
    if POINTCLOUD:
        pc: point_cloud.PointCloud = point_cloud.PointCloud("3D Representation", 2.0)

    # load saved calibration
    calibration = np.load("test_calibration.npz")

    img_size = calibration["img_size"]
    left_map_x = calibration["left_map_x"]
    left_map_y = calibration["left_map_y"]

    right_map_x = calibration["right_map_x"]
    right_map_y = calibration["right_map_y"]

    # noinspection PyUnboundLocalVariable
    frames = 0
    durations = []
    frames_per_second = []
    while True:  # not cameras or (cameras and capture_l.isOpened() and cameras and capture_r.isOpened()):
        pre = time.time()

        # save colorized image to render point cloud
        colorized = cv2.imread('tests/test_left.png') / 255.0
        img_left = cv2.imread('tests/test_left.png', cv2.IMREAD_GRAYSCALE)
        img_right = cv2.imread('tests/test_right.png', cv2.IMREAD_GRAYSCALE)

        # remap images to align normals of image planes
        rectified_left = cv2.remap(img_left, left_map_x, left_map_y, REMAP_INTERPOLATION)
        rectified_right = cv2.remap(img_right, right_map_x, right_map_y, REMAP_INTERPOLATION)

        if DEBUG:
            rectified_left = img_left
            rectified_right = img_right

        if process_frame_pair((rectified_left, rectified_right), colorized):
            break

        durations.append(dur := round(time.time() - pre, 3))
        frames_per_second.append(fps := round(1.0 / dur, 3))
        print(
            "\rProcessed Frame: {frames} (took {dur:<5} ms, avg: {avg:<7} | fps: {fps:<7}, avg fps: {avg_fps:<7})".format(
                frames=(frames := frames + 1),
                dur=dur,
                avg=round(float(np.mean(durations)), 3),
                fps=fps,
                avg_fps=round(float(np.mean([1.0 / x for x in durations])), 3)),
            end='')

    cv2.destroyAllWindows()
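get_depth_map is not defined in the excerpt; a minimal sketch, assuming it simply wraps the StereoBM matcher created above (StereoBM returns 16.4 fixed-point disparities, hence the division by 16; this yields a normalized disparity image rather than metric depth):

def get_depth_map(left, right, stereo):
    # compute disparities and scale the fixed-point result to float pixels
    disparity = stereo.compute(left, right).astype(np.float32) / 16.0
    # normalize to [0, 1] so cv2.imshow can display it directly
    return cv2.normalize(disparity, None, 0.0, 1.0, cv2.NORM_MINMAX)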
Example no. 5
import cv2
import time
cam = cv2.VideoCapture(0)
start = time.time()
while True:
    ret, frame = cam.read()
    # after five seconds, show the feed flipped both horizontally and vertically
    if time.time() - start >= 5:
        flipped = cv2.flip(frame, -1)
        cv2.imshow('win', flipped)
    else:
        cv2.imshow('win', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cam.release()
cv2.destroyAllWindows()
Example no. 6
import cv2 as cv
import numpy as np

cap = cv.VideoCapture(0)
ret, first_frame = cap.read()
prev_gray=cv.cvtColor(first_frame,cv.COLOR_BGR2GRAY)

mask = np.zeros_like(first_frame)
mask[..., 1]=255
while (cap.isOpened()):
    ret, frame = cap.read()
    if not ret:
        break
    cv.imshow('input', frame)
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    flow= cv.calcOpticalFlowFarneback(prev_gray,
                                      gray,
                                      None,
                                      0.5,3,15,3,5,1.2,0)
    magn, angle = cv.cartToPolar(flow[..., 0], flow[..., 1])
    mask[..., 0] = angle * 180 / np.pi / 2  # hue encodes flow direction
    mask[..., 2] = cv.normalize(magn, None, 0, 255, cv.NORM_MINMAX)
    rgb = cv.cvtColor(mask, cv.COLOR_HSV2RGB)
    cv.imshow("Dense Optical Flow",rgb)
    prev_gray = gray
    if cv.waitKey(30) & 0xFF == ord("q"):
        break
cap.release()
cv.destroyAllWindows()
Example no. 7
import cv2

cap = cv2.VideoCapture(0)

# Create the haar cascade
faceCascade = cv2.CascadeClassifier("../model/haarcascade_frontalface_alt.xml")
img_path = ''

while (True):
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect faces in the image
    faces = faceCascade.detectMultiScale(gray,
                                         scaleFactor=1.1,
                                         minNeighbors=5,
                                         minSize=(30, 30)
                                         #flags = cv2.CV_HAAR_SCALE_IMAGE
                                         )

    print("Found {0} faces!".format(len(faces)))

    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    # Display the resulting frame
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
Example no. 8
# prerequisites: 1_basics.py

import cv2

img = cv2.imread("imageProcessing.jpg")
height,width=img.shape[0:2]

# ------ resize Image ------

# resize(src, dsize, fx, fy)
# src = source image
# dsize = desired size of the output image
# fx = scaling factor along the x-axis
# fy = scaling factor along the y-axis

# method-1
resizeImg=cv2.resize(img,(0,0),fx=1.75,fy=1.75)

# method-2
resizeImg1=cv2.resize(img,(550,350))

cv2.imshow('resized image',resizeImg)
cv2.imshow('resized image 1',resizeImg1)

cv2.waitKey(0)
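The height and width read at the top are otherwise unused; a small follow-up sketch that uses them to build an exact dsize while preserving the aspect ratio (the 0.5 scale is a hypothetical choice; note dsize is given as (width, height)):

# method-3: compute dsize from the original shape to keep the aspect ratio
scale = 0.5
resizeImg2 = cv2.resize(img, (int(width * scale), int(height * scale)))
cv2.imshow('resized image 2', resizeImg2)
cv2.waitKey(0)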
Example no. 9
import os

import cv2

# dimensions to crop the image frame
y1 = 0
x1 = 0
y2 = 500
x2 = 500

# output folder and frame counter for the saved crops (names assumed)
output = "cropped_frames"
os.makedirs(output, exist_ok=True)
count = 0

# read video feed from any USB camera
feed = cv2.VideoCapture(0)  # 0 denotes the built-in webcam

if not feed.isOpened():
    raise IOError("Webcam cannot be opened")

while True:

    # Extract frames
    ret, frame = feed.read()

    # show run-time video feed on console
    cv2.imshow("input", frame)

    # crop image to user-defined size
    cropped_image = frame[y1:y2, x1:x2]

    # write frame to the local system
    cv2.imwrite(output + "/%#05d.jpg" % (count + 1), cropped_image)

    # get path of the saved frame
    path = output + "/%#05d.jpg" % (count + 1)

    count = count + 1

    # imshow only renders when waitKey runs; also gives the loop an exit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

feed.release()
cv2.destroyAllWindows()
Example no. 10
import base64
import csv
import socket
import time
from datetime import datetime
from itertools import tee, chain, islice

import cv2
import numpy as np
import tensorflow as tf

import detect_and_align
# load_model, IdData, and the IP_SERVER/PORT_SERVER/TIMEOUT_SOCKET and
# IMAGE_HEIGHT/IMAGE_WIDTH/COLOR_PIXEL constants come from the surrounding
# project and are not shown here


def main(args):
    print('################################################################################')
    my_style_data=[]
    with tf.Graph().as_default():
        with tf.Session() as sess:

            # Setup models
            mtcnn = detect_and_align.create_mtcnn(sess, None) #It calls create_mtcnn function from the detect_and_align file 

            load_model(args.model) #IT loads the facenet 20170512-110547.pb pre-trained model
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            # Load anchor IDs
            id_data = IdData(args.id_folder[0], mtcnn, sess, embeddings, images_placeholder, phase_train_placeholder, args.threshold)
            #url ='rtsp://192.168.137.135:4747/video'

            #cap = cv2.VideoCapture(0)
            connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            connection.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            connection.settimeout(TIMEOUT_SOCKET)
            connection.connect((IP_SERVER, PORT_SERVER))

            while True:
                try:
                    fileDescriptor = connection.makefile(mode='rb')
                    result = fileDescriptor.readline()
                    fileDescriptor.close()
                    result = base64.b64decode(result)
                    frame = np.frombuffer(result, dtype=np.uint8)  # np.fromstring is deprecated
                    frame_matrix = np.array(frame)
                    frame_matrix = np.reshape(frame_matrix, (IMAGE_HEIGHT, IMAGE_WIDTH, COLOR_PIXEL))
                    cv2.imshow('Window title', frame_matrix)
                    cv2.waitKey(1)
            

                    frame_height = frame_matrix.shape[0]  # frame_matrix is an ndarray, not a capture


                    show_landmarks = True
                    show_bb = True
                    show_id = True
                    show_fps = False
                    show_bb1 = True
                    while(True):
                        start = time.time()
                        v_offset = 50 
                        time.sleep(0.0001)
                        frame = frame_matrix  # the decoded socket frame; an ndarray has no .read()
                        frame1=frame

                        # Locate faces and landmarks in frame
                        face_patches, padded_bounding_boxes, landmarks = detect_and_align.detect_faces(frame, mtcnn)

                        if len(face_patches) > 0:
                            face_patches = np.stack(face_patches)
                            feed_dict = {images_placeholder: face_patches, phase_train_placeholder: False}
                            embs = sess.run(embeddings, feed_dict=feed_dict)

                            print('Matches in frame:')
                            matching_ids, matching_distances = id_data.find_matching_ids(embs)

                            for bb, landmark, matching_id, dist in zip(padded_bounding_boxes, landmarks, matching_ids, matching_distances):
                                if matching_id is None:
                                    matching_id = 'Unknown'
                                    print('Unknown! Couldn\'t find a match.')
                                else:
                                    #if(int(bb[0])<=170 and int(bb[3])<=357 and int(bb[2])<=437 and int(bb[1])<=164):
                                    print('Hi akki %s! Distance: %1.4f' % (matching_id, dist))
                                    now=datetime.now()
                                    
                                    #csvData = [matching_id, dist,now.strftime("%x %I:%M:%S %p")]
                                              
                                    '''with open('C:/myproject/Phase2-Copy_FaceRecognition-master/Student4.csv', 'a') as csvFile:
                                        writer = csv.writer(csvFile)
                                        writer.writerow(csvData)

                                    csvFile.close()'''

                                    


                                if show_id:                            
                                    font = cv2.FONT_HERSHEY_SIMPLEX
                                    print("bb_Frame",bb)
                                    if(210<int(bb[0])<350 and 150<int(bb[1])<250 and 300<int(bb[2])<460 and 310<int(bb[3])<450):

                                        welcome=" Welcome to Infogen labs"
                                        cv2.putText(frame,matching_id+ welcome, (0, 50), font, 1, (0,0,255), 2, cv2.LINE_AA)
                                        cv2.putText(frame,matching_id+now.strftime(" %I:%M%p"), (bb[0], bb[3]), font, 1, (0,0,255), 2, cv2.LINE_AA)
                                        #csvData = [matching_id, dist,now.strftime("%x  %I:%M:%S %p")]
                                       

                                        def previous_and_next(some_iterable):
                                            prevs, items, nexts = tee(some_iterable, 3)
                                            prevs = chain([None], prevs)
                                            nexts = chain(islice(nexts, 1, None), [None])
                                            return zip(prevs, items, nexts)
                                        csvData = [matching_id, dist,now.strftime("%x %I:%M:%S %p")]
                                        with open('C:/myproject/1Copy_FaceRecognition-master - Copy/Student5.csv', 'a') as csvFile1:
                                            writer = csv.writer(csvFile1)
                                            writer.writerow(csvData)
                                        csvFile1.close()
                                        my_style_data.append(csvData)
                                        print('******************************************************************************************')
                                        print(my_style_data)
                                        print('******************************************************************************************')
                                        if len(my_style_data) >=7:
                                            for prevs, item, nxt in previous_and_next(my_style_data):
                                                try:
                                                    if prevs[0] != item[0] or item[0] != nxt[0]:
                                                        
                                                        with open('C:/myproject/1Copy_FaceRecognition-master - Copy/Employee6.csv', 'a') as csvFile:
                                                            writer = csv.writer(csvFile)
                                                            
                                                            writer.writerow(item)
                                                        csvFile.close()
                                                except: pass
                                            del(my_style_data[0:7])




                                        '''if(matching_id=="Akshay"):
                                            with open('C:/myproject/Phase2-Copy_FaceRecognition-master/Akshay.csv', 'a') as csvFile:
                                                writer = csv.writer(csvFile)
                                                writer.writerow(csvData)
                                            csvFile.close()
                                        if(matching_id=="Ajinkya"):
                                            with open('C:/myproject/Phase2-Copy_FaceRecognition-master/Ajinkya.csv', 'a') as csvFile:
                                                writer = csv.writer(csvFile)
                                                writer.writerow(csvData)
                                            csvFile.close()'''



                                if show_bb:
                                    cv2.rectangle(frame, (bb[0], bb[1]), (bb[2], bb[3]), (255, 0, 0), 1)
                                if show_bb1:
                                    cv2.rectangle(frame1, (261,174),(457,380), (255,0,255),2)
                                if show_landmarks:
                                    for j in range(5):
                                        size = 1
                                        top_left = (int(landmark[j]) - size, int(landmark[j + 5]) - size)
                                        bottom_right = (int(landmark[j]) + size, int(landmark[j + 5]) + size)
                                        cv2.rectangle(frame, top_left, bottom_right, (255, 0, 255), 2)
                        else:
                            print('Couldn\'t find a face')

                        

                        

                        cv2.imshow('frame', frame)

                        key = cv2.waitKey(100)
                        if key == ord('q'):
                            break
                        elif key == ord('l'):
                            show_landmarks = not show_landmarks
                        elif key == ord('b'):
                            show_bb = not show_bb
                        elif key == ord('i'):
                            show_id = not show_id
                        elif key == ord('f'):
                            show_fps = not show_fps
                    cv2.destroyAllWindows()
                except Exception:
                    # the try opened at the top of the outer loop had no except;
                    # stop cleanly on any socket or decode error
                    connection.close()
                    break
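The previous_and_next helper defined inline above is the heart of the CSV de-duplication: it turns a sequence into (previous, current, next) windows via itertools. A standalone demonstration of the pattern:

from itertools import tee, chain, islice

def previous_and_next(some_iterable):
    prevs, items, nexts = tee(some_iterable, 3)
    prevs = chain([None], prevs)
    nexts = chain(islice(nexts, 1, None), [None])
    return zip(prevs, items, nexts)

for prev, item, nxt in previous_and_next(['A', 'A', 'B']):
    print(prev, item, nxt)
# prints (None, 'A', 'A'), then ('A', 'A', 'B'), then ('A', 'B', None),
# so a row whose id differs from a neighbour's is easy to single out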
Example no. 11
from tkinter import *

import cv2
import mysql.connector
from PIL import Image, ImageTk


class Face_recog:
    """ this is for screen"""
    def __init__(self,root):
        self.root=root
        self.root.configure(bg='black')
        self.root.geometry("1500x770+0+0")
        self.root.title("Attendance management system")

    #title label
        title_lbl=Label(self.root, text="Face Detector",
        font=("times new roman",40,"bold","italic"),
        bg="white",
        fg="purple"
        )

        title_lbl.place(x=0,y=0,width=1500,height=45)

        img=Image.open(r"C:\Users\ANCHAL\Desktop\dice_scrolling\face_detection_img\img1.jpg")
        img=img.resize((500,150),Image.ANTIALIAS) # ANTIALIAS gives a high-quality downscaled image
        self.photoimg=ImageTk.PhotoImage(img)



        f_lbl=Label(self.root, image=self.photoimg)
        f_lbl.place(x=0,y=45,width=500, height=150)


       


        img1=Image.open(r"C:\Users\ANCHAL\Desktop\dice_scrolling\face_detection_img\images2.jpg")
        img1=img1.resize((500,150),Image.ANTIALIAS) # ANTIALIAS gives a high-quality downscaled image
        self.photoimg1=ImageTk.PhotoImage(img1)



        f_lbl=Label(self.root, image=self.photoimg1)
        f_lbl.place(x=500,y=45,width=500, height=150)

        img2=Image.open(r"C:\Users\ANCHAL\Desktop\dice_scrolling\face_detection_img\img1.jpg")
        img2=img2.resize((500,150),Image.ANTIALIAS) # ANTIALIAS gives a high-quality downscaled image
        self.photoimg2=ImageTk.PhotoImage(img2)



        f_lbl=Label(self.root, image=self.photoimg2)
        f_lbl.place(x=1000,y=45,width=500, height=150)


        img6=Image.open(r"C:\Users\ANCHAL\Desktop\face_recognition\face_detection_img\circle.jpg")
        img6=img6.resize((500,700),Image.ANTIALIAS) # ANTIALIAS gives a high-quality downscaled image
        self.photoimg6=ImageTk.PhotoImage(img6)

        f_lbl=Label(self.root, image=self.photoimg6)
        f_lbl.place(x=500,y=300,width=500, height=300)

        detect_btn=Button(self.root,text="Detect Face",command=self.face_detect,font=("times new roman",25,"bold"),bg="black",fg="white",width=10)
        detect_btn.place(x=640,y=430)



    # face_recognition

    def face_detect(self):

        def getProfile(id):
            # fetch this person's record; the table name "people" is an assumption
            conn = mysql.connector.connect(host="localhost", username="******", password="******", database="face_recognition")
            my_cursor = conn.cursor()
            my_cursor.execute("SELECT * FROM people WHERE id = %s", (id,))
            profile = my_cursor.fetchone()
            conn.close()
            return profile

        faceDetect = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
        cam = cv2.VideoCapture(0)
        # cv2.createLBPHFaceRecognizer and the cv2.cv font API were removed;
        # the modern equivalents (cv2.face needs opencv-contrib-python) are used below
        rec = cv2.face.LBPHFaceRecognizer_create()
        rec.read("classifier.xml")
        font = cv2.FONT_HERSHEY_COMPLEX

        while True:
            ret, img = cam.read()
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = faceDetect.detectMultiScale(gray, 1.3, 5)
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
                id, conf = rec.predict(gray[y:y + h, x:x + w])
                profile = getProfile(id)
                if profile is not None:
                    cv2.putText(img, "Name : " + str(profile[1]), (x, y + h + 20), font, 0.8, (0, 255, 0), 1)
                    cv2.putText(img, "Age : " + str(profile[2]), (x, y + h + 45), font, 0.8, (0, 255, 0), 1)
                    cv2.putText(img, "Gender : " + str(profile[3]), (x, y + h + 70), font, 0.8, (0, 255, 0), 1)
                    cv2.putText(img, "Criminal Records : " + str(profile[4]), (x, y + h + 95), font, 0.8, (0, 0, 255), 1)
            cv2.imshow("Face", img)
            if cv2.waitKey(1) == ord('q'):
                break
        cam.release()
        cv2.destroyAllWindows()
Example no. 12
import cv2
import numpy as np
import pytesseract
from pytesseract import Output


def convert(image):
    i = cv2.imread(image)
    gray_image = cv2.cvtColor(i, cv2.COLOR_BGR2GRAY)
    threshold_img = cv2.threshold(gray_image, 0, 255,
                                  cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    kernel = np.ones((1, 1), np.uint8)
    threshold_img = cv2.dilate(threshold_img, kernel, iterations=1)
    threshold_img = cv2.erode(threshold_img, kernel, iterations=1)

    # configuring parameters for tesseract
    custom_config = r'--oem 3 --psm 6'

    # now feeding image to tesseract

    details = pytesseract.image_to_data(threshold_img,
                                        output_type=Output.DICT,
                                        config=custom_config)

    print(details.keys())
    total_boxes = len(details['text'])

    for sequence_number in range(total_boxes):

        if int(details['conf'][sequence_number]) > 5:
            (x, y, w, h) = (details['left'][sequence_number],
                            details['top'][sequence_number],
                            details['width'][sequence_number],
                            details['height'][sequence_number])
            threshold_img = cv2.rectangle(threshold_img, (x, y),
                                          (x + w, y + h), (0, 255, 0), 2)

    # display image

    # now feeding image to tesseract

    details = pytesseract.image_to_data(threshold_img,
                                        output_type=Output.DICT,
                                        config=custom_config)
    parse_text = []

    word_list = []

    last_word = ''

    for word in details['text']:

        if word != '':

            word_list.append(word)

            last_word = word

        if (last_word != '' and word == '') or (word == details['text'][-1]):

            parse_text.append(word_list)

            word_list = []
    import csv
    # saving the extracted text output to a txt file

    with open('result.txt', 'w', newline="") as file:

        csv.writer(file, delimiter=" ").writerows(parse_text)

    import pandas as pd
    # reading the txt file into a dataframe to convert to csv file
    df = pd.read_csv("result.txt", delimiter='\t')
    df.to_csv('result.csv')
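A hypothetical call for the function above (the filename is an assumption, and Tesseract itself must be installed and on PATH for pytesseract to run):

convert('scanned_page.jpg')  # writes result.txt, then converts it to result.csv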
Example no. 13
def create_pred_movie(conf, predList, moviename, outmovie, outtype, maxframes=-1):
    predLocs, predscores, predmaxscores = predList
    #     assert false, 'stop here'
    tdir = tempfile.mkdtemp()

    cap = cv2.VideoCapture(moviename)
    nframes = int(cap.get(cvc.FRAME_COUNT))
    if maxframes > 0:
        nframes = maxframes

    cmap = cm.get_cmap('jet')
    rgba = cmap(np.linspace(0, 1, conf.n_classes))

    fig = mpl.figure.Figure(figsize=(9, 4))
    canvas = FigureCanvasAgg(fig)

    if conf.adjustContrast:
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=conf.clahegridsize)
    else:
        clahe = None

    for curl in range(nframes):
        ret, framein = cap.read()
        framein = crop_images(framein, conf)
        if framein.shape[2] > 1:
            framein = framein[..., 0]

        if conf.adjustContrast:
            framein = clahe.apply(framein)

        fig.clf()
        ax1 = fig.add_subplot(1, 2, 1)
        ax1.imshow(framein, cmap=cm.gray)
        ax1.scatter(predLocs[curl, :, 0, 0], predLocs[curl, :, 0, 1],  # hold=True,
                    c=cm.hsv(np.linspace(0, 1 - old_div(1., conf.n_classes), conf.n_classes)),
                    s=np.clip(predmaxscores[curl, :, 0] * 100, 20, 40),
                    linewidths=0, edgecolors='face')
        ax1.axis('off')
        ax2 = fig.add_subplot(1, 2, 2)
        if outtype == 1:
            curpreds = predscores[curl, :, :, :, 0]
        elif outtype == 2:
            curpreds = predscores[curl, :, :, :, 0] * 2 - 1

        rgbim = create_pred_image(curpreds, conf.n_classes)
        ax2.imshow(rgbim)
        ax2.axis('off')

        fname = "test_{:06d}.png".format(curl)

        # to printout without X.
        # From: http://www.dalkescientific.com/writings/diary/archive/2005/04/23/matplotlib_without_gui.html
        # The size * the dpi gives the final image size
        #   a4"x4" image * 80 dpi ==> 320x320 pixel image
        canvas.print_figure(os.path.join(tdir, fname), dpi=160)

        # below is the easy way.
    #         plt.savefig(os.path.join(tdir,fname))

    tfilestr = os.path.join(tdir, 'test_*.png')
    mencoder_cmd = "mencoder mf://" + tfilestr + " -frames " + "{:d}".format(
        nframes) + " -mf type=png:fps=15 -o " + outmovie + " -ovc lavc -lavcopts vcodec=mpeg4:vbitrate=2000000"
    os.system(mencoder_cmd)
    cap.release()
Example no. 14
import numpy as np
import cv2
from matplotlib import pyplot as plt

img = cv2.imread('mylogo.png', 0)  # flag 0 loads the image as grayscale
plt.imshow(img, cmap='gray', interpolation="bicubic")
plt.xticks([]), plt.yticks([])
plt.show()
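Flag 0 loads grayscale, so cmap='gray' is correct here; for a color image, OpenCV's BGR channel order must be converted before matplotlib displays it. A small sketch:

img_color = cv2.imread('mylogo.png')  # BGR by default
plt.imshow(cv2.cvtColor(img_color, cv2.COLOR_BGR2RGB))
plt.xticks([]), plt.yticks([])
plt.show()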
Example no. 15
    def get_item(self, index: int):
        # -> Tuple[np.ndarray, np.ndarray]
        img = cv2.imread(self.dataset[index])
        target = self.dataset[index].split('_')
        target = self._category_to_class_index(target)
        return np.array(img), np.array([target])
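A minimal sketch of the class this method could belong to; self.dataset and the label scheme are assumptions (filenames like 'cat_001.jpg' whose leading token is the category):

import cv2
import numpy as np

class ImageDataset:
    def __init__(self, image_paths):
        self.dataset = image_paths  # list of image file paths

    def __len__(self):
        return len(self.dataset)

    def _category_to_class_index(self, parts):
        categories = ['cat', 'dog']  # hypothetical label set
        return categories.index(parts[0])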
Example no. 16
import cv2
import numpy as np 

face_cascade = cv2.CascadeClassifier(r'C:\opencv\sources\data\haarcascades_cuda\haarcascade_frontalface_alt.xml')

face_mask = cv2.imread(r'C:\Users\hp pc\Google Drive\opencv-practice\python_codes\images\find.jpg')
h_mask, w_mask = face_mask.shape[:2]

if face_cascade.empty():
    raise IOError('Unable to load the face cascade classifier xml file')

cap = cv2.VideoCapture(0)
scaling_factor = 0.5

while True:
    ret, frame = cap.read()
    frame = cv2.resize(frame, None, fx=scaling_factor, fy=scaling_factor, interpolation=cv2.INTER_AREA)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    face_rects = face_cascade.detectMultiScale(gray, 1.3, 2)

    for (x, y, w, h) in face_rects:
        if h > 0 and w > 0:
            h, w = int(1.4*h), int(1*w) 
            ##

            # extract the region of interest(roi) from the image
            frame_roi  = frame[y:y+h, x:x+w]
            face_mask_small = cv2.resize(face_mask, (w,h), interpolation = cv2.INTER_AREA)

            # convert color image to grayscale and threshold it
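            # -- hedged continuation sketch, not from the original source --
            # the usual next steps for this overlay technique: build a binary
            # mask from the grayscale mask image, cut the face shape out of
            # both images, and blend them; the threshold value 180 is an
            # assumption
            gray_mask = cv2.cvtColor(face_mask_small, cv2.COLOR_BGR2GRAY)
            ret, mask = cv2.threshold(gray_mask, 180, 255, cv2.THRESH_BINARY_INV)
            mask_inv = cv2.bitwise_not(mask)
            masked_face = cv2.bitwise_and(face_mask_small, face_mask_small, mask=mask)
            masked_frame = cv2.bitwise_and(frame_roi, frame_roi, mask=mask_inv)
            frame[y:y+h, x:x+w] = cv2.add(masked_face, masked_frame)

    cv2.imshow('Face Detector', frame)
    if cv2.waitKey(1) & 0xFF == 27:  # Esc to quit
        break

cap.release()
cv2.destroyAllWindows()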
Example no. 17
    os.makedirs(new_path)

# Save images to a folder
def save_data(img, steering):
    global img_list, steering_list
    now = datetime.now()
    timestamp = str(datetime.timestamp(now)).replace('.', '')
    filename = os.path.join(new_path, f'Image_{timestamp}.jpg')
    cv2.imwrite(filename, img)
    img_list.append(filename)
    steering_list.append(steering)

# Save the log file when the session ends
def save_log():
    global img_list, steering_list
    raw_data = {'Image': img_list, 'Steering': steering_list}
    df = pd.DataFrame(raw_data)
    new_path = os.path.join(current_dir, f'log_{str(folder_count)}.csv')
    df.to_csv(new_path, index=False, header=False)
    print('Log saved')
    print('Total Images: ', len(img_list))

if __name__ == '__main__':
    cap = cv2.VideoCapture(0)
    for x in range(10):
        _, img = cap.read()
        save_data(img, 0.2)
        cv2.imshow('Img', img)
        cv2.waitKey(1)
    save_log()
    cap.release()
Example no. 18
import os

import cv2

# A cleaned-up reading of this pseudocode: walk a directory of per-user
# folders and parse metadata out of each image filename. The directory name
# and the underscore-separated filename scheme are assumptions.
directory = 'users'
for user in os.listdir(directory):
    user_dir = os.path.join(directory, user)
    for img_file in os.listdir(user_dir):
        img = cv2.imread(os.path.join(user_dir, img_file))
        img_name = os.path.splitext(img_file)[0]
        name, likes, comments, date, time = img_name.split('_')
        
Example no. 19
cap.set(cv2.CAP_PROP_FRAME_WIDTH, INPUT_SIZE[0])
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, INPUT_SIZE[1])

last_capture_time = datetime.now().timestamp()

while True:

    # timestamp() is in seconds, so the frame interval is 1/fps, not 1000/fps;
    # last_capture_time must also be refreshed or the wait only fires once
    if datetime.now().timestamp() < last_capture_time + 1.0 / float(options.fps):
        time.sleep(0.01)
        continue

    last_capture_time = datetime.now().timestamp()

    try:

        has_error = False

        _, frame = cap.read()

        # Preprocessing

        mean_shifted_image = cv2.pyrMeanShiftFiltering(frame, FILTER_SIZE, 32)
        img = cv2.cvtColor(mean_shifted_image, cv2.COLOR_BGR2GRAY)

        # Gaussian blur and thresholding

        blurred_image = cv2.GaussianBlur(img, (FILTER_SIZE, FILTER_SIZE), 6)
        thresh, binary_image = cv2.threshold(blurred_image, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)

        test_img = binary_image

        # Repeatedly detect and remove blobs
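        # -- hedged continuation sketch, not from the original source --
        # one plausible form of the announced blob loop: label connected
        # components and erase any blob smaller than MIN_BLOB_AREA (an assumed
        # constant); a true "repeatedly" version would iterate until no small
        # blob remains
        MIN_BLOB_AREA = 500
        n_labels, labels, stats, _ = cv2.connectedComponentsWithStats(test_img)
        for label in range(1, n_labels):  # label 0 is the background
            if stats[label, cv2.CC_STAT_AREA] < MIN_BLOB_AREA:
                test_img[labels == label] = 0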