import time

import cv2
import numpy as np
from PIL import ImageGrab


def main():
    last_time = time.time()
    while True:
        # Grab the game window region: bbox is (left, top, right, bottom).
        screenshot = ImageGrab.grab(bbox=(0, 40, 800, 640))
        new_screen, m1, m2 = process(screenshot)
        # cv2.imshow("Edges", new_screen)
        # cv2.imshow("window", cv2.cvtColor(np.array(screenshot), cv2.COLOR_BGR2RGB))
        # print("Frame took seconds =", time.time() - last_time)
        # print(m1, m2)
        if abs(m1) - abs(m2) == 0:
            # none()
            print('reversing')
            print(m1 + m2)
        if abs(m2) > abs(m1):
            # right()
            print('right')
            print(m1 + m2)
        elif abs(m2) < abs(m1):
            # left()
            print('left')
            print(m1 + m2)
        last_time = time.time()
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
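# `process` is not defined in this snippet. A minimal sketch of what it might
# look like, assuming it returns a Canny edge image plus the slopes (m1, m2)
# of the two strongest Hough lines; the thresholds here are placeholders, not
# values from the original project:
import cv2
import numpy as np


def process(screenshot):
    gray = cv2.cvtColor(np.array(screenshot), cv2.COLOR_RGB2GRAY)
    edges = cv2.Canny(gray, threshold1=200, threshold2=300)
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 180,
                            minLineLength=100, maxLineGap=5)
    m1, m2 = 0.0, 0.0
    if lines is not None and len(lines) >= 2:
        x1, y1, x2, y2 = lines[0][0]
        x3, y3, x4, y4 = lines[1][0]
        # Guard against vertical lines to avoid division by zero.
        m1 = (y2 - y1) / (x2 - x1) if x2 != x1 else 0.0
        m2 = (y4 - y3) / (x4 - x3) if x4 != x3 else 0.0
    return edges, m1, m2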
import cv2


def showVideo():
    try:
        print('Starting the camera')
        cap = cv2.VideoCapture(0)
    except Exception:
        print('Failed to start the camera')
        return

    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 480)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 320)

    while True:
        ret, frame = cap.read()
        if not ret:
            print('Video read error')
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        cv2.imshow('video', gray)
        k = cv2.waitKey(1) & 0xFF
        if k == 27:  # Esc key
            break

    cap.release()
    cv2.destroyAllWindows()
import cv2


def showimage():
    imgfile = 'images/7-1.jpg'
    # There are three image-read flags in total:
    # IMREAD_COLOR, IMREAD_GRAYSCALE, and IMREAD_UNCHANGED.
    img = cv2.imread(imgfile, cv2.IMREAD_UNCHANGED)
    # WINDOW_AUTOSIZE sizes the window to fit the image.
    cv2.namedWindow('7-1', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('7-1', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
import cv2


def main(template, ImageA):
    aImage = cv2.imread(ImageA)
    temp = cv2.imread(template, 0)  # read the template as grayscale
    aCopy = aImage.copy()
    aImage = cv2.cvtColor(aImage, cv2.COLOR_BGR2GRAY)
    th, tw = temp.shape[:2]
    # With TM_SQDIFF the best match is at the *minimum* of the result map,
    # so min_loc (not max_loc) gives the top-left corner of the match.
    match = cv2.matchTemplate(aImage, temp, cv2.TM_SQDIFF)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match)
    topcorner = min_loc
    bottomcorner = (topcorner[0] + tw, topcorner[1] + th)
    cv2.rectangle(aCopy, topcorner, bottomcorner, (0, 0, 255), 2)
    cv2.imshow('template', aCopy)
    if cv2.waitKey(0) & 0xFF == ord('q'):
        cv2.imwrite('template.jpg', aCopy)
    cv2.destroyAllWindows()
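# Hypothetical usage of the template matcher above; the file names are
# placeholders, not files from the original project:
if __name__ == '__main__':
    main('images/template.jpg', 'images/scene.jpg')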
import datetime

import cv2
import tensorflow as tf


def main():
    gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=1)
    sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options))
    print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
    print("GPU device: ", tf.test.gpu_device_name())

    font = cv2.FONT_HERSHEY_SIMPLEX
    cap = cv2.VideoCapture(0)
    fps_start_time = datetime.datetime.now()
    fps = 0
    total_frames = 0

    while True:
        ret, frame = cap.read()
        total_frames = total_frames + 1

        fps_end_time = datetime.datetime.now()
        time_diff = fps_end_time - fps_start_time
        # total_seconds() gives fractional seconds; timedelta.seconds only
        # counts whole seconds, which makes the FPS reading jumpy.
        if time_diff.total_seconds() == 0:
            fps = 0.0
        else:
            fps = total_frames / time_diff.total_seconds()

        fps_text = "FPS: {:.2f}".format(fps)
        cv2.putText(frame, fps_text, (5, 30), font, 1, (0, 0, 255), 1, cv2.LINE_AA)

        cv2.imshow("Application", frame)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
import cv2
import numpy as np

# VideoCapture(0, cv2.CAP_DSHOW) already opens device 0 with the DirectShow
# backend; a separate cap.open(0) call would redundantly reopen it.
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)

while cap.isOpened():
    flag, frame = cap.read()
    if not flag:
        break
    key_pressed = cv2.waitKey(60)
    print('blackboard is ', key_pressed)
    # Edge-detect the frame, then stack the result into 3 channels
    # so imshow renders it like a normal BGR image.
    frame = cv2.Canny(frame, 20, 40)
    frame = np.dstack((frame, frame, frame))
    cv2.imshow('mygook', frame)
    if key_pressed == 27:  # Esc key
        break

cap.release()
cv2.destroyAllWindows()
# Reading Videos
import cv2 as cv  # importing the OpenCV library

capture = cv.VideoCapture('Videos/dog.mp4')  # open the video for the program

while True:
    isTrue, frame = capture.read()  # reads the video frame by frame
    if not isTrue:  # stop once the video runs out of frames (frame is None)
        break
    cv.imshow('Video', frame)  # display the current frame
    if cv.waitKey(20) & 0xFF == ord('d'):  # if the letter d is pressed, break the loop
        break

capture.release()  # release the capture device
cv.destroyAllWindows()  # close the window
from random import choice

import cv2
import numpy as np
from tensorflow.keras.models import load_model


def main():
    # Note: make sure the img_shape = (225, 225).
    # If you are changing this, make sure to update the code in the model.
    model = load_model("rock-paper-scissors-model.h5")

    # _________________________________
    # Once you have written the code to play the game, uncomment the lines
    # below and place them where required so that the model can predict:
    # pred = model.predict(np.array([img]))
    # move_made_by_you = mapper(np.argmax(pred[0]))
    # ______________________

    cap = cv2.VideoCapture(0)
    prev_move = None

    while True:
        ret, frame = cap.read()
        if not ret:
            continue

        cv2.rectangle(frame, (100, 100), (500, 500), (255, 255, 255), 2)    # user
        cv2.rectangle(frame, (800, 100), (1200, 500), (255, 255, 255), 2)   # computer

        roi = frame[100:500, 100:500]
        img = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)
        img = cv2.GaussianBlur(img, (5, 5), 0)
        img = cv2.Canny(img, 50, 60)
        # Canny output is single-channel; convert back to 3 channels for the
        # model (BGR2RGB on a 1-channel image raises an error).
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        img = cv2.resize(img, (255, 255))

        pred = model.predict(np.array([img]))
        user_move = mapper(np.argmax(pred[0]))

        # ...and the winner is:
        if prev_move != user_move:
            if user_move != 'empty':
                computer_move = choice(['rock', 'paper', 'scissor'])
                winner = find_winner(user_move, computer_move)
            else:
                computer_move = 'empty'
                winner = 'waiting...'
        prev_move = user_move

        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(frame, "Your Move: " + user_move, (50, 50),
                    font, 1.2, (255, 255, 255), 2, cv2.LINE_AA)
        cv2.putText(frame, "Computer's Move: " + computer_move, (750, 50),
                    font, 1.2, (255, 255, 255), 2, cv2.LINE_AA)
        cv2.putText(frame, "Winner: " + winner, (400, 600),
                    font, 2, (0, 0, 255), 4, cv2.LINE_AA)

        if computer_move != 'empty':
            computer_img = cv2.imread(
                'E:/Projects/RPS/computer_images/{}.jpg'.format(computer_move))
            computer_img = cv2.resize(computer_img, (400, 400))
            frame[100:500, 800:1200] = computer_img

        cv2.imshow("rock paper scissor", frame)

        k = cv2.waitKey(10)
        if k == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
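# `mapper` and `find_winner` are not defined in this snippet. A minimal
# sketch, assuming the model outputs class indices in the order
# rock/paper/scissor/empty (the actual order depends on how the model was
# trained):

REV_CLASS_MAP = {0: 'rock', 1: 'paper', 2: 'scissor', 3: 'empty'}


def mapper(val):
    # Map the argmax class index back to a move name.
    return REV_CLASS_MAP[val]


def find_winner(user_move, computer_move):
    # Standard rock-paper-scissors rules.
    if user_move == computer_move:
        return 'Tie'
    wins_against = {'rock': 'scissor', 'paper': 'rock', 'scissor': 'paper'}
    if wins_against[user_move] == computer_move:
        return 'You'
    return 'Computer'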
import time

import cv2
import RPi.GPIO as GPIO

# `cap` (a cv2.VideoCapture) and `obj` are assumed to be set up earlier in the script.
GPIO.output(23, 0)
i = GPIO.input(18)
if i:
    if obj:
        while True:
            f, frame = cap.read()
            if not f:
                break
            cv2.namedWindow("test", cv2.WND_PROP_FULLSCREEN)
            # cv2.cv.CV_WINDOW_FULLSCREEN is the removed OpenCV 2 name;
            # modern OpenCV uses cv2.WINDOW_FULLSCREEN.
            cv2.setWindowProperty("test", cv2.WND_PROP_FULLSCREEN,
                                  cv2.WINDOW_FULLSCREEN)
            cv2.imshow("test", frame)
            ch = cv2.waitKey(10)
            if ch == 27:  # Esc key
                break
        cap.release()
        cv2.destroyAllWindows()
        GPIO.output(23, 1)
        time.sleep(1)
print("GPIO.cleanup()")
GPIO.cleanup()
import os
import pickle
import socket
import struct
import time

import cv2
import tweepy


def Main():
    consumer_key = 'xxxx'
    consumer_secret = 'xxxx'
    access_token_key = 'xxxx'
    access_token_secret = 'xxxx'
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token_key, access_token_secret)

    HOST = socket.gethostbyname('0.0.0.0')
    PORT = int(5000)

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # create socket (family, type)
    print('Socket created')
    s.bind(('', PORT))  # bind the socket to an address
    print('Socket bind complete')
    s.listen(1)  # listen for connections made to the socket
    print('Socket now listening')

    # accept a connection: conn is a new socket object, addr is the address
    # bound to the socket on the other end of the connection
    conn, addr = s.accept()
    mess = "Thank you for connecting"
    conn.send(mess.encode())

    data = b''  # bytes literal
    c = 0
    payload_size = struct.calcsize("L")  # unsigned long size

    while True:
        # Read the fixed-size header that carries the frame length.
        while len(data) < payload_size:
            data += conn.recv(4096)  # receive data from the socket, 4096 bytes max
            c = c + 1
        packed_msg_size = data[:payload_size]
        data = data[payload_size:]
        msg_size = struct.unpack("L", packed_msg_size)[0]  # unpack according to the format string

        # Read the frame payload itself.
        while len(data) < msg_size:
            data += conn.recv(4096)
        frame_data = data[:msg_size]
        data = data[msg_size:]

        # read a pickled object hierarchy from the received bytes
        img = pickle.loads(frame_data, encoding='latin1')

        face_cascade = cv2.CascadeClassifier('C:/Users/Alex/Desktop/face.txt')  # load face cascade
        eye_cascade = cv2.CascadeClassifier('C:/Users/Alex/Desktop/eye.txt')  # load eye cascade

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert the camera frame to grayscale
        # Detect faces of different sizes in the input image,
        # returned as a list of rectangles.
        faces = face_cascade.detectMultiScale(gray)

        if len(faces) > 0:
            api = tweepy.API(auth)
            t = time.strftime("%d-%m-%Y %H:%M:%S", time.gmtime())
            facess = str(faces)
            msg = "Alert !! Face detected at " + t + " : " + facess
            # api.update_status(msg)
            cv2.imwrite('opencv.png', img)
            api.update_with_media('opencv.png', msg)
            os.system("shutdown.exe /l")

        for (x, y, w, h) in faces:
            if c < 5:
                # draw a blue rectangle: (x, y) start point, end point, color, thickness
                cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
                roi_gray = gray[y:y + h, x:x + w]  # face region in the gray image
                roi_color = img[y:y + h, x:x + w]  # face region for drawing
                # Detect eyes of different sizes within the face region,
                # returned as a list of rectangles.
                eyes = eye_cascade.detectMultiScale(roi_gray)
                print(c)
                for (ex, ey, ew, eh) in eyes:
                    # draw a green rectangle around each eye
                    cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)

        cv2.imshow('img', img)
        # wait 30 ms for a keyboard event; Esc (27) exits
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break

    cv2.destroyAllWindows()  # destroy all of the opened HighGUI windows
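# A minimal sketch of a matching client/sender, assuming the same
# length-prefixed pickle protocol the server above expects (a struct-packed
# "L" header followed by the pickled frame). The host/port are placeholders;
# note that struct "L" is platform-dependent in size, so both sides must run
# on platforms where it matches (or agree on a fixed format like "=L"):
import pickle
import socket
import struct

import cv2

client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('127.0.0.1', 5000))
print(client.recv(1024).decode())  # greeting from the server

cap = cv2.VideoCapture(0)
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    payload = pickle.dumps(frame)
    # Prefix each frame with its length so the receiver knows how much to read.
    client.sendall(struct.pack("L", len(payload)) + payload)

cap.release()
client.close()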