def vid_stream():
    """Show the drone camera feed in a window until ESC is pressed, then clean up.

    Relies on module-level objects: `cap` (a cv2.VideoCapture) and `drone`
    (presumably a Tello-style controller — confirm against caller).
    """
    while cap.isOpened():
        _, frame = cap.read()
        if frame is None:
            # Transient empty frame from the stream: skip it rather than
            # crash inside cv2.imshow.
            continue
        cv2.imshow("drone", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == 27:  # ESC quits the viewer
            break
    cap.release()
    cv2.destroyAllWindows()  # fixed typo: was destroyALlWindows (AttributeError)
    drone.streamoff()
def main():
    """Read consecutive webcam frame pairs and feed them to CameraGyroscope.

    Each iteration grabs two frames (with timestamps around the grabs),
    converts them to grayscale, and normalises each by its mean brightness
    so global exposure changes cancel out before angle extraction.
    Press 'q' in the preview window to quit.
    """
    cg = CameraGyroscope()
    video_data = cv2.VideoCapture(0)
    while True:
        _, frame2 = video_data.read()
        time1 = time.time()
        _, frame1 = video_data.read()
        if frame1 is None or frame2 is None:
            # Camera produced no frame; skip instead of crashing in cvtColor.
            continue
        frame2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
        frame2 = np.divide(frame2, np.mean(frame2))
        frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
        frame1 = np.divide(frame1, np.mean(frame1))
        time2 = time.time()
        cg.extract_angle_update(frame1, frame2, time1, time2)
        cv2.imshow("frame1", frame1)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
    video_data.release()  # fixed: capture device was never released
    cv2.destroyAllWindows()  # fixed typo: was destroyALlWindows (AttributeError)
#Extract (Crop out the required face) : Region of Interest offset = 10 face_section = frame[y - offset:y + h + offset, x - offset:x + w + offset] face_section = cv2.resize(face_section, (100, 100)) skip += 1 if skip % 10 == 0: face_data.append(face_section) print(len(face_data)) cv2.imshow("Frame", frame) cv2.imshow("Face Section", face_section) key_pressed = cv2.waitKey(1) & 0xFF if key_pressed == ord('q'): break # Convert our face list array into a numpy array face_data = np.asarray(face_data) face_data = face_data.reshape((face_data.shape[0], -1)) print(face_data.shape) # Save this data into file system np.save(dataset_path + file_name + '.npy', face_data) print("Data Successfully saved at " + dataset_path + file_name + '.npy') cap.release() cv2.destroyALlWindows()
def main(_argv):
    """Run YOLOv3 detection on a webcam or video file, drawing boxes live.

    Reads frames from `FLAGS.video` (webcam index or path), runs inference,
    overlays detections plus a 20-frame moving-average inference time, and
    optionally writes the annotated stream to `FLAGS.output`. Press 'q' to quit.
    """
    # Let TensorFlow grow GPU memory on demand instead of grabbing it all.
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    for physical_device in physical_devices:
        tf.config.experimental.set_memory_growth(physical_device, True)

    # Import weights.
    yolo = YoloV3(classes=FLAGS.num_classes)
    yolo.load_weights(FLAGS.weights)
    logging.info("weights loaded")

    # Import class names, one per line.
    class_names = [c.strip() for c in open(FLAGS.classes).readlines()]
    logging.info("classes loaded")

    # Rolling list of per-frame inference times (last 20 kept).
    times = []

    # FLAGS.video is either a webcam index ("0") or a file path.
    try:
        vid = cv2.VideoCapture(int(FLAGS.video))
    except ValueError:  # fixed: bare `except:` hid unrelated errors
        vid = cv2.VideoCapture(FLAGS.video)

    out = None
    if FLAGS.output:
        # By default VideoCapture returns float instead of int.
        width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = int(vid.get(cv2.CAP_PROP_FPS))
        codec = cv2.VideoWriter_fourcc(*FLAGS.output_format)
        out = cv2.VideoWriter(FLAGS.output, codec, fps, (width, height))

    while True:
        _, img = vid.read()
        if img is None:
            logging.info("Empty Frame")
            time.sleep(0.1)
            # fixed: without this `continue`, cvtColor(None) crashed below.
            continue

        img_in = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img_in = tf.expand_dims(img_in, 0)
        img_in = transform_images(img_in, FLAGS.size)

        t1 = time.time()
        boxes, scores, classes, nums = yolo.predict(img_in)
        t2 = time.time()
        # fixed: removed stray debug loop `for i in num: print(i)` —
        # `num` was undefined and raised NameError on the first frame.

        times.append(t2 - t1)
        times = times[-20:]

        img = draw_outputs(img, (boxes, scores, classes, nums), class_names)
        img = cv2.putText(
            img, "Time: {:.2f}ms".format(sum(times) / len(times) * 1000),
            (0, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (1, 0, 255), 2)
        if FLAGS.output:
            out.write(img)
        cv2.imshow('Output', img)
        if cv2.waitKey(1) == ord('q'):
            break

    vid.release()
    if out is not None:
        out.release()  # fixed: writer was never released; file could be truncated
    cv2.destroyAllWindows()  # fixed typo: was destroyALlWindows (AttributeError)
import numpy as np
import cv2

# Load the image in grayscale (flag 0 == cv2.IMREAD_GRAYSCALE).
img = cv2.imread('images/fruits.jpg', 0)

# Display the image and wait for a keypress.
cv2.imshow('image', img)
k = cv2.waitKey(0) & 0xFF

if k == 27:  # ESC: exit without saving
    cv2.destroyAllWindows()  # fixed typo: was destroyALlWindows (AttributeError)
elif k == ord('s'):  # 's': save the grayscale copy, then exit
    cv2.imwrite('images/fruits_gray.jpg', img)
    cv2.destroyAllWindows()