def resimcek():
    cam = Camera()
    img = cam.getImage()
    img.save("deneme.jpg")
    del cam
    del img

def simpleDiff():
    # ax, plt, and diff() are assumed to exist at module level
    # (a matplotlib axes/pyplot pair and a frame-difference helper).
    cam = Camera()
    img = cam.getImage().scale(.20)
    disp = Display(img.size())
    img.save(disp)
    X = range(100)
    Y = [0 for i in range(100)]
    count = 0
    imgA = cam.getImage().scale(0.20).grayscale()
    while not disp.isDone():
        ax.clear()
        count += 1
        time.sleep(0.1)
        imgB = cam.getImage().scale(0.20).grayscale()
        #imgB.save(disp)
        motion = (imgB - imgA).binarize().invert().erode(1).dilate(1)
        motion.save(disp)
        s = diff(motion)
        imgA = imgB
        if count < 100:
            Y[count] = s
        else:
            Y.append(s)
            Y = Y[1:]
            X.append(count)
            X = X[1:]
        ax.bar(X, Y)
        plt.xlim(X[0], X[-1])
        plt.draw()

def opticalFlow():
    # ax and plt are assumed to exist at module level (matplotlib axes/pyplot).
    cam = Camera()
    img = cam.getImage().scale(.20)
    disp = Display(img.size())
    img.save(disp)
    X = range(100)
    Y = [0 for i in range(100)]
    flag = 0
    count = 0
    while not disp.isDone():
        ax.clear()
        count += 1
        if flag == 0:
            imgA = cam.getImage().scale(0.20)
            flag += 1
        else:
            imgB = cam.getImage().scale(0.20)
            imgB.save(disp)
            motion = imgB.findMotion(imgA)
            s = sum([i.magnitude() for i in motion])
            imgA = imgB
            if count < 100:
                Y[count] = s
            else:
                Y.append(s)
                Y = Y[1:]
                X.append(count)
                X = X[1:]
            ax.bar(X, Y)
            plt.xlim(X[0], X[-1])
            plt.draw()

def main(cameraNumber, camWidth, camHeight, outputFile):
    BUFFER_NAME = 'cloud3.avi'
    vs = VideoStream(fps=24, filename=BUFFER_NAME, framefill=True)
    disp = Display((camWidth, camHeight))
    cam = Camera(cameraNumber, prop_set={"width": camWidth, "height": camHeight})
    # while the user does not press 'esc'
    start_time = time()
    count = 0
    while disp.isNotDone():
        # KISS: just get the image... don't get fancy
        img = cam.getImage()
        print type(img)
        skimage.io.push(img)
        #img.show()
        # write the frame to videostream
        vs.writeFrame(img)
        # show the image on the display
        img.save(disp)
        current_time = time()
        if current_time - start_time >= 5:
            outputFile = "testing_chunk_%d.mp4" % (count)
            print "Saving %s" % (outputFile)
            saveFilmToDisk(BUFFER_NAME, outputFile)
            start_time = time()
            count += 1

def recordVideo(self, cb, topic, length=5):
    global BUFFER_NAME
    BUFFER_NAME = topic + '_' + time.strftime("%Y_%m_%d_%H_%M_%S") + '.avi'
    vs = VideoStream(fps=24, filename=BUFFER_NAME, framefill=True)
    self.disp = Display((self.width, self.height))
    cam = Camera(1, prop_set={"width": self.width, "height": self.height})
    while self.continueRecord:
        gen = (i for i in range(0, 30 * length) if self.continueRecord)
        for i in gen:
            img = cam.getImage()
            vs.writeFrame(img)
            img.save(self.disp)
        self.continueRecord = False
    print "Broke capture loop"
    self.disp.quit()
    print "Saving video"
    # This is to run this process asynchronously - we will skip that
    # self.makefilmProcess = Process(target=saveFilmToDisk, args=(BUFFER_NAME, self.videoTitle))
    # self.makefilmProcess.start()
    # Callback function
    cb()

class CameraVideo(object):

    def __init__(self):
        if hasattr(self, 'my_camera') is False:
            self.my_camera = Camera(prop_set={'width': 320, 'height': 240})
            self.my_display = Display(resolution=(320, 240))
            self.live_preview = False
            self.timestamp = datetime.now().strftime('%Y%m%d%H%M%S')

    def start_live_preview(self):
        if self.live_preview is False:
            self.file_name = "/tmp/cameraOut" + self.timestamp + ".avi"
            self.live_preview = True
            #video_stream = VideoStream(self.file_name, fps=15)
            timeout = 0
            while timeout < 100:
                #image = my_camera.getImage()
                #image = image.edges()
                #video_stream.writeFrame(image)
                self.my_camera.getImage().save(self.my_display)
                timeout += 2
                sleep(0.1)
        return self.file_name

    def stop_live_preview(self):
        self.live_preview = False
        # construct the encoding arguments
        # outname = self.file_name.replace('.avi', '.mp4')
        # params = " -i {0} {1}".format(self.file_name, outname)
        # run ffmpeg to compress your video:
        # call('ffmpeg' + params, shell=True)

    def take_video(self, duration):
        pass

def main():
    x = 0
    cam = Camera(prop_set={'width': 640, 'height': 480})
    disp = Display(resolution=(320, 240))
    while disp.isNotDone():
        img = cam.getImage()
        img = img.scale(0.5)
        faces = img.findHaarFeatures("eye.xml")
        #print "not Detected"
        if faces:
            for face in faces:
                face.draw()
            print "eyes Detected"
            # x = 0
        else:
            # x += 1
            print "close eyes"
            #print (x)
            #if x > 10:
            #    print "HOY GISING"
            #    return main()
        img.save(disp)

def main(): """Finds and interprets feature points""" # Initialize Camera print "Starting Webcam..." try: cam = Camera() except: print "Unable to initialize camera" sys.exit(1) display = Display(resolution = (RES_WIDTH, RES_HEIGHT)) while not display.isDone(): # capture the current frame try: capture = cam.getImage() img = capture.smooth() except cv.error: print "Camera unsupported by OpenCV" sys.exit(1) # Build face and interpret expression face_image = FaceImage(img) if face_image.face: #s face_image.interpret() pass capture.save(display) time.sleep(0.01) if display.mouseLeft: display.done = True
def main(cameraNumber, camWidth, camHeight, outputFile):
    BUFFER_NAME = 'motiontest.avi'
    # create the video stream for saving the video file
    #vs = VideoStream(fps=24, filename=fname, framefill=True)
    vs = VideoStream(fps=24, filename=BUFFER_NAME, framefill=True)
    # create a display with size (width, height)
    disp = Display((camWidth, camHeight))
    # Initialize Camera
    cam = Camera(cameraNumber, prop_set={"width": camWidth, "height": camHeight})
    # while the user does not press 'esc'
    while disp.isNotDone():
        # KISS: just get the image... don't get fancy
        img = cam.getImage()
        #img.show()
        # write the frame to videostream
        vs.writeFrame(img)
        # show the image on the display
        img.save(disp)
    # Finished the acquisition of images, now transform into a film
    #self.makefilmProcess = Process(target=self.saveFilmToDisk, args=(BUFFER_NAME, outputFile))
    #self.makefilmProcess.start()
    saveFilmToDisk(BUFFER_NAME, outputFile)

def Run(cmdPipe):
    steadyStateFPS = 10
    desiredBuffer = 60 * 60  # one hour of frames: 60 minutes * 60 seconds
    numberOfFrames = steadyStateFPS * desiredBuffer
    cam = Camera(0, {"width": 640, "height": 480})
    sleepTime = 0
    while True:
        # check command
        if cmdPipe.poll():
            cmd = cmdPipe.recv()
            if cmd == 'shutdown':
                print('capture', 0, "Shutting down.")
                break
        filelist = glob("images/*.jpg")
        if len(filelist) < numberOfFrames:
            sleepTime = (1.0 / steadyStateFPS) - .01
        else:
            sleepTime = 1.0 / steadyStateFPS
        print("capture", 0, "number of frames in buffer=" + str(len(filelist)) + " desired=" + str(numberOfFrames) + " setting sleeptime to " + str(sleepTime))
        for index in range(100):
            ts = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
            cam.getImage().save("images/slowglass." + ts + ".jpg")
            time.sleep(sleepTime)

def main(cameraNumber, outputFile):
    BUFFER_NAME = 'ipython1.avi'
    # create the video stream for saving the video file
    #vs = VideoStream(fps=24, filename=fname, framefill=True)
    #vs = VideoStream(fps=24, filename=BUFFER_NAME, framefill=True)
    vs = VideoStream(filename=BUFFER_NAME)
    # create a display with size (width, height)
    disp = Display()
    # Initialize Camera
    cam = Camera(cameraNumber)
    time_start = time.time()
    # capture frames for ten seconds
    while time.time() - time_start < 10:
        # KISS: just get the image... don't get fancy
        img = cam.getImage()
        #img.show()
        # write the frame to videostream
        vs.writeFrame(img)
        # show the image on the display
        img.save(disp)
    # Finished the acquisition of images, now transform into a film
    #self.makefilmProcess = Process(target=self.saveFilmToDisk, args=(BUFFER_NAME, outputFile))
    #self.makefilmProcess.start()
    saveFilmToDisk(BUFFER_NAME, outputFile)

def main(cameraNumber, camWidth, camHeight, outputFile):
    BUFFER_NAME = 'buffer.avi'
    # create the video stream for saving the video file
    vs = VideoStream(fps=24, filename=BUFFER_NAME, framefill=True)
    # create a display with size (width, height)
    disp = Display((camWidth, camHeight))
    # Initialize Camera
    cam = Camera(cameraNumber, prop_set={"width": camWidth, "height": camHeight})
    # while the user does not press 'esc'
    while disp.isNotDone():
        # KISS: just get the image... don't get fancy
        img = cam.getImage()
        # write the frame to videostream
        vs.writeFrame(img)
        # show the image on the display
        img.save(disp)
    # Finished the acquisition of images, now transform into a film
    makefilmProcess = Process(target=saveFilmToDisk, args=(BUFFER_NAME, outputFile))
    makefilmProcess.start()


def saveFilmToDisk(bufferName, outname):
    # construct the encoding arguments
    params = " -i {0} -c:v mpeg4 -b:v 700k -r 24 {1}".format(bufferName, outname)
    # run avconv to compress the video, since ffmpeg is (going to be) deprecated
    call('avconv' + params, shell=True)

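# Usage sketch (an assumption, not part of the original): the camera index,
# frame size, and output filename below are illustrative placeholders.
if __name__ == '__main__':
    main(0, 640, 480, 'output.mp4')
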
def get_image():
    a = Camera(0)
    # a = Kinect()
    time.sleep(1)
    b = a.getImage()
    # b.save(expanduser("~/Projects/OceanColorSound/frame4.png"))
    # b = Image(expanduser("~/Projects/OceanSound/data/frame4.png"))
    return b

def get_image(img_name='tmp.jpg'):
    """Get an image from imaging hardware, convert it to grayscale, and save it."""
    cam = Camera()
    img = cam.getImage()
    img = img.toGray()
    img.save(img_name)
    img.show()
    return img_name

def get_camera(self):
    try:
        cam = Camera()
        img = cam.getImage()
        img.save("mZOMGGUYS.png")
    except:
        # ignore capture failures; a stored image is returned below either way
        pass
    return voting.encode_image(self.random_path())

def real_test(gesture_name, window_size=15):
    from SimpleCV import Camera, Display
    from sklearn.externals import joblib
    import time
    import os

    cam = Camera()
    dis = Display()
    time.sleep(2)
    lastLeft = None
    lastRight = None
    clfLeft = joblib.load('models/%s' % gesture_name)
    #clfRight = joblib.load('models/moveright')
    window = []
    targetFps = 15.0
    fps = targetFps
    sleepTime = 1 / targetFps
    prevTime = None
    try:
        print "Recording... [keyboard interrupt to quit]"
        while True:
            img = cam.getImage()
            img = scaleDown(img)
            img.dl().ezViewText("{0:.3f} fps".format(fps), (0, 0))
            if fps > targetFps + .5:
                sleepTime += 0.005
            elif fps < targetFps:
                sleepTime = max(sleepTime - 0.015, 0.01)
            if prevTime is not None:
                fps = 1.0 / (time.time() - prevTime)
            prevTime = time.time()
            cur_feature = extractFeatures(img)
            if cur_feature is None:
                window = []
            else:
                window.append(cur_feature)
            if len(window) == window_size:
                datum = subtractPastFeatures(window)
                if 1 == clfLeft.predict(flatten(datum))[0]:
                    print("Gesture Left")
                    if lastLeft is None or (time.time() - lastLeft) > 1:
                        #os.system("osascript -e 'tell application \"System Events\"' -e 'key down {control}' -e 'keystroke (key code 123)' -e 'key up {control}' -e 'end tell'")
                        lastLeft = time.time()
                #if 1 == clfRight.predict(flatten(datum))[0]:
                #    print("Gesture Right")
                #    if lastLeft is None or (time.time() - lastLeft) > 1:
                #        #os.system("osascript -e 'tell application \"System Events\"' -e 'key down {control}' -e 'keystroke (key code 124)' -e 'key up {control}' -e 'end tell'")
                #        lastLeft = time.time()
                window = window[1:]
            img.show()
            time.sleep(sleepTime)
    except KeyboardInterrupt:
        print "Done recording"

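# Usage sketch (an assumption, not part of the original): real_test expects a
# classifier previously saved with joblib under models/<gesture_name>. The
# gesture name below is a placeholder.
if __name__ == '__main__':
    real_test('moveleft', window_size=15)
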
def run(self):
    m = alsaaudio.Mixer()  # alsaaudio.Mixer is used to change the master volume
    scale = (300, 250)  # increased from (200, 150); works well
    d = Display(scale)
    cam = Camera()
    prev = cam.getImage().scale(scale[0], scale[1])
    sleep(0.5)
    buffer = 20
    count = 0
    prev_t = time()  # note the initial time
    while d.isNotDone():
        current = cam.getImage()
        current = current.scale(scale[0], scale[1])
        if count < buffer:
            count = count + 1
        else:
            # Tried BM and LK; LK works better (need to learn more about LK)
            fs = current.findMotion(prev, method="LK")  # find motion
            if fs:  # if a featureset was found
                dx = 0
                dy = 0
                for f in fs:
                    dx = dx + f.dx  # sum all the optical flow detected
                    dy = dy + f.dy
                dx = (dx / len(fs))  # take the average
                dy = (dy / len(fs))
                prev = current
                sleep(0.01)
                current.save(d)
                if dy > 2 or dy < -2:
                    vol = int(m.getvolume()[0])  # get the master volume
                    vol = vol + (-dy * 3)  # same scaling for both directions
                    if vol > 100:
                        vol = 100
                    elif vol < 0:
                        vol = 0
                    print vol
                    m.setvolume(int(vol))  # set the master volume
                if dx > 3:
                    cur_t = time()
                    if cur_t > 5 + prev_t:  # add some time delay
                        self.play("next")  # skip to the next track
                        prev_t = cur_t
                if dx < -3:
                    cur_t = time()
                    if cur_t > 5 + prev_t:
                        prev_t = cur_t
                        self.play("previous")  # go back to the previous track

def scan_cameras():
    existingcameras = []
    for i in range(0, 10):
        try:
            camera = Camera(i)
            # grabbing and eroding a frame forces a real capture, so
            # nonexistent devices fail here and are skipped
            camera.getImage().erode()
            existingcameras.append(i)
        except:
            pass
    return existingcameras

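# Usage sketch (an assumption, not part of the original): probe the first ten
# device indices and report which ones answered.
if __name__ == '__main__':
    available = scan_cameras()
    print "Found cameras at indices: %s" % available
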
def main():
    cam = Camera()
    while True:
        image = cam.getImage()
        blobs = image.findBlobs()
        if blobs:
            blobs.draw()
            blob = find_yellow(blobs)
            if blob:
                image.drawCircle((blob.x, blob.y), 10, color=Color.RED)
        image.show()

def nok_air_task():
    global plu_tkpi, plu_rect, plu_text, cam_num, plu_pic
    plu_pic[3] = True
    cam = Camera(cam_num)
    img = cam.getImage()
    thumbnail = img.scale(90, 60)
    thumbnail.save('tmp_picture.jpg')
    plu_tkpi[3] = pygame.image.load('tmp_picture.jpg')
    plu_rect[3] = plu_tkpi[3].get_rect()
    plu_rect[3][0] = 100
    plu_rect[3][1] = 5
    plu_text[3] = localtime[8:10] + ' ' + localtime[4:7]

def run_capturer(kafka_hosts, fps=24):
    producer = KafkaProducer(bootstrap_servers=kafka_hosts)
    cam = Camera()
    while True:
        img = cam.getImage()
        img.drawText(get_timestamp(), fontsize=160)
        img.save('tmp.jpg')
        with open('tmp.jpg', mode='rb') as file:
            content = file.read()
        producer.send('CAMERA_FEED', pack_image(content))
        print('Got an image')
        sleep(0.4)

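# A minimal consumer counterpart (a sketch, not part of the original):
# assumes kafka-python's KafkaConsumer and the same 'CAMERA_FEED' topic.
# Since pack_image() above is not shown, the payload is written out
# verbatim; a real consumer would reverse whatever pack_image() does.
from kafka import KafkaConsumer

def run_consumer(kafka_hosts):
    consumer = KafkaConsumer('CAMERA_FEED', bootstrap_servers=kafka_hosts)
    for i, message in enumerate(consumer):
        # message.value holds the packed frame published by run_capturer
        with open('frame_%d.bin' % i, mode='wb') as out:
            out.write(message.value)
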
class Camara(pygame.sprite.Sprite):
    "ImagenCamara"
    #TODO: Camera selection should be improved so the code does not have to be edited
    elegida = 1  # By default this is camera 0, i.e. /dev/video0
    estado = False  # Camera state (False = off, True = on)
    norma = "PALN"  # Video norm (still need to work out how to change it)
    cam = ""

    def __init__(self):
        self.estado_luces = False
        pygame.sprite.Sprite.__init__(self)
        self.image = load_image("cam_principal_apagada.png", IMG_DIR, alpha=True)
        self.rect = self.image.get_rect()  # Get a rect object for coordinates and size
        self.rect.centerx = 385
        self.rect.centery = 280
        self.image = pygame.transform.scale(self.image, (640, 480))

    def encender(self):
        self.estado = True
        self.cam = Camera(self.elegida)
        # The camera power-on routines should be attached here
        # The graphics behaviour should be added here

    def apagar(self):
        if self.estado == True:
            del self.cam
            self.estado = False
        # The camera power-off routines should be attached here
        # The graphics behaviour should be added here

    def obtener_imagen(self):
        # Returns the image in SimpleCV format
        if self.estado == True:
            imagen = self.cam.getImage().toPygameSurface()
        else:
            imagen = SimpleCV.Image("Imagenes/cam_principal_apagada.png").toPygameSurface()
        return imagen

    def sacar_foto(self, archivo):
        #OPTIMIZE: This function needs improvement
        self.archivo = archivo
        imagen = self.cam.getImage()
        imagen.save(archivo)

def interactiveTranslation():
    cam = Camera()
    disp = Display()
    current = " "
    while disp.isNotDone():
        image = cam.getImage()
        if disp.mouseLeft:
            break
        if disp.mouseRight:
            text = image.readText()
            text = cleanText(text)
            translated = trans.translate(text, langpair)
            if translated:
                current = translated
        image.drawText(current, 0, 0, color=Color.BLACK, fontsize=40)
        image.save(disp)

def __init__(self, *args, **kwargs):
    # Pull out the debug kwarg before passing the rest to Camera
    try:
        self.debug = kwargs['debug']
        del kwargs['debug']
        print("Initializing camera")
    except KeyError:
        self.debug = False
    Camera.__init__(self, *args, **kwargs)
    while not self.camera_is_ready():
        sleep(1)
    if self.debug:
        print("Camera is ready")

def handle(self, *args, **options):
    host = options.get('host', '0.0.0.0')
    port = options.get('port', '8090')
    host_camera = options.get('host_camera', 0)

    # set up the stream
    camera = Camera(host_camera)
    stream = JpegStreamer("%s:%s" % (host, port))

    while True:
        image = camera.getImage()
        image.save(stream)
        # sleep one frame interval to cap the stream at roughly 10 fps
        time.sleep(0.1)

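# Usage note (an assumption, based on SimpleCV's JpegStreamer serving an
# MJPEG feed over HTTP): while handle() runs, the stream can be viewed in a
# browser; the port below mirrors the default above.
import webbrowser

webbrowser.open("http://localhost:8090/")
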
def __init__(self, cfg, strCamera="basic"):
    """Create an instance of this class.

    Arguments:
    strCamera - The key identifying the camera to use; this camera must be
                defined in the config file. The key is resolved to
                'cameras.' + strCamera (e.g. cameras.dads_camera).
    """
    self.logger = logging.getLogger(self.__class__.__name__)
    self.cfg_root = cfg
    self.cfg = cfg["cameras"][strCamera]
    self.cfg.setdefault("local_path", "./images/shot.jpg")
    self.cfg.setdefault("no_pic", "./images/xxx.jpg")
    self.cfg.setdefault("uri", None)
    if self.cfg["uri"] == "PICAMERA":
        self.logger.info("Using connected Pi camera module ...")
        if self._cam is None:
            self._cam = picamera.PiCamera()
            self._cam.resolution = (640, 480)
            #self._cam.start_preview()
    elif self.cfg["uri"] is not None:
        self.logger.info("Using web camera at URI: %s ...", self.cfg["uri"])
    else:
        self.logger.info("Attempting to use local (USB?) camera ...")
        if self._cam is None:
            self._cam = Camera()

def nok_air_task():
    global state, plu_tkpi, plu_button, plu_pic, nok_air
    plu_pic[0] = True
    cam = Camera(cam_num)
    img = cam.getImage()
    img.save('tmp_picture.jpg')
    image_CV = Image.open('tmp_picture.jpg')
    resized = image_CV.resize((320, 225), Image.ANTIALIAS)
    plu_tkpi[0] = ImageTk.PhotoImage(resized)
    if state == 4:
        plu_button[0].configure(width=320, height=225, image=plu_tkpi[0])
    elif state == 9:
        plu_button[10].configure(width=213, height=150, image=plu_tkpi[0])
    elif state == 16:
        plu_button[20].configure(width=160, height=112, image=plu_tkpi[0])
    nok_air = win.after(500, nok_air_task)

def take_a_picture(i):
    global pic_num, plu_pic_num, plu_tkpi, plu_rect, plu_text, cam_num, plu_pic
    if i <= 3:
        return
    mm = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    plu_pic[i] = True
    cam = Camera(cam_num)
    img = cam.getImage()
    thumbnail = img.scale(90, 60)
    pic_num = pic_num + 1
    thumbnail.save('./pic/p' + format(pic_num, '012') + '.jpg')
    plu_tkpi[i] = pygame.image.load('./pic/p' + format(pic_num, '012') + '.jpg')
    plu_pic_num[i] = pic_num
    plu_rect[i] = plu_tkpi[i].get_rect()
    if Date_set != 0 and Month_set != 0:
        plu_text[i] = str(Date_set) + ' ' + mm[Month_set - 1]
    else:
        plu_text[i] = localtime[8:10] + ' ' + localtime[4:7]

def record_gesture(gesture_name):
    from SimpleCV import Camera, Display
    import time
    import os

    gesture_dir = "rawdata/%s" % gesture_name
    if not os.path.exists(gesture_dir):
        os.makedirs(gesture_dir)
    cam = Camera()
    dis = Display()
    time.sleep(2)
    gap = 3  # seconds of pause between each gesture
    targetFps = 15.0
    fps = targetFps
    sleepTime = 1 / targetFps
    start = time.time()
    prevTime = None
    try:
        print "Recording... [keyboard interrupt to quit]"
        while True:
            img = cam.getImage()
            img = scaleDown(img)
            if fps > targetFps + .5:
                sleepTime += 0.005
            elif fps < targetFps:
                sleepTime = max(sleepTime - 0.015, 0.01)
            if prevTime is not None:
                fps = 1.0 / (time.time() - prevTime)
            prevTime = time.time()
            time_since_start = time.time() - start
            timegap = time_since_start % gap
            count = time_since_start / gap
            print(count)
            imgNo = timegap * targetFps
            if imgNo < 30:
                img.save("rawdata/%s/%03d-%02d.jpg" % (gesture_name, count, imgNo))
            else:
                img.save("rawdata/junk.jpg")
            img.dl().ezViewText("{0:.3f} fps".format(fps), (0, 0))
            if timegap < gap / 2:
                img.dl().ezViewText("Move!", (200, 200))
            img.show()
            time.sleep(sleepTime)
    except KeyboardInterrupt:
        print "Done recording"

def __init__(self, pitchnum, stdout, sourcefile, resetPitchSize, noGui, debug_window, pipe):
    self.noGui = noGui
    self.lastFrameTime = self.begin_time = time.time()
    self.processed_frames = 0
    self.running = True
    self.stdout = stdout
    self.pipe = pipe

    if sourcefile is None:
        self.camera = Camera()
    else:
        self.filetype = 'video'
        if sourcefile.endswith(('jpg', 'png')):
            self.filetype = 'image'

    self.gui = Gui(self.noGui)
    self.threshold = Threshold(pitchnum)
    self.thresholdGui = ThresholdGui(self.threshold, self.gui)
    self.preprocessor = Preprocessor(resetPitchSize)
    self.features = Features(self.gui, self.threshold)

    # if self.debug_window:
    #     self.debug_window = DebugWindow()
    # else:
    #     self.debug_window = None

    calibrationPath = os.path.join('calibration', 'pitch{0}'.format(pitchnum))
    self.camera.loadCalibration(os.path.join(sys.path[0], calibrationPath))

    eventHandler = self.gui.getEventHandler()
    eventHandler.addListener('q', self.quit)

    # Ugly stuff for smoothing coordinates - should probably move it
    self._pastSize = 5
    self._pastCoordinates = {
        'yellow': [(0, 0)] * self._pastSize,
        'blue': [(0, 0)] * self._pastSize,
        'ball': [(0, 0)] * self._pastSize
    }
    self._pastAngles = {
        'yellow': [1.0] * self._pastSize,
        'blue': [1.0] * self._pastSize
    }

    while self.running:
        if self.preprocessor.hasPitchSize:
            self.outputPitchSize()
            self.gui.setShowMouse(False)
        else:
            eventHandler.setClickListener(self.setNextPitchCorner)

        while self.running:
            self.doStuff()