def take_photo(fileName=0):
    """Capture one frame from the default camera and save it to fileName.

    A zero/default fileName acts as a sentinel meaning "no target": the
    function returns immediately without touching the camera.  The given
    fileName is returned in every case.
    """
    if fileName == 0:
        # Caller supplied no real path — skip the capture entirely.
        return fileName
    camera = Camera()
    frame = camera.getImage()
    frame.save(fileName)
    return fileName
def resimcek():
    """Grab a single frame from the default camera and save it as deneme.jpg."""
    camera = Camera()
    frame = camera.getImage()
    frame.save("deneme.jpg")
    # Drop the references immediately so camera resources are reclaimed.
    del camera
    del frame
def main(): x = 0 cam = Camera(prop_set={'width': 640, 'height': 480}) disp = Display(resolution=(320, 240)) while disp.isNotDone(): img = cam.getImage() img = img.scale(0.5) faces = img.findHaarFeatures("eye.xml") #print "not Detected" if faces: for face in faces: face.draw() print "eyes Detected" # x = 0 else: # x += 1 print "close eyes" #print (x) #if x > 10: # print "HOY GISING" # return main() img.save(disp)
def CaptureImages():
    """Stream webcam frames over MQTT as base64-encoded images.

    Connects to the broker, then loops forever publishing each captured
    frame on the "Video" topic at roughly 20 fps.
    """
    # Bring up the MQTT publisher.
    videoClient = mqtt.Client()
    videoClient.reinitialise(client_id="Camera", clean_session=True, userdata=None)
    videoClient.connect("192.168.31.243", port=1883, keepalive=60, bind_address="")
    videoClient.loop_start()

    camera = Camera(0)
    i = 0  # kept: frame counter used by the (disabled) disk-dump path
    while True:
        # Serialise the frame into an in-memory buffer and publish it.
        buf = StringIO.StringIO()
        camera.getImage().save(buf)
        videoClient.publish("Video", base64.b64encode(buf.getvalue()))
        buf.close()
        time.sleep(.05)
def control_by_cam():
    """Watch the webcam for horizontal motion and return the last movement label.

    Frames are mirrored and downscaled; the first `buffer` frames are skipped
    so the camera can settle, then block-matching optical flow against the
    previous frame yields an average horizontal displacement that
    movement_check() converts into a label drawn on the frame.

    Returns:
        The most recent string produced by movement_check(), or "" if the
        display was closed before any motion was measured.  (The original
        code raised NameError in that case — `motionStr` was never bound.)
    """
    scale_amount = (200, 150)
    d = Display(scale_amount)
    cam = Camera(0)
    prev = cam.getImage().flipHorizontal().scale(scale_amount[0], scale_amount[1])
    time.sleep(0.5)
    t = 0.5          # movement threshold forwarded to movement_check()
    buffer = 20      # warm-up frames skipped before measuring motion
    count = 0
    motionStr = ""   # fix: guarantee a value even if no motion is ever measured
    while d.isNotDone():
        current = cam.getImage().flipHorizontal()
        current = current.scale(scale_amount[0], scale_amount[1])
        if count < buffer:
            count = count + 1
        else:
            fs = current.findMotion(prev, window=15, method="BM")
            if fs:
                # Average the horizontal component of all motion features.
                dx = 0
                for f in fs:
                    dx = dx + f.dx
                dx = dx / len(fs)
                motionStr = movement_check(dx, t)
                current.drawText(motionStr, 10, 10)
            prev = current
            time.sleep(0.01)
        current.save(d)
    return motionStr
def main(cameraNumber, camWidth, camHeight):
    """Show a live frame-difference view of the given camera.

    Subtracting consecutive frames highlights whatever moved between them;
    the result is displayed forever.
    """
    # Create a display with size (width, height).
    disp = Display((camWidth, camHeight))
    # Initialize the camera at the requested resolution.
    cam = Camera(cameraNumber, prop_set={"width": camWidth, "height": camHeight})
    prev = cam.getImage()
    while True:
        # KISS: just grab the next frame and diff it against the last one.
        img = cam.getImage()
        (img - prev).show()
        prev = img
def __init__(self, cfg, strCamera="basic"):
    """Create an instance of this class.

    Arguments:
        strCamera -- The key identifying the camera to use; it must be
            defined in the config file and resolves to 'cameras.' + strCamera
            (e.g. cameras.dads_camera).
    """
    self.logger = logging.getLogger(self.__class__.__name__)
    self.cfg_root = cfg
    self.cfg = cfg["cameras"][strCamera]
    # Backfill defaults for any settings the config omits.
    self.cfg.setdefault("local_path", "./images/shot.jpg")
    self.cfg.setdefault("no_pic", "./images/xxx.jpg")
    self.cfg.setdefault("uri", None)

    uri = self.cfg["uri"]
    if uri == "PICAMERA":
        self.logger.info("Using connected Pi camera module ...")
        if self._cam is None:  # _cam presumably a class-level cache — confirm
            self._cam = picamera.PiCamera()
            self._cam.resolution = (640, 480)
    elif uri is not None:
        self.logger.info("Using web camera at URI: %s ...", uri)
    else:
        self.logger.info("Attempting to use local (USB?) camera ...")
        if self._cam is None:
            self._cam = Camera()
def get_image():
    """Capture a frame, save it as fireimg.png, and return it base64-encoded.

    Returns:
        The base64-encoded bytes of the saved PNG.
    """
    frame = Camera().getImage()
    frame.save("fireimg.png")
    time.sleep(1)
    # fix: the original open(...).read() leaked the file handle; a context
    # manager guarantees it is closed.
    with open("fireimg.png", "rb") as fh:
        return base64.b64encode(fh.read())
def take_selfie_using_simple_cv(image_filename):
    """Grab one webcam frame with SimpleCV and write it to image_filename.

    Requires python-opencv, pygame, numpy, scipy and SimpleCV to be installed.
    """
    from SimpleCV import Camera
    Camera().getImage().save(image_filename)
def get_image():
    """Return a single frame from camera 0 after a one-second warm-up."""
    cam = Camera(0)
    time.sleep(1)  # let the sensor adjust before grabbing
    return cam.getImage()
def main():
    """Photograph, display, save, then POST the image to the local server.

    The frame is saved as mesquita.png and uploaded base64-encoded to
    http://localhost:3000/salvar; the HTTP status is printed afterwards.
    """
    shot = Camera().getImage()
    shot.show()
    shot.save("mesquita.png")
    # fix: the original open(...).read() leaked the file handle.
    with open('mesquita.png', 'rb') as fh:
        payload = {'name': "teste", 'file': base64.b64encode(fh.read())}
    r = requests.post("http://localhost:3000/salvar", data=payload)
    r.should_close = True
    print(r.status_code, r.reason)
def run(self): m = alsaaudio.Mixer() # defined alsaaudio.Mixer to change volume scale = (300, 250) # increased from (200,150). works well d = Display(scale) cam = Camera() prev = cam.getImage().scale(scale[0], scale[1]) sleep(0.5) buffer = 20 count = 0 prev_t = time() # Note initial time while d.isNotDone(): current = cam.getImage() current = current.scale(scale[0], scale[1]) if (count < buffer): count = count + 1 else: fs = current.findMotion(prev, method="LK") # find motion # Tried BM, and LK, LK is better. need to learn more about LK if fs: # if featureset found dx = 0 dy = 0 for f in fs: dx = dx + f.dx # add all the optical flow detected dy = dy + f.dy dx = (dx / len(fs)) # Taking average dy = (dy / len(fs)) prev = current sleep(0.01) current.save(d) if dy > 2 or dy < -2: vol = int(m.getvolume()[0]) # getting master volume if dy < 0: vol = vol + (-dy * 3) else: vol = vol + (-dy * 3) if vol > 100: vol = 100 elif vol < 0: vol = 0 print vol m.setvolume(int(vol)) # setting master volume if dx > 3: cur_t = time() if cur_t > 5 + prev_t: # adding some time delay self.play("next") # changing next prev_t = cur_t if dx < -3: cur_t = time() if cur_t > 5 + prev_t: prev_t = cur_t self.play("previous") # changing previous
def scan_cameras():
    """Probe camera indices 0-9 and return the list of indices that work.

    A camera counts as present when it can be opened AND a frame can be
    grabbed and processed — opening alone is not enough on some backends.

    Returns:
        list[int]: the indices of the cameras that responded.
    """
    existingcameras = []
    for i in range(0, 10):
        try:
            camera = Camera(i)
            # Force a real capture/processing round-trip to weed out devices
            # that open but cannot deliver frames.
            camera.getImage().erode()
            existingcameras.append(i)
        except Exception:
            # fix: was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit during the probe loop.
            pass
    return existingcameras
def TakeWebshot(Path, Width, Height):
    """Capture one webcam frame at Width x Height and save it to Path.

    Returns:
        True on success, False if opening the camera or saving fails.
    """
    try:
        cam = Camera(prop_set={"width": Width, "height": Height})
        img = cam.getImage()
        img.save(Path)
        # Release references promptly so the device is freed.
        del cam
        del img
        return True
    except Exception:
        # fix: narrowed from a bare 'except:' so Ctrl-C still propagates.
        return False
def __init__(self, cam_num, debug=False):
    """Set up the vision process for the given camera index."""
    Process.__init__(self)
    self.cam = Camera(cam_num, threaded=False)
    self.debug = debug
    # Shared arrays start as (-1, -1) placeholders until tracking
    # produces real positions/velocities.
    self.puck_locations = Array(Vector, [(-1, -1), (-1, -1)])
    self.puck_velocity = Array(Vector, [(-1, -1), (-1, -1)])
    self.gun_positions = Array(Vector, [(-1, -1), (-1, -1)])
    # Field-calibration state.
    self.field_crop_boundary = list()
    self.field_post_crop_limits = [5000, 0]  # [left, right]
    self.crop_points = list()
    self.lighting_constant = 250
def __init__(self, serial_loc='/dev/ttyACM0', baud=9600, verbose=True): ''' Initializes connection to arduino + camera. ''' self.cam = Camera(1) time.sleep(0.1) # If you don't wait, the image will be dark self.ser = serial.Serial(serial_loc, baud) self.ser.write("INIT") result = ser.readline() if result == "RECV": print "connection established"
def nok_air_task():
    """Photograph UI slot 3: capture a frame, thumbnail it, and place it on screen."""
    global plu_tkpi, plu_rect, plu_text, cam_num, plu_pic
    plu_pic[3] = True
    thumb = Camera(cam_num).getImage().scale(90, 60)
    thumb.save('tmp_picture.jpg')
    plu_tkpi[3] = pygame.image.load('tmp_picture.jpg')
    plu_rect[3] = plu_tkpi[3].get_rect()
    # Fixed on-screen position for slot 3.
    plu_rect[3][0] = 100
    plu_rect[3][1] = 5
    # Label with day + abbreviated month sliced out of the localtime string.
    plu_text[3] = localtime[8:10] + ' ' + localtime[4:7]
def run_capturer(kafka_hosts, fps=24):
    """Publish timestamped webcam frames to the CAMERA_FEED Kafka topic.

    Each frame is stamped with the current time, written to tmp.jpg, and the
    bytes are packed and sent to the producer. (Note: `fps` is accepted but
    the pacing is the fixed 0.4 s sleep below.)
    """
    producer = KafkaProducer(bootstrap_servers=kafka_hosts)
    cam = Camera()
    while True:
        frame = cam.getImage()
        frame.drawText(get_timestamp(), fontsize=160)
        frame.save('tmp.jpg')
        with open('tmp.jpg', mode='rb') as fh:
            payload = fh.read()
        producer.send('CAMERA_FEED', pack_image(payload))
        print('Got an image')
        sleep(0.4)
def __init__(self):
    """Open up to N_CAMERAS devices, keeping only those that initialise.

    n_cameras is re-set afterwards to the number of cameras that actually
    opened, so it always matches len(self.cameras).
    """
    self.n_cameras = int(Config.get("N_CAMERAS"))
    # Initialize the cameras
    self.cameras = []
    for i in xrange(self.n_cameras):
        try:
            self.cameras.append(Camera(camera_index = i))
        except Exception:
            # fix: was a bare 'except:', which would also hide
            # KeyboardInterrupt/SystemExit while probing devices.
            logger.warning("Error opening camera #" + str(i))
    self.n_cameras = len(self.cameras)
def handle(self, *args, **options):
    """Serve the selected camera as an MJPEG stream at roughly 10 fps."""
    host = options.get('host', '0.0.0.0')
    port = options.get('port', '8090')
    host_camera = options.get('host_camera', 0)

    # Set up the camera and the HTTP JPEG stream it feeds.
    camera = Camera(host_camera)
    stream = JpegStreamer("%s:%s" % (host, port))

    while True:
        camera.getImage().save(stream)
        time.sleep(0.1)  # throttle to ~10 fps
def recordVideo(self, length=5): BUFFER_NAME = 'buffer.avi' vs = VideoStream(fps=24, filename=BUFFER_NAME, framefill=True) self.disp = Display((self.width, self.height)) cam = Camera(prop_set={"width":self.width,"height":self.height}) while self.continueRecord: gen = (i for i in range(0, 30 * length) if self.continueRecord) for i in gen: img = cam.getImage() vs.writeFrame(img) img.save(self.disp) self.continueRecord = False print "Broke capture loop" self.disp.quit() print "Saving video"
def __init__(self):
    """Set up the camera, default settings file, image paths and working state."""
    self.camara = Camera()
    # Default tuning values loaded by cargaAjustes() below.
    self.archivoAjustesPorDefecto = '/home/cubie/Guhorus/Brazo mas Vision/GUI-para-el-control-visual-de-brazo-robotico/imagen/MisAjustes/ajustesBuenos.json'
    self.cargaAjustes()
    # Where each processing stage stores its output image.
    self.rutaImagenOriginal = 'imagen/imagenesGuardadas/ImagenOriginal.jpg'
    self.rutaImagenReducida = 'imagen/imagenesGuardadas/imagenReducida.jpg'
    self.rutaImagenBlobs = 'imagen/imagenesGuardadas/imagenBlobs.jpg'
    self.rutaImagenTratada_Fase1 = 'imagen/imagenesGuardadas/imagenTratada_fase1.jpg'
    self.rutaImagenTratada_Fase2 = 'imagen/imagenesGuardadas/imagenTratada_fase2.jpg'
    # Detection results accumulated while processing.
    self.angulosHuesos = []
    self.articulaciones = []
    self.blobsFiltradosPorForma = []
    self.todosLosCandidatos = []
    self.AreaBlobs = []
    self.numBlobsCandidatosPorArea = 0
    self.enDepuracion = False  # debug mode flag
    self.listaAngulos = []
def qread():
    """Scan the webcam for up to ~7 seconds and return the first QR/barcode data.

    Returns:
        The decoded data as a string, or None if nothing was read in time.
    """
    cam = Camera()
    found = 0
    started = time.time()
    timeout_secs = 7
    result = None
    while found < 1 and time.time() < started + timeout_secs:
        frame = cam.getImage()
        codes = frame.findBarcode()
        if codes is not None:
            # Take the first detected code and stop scanning.
            result = str(codes[0].data)
            found = 1
    del cam
    return result
def webcam_pic(self, interval_w): try: cam = Camera() while True: time.sleep(interval_w) cur_time = str( str(time.localtime().tm_year) + "_" + str(time.localtime().tm_mon) + "_" + str(time.localtime().tm_mday) + "_" + str(time.localtime().tm_hour) + "_" + str(time.localtime().tm_min) + "_" + str(time.localtime().tm_sec)) scr = path_to_images + "webcam_" + cur_time + ".jpg" files.append(str(scr)) img = cam.getImage() img.save(scr) except Exception as e: print e
def process2():
    """Capture a frame, de-warp it using the orange corner markers, and show it."""
    cam = Camera()
    frame = cam.getImage()
    frame.show()
    corners = find_corners(frame, Color.ORANGE, circlet=.4)
    print("shear")
    # Order the detected corner centroids clockwise: top-left, top-right,
    # bottom-right, bottom-left.
    tl, tr, br, bl = clockwise_corners([blob.centroid() for blob in corners], frame)
    # Warp so the detected corners map onto an axis-aligned rectangle.
    fixed = frame.warp([tl, (br[0], tl[1]), br, (tl[0], br[1])])
    fixed.show()
    print("shear")
    new_corners = find_corners(fixed, Color.ORANGE, circlet=.4, radius=10)
    print(new_corners)
    draw_blobs(fixed, new_corners, new_corners[0])
def __init__(self, window_size=(640, 480), **kwargs):
    """Open the camera (retrying until it responds) and set up tracking state.

    The retry loop performs a full capture + flip to make sure the device
    really delivers frames before it is accepted.
    """
    while True:
        try:
            cam = Camera()
            cam.getImage().flipHorizontal()
        except Exception:
            # fix: was a bare 'except:', which also trapped
            # KeyboardInterrupt — making this infinite retry loop
            # impossible to break with Ctrl-C.
            continue
        else:
            break
    self.cam = cam
    self.image = None
    self.window_size = window_size
    self.display = Display(self.window_size)
    # Hard-coded calibration point — NOT the geometric window centre.
    self.__window_center = (
        338, 377)  # (self.window_size[0]/2, self.window_size[1]/2)
    self.__distance = None
    self.__blobs = None
    self.__segmented = None
    self.__circles = None
    self.__scope_layer = None
    self.initialize_scope_layer()
def take_a_picture(i):
    """Capture a thumbnail into UI slot i (slots 0-3 are reserved) and label it.

    The shot is saved under ./pic/ with a zero-padded sequence number, loaded
    into pygame for display, and labelled with either the user-set date or
    the current localtime date.
    """
    global pic_num, plu_pic_num, plu_tkpi, plu_rect, plu_text, cam_num, plu_pic
    if i <= 3:
        return  # slots 0-3 are managed elsewhere
    mm = [
        'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct',
        'Nov', 'Dec'
    ]
    plu_pic[i] = True
    thumb = Camera(cam_num).getImage().scale(90, 60)
    pic_num = pic_num + 1
    pic_path = './pic/p' + format(pic_num, '012') + '.jpg'
    thumb.save(pic_path)
    plu_tkpi[i] = pygame.image.load(pic_path)
    plu_pic_num[i] = pic_num
    plu_rect[i] = plu_tkpi[i].get_rect()
    if Date_set != 0 and Month_set != 0:
        # User has explicitly chosen a date/month for the label.
        plu_text[i] = str(Date_set) + ' ' + mm[Month_set - 1]
    else:
        plu_text[i] = localtime[8:10] + ' ' + localtime[4:7]
def __init__(self, camara_id, retraso_video = 10, framerate = 4.0, color=False, size=(320,240), ruido=True):
    """Delayed-video buffer over an OpenCV camera, with optional fake interference.

    Frames are queued so playback lags `retraso_video` seconds behind the
    camera at `framerate` fps; noise frames can be injected at random
    intervals to simulate a flaky link.
    """
    self.buffer_size = int(retraso_video*framerate)+ 1  # make sure it is never zero
    self.intervalo_refresco = float(1.0/framerate)  # how often the video buffer is refreshed
    self.momento_refresco = time.time() + self.intervalo_refresco  # next time the buffer must pop/push a frame
    time.sleep(self.intervalo_refresco)  # safety pause before building the buffer
    self.video_buffer = []  # holds the frames covering the delay window
    self.imagen = None  # scratch storage for the latest camera capture
    self.camara = Camera(camara_id)  # instance of the OpenCV Camera() class
    self.Flag_color = color  # if False, frames are converted to grayscale
    self.Flag_resize = False  # set True below when a valid resolution is given
    self.size = size  # target resolution, used only when valid
    if size[0]>0 and size[1]>0:
        self.Flag_resize = True
    # Noise / interference control.
    self.duracion_interferencia = (2,6)  # seconds an interference burst may last
    self.tiempo_entre_interferencias = (5, 20)  # clean-signal periods between bursts
    self.FLAG_ruido_activo = ruido  # if True, interference fires at random moments
    self.FLAG_aplicar_ruido_ahora = True  # whether it is currently time to inject noise
    if self.Flag_color == False:
        # Preload the noise frames, grayscale variant.
        self.video_ruido = [self.resize(self.gris(Image("VideoBuffer_SimpleCV/c_ruido%d.png" %x))) for x in range(5)]
    else:
        # Preload the noise frames, colour variant.
        self.video_ruido = [self.resize(Image("VideoBuffer_SimpleCV/c_ruido%d.png" %x)) for x in range(5)]
    self.frame_ruido_index = 0  # which noise frame will be shown next
    self.incremento_aleatorio = retraso_video + random.randrange(self.duracion_interferencia[0],self.duracion_interferencia[1])
    self.momento_cambio_bandera = time.time() + self.incremento_aleatorio
    self.nivel_ruido_maximo = 10  # intensity at which the interference is shown
    # Create and pre-fill the video buffer.
    self.video_buffer = []
    self.imagen = self.getImage()  # capture one frame from the camera
    self.imagen = self.resize(self.gris(self.imagen))  # rescale and convert to grayscale
    self.imagen = self.imagen.blur(45,45)  # blurred placeholder until the delay elapses
    # NOTE(original author): this overlay does not play well with the noise frames.
    textLayer = DrawingLayer((self.imagen.width, self.imagen.height))  # empty layer for text
    textLayer.text("CONECTANDO...", (40, 70), color=Color.RED)  # "connecting" banner
    self.imagen.addDrawingLayer(textLayer)  # merge image and text layers
    self.video_buffer = [self.imagen for x in range(self.buffer_size)]  # fill buffer with the static image
def initCamera(self):
    """Setup camera variables

    Will prompt the user for feedback. Capable of loading webcam or an
    ip-camera streaming JPEG
    """
    # TODO: Track last camera mode; use 'Enter' to repeat that
    camIp = raw_input("Specify camera; enter for webcam, " +
                      "or ip of network camera:\n")
    logger.info("Camera specified as '{camIp}'".format(camIp=camIp))
    # fix: was "camIp is ''" — identity comparison against a str literal is
    # implementation-dependent (and warned about by CPython); equality is
    # the correct test.
    if camIp == '':
        self.cam = Camera()
    elif '.' not in camIp:
        # Bare host number: assume the 192.168.1.x LAN.
        self.cam = JpegStreamCamera(
            "http://192.168.1.{ip}:8080/video".format(ip=camIp))
    else:
        self.cam = JpegStreamCamera(
            "http://{ip}:8080/video".format(ip=camIp))
    self.camRes = (800, 600)
    logger.info("Camera resolution={res}".format(res=self.camRes))
import numpy as np  # numpy for the vector/matrix math below

## Detection, professor's algorithm:
def calcrho(A, B):
    """Return the 9-element rho vector of RGB channel ratios between A and B.

    A and B are 3-element RGB vectors; every A channel is divided by every
    B channel, giving the ratio signature used to classify pixels.
    """
    Ar, Ag, Ab = A[0], A[1], A[2]
    Br, Bg, Bb = B[0], B[1], B[2]
    r = np.array([Ar/Br, Ag/Br, Ab/Br,
                  Ar/Bg, Ag/Bg, Ab/Bg,
                  Ar/Bb, Ag/Bb, Ab/Bb])
    return r

# Open the camera object for 320x240 captures.
c = Camera(0, {"width": 320, "height": 240})
img = c.live()      # live preview until the user right-clicks the image
time.sleep(2)       # give the camera 2 seconds to settle
img = c.getImage()  # take the photo
fot = img.show()    # display the capture...
time.sleep(4)       # ...for 4 seconds
fot.quit()          # close the image window
mat = img.getNumpy().astype(dtype='float64')  # image as a float64 3-D array
siz = mat.shape     # matrix dimensions
fil = siz[1]        # stored into scalar variables for later use
col = siz[0]
bor = np.zeros((col, fil, 3))  # empty mask used to outline the mole
red = np.array([200, 0, 0], dtype='float64')  # red used to highlight the border
# Material vectors obtained by training the algorithm in Matlab:
A = np.array([86, 53, 36], dtype='float64')    # material A (the mole)
B = np.array([133, 119, 63], dtype='float64')  # material B (the skin)