Example #1
def plu_take_a_picture(i):
	global plu_button,image_CV,plu_tkpi,cam_num,plu_pic,nok_air
	win.after_cancel(nok_air)
	if i>=20:
		plu_pic[i-20]=True
		cam = Camera(cam_num)
		img = cam.getImage()
		img.save('tmp_picture.jpg')
		image_CV = Image.open('tmp_picture.jpg')
		resized = image_CV.resize((320,225),Image.ANTIALIAS)
		plu_tkpi[i-20] = ImageTk.PhotoImage(resized)
		plu_button[i].configure(width = 160, height = 112, image=plu_tkpi[i-20])				
	elif i>=10:
		plu_pic[i-10]=True
		cam = Camera(cam_num)
		img = cam.getImage()
		img.save('tmp_picture.jpg')
		image_CV = Image.open('tmp_picture.jpg')
		resized = image_CV.resize((320,225),Image.ANTIALIAS)
		plu_tkpi[i-10] = ImageTk.PhotoImage(resized)
		plu_button[i].configure(width = 213, height = 150, image=plu_tkpi[i-10])		
	else:
		plu_pic[i] = True
		cam = Camera(cam_num)
		img = cam.getImage()
		img.save('tmp_picture.jpg')
		image_CV = Image.open('tmp_picture.jpg')
		resized = image_CV.resize((320,225),Image.ANTIALIAS)
		plu_tkpi[i] = ImageTk.PhotoImage(resized)
		plu_button[i].configure(width = 320, height = 225, image=plu_tkpi[i])
	nok_air = win.after(1000,nok_air_task)
Example #2
class CameraVideo(object):

    def __init__(self):
        if hasattr(self, 'my_camera') is False:
            self.my_camera = Camera(prop_set={'width': 320, 'height': 240})
        self.my_display = Display(resolution=(320, 240))
        self.live_preview = False
        self.timestamp = datetime.now().strftime('%Y%m%d%H%M%S')

    def start_live_preview(self):
        if self.live_preview is False:
            self.file_name = "/tmp/cameraOut" + self.timestamp + ".avi"
            self.live_preview = True
            #video_stream = VideoStream(self.file_name, fps=15)
        timeout = 0
        while timeout < 100:
            #image = my_camera.getImage()
            #image = image.edges()
            #video_stream.writeFrame(image)
            self.my_camera.getImage().save(self.my_display)
            timeout += 2
            sleep(0.1)
        
        return self.file_name

    def stop_live_preview(self):
        self.live_preview = False
        # construct the encoding arguments
        # outname = self.file_name.replace('.avi', '.mp4')
        # params = " -i {0} {1}".format(self.file_name, outname)
        # run ffmpeg to compress your video.
        # call('ffmpeg' + params, shell=True)

    def take_video(self, duration):
       pass
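The take_video method above is left as a stub; a minimal sketch of what it could do, assuming frames are simply grabbed from the camera for duration seconds and written to a VideoStream (an assumption, not the original implementation):

# Hypothetical sketch (not the original implementation): grab frames for
# `duration` seconds and write them to a timestamped VideoStream in /tmp.
def take_video_sketch(camera, duration, fps=10):
    from SimpleCV import VideoStream
    from datetime import datetime
    from time import sleep
    file_name = "/tmp/cameraOut" + datetime.now().strftime('%Y%m%d%H%M%S') + ".avi"
    video_stream = VideoStream(file_name, fps=fps)
    for _ in range(int(duration * fps)):
        camera.getImage().save(video_stream)  # append the current frame
        sleep(1.0 / fps)
    return file_name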
Example #3
def simpleDiff():
    cam = Camera()
    img = cam.getImage().scale(.20)
    disp = Display(img.size())
    img.save(disp)
    X = range(100)
    Y = [0 for i in range(100)]
    count = 0
    imgA = cam.getImage().scale(0.20).grayscale()
    while not disp.isDone():
        ax.clear()
        count += 1
        time.sleep(0.1)
        imgB = cam.getImage().scale(0.20).grayscale()
        #imgB.save(disp)
        motion = (imgB - imgA).binarize().invert().erode(1).dilate(1)
        motion.save(disp)
        s = diff(motion)
        imgA = imgB
        if count < 100:
            Y[count] = s
        else:
            Y.append(s)
            Y = Y[1:]
            X.append(count)
            X = X[1:]
        ax.bar(X, Y)
        plt.xlim(X[0], X[-1])
        plt.draw()
        imgA = imgB
Example #4
def Run(cmdPipe):
    steadyStateFPS = 10
    desiredBuffer = 60*60 # 60 minutes * 60 seconds = 1 hour of buffer
    numberOfFrames = steadyStateFPS*desiredBuffer



    cam = Camera(0, {"width": 640, "height": 480})

    i = 10
    sleepTime = 0
    while True:
        # check command
        if cmdPipe.poll():
            cmd = cmdPipe.recv()
            if cmd=='shutdown':
                print('capture', 0, "Shutting down.")
                break

        filelist = glob("images/*.jpg")
        if len(filelist)<numberOfFrames:
            sleepTime = (1.0/steadyStateFPS)-.01
            print("capture", 0, "number of frames in buffer="+str(len(filelist))+" desired="+str(numberOfFrames)+" setting sleeptime to "+str(sleepTime))
        else:
            sleepTime = 1.0/steadyStateFPS
            print("capture", 0, "number of frames in buffer="+str(len(filelist))+" desired="+str(numberOfFrames)+" setting sleeptime to "+str(sleepTime))
        for index in range(100):
            ts = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
            cam.getImage().save("images/slowglass."+ ts + ".jpg")
            time.sleep(sleepTime)
Example #5
def opticalFlow():
    cam = Camera()
    img = cam.getImage().scale(.20)
    disp = Display(img.size())
    img.save(disp)
    X = range(100)
    Y = [0 for i in range(100)]
    flag = 0
    count = 0
    while not disp.isDone():
        ax.clear()
        count += 1
        if flag == 0:
            imgA = cam.getImage().scale(0.20)
            flag += 1
        else:
            imgB = cam.getImage().scale(0.20)
            imgB.save(disp)
            motion = imgB.findMotion(imgA)
            s = sum([i.magnitude() for i in motion])
            imgA = imgB
            if count < 100:
                Y[count] = s
            else:
                Y.append(s)
                Y = Y[1:]
                X.append(count)
                X = X[1:]
            ax.bar(X, Y)
            plt.xlim(X[0], X[-1])
            plt.draw()
Example #6
    def run(self):
        m = alsaaudio.Mixer()   # defined alsaaudio.Mixer to change volume
        scale = (300,250)    # increased from (200,150). works well
        d = Display(scale)
        cam = Camera()
        prev = cam.getImage().scale(scale[0],scale[1])
        sleep(0.5)
        buffer = 20
        count = 0
        prev_t = time()    # Note initial time
        while d.isNotDone():
            current = cam.getImage()
            current = current.scale(scale[0],scale[1])
            if( count < buffer ):
                count = count + 1
            else:
                fs = current.findMotion(prev, method="LK")   # find motion
                # Tried BM, and LK, LK is better. need to learn more about LK
                if fs:      # if featureset found
                    dx = 0
                    dy = 0
                    for f in fs:
                        dx = dx + f.dx      # add all the optical flow detected
                        dy = dy + f.dy
                
                    dx = (dx / len(fs))     # Taking average
                    dy = (dy / len(fs))

                    prev = current
                    sleep(0.01)
                    current.save(d)
                    
                    if dy > 2 or dy < -2:
                        vol = int(m.getvolume()[0]) # getting master volume
                        if dy < 0:
                            vol = vol + (-dy*3)
                        else:
                            vol = vol + (-dy*3)
                        if vol > 100:
                            vol = 100
                        elif vol < 0:
                            vol = 0
                        print vol
                        m.setvolume(int(vol))   # setting master volume
                        
                    if dx > 3:
                        cur_t = time()
                        if cur_t > 5 + prev_t:  # adding some time delay
                            self.play("next")   # changing next
                            prev_t = cur_t
                        
                    if dx < -3:
                        cur_t = time()
                        if cur_t > 5 + prev_t:
                            self.play("previous")   # changing previous
                            prev_t = cur_t
Example #7
def scan_cameras():
    existingcameras = []
    for i in range(0, 10):
        try:
            camera = Camera(i)
            camera.getImage().erode()
            existingcameras.append(i)
        except:
            pass
    return existingcameras
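A hedged usage sketch for scan_cameras (the call pattern below is an assumption added for illustration, not part of the original example):

# Hypothetical usage: open the first camera index that responded to a capture.
working = scan_cameras()
if working:
    cam = Camera(working[0])
    cam.getImage().show()
else:
    print("No working cameras found")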
Example #8
class Camara(pygame.sprite.Sprite):
    "ImagenCamara"
    #TODO: Se debería mejorar la elección de cámara, así no se toca el código
    elegida=1 #Por defecto es la cámara 0 o /dev/video0
    estado=False #Estado de cámar(False=Apagada, True=Encendida)
    norma="PALN" #Norma(Hay que ver como se puede cambiar)
    cam=""
    def __init__(self):
        self.estado_luces=False
        pygame.sprite.Sprite.__init__(self)
        self.image = load_image("cam_principal_apagada.png", IMG_DIR, alpha=True)
        self.rect = self.image.get_rect() #Get a rect object for coordinates and size
        self.rect.centerx = 385
        self.rect.centery = 280
        self.image=pygame.transform.scale(self.image, (640, 480))

    def encender(self):
        self.estado=True
        self.cam=Camera(self.elegida)
        #The camera power-on routines should be added in this section
        #
        #
        #The graphics behaviour should be added in this section
        #
        #
        #
    def apagar(self):
        if self.estado==True:
            del self.cam
            self.estado=False

        #The camera power-off routines should be added in this section
        #
        #
        #The graphics behaviour should be added in this section
        #
        #
        #

    def obtener_imagen(self): #Returns the image in SimpleCV format

        if self.estado==True:
            imagen=self.cam.getImage().toPygameSurface()
        else:
            imagen=SimpleCV.Image("Imagenes/cam_principal_apagada.png").toPygameSurface()
        return imagen

    def sacar_foto(self,archivo): #OPTIMIZE: This function needs to be improved
        self.archivo=archivo
        imagen=self.cam.getImage()
        imagen.save(archivo)
Example #9
def main(cameraNumber, camWidth, camHeight, outputFile):

    BUFFER_NAME = 'cloud3.avi'
    vs = VideoStream(fps=24, filename=BUFFER_NAME, framefill=True)

    disp = Display((camWidth, camHeight))
    cam = Camera(cameraNumber, prop_set={"width": camWidth, "height": camHeight})

    # while the user does not press 'esc'
    start_time = time()
    count = 0
    while disp.isNotDone():
        # KISS: just get the image... don't get fancy
        img = cam.getImage()
        print type(img)

        skimage.io.push(img)

        #img.show()

        # write the frame to videostream
        vs.writeFrame(img)

        # show the image on the display
        img.save(disp)

        current_time = time()
        if current_time-start_time>=5:
            outputFile = "testing_chunk_%d.mp4" % (count)
            print "Saving %s" % (outputFile)
            saveFilmToDisk(BUFFER_NAME, outputFile)
            start_time = time()
            count += 1
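saveFilmToDisk is called above but not defined in this snippet; a minimal sketch, assuming the avconv-based helper that Example #11 defines, could look like this:

# Hypothetical helper (mirrors the avconv call shown in Example #11).
from subprocess import call

def saveFilmToDisk(bufferName, outname):
    # construct the encoding arguments
    params = " -i {0} -c:v mpeg4 -b:v 700k -r 24 {1}".format(bufferName, outname)
    # re-encode the raw .avi buffer into the requested output file
    call('avconv' + params, shell=True)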
Example #10
def main(cameraNumber, outputFile):
    BUFFER_NAME = 'ipython1.avi'

    # create the video stream for saving the video file
    #vs = VideoStream(fps=24, filename=fname, framefill=True)
    #vs = VideoStream(fps=24, filename=BUFFER_NAME, framefill=True)
    vs=VideoStream(filename=BUFFER_NAME)
    # create a display with size (width, height)
    disp = Display()

    # Initialize Camera
    cam = Camera(cameraNumber)
    time_start=time.time()
    # while the user does not press 'esc'
    while time.time()-time_start<10:
    # Finally, let's start
        # KISS: just get the image... don't get fancy
    
        img = cam.getImage()

        #img.show()

        # write the frame to videostream
        vs.writeFrame(img)

        # show the image on the display
        img.save(disp)

    # Finished the acquisition of images now Transform into a film
    #self.makefilmProcess = Process(target=self.saveFilmToDisk, args=(BUFFER_NAME, outputFile))
    #self.makefilmProcess.start()
    saveFilmToDisk(BUFFER_NAME, outputFile)
Example #11
def main(cameraNumber, camWidth, camHeight, outputFile):
    BUFFER_NAME = 'buffer.avi'

    # create the video stream for saving the video file
    vs = VideoStream(fps=24, filename=BUFFER_NAME, framefill=True)
    
    # create a display with size (width, height)
    disp = Display((camWidth, camHeight))
    
    # Initialize Camera
    cam = Camera(cameraNumber, prop_set={"width": camWidth, "height": camHeight})
    
    # while the user does not press 'esc'
    while disp.isNotDone():
        # KISS: just get the image... don't get fancy
        img = cam.getImage()
        
        # write the frame to videostream
        vs.writeFrame(img)
        
        # show the image on the display
        img.save(disp)
    
    # Finished the acquisition of images now Transform into a film
    makefilmProcess = Process(target=saveFilmToDisk, args=(BUFFER_NAME, outputFile))
    makefilmProcess.start()


def saveFilmToDisk(bufferName, outname):
    # construct the encoding arguments
    params = " -i {0} -c:v mpeg4 -b:v 700k -r 24 {1}".format(bufferName, outname)

    # run avconv to compress the video since ffmpeg is deprecated (going to be).
    call('avconv' + params, shell=True)
Example #12
def main():
    """Finds and interprets feature points"""

    # Initialize Camera
    print "Starting Webcam..."
    try:
        cam = Camera()
    except:
	print "Unable to initialize camera"
	sys.exit(1)

    display = Display(resolution = (RES_WIDTH, RES_HEIGHT))
    while not display.isDone():
        # capture the current frame
        try: 
	    capture = cam.getImage()
	    img = capture.smooth()
	except cv.error:
	    print "Camera unsupported by OpenCV"
	    sys.exit(1)

        # Build face and interpret expression
	face_image = FaceImage(img)
	if face_image.face:
	  #s  face_image.interpret()
	    pass

	capture.save(display)
	time.sleep(0.01)
	if display.mouseLeft:
	    display.done = True
Example #13
def main():

    x = 0;
    cam  = Camera (prop_set={'width':640, 'height':480})
    disp = Display (resolution=(320,240))
    while disp.isNotDone():
        img = cam.getImage()
        img = img.scale(0.5)
        faces = img.findHaarFeatures("eye.xml")
        #print "not Detected"
        if faces:
            for face in faces:
                face.draw()
                print "eyes Detected"
               # x = 0
        else:
            # x += 1
            print "close eyes"
            #print (x)
            #if x > 10:
            #    print "HOY GISING"
            #    return main()
        img.save(disp)
Example #14
    def recordVideo(self, cb, topic, length=5):
        global BUFFER_NAME

        BUFFER_NAME = topic + '_' + time.strftime("%Y_%m_%d_%H_%M_%S") + '.avi'
        vs = VideoStream(fps=24, filename=BUFFER_NAME, framefill=True)
        self.disp = Display((self.width, self.height))
        cam = Camera(1, prop_set={"width":self.width,"height":self.height})

        while self.continueRecord:
            gen = (i for i in range(0, 30 * length) if self.continueRecord)
            for i in gen:
                img = cam.getImage()
                vs.writeFrame(img)
                img.save(self.disp)
            self.continueRecord = False
        print "Broke capture loop"
        self.disp.quit()

        print "Saving video"

        # This is to run this process asynchronously - we will skip that
        # self.makefilmProcess = Process(target=saveFilmToDisk, args=(BUFFER_NAME, self.videoTitle))
        # self.makefilmProcess.start()

        # Callback function
        cb()
Example #15
def main(cameraNumber, camWidth, camHeight, outputFile):
    BUFFER_NAME = 'motiontest.avi'

    # create the video stream for saving the video file
    #vs = VideoStream(fps=24, filename=fname, framefill=True)
    vs = VideoStream(fps=24, filename=BUFFER_NAME, framefill=True)

    # create a display with size (width, height)
    disp = Display((camWidth, camHeight))

    # Initialize Camera
    cam = Camera(cameraNumber, prop_set={"width": camWidth, "height": camHeight})

    # while the user does not press 'esc'
    while disp.isNotDone():
        # KISS: just get the image... don't get fancy
        img = cam.getImage()

        #img.show()

        # write the frame to videostream
        vs.writeFrame(img)

        # show the image on the display
        img.save(disp)

    # Finished the acquisition of images now Transform into a film
    #self.makefilmProcess = Process(target=self.saveFilmToDisk, args=(BUFFER_NAME, outputFile))
    #self.makefilmProcess.start()
    saveFilmToDisk(BUFFER_NAME, outputFile)
Example #16
def resimcek():
    cam = Camera()
    img = cam.getImage()
    img.save("deneme.jpg")

    del cam
    del img 
Example #17
def get_image(img_name='tmp.jpg'):
    """Get an image from imaging hardware and save it."""
    cam=Camera()
    img=cam.getImage()
    img=img.toGray()
    img.save(img_name)
    img.show()
    return img_name
Example #18
def get_image():
    a = Camera(0)
    # a = Kinect()
    time.sleep(1)
    b = a.getImage()
    # b.save(expanduser("~/Projects/OceanColorSound/frame4.png"))
    # b = Image(expanduser("~/Projects/OceanSound/data/frame4.png"))
    return b
Example #19
    def get_camera(self):
        try:
            cam = Camera()
            img = cam.getImage()
            img.save("mZOMGGUYS.png")
        except:
            pass
        return voting.encode_image(self.random_path())
Example #20
def real_test(gesture_name, window_size=15):
    from SimpleCV import Camera, Display
    from sklearn.externals import joblib
    import time
    import os
    cam = Camera()
    dis = Display()
    time.sleep(2)
    lastLeft = None
    lastRight = None

    clfLeft = joblib.load('models/%s' % gesture_name)
    #clfRight = joblib.load('models/moveright')

    window = []

    targetFps = 15.0
    fps = targetFps
    sleepTime = 1/targetFps
    prevTime = None
    try:
        print "Recording... [keyboard interrupt to quit]"
        while True:
            img = cam.getImage()
            img = scaleDown(img)
            img.dl().ezViewText("{0:.3f} fps".format(fps), (0, 0))
            if fps > targetFps + .5:
                sleepTime += 0.005
            elif fps < targetFps:
                sleepTime = max(sleepTime - 0.015, 0.01)
            if prevTime is not None:
                fps = 1.0 / (time.time() - prevTime)
            prevTime = time.time()
            cur_feature = extractFeatures(img)
            if cur_feature is None:
                window = []
            else:
                window.append(cur_feature)
            if (len(window) == window_size):
                datum = subtractPastFeatures(window)
                if (1 == clfLeft.predict(flatten(datum))[0]):
                    print("Gesture Left")
                    if lastLeft is None or (time.time() - lastLeft) > 1:
                        #os.system("osascript -e 'tell application \"System Events\"' -e 'key down {control}' -e 'keystroke (key code 123)' -e 'key up {control}' -e 'end tell'")
                        lastLeft = time.time()
                #if (1 == clfRight.predict(flatten(datum))[0]):
                #    print("Gesture Right")
                #    if lastLeft is None or (time.time() - lastLeft) > 1:
                #        #os.system("osascript -e 'tell application \"System Events\"' -e 'key down {control}' -e 'keystroke (key code 124)' -e 'key up {control}' -e 'end tell'")
                #        lastLeft = time.time()
                
                window = window[1:]

            img.show()
            time.sleep(sleepTime)

    except KeyboardInterrupt:
        print "Done recording"
Example #21
def nok_air_task():
	global plu_tkpi,plu_rect,plu_text,cam_num,plu_pic
	plu_pic[3] = True
	cam = Camera(cam_num)
	img = cam.getImage()
	thumbnail = img.scale(90,60)
	thumbnail.save('tmp_picture.jpg')
	plu_tkpi[3] = pygame.image.load('tmp_picture.jpg')
	plu_rect[3] = plu_tkpi[3].get_rect()
	plu_rect[3][0] = 100
	plu_rect[3][1] = 5
	plu_text[3] = localtime[8:10]+' '+localtime[4:7]
Example #22
def main():
	cam = Camera()

	while True:
		image = cam.getImage()
		blobs = image.findBlobs()
		if blobs:
			blobs.draw()
			blob = find_yellow(blobs)
			if blob:
				image.drawCircle((blob.x, blob.y), 10, color=Color.RED)
		image.show()
Example #23
def run_capturer(kafka_hosts, fps=24):
    producer = KafkaProducer(bootstrap_servers=kafka_hosts)
    cam = Camera()
    while True:
        img = cam.getImage()
        img.drawText(get_timestamp(), fontsize=160)
        img.save('tmp.jpg')
        with open('tmp.jpg', mode='rb') as file:
            content = file.read()
        producer.send('CAMERA_FEED', pack_image(content))
        print('Got an image')
        sleep(0.4)
Example #24
class CameraPicture(object):

    def __init__(self):
        if hasattr(self, 'my_camera') is False:
            self.my_camera = Camera(prop_set={'width':320, 'height': 240})

    def take_picture(self):
        frame = self.my_camera.getImage()
        self.timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
        file_name = "/tmp/cameraOut" + self.timestamp + ".jpg"
        frame.save(file_name)
        return file_name
Example #25
def main(cameraNumber, camWidth, camHeight):
    
    img = None

    # create a display with size (width, height)
    disp = Display((camWidth, camHeight))

    # Initialize Camera
    cam = Camera(cameraNumber, prop_set={"width": camWidth, "height": camHeight})

    prev = cam.getImage()

    while 1:
        # Finally, let's get started
        # KISS: just get the image... don't get fancy
    
        img = cam.getImage()

        diff = img - prev

        diff.show()

        prev = img
Example #26
def interactiveTranslation():
	cam = Camera()
	disp = Display()
	current = " "
	while disp.isNotDone():
		image = cam.getImage()
		if disp.mouseLeft: break
		if disp.mouseRight:
			text = image.readText()
			text = cleanText(text)
			translated = trans.translate(text, langpair)
			if translated: current = translated
		image.drawText(current, 0, 0, color=Color.BLACK, fontsize=40)
		image.save(disp)
Example #27
class ObjectRead():
    def __init__(self, display=False):
        self.display=display
    
    img=None
    cam=None
    
    def startCamera(self,width=640,height=480):
        self.cam = Camera(0, { "width": width, "height": height })
        return self.cam
    
    def getImage(self):
        self.img=self.cam.getImage()
        return self.img    
Example #28
    def handle(self, *args, **options):
        host = options.get('host', '0.0.0.0')
        port = options.get('port', '8090')
        host_camera = options.get('host_camera', 0)

        # setup the stream
        camera = Camera(host_camera)
        stream = JpegStreamer("%s:%s" % (host, port))

        while True:
            image = camera.getImage()
            image.save(stream)

            # sleep for one frame period; in this case 10 fps -> 0.1 s
            time.sleep(0.1)
Example #29
def nok_air_task():
	global state,plu_tkpi,plu_button,plu_pic,nok_air
	plu_pic[0] = True
	cam = Camera(cam_num)
	img = cam.getImage()
	img.save('tmp_picture.jpg')
	image_CV = Image.open('tmp_picture.jpg')
	resized = image_CV.resize((320,225),Image.ANTIALIAS)
	plu_tkpi[0] = ImageTk.PhotoImage(resized)
	if state==4:
		plu_button[0].configure(width = 320, height = 225, image=plu_tkpi[0])						
	elif state==9:
		plu_button[10].configure(width = 213, height = 150, image=plu_tkpi[0])				
	elif state==16:
		plu_button[20].configure(width = 160, height = 112, image=plu_tkpi[0])				
	nok_air = win.after(500,nok_air_task)
Example #30
class FaceDetector:
    def __init__(self):
        self.cam = Camera()

    def get_coordinates(self):
        img = self.cam.getImage()

        faces = img.findHaarFeatures('face')

        if faces is not None:
            face = faces.sortArea()[-1]
            face.draw()

            return (face.x, face.y)

        raise FaceNotFound("fail")
Example #31
from SimpleCV import Camera, Color, Display
import time

cam = Camera()

previous = cam.getImage()

disp = Display(previous.size())

while not disp.isDone():
    current = cam.getImage()
    motion = current.findMotion(previous)
    for m in motion:
        m.draw(color=Color.RED, normalize=False)

    current.save(disp)
    previous = current
Example #32
from SimpleCV import Camera, Display
import time
cam = Camera()
display = Display()
while True:
    image = cam.getImage()
    image.save(display)
Example #33
import time
from SimpleCV import Camera, Image, Color, Display

img = Image((300, 300))

img.dl().circle((150, 75), 50, Color.RED, filled = True)

img.dl().line((150, 125), (150, 275), Color.WHITE, width = 5)

img.show()

time.sleep(5)

cam = Camera()

size = cam.getImage().size()

disp = Display(size)

center = (size[0] / 2, size[1] / 2)

while disp.isNotDone():
    img = cam.getImage()

    img.dl().circle(center, 50, Color.BLACK, width = 3)

    img.dl().circle(center, 200, Color.BLACK, width = 6)

    img.dl().line((center[0], center[1] - 50), (center[0], 0), Color.BLACK, width = 2)
    img.dl().line((center[0], center[1] + 50), (center[0], size[1]), Color.BLACK, width = 2)
    img.dl().line((center[0] - 50, center[1]), (0 , center[1]), Color.BLACK, width = 2)
Example #34
# plot points in 3D
cam = Camera(0)
disp = Display((800, 600))
fig = figure()
fig.set_size_inches((10, 7))

canvas = FigureCanvasAgg(fig)
azim = 0
while disp.isNotDone():
    ax = fig.gca(projection='3d')
    ax.set_xlabel('AZUL', color=(0, 0, 1))
    ax.set_ylabel('VERDE', color=(0, 1, 0))
    ax.set_zlabel('VERMELHO', color=(1, 0, 0))
    # Get the color histogram
    img = cam.getImage().scale(0.3)
    rgb = img.getNumpyCv2()
    hist = cv2.calcHist([rgb], [0, 1, 2], None, [bins, bins, bins],
                        [0, 256, 0, 256, 0, 256])
    hist = hist / np.max(hist)
    # render everything
    [
        ax.plot([x], [y], [z],
                '.',
                markersize=max(hist[x, y, z] * 100, 6),
                color=color) for x, y, z, color in idxs if (hist[x][y][z] > 0)
    ]
    #[ ax.plot([x],[y],[z],'.',color=color) for x,y,z,color in idxs if(hist[x][y][z]>0) ]
    ax.set_xlim3d(0, bins - 1)
    ax.set_ylim3d(0, bins - 1)
    ax.set_zlim3d(0, bins - 1)
Example #35
from SimpleCV import Camera, Display

cam = Camera()
WeightFactor = 0.5
t0 = cam.getImage()

disp = Display((t0.size()))

while not disp.isDone():
    t1 = cam.getImage()
    img = (t1 * WeightFactor) + (t0 * (1 - WeightFactor))
    img.save(disp)
    t0 = t1
Example #36
from SimpleCV import Camera

# Initialize the camera
cam = Camera()

# Loop to continuously get images
while True:
    # Get Image from camera
    initial_img = cam.getImage()

    # Binarized
    img = initial_img.binarize(150)

    print "hey"

    # Blobs
    blobs = img.findBlobs()

    if blobs:
        blobs = blobs.filter(blobs.area() > 50)
        blobs.draw(width=5)

    # Show the image
    img.show()

Example #37
class ImagenTratada():
    def __init__(self):
        self.camara = Camera()
        self.archivoAjustesPorDefecto = '/home/cubie/Guhorus/Brazo mas Vision/GUI-para-el-control-visual-de-brazo-robotico/imagen/MisAjustes/ajustesBuenos.json'
        self.cargaAjustes()
        self.rutaImagenOriginal = 'imagen/imagenesGuardadas/ImagenOriginal.jpg'
        self.rutaImagenReducida = 'imagen/imagenesGuardadas/imagenReducida.jpg'
        self.rutaImagenBlobs = 'imagen/imagenesGuardadas/imagenBlobs.jpg'
        self.rutaImagenTratada_Fase1 = 'imagen/imagenesGuardadas/imagenTratada_fase1.jpg'
        self.rutaImagenTratada_Fase2 = 'imagen/imagenesGuardadas/imagenTratada_fase2.jpg'
        self.angulosHuesos = []
        self.articulaciones = []
        self.blobsFiltradosPorForma = []
        self.todosLosCandidatos = []
        self.AreaBlobs = []
        self.numBlobsCandidatosPorArea = 0
        self.enDepuracion = False
        self.listaAngulos = []

    def cargaAjustes(
        self,
        archivo='/home/cubie/Guhorus/Brazo mas Vision/GUI-para-el-control-visual-de-brazo-robotico/imagen/MisAjustes/ajustesBuenos.json'
    ):
        self.ajustes = Ajustes()
        self.ajustes.cargaAjustes(archivo)

    def capturaImagen(self):
        img = self.camara.getImage()
        img.save(self.rutaImagenOriginal)
        imgReducida = img.resize(320, 240)
        imgReducida.save(self.rutaImagenReducida)
        return imgReducida

    def trataImagen(self):

        img = Image(self.rutaImagenReducida)
        result = img.colorDistance(
            (self.ajustes.r, self.ajustes.g, self.ajustes.b))
        result.save(self.rutaImagenTratada_Fase1)
        result = result.invert()
        result = result.binarize(float(self.ajustes.umbralBinarizado)).invert()
        result.save(self.rutaImagenTratada_Fase2)

        #self.depuracion()

    def capturaYTrataLaImagen(self):
        img = self.capturaImagen()
        self.trataImagen()
        return Image(self.rutaImagenTratada_Fase1)

    def capturaTrataYFiltraBlobsDeImagen(self):
        img = self.capturaImagen()
        self.trataImagen()
        self.encuentraYFiltraBlobs()
        return (Image(self.rutaImagenBlobs), self.listaAngulos)

    def encuentraYFiltraBlobs(self, tipoDibujo='estructura'):

        imagenBlobs = Image(self.rutaImagenTratada_Fase2).copy()
        blobs = imagenBlobs.findBlobs()
        self.todosLosCandidatos = blobs

        if blobs:  # findBlobs() returns None when no blobs are found

            blobs.image = imagenBlobs

            self.areaBlobs = blobs.area()
            blobs = self.filtroPorArea(blobs)
            self.numBlobsCandidatosPorArea = len(blobs)

            # Look for circular blobs; the blobs that pass the filter
            # are stored in the list self.articulaciones
            blobs = self.filtroPorForma(blobs)

            if tipoDibujo == 'blobs':
                self.dibujaBlobs(blobs)
            elif tipoDibujo == 'estructura':
                self.listaAngulos = self.encuentraYDibujaAngulos(imagenBlobs)

            # The processed image has to be saved, otherwise the
            # integration with Tkinter does not work
            imagenBlobs.save(self.rutaImagenBlobs)
            return Image(self.rutaImagenBlobs)

    def filtroPorArea(self, blobs):
        return blobs.filter((blobs.area() > self.ajustes.areaMin)
                            & (blobs.area() < self.ajustes.areaMax))

    def filtroPorForma(self, blobs):
        """ Busca los blobs de forma circular , los blobs que pasan el filtro
		se guardan en la lista self.articulaciones"""

        numero_Iteraciones = 2

        self.articulaciones = []
        self.todosLosCandidatos = []
        self.blobsFiltradosPorForma = []
        for blob in blobs:
            candidato = blob.blobMask()
            hayCirculo, errorCode = aux.esCirculo(candidato,
                                                  self.ajustes.toleranciaWH,
                                                  self.ajustes.toleranciaLP,
                                                  self.ajustes.desviacionD,
                                                  numero_Iteraciones)
            self.todosLosCandidatos.append(blob)
            if not hayCirculo and self.enDepuracion:
                print errorCode
            if hayCirculo:
                self.articulaciones.append((blob.x, blob.y))
                self.blobsFiltradosPorForma.append(blob)

    def dibujaBlobs(self, blobs):
        if self.todosLosCandidatos:
            for blob in self.todosLosCandidatos:
                blob.draw(width=2, color=Color.YELLOW)

    def encuentraYDibujaAngulos(self, img):
        """ Ademas de dibujar la estructura de los huesos del brazo
		devuelve los angulos de dichos huesos con la horizontal """

        # Draw the coordinate axes
        img.dl().line((20, img.height - 20), (20, img.height - 60),
                      Color.RED,
                      width=5)
        img.dl().line((20, img.height - 20), (60, img.height - 20),
                      Color.RED,
                      width=5)
        textLayer = DrawingLayer((img.width, img.height))
        textLayer.setFontSize(20)
        textLayer.text("90 grados", (20, img.height - 80), Color.RED)
        textLayer.text("0 grados", (70, img.height - 20), Color.RED)
        img.addDrawingLayer(textLayer)

        angulosHuesos = []
        if self.articulaciones != []:
            self.articulaciones = aux.ordenaListaPorDistanciaApunto(
                self.articulaciones, [0, 480])
            puntoInicial = self.articulaciones.pop()
            img.dl().circle(puntoInicial, 10, Color.BLUE, width=5)
            numAngulo = 1
            while self.articulaciones != []:
                p = self.articulaciones.pop()
                img.dl().line(puntoInicial, p, Color.BLUE, width=5)
                img.dl().circle(p, 10, Color.BLUE, width=5)
                textLayer = DrawingLayer((img.width, img.height))
                textLayer.setFontSize(24)
                textLayer.text(str(numAngulo), (p[0], p[1]), Color.RED)
                img.addDrawingLayer(textLayer)
                numAngulo += 1
                img.applyLayers()
                angulosHuesos.append(
                    aux.anguloLineaEntreDosPuntos(p, puntoInicial))
                puntoInicial = p

        if len(angulosHuesos) == 3:
            return angulosHuesos
        else:
            return []

    def depuracion(self):
        self.enDepuracion = True
        print " ---------------------"
        print "Areas: "
        print self.AreaBlobs
        print "Numero de blobs candidatos por area: "
        print self.numBlobsCandidatosPorArea
        print "Tiempo de tratamiento de imagen: "
        print self.tiempoTratamiento
        print "Numero Articulaciones detectadas: "
        print len(self.articulaciones)
        print " ---------------------"
        time.sleep(1)
Example #38
analog_pin_1 = board.get_pin('a:1:i')  # Use pin 1 as input
analog_pin_2 = board.get_pin('a:2:i')  # Use pin 2 as input
button_13 = board.get_pin('d:13:i')  # Use pin 13 for button input

it = util.Iterator(board)  # Initialize the pin monitor for the Arduino
it.start()  # Start the pin monitor loop

multiplier = 400.0  # A value to adjust the edge threshold by
cam = Camera()  # Initialize the camera

while True:
    t1 = analog_pin_1.read()  # Read the value from pin 1
    t2 = analog_pin_2.read()  # Read the value from pin 2
    b13 = button_13.read()  # Read if the button has been pressed.

    if not t1:  # Set a default if no value read
        t1 = 50
    else:
        t1 *= multiplier

    if not t2:  # Set a default if no value read
        t2 = 100
    else:
        t2 *= multiplier

    print("t1 " + str(t1) + ", t2 " + str(t2) + ", b13 " + str(b13))
    img = cam.getImage().flipHorizontal()
    edged_img = img.edges(int(t1), int(t2)).invert().smooth()
    edged_img.show()
    time.sleep(0.1)
Example #39
    # (fragment: the beginning of the mustachify(frame) helper is missing)
    if faces:
        for face in faces:
            print "Face at: " + str(face.coordinates())
            myFace = face.crop()
            noses = myFace.findHaarFeatures('nose')
            if noses:
                nose = noses.sortArea()[-1]
                print "Nose at: " + str(nose.coordinates())
                xmust = face.points[0][0] + nose.x - (stache.width / 2)
                ymust = face.points[0][1] + nose.y + (stache.height / 3)
            else:
                return frame
        frame = frame.blit(stache, pos=(xmust, ymust), mask=stacheMask)
        return frame
    else:
        return frame


while not myDisplay.isDone():
    inputValue = GPIO.input(24)
    frame = myCamera.getImage()
    if inputValue == True:
        frame = mustachify(frame)
        frame.save("mustache-" + str(time()) + ".jpg")
        frame = frame.flipHorizontal()
        frame.show()
        sleep(3)
    else:
        frame = frame.flipHorizontal()
        frame.save(myDisplay)
    sleep(.05)
Example #40
def main(camindex = 0, capture_width = 800, capture_height = 600, chessboard_width = 8, chessboard_height = 5, planemode = False, gridsize = 0.029, calibrationFile = "default"):
    global save_location

    if planemode:
        mode = 7
    else:
        mode = 0

    dims = (chessboard_width, chessboard_height)

    cam = Camera(camindex, prop_set = { "width": capture_width, "height": capture_height })
    d = Display((capture_width, capture_height))

    save_location = "" #change this if you want to save your calibration images

    calibration_set = [] #calibration images
    fc_set = []

    introMessage()


    while not d.isDone():
        time.sleep(0.01)
        i = cam.getImage().flipHorizontal()
        cb = i.findChessboard(dims, subpixel = False)


        if cb:
            cb = cb[0]
        elif mode != 6:
            showText(i, "Put a chessboard in the green outline")

        if mode == 0:  #10 pictures, chessboard filling 80% of the view space
            findLargeFlat(cb, i, calibration_set, dims)
            if (len(calibration_set) == 10):
                mode = 1
        elif mode == 1:  #5 pictures, chessboard filling 60% of screen, at 45 deg horiz
            findHorizTilted(cb, i, calibration_set, dims)
            if (len(calibration_set) == 15):
                mode = 2
        elif mode == 2:  #5 pictures, chessboard filling 60% of screen, at 45 deg vert
            findVertTilted(cb, i, calibration_set, dims)
            if (len(calibration_set) == 20):
                mode = 3
        elif mode == 3:  #5 pictures, chessboard filling 40% of screen, corners at 45
            findCornerTilted(cb, i, calibration_set, dims)
            if (len(calibration_set) == 25):
                mode = 4
        elif mode == 4:  #10 pictures, chessboard filling 12% - 25% of view space
            findSmallFlat(cb, i, calibration_set, dims)
            if (len(calibration_set) == 35):
                mode = 5
        elif mode == 5:
            cam.calibrate(calibration_set, gridsize, dims)
            cam.saveCalibration(calibrationFile)
            mode = 6
        elif mode == 6:
            showText(i,  "Saved calibration to " + calibrationFile)
        elif mode == 7:
            findPlane(cb, i, calibration_set, dims)
            if (len(calibration_set) == 25):
                mode = 5

        if cb:
            cb.draw()

        i.save(d)
Example #41
from SimpleCV import Camera,Display
from time import sleep

myCamera = Camera(prop_set={'width':320,'height':240})
myDisplay = Display(resolution=(320,240))
while not myDisplay.isDone():
  myCamera.getImage().save(myDisplay)
  sleep(.1)
Example #42
import time

from SimpleCV import Image, BlobMaker, Color, Camera

# name = "img1.jpg"
# img = Image(name)
# img.show()
# time.sleep(1)

th = 240
n = 0
c = Camera(0)  # open the camera once, outside the capture loop
while True:
    img = c.getImage()
    img.show()

    # print("New image")

    # print(th)
    # bm = BlobMaker() # create the blob extractor
    # img.invert().binarize(thresh=th).invert().show()
    # time.sleep(0.1)

    # blobs = bm.extractFromBinary(img.invert().binarize(thresh=th).invert(),img, minsize = 200)

    # print(len(blobs))
    # if len(blobs) == 0:
    #     th = th - 10
    #     continue

    # if(len(blobs)>0 and len(blobs) < 10): # if we got a blob
Example #43
from SimpleCV import Camera, Display

frameWeight = .2

cam = Camera()

lastImage = cam.getImage()

display = Display(
    (int(cam.getProperty('width')), int(cam.getProperty('height'))))

while not display.isDone():
    img = cam.getImage()
    img = (img * frameWeight) + (lastImage * (1 - frameWeight))
    img.save(display)
    lastImage = img
Example #44
cam = Camera()  #initialize the camera
quality = 400
minMatch = 0.3

try:
    password = Image("password.jpg")
except:
    password = None

mode = "unsaved"
saved = False
minDist = 0.25

while True:
    sleep(2)
    image = cam.getImage().scale(320,
                                 240)  # get image, scale to speed things up
    faces = image.findHaarFeatures("face.xml")  # load in trained face file
    if faces:
        if not password:
            faces.draw()
            face = faces[-1]
            password = face.crop().save("password.jpg")
            break
        else:
            faces.draw()
            face = faces[-1]
            template = face.crop()
            template.save("passwordmatch.jpg")
            keypoints = password.findKeypointMatch(template)
            if keypoints:
                print "YOU ARE THE ONE!!! CONGRATS"
Example #45
from SimpleCV import Camera,Display
import time
cam=Camera() # initializing the camera
time.sleep(5) # delay for five seconds
a=cam.getImage() # capturing the first image
a.save("a.jpg")
time.sleep(10)
b=cam.getImage() # capturing the second image ten seconds later
b.save("b.jpg")
d=b-a # subtracting the image pixels
#d.show() # display the subtracted image
mat=d.getNumpy() # converting to a numpy array
avg=mat.mean() # take the mean
print avg # print the average value on the screen
if avg>6:
    print("Motion Detected")
else:
    print("not detected")
Example #46
import time

from SimpleCV import Camera, VideoStream

secondsBuffer = 10
secondsToRecord = 60
fps = 10

# d = deque([], secondsBuffer*fps)  # 10 seconds of buffer

c = Camera()
vs = VideoStream("out.avi", fps=fps)

framecount = 0

while (framecount < fps * secondsToRecord):  #record for 60 sec @ 10fps
    # d.append(c.getImage())  # .save(vs)
    c.getImage().save(vs)
    framecount += 1
    time.sleep(0.1)  # 0.1 s per frame at 10 fps; TODO: adjust to match fps

# for frame in d:
#     frame.save(vs)
Example #47
# Grabs a screenshot of your webcam and displays it

from SimpleCV import Camera, Display, Image
import time
# Initialize the camera
cam = Camera()
# Initialize the display
display = Display()
# Snap a picture using the camera
img = cam.getImage()
# Show some text
img.drawText("Hello World!")
# Show the picture on the screen
img.save(display)
# Wait five seconds so the window doesn't close right away
time.sleep(5)
Example #48
#! /usr/bin/python

from SimpleCV import Image, Camera, Display, time

import matplotlib.pyplot as plt

cam = Camera()

time.sleep(1)

img = cam.getImage()

img.save("foto1.jpg")

time.sleep(2)

img2 = cam.getImage()

img2.save("foto2.jpg")

time.sleep(2)

img3 = cam.getImage()

escaladegris = img2.grayscale()
escaladegris.save("fotogray.jpg")

histograma = escaladegris.histogram()

plt.figure(1)
Example #49
from SimpleCV import Image, Camera, Display, Color

cam = Camera()
scaler = 0.5
previous = cam.getImage().scale(scaler)
disp = Display((680, 480))
sz = previous.width / 10

while disp.isNotDone():
    current = cam.getImage().scale(scaler)
    motion = current.findMotion(previous, sz, method='HS')
    motion.draw(color=Color.RED, width=3)
    current.save(disp)
    previous = current
Example #50
from SimpleCV import Camera, Display

cam = Camera()
display = Display()

# This variable saves the last rotation, and is used
# in the while loop to increment the rotation
rotate = 0

while display.isNotDone():
    rotate = rotate + 5
    cam.getImage().rotate(rotate).save(display)
Example #51
            sleep(1)

            print(
                "Time to scan your music! Please put your music sheet where I can see it, and make sure it is positioned correctly. When you are ready for me to scan it, hit the spacebar.\n"
            )

            sleep(1)

            keyboardPressed = False
            while keyboardPressed == False:
                events = pygame.event.get()
                for event in events:
                    if event.type == pygame.KEYUP:
                        keyboardPressed = True
                        break
                liveImg = cam.getImage()
                liveImg.show()

            iteration += 1
            print("Iteration: " + str(iteration))

            #Take photo
            img = cam.getImage().invert()

            #Find blobs from edges
            blobs = img.findBlobs()

            #Check to see how many blobs detected
            length = len(blobs)

            #If any blobs were detected
Example #52
import time
from SimpleCV import Camera
from SimpleCV.Display import Display
cam = Camera()
display = Display((640,480)) # create our display

quality = 400.00
minDist = 0.35
minMatch = 0.2
template_img = None
mode = "untrained"
startX = None
startY = None
endY = None
endX = None

while( display.isNotDone() ):
		img = cam.getImage().resize(640,480)

		#Display this if a template has not been trained yet
		if mode == "untrained":
			if startX == None or startY == None:
				img.dl().text("Click the upper left corner to train", (10,10))
				if display.mouseLeft:
					startX = display.mouseRawX
					startY = display.mouseRawY
					time.sleep(0.2)
			elif endX == None or endY == None:
				img.dl().text("now click the lower right corner to train", (10,10))
				if display.mouseLeft:
					endX = display.mouseX
					endY = display.mouseY
					template_img = img.crop(startX,startY,endX - startX, endY - startY)
Example #53
'''
This program super imposes the camera onto the television in the picture
'''
from __future__ import print_function

print(__doc__)

from SimpleCV import Camera, Image, Display

tv_original = Image("family_watching_television_1958.jpg", sample=True)

tv_coordinates = [(353, 379), (433, 380), (432, 448), (354, 446)]
tv_mask = Image(tv_original.size()).invert().warp(tv_coordinates)
tv = tv_original - tv_mask

c = Camera()
d = Display(tv.size())

while d.isNotDone():
    bwimage = c.getImage().grayscale().resize(tv.width, tv.height)
    on_tv = tv + bwimage.warp(tv_coordinates)
    on_tv.save(d)
Example #54
#
# Released under the BSD license. See LICENSE file for details.
"""
This program basically does face detection and blurs the face out.
"""
print __doc__

from SimpleCV import Camera, Display, HaarCascade

# Initialize the camera
cam = Camera()

# Create the display to show the image
display = Display()

# Haar Cascade face detection, only faces
haarcascade = HaarCascade("face")

# Loop forever
while display.isNotDone():
    # Get image, flip it so it looks mirrored, scale to speed things up
    img = cam.getImage().flipHorizontal().scale(0.5)
    # Load in trained face file
    faces = img.findHaarFeatures(haarcascade)
    # Pixelize the detected face
    if faces:
        bb = faces[-1].boundingBox()
        img = img.pixelize(10, region=(bb[0], bb[1], bb[2], bb[3]))
    # Display the image
    img.save(display)
Example #55
while True:

    if first_image:
        first_image = False
        print("Skiping first image")
        time.sleep(5)
        continue

    # External 16G USB drive is mounted here
    image_filename = "camera_" + str(int(time.time())) + ".png"
    filename = "./images/camera/" + image_filename

    # print("Saving image " + filename)

    camera_image = cam.getImage()
    camera_image.save(filename)

    gas = GasMeter(camera_image)

    stamp = time.strftime("%d/%m/%Y %H:%M:%S")

    # value is returned as a string. It can have X in the place of unrecognized character
    value = gas.get_meter_value()

    csv_line = []

    csv_line.append(image_filename)
    csv_line.append(stamp)
    csv_line.append(str(value))
    csv_line.append(str(int(time.time())))
Example #56
import sys
from SimpleCV import Camera
import numpy as np
import bayesopt
from time import sleep

# Python3 compat
if sys.version_info[0] == 3:
    raw_input = input

# Initialize the camera
cam = Camera()
cost = np.zeros(256)

#Load images
img = cam.getImage().scale(400, 400)
img2 = img.binarize()


def costImage(i):
    # Make image black and white
    img1 = img.binarize(int(i))
    mat = img1.getNumpy()
    countW = np.count_nonzero(mat)
    countB = mat.size - countW
    return ((countW - countB) / float(mat.size))**2


params = {}  #bayesopt.initialize_params()
params['n_iterations'] = 15
params['n_init_samples'] = 5
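The example above is cut off after setting up the bayesopt parameters; as a hedged illustration (not the original code), the costImage function it defines can also be minimized by a plain brute-force sweep over all 256 thresholds:

# Hypothetical continuation: evaluate costImage for every threshold and pick
# the one whose binarization is closest to a 50/50 black/white split.
for i in range(256):
    cost[i] = costImage(i)

best_threshold = int(np.argmin(cost))
print("Best threshold:", best_threshold)
img.binarize(best_threshold).show()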
Example #57
class FieldAnalyzer(Process):
    def __init__(self, cam_num, debug=False):
        Process.__init__(self)
        self.cam = Camera(cam_num, threaded=False)
        self.puck_locations = Array(Vector, [(-1, -1), (-1, -1)])
        self.puck_velocity = Array(Vector, [(-1, -1), (-1, -1)])
        self.gun_positions = Array(Vector, [(-1, -1), (-1, -1)])
        self.debug = debug
        self.field_crop_boundary = list()
        self.field_post_crop_limits = [5000, 0]  # [left, right]
        self.crop_points = list()
        self.lighting_constant = 250

    def run(self):
        now_time = time.time()
        while True:
            img = self.cam.getImage() \
                .regionSelect(self.crop_points[0],
                              self.crop_points[1],
                              self.crop_points[2],
                              self.crop_points[3]) \
                .warp(self.field_crop_boundary)
            # create binary mask image for finding blobs
            mask = img.binarize(thresh=self.lighting_constant).invert()
            blobs = img.findBlobsFromMask(mask, minsize=50, maxsize=200)

            if blobs:
                for i in range(2):
                    if len(blobs) > i:
                        self.puck_locations[i].x = blobs[i].coordinates()[0]
                        self.puck_locations[i].y = blobs[i].coordinates()[1]

            if self.debug:
                old_time = now_time  # timing
                now_time = time.time()
                fps = 1 / (now_time - old_time)
                if blobs:
                    blobs.draw(width=4)
                    print "FPS: " + str(fps) + "Puck Locations: " + \
                        str(self.puckLocations()) + \
                        " Percent progression: " + \
                        str(self.puckLocationsPercent())
                img.show()

    def puckLocations(self):
        """
        API proxy for accessing puck locations so user doesn't have to
        deal with weird c_type memory
        """
        return [(self.puck_locations[0].x, self.puck_locations[0].y),
                (self.puck_locations[1].x, self.puck_locations[1].y)]

    def puckLocationsPercent(self):
        """
        Returns the percent the puck has progressed over the field
        0% is left most, 100% is right most
        """
        motorA = ((self.field_post_crop_limits[1] -
                   self.field_post_crop_limits[0]) -
                  (self.field_post_crop_limits[1] -
                   self.puck_locations[0].x)) / \
            float(self.field_post_crop_limits[1] -
                  self.field_post_crop_limits[0])

        motorB = ((self.field_post_crop_limits[1] -
                   self.field_post_crop_limits[0]) -
                  (self.field_post_crop_limits[1] -
                   self.puck_locations[1].x)) / \
            float(self.field_post_crop_limits[1] -
                  self.field_post_crop_limits[0])

        if motorA > 1:
            motorA = 1
        elif motorA < 0:
            motorA = 0

        if motorB > 1:
            motorB = 1
        elif motorB < 0:
            motorB = 0

        return (motorA, motorB)

    def calibrate(self):
        """
        A calibration tool which gives gui for calibrating
        """

        #####################INITIAL FIELD POINT CALIBRATION###################
        print "We are displaying a live feed from the cam.  Click " \
              "on a point of the field we tell you, then enter that info\n\n"
        print "Click top-left of field, then right click\n"

        self.cam.live()
        top_left = raw_input("Enter the coord value: ")

        print "\nClick top-right, then right click\n"
        self.cam.live()
        top_right = raw_input("Enter the coord value: ")

        print "\nClick the bottom left, then click right\n"
        self.cam.live()
        bottom_left = raw_input("Enter the coord value: ")

        print "\nClick bottom right, then right click\n"
        self.cam.live()
        bottom_right = raw_input("Enter the coord value: ")

        top_left = tuple(int(v) for v in re.findall("[0-9]+", top_left))
        top_right = tuple(int(v) for v in re.findall("[0-9]+", top_right))
        bottom_left = tuple(int(v) for v in re.findall("[0-9]+", bottom_left))
        bottom_right = tuple(
            int(v) for v in re.findall("[0-9]+", bottom_right))

        locations = [5000, 0, 5000, 0]  # left, top, right, bottom

        if top_left[0] < bottom_left[0]:
            locations[0] = top_left[0]
        else:
            locations[0] = bottom_left[0]
        if top_right[0] > bottom_right[0]:
            locations[2] = top_right[0]
        else:
            locations[2] = bottom_right[0]
        if top_left[1] < top_right[1]:
            locations[1] = top_left[1]
        else:
            locations[1] = top_right[1]

        if bottom_right[1] < bottom_left[1]:
            locations[3] = bottom_left[1]
        else:
            locations[3] = bottom_right[1]
        self.field_crop_boundary.append(
            (bottom_left[0] - locations[0], top_right[1] - locations[1]))
        self.field_crop_boundary.append(
            (bottom_right[0] - locations[0], top_left[1] - locations[1]))
        self.field_crop_boundary.append(
            (top_right[0] - locations[0], bottom_left[1] - locations[1]))
        self.field_crop_boundary.append(
            (top_left[0] - locations[0], bottom_right[1] - locations[1]))

        self.crop_points = locations
        #######################################################################
        #######################################################################

        #############################Lighting Calibration######################
        inVal = 200

        print "We are now starting calibration for lighting."
        while not re.match(r"[yY]", str(inVal)):

            img = self.cam.getImage() \
                .regionSelect(locations[0],
                              locations[1],
                              locations[2],
                              locations[3]) \
                .warp(self.field_crop_boundary)
            mask = img.binarize(thresh=inVal).invert()
            blobs = img.findBlobsFromMask(mask, minsize=50, maxsize=200)
            if blobs:
                blobs.draw()
            img.show()
            self.lighting_constant = inVal
            oldVal = inVal
            inVal = raw_input("Enter new thresh val for blobs, "
                              "then -1 to confirm lighting calibration: ")
            if re.match(r"\d+", inVal):
                inVal = int(inVal)
            elif re.match(r"[yY]", inVal):
                pass
            else:
                inVal = oldVal
            print "\n"
        #######################################################################
        #######################################################################

        ##################Post Crop Field Determination########################
        temp_positions = [0, 0, 0, 0]
        inVal = ""
        print "We are now taking some simple measurements " \
              "of the post-cropped playing field."

        ####################Upper Left Determination###########################
        raw_input("Place the puck in the upper-left most "
                  "side of the field and press [Enter]")
        while not re.match(r"[yY]", inVal):

            img = self.cam.getImage()   \
                .regionSelect(locations[0],
                              locations[1],
                              locations[2],
                              locations[3]) \
                .warp(self.field_crop_boundary)
            mask = img.binarize(thresh=self.lighting_constant).invert()
            blobs = img.findBlobsFromMask(mask, minsize=50, maxsize=200)
            if blobs:
                blobs[0].draw()
                print blobs[0]
                temp_positions[0] = blobs[0].coordinates()[0]
            img.show()
            inVal = str(
                raw_input("Enter y/Y if the puck is selected, and the "
                          "displayed coordinate appears reasonable, "
                          "otherwise just hit [Enter]"))
        #######################################################################

        #######################Upper Right Determinstaion######################
        inVal = ""
        raw_input("Place the puck in the upper-right most side "
                  "of the field and press [Enter]")
        while not re.match(r"[yY]", inVal):

            img = self.cam.getImage() \
                .regionSelect(locations[0],
                              locations[1],
                              locations[2],
                              locations[3]) \
                .warp(self.field_crop_boundary)
            mask = img.binarize(thresh=self.lighting_constant).invert()
            blobs = img.findBlobsFromMask(mask, minsize=50, maxsize=200)
            if blobs:
                blobs[0].draw()
                print blobs[0]
                temp_positions[2] = blobs[0].coordinates()[0]
            img.show()
            inVal = raw_input("Enter y/Y if the puck is selected, and the "
                              "displayed coordinate appears reasonable, "
                              "otherwise just hit [Enter]")
        #######################################################################

        ######################Bottom Left Determination########################
        inVal = ""
        raw_input("Place the puck in the bottom-left most "
                  "side of the field and press [Enter]")
        while not re.match(r"[yY]", inVal):

            img = self.cam.getImage() \
                .regionSelect(locations[0],
                              locations[1],
                              locations[2],
                              locations[3]) \
                .warp(self.field_crop_boundary)
            mask = img.binarize(thresh=self.lighting_constant).invert()
            blobs = img.findBlobsFromMask(mask, minsize=50, maxsize=200)
            if blobs:
                blobs[0].draw()
                print blobs[0]
                temp_positions[1] = blobs[0].coordinates()[0]
            img.show()
            inVal = raw_input("Enter y/Y if the puck is selected, and the "
                              "displayed coordinate appears reasonable, "
                              "otherwise just hit [Enter]")
        #######################################################################

        ####################Bottom Right Determination#########################
        inVal = ""
        raw_input("Place the puck in the bottom-right most "
                  "side of the field and press [Enter]")
        while not re.match(r"[yY]", inVal):

            img = self.cam.getImage() \
                .regionSelect(locations[0],
                              locations[1],
                              locations[2],
                              locations[3]) \
                .warp(self.field_crop_boundary)
            mask = img.binarize(thresh=self.lighting_constant).invert()
            blobs = img.findBlobsFromMask(mask, minsize=50, maxsize=200)
            if blobs:
                blobs[0].draw()
                print blobs[0]
                temp_positions[3] = blobs[0].coordinates()[0]
            img.show()
            inVal = raw_input("Enter y/Y if the puck is selected, and the "
                              "displayed coordinate appears reasonable, "
                              "otherwise just hit [Enter]")
        #######################################################################

        ###################Assigning Limits for post-Crop######################
        if temp_positions[0] < temp_positions[1]:
            self.field_post_crop_limits[0] = temp_positions[0]
        else:
            self.field_post_crop_limits[0] = temp_positions[1]

        if temp_positions[2] > temp_positions[3]:
            self.field_post_crop_limits[1] = temp_positions[2]
        else:
            self.field_post_crop_limits[1] = temp_positions[3]
        #######################################################################

        #######################################################################
        #######################################################################

        print self.crop_points
        print self.field_crop_boundary
Example #58
from SimpleCV import Color, Camera, Display
from Tkinter import *
from Scanner import Scanner
import RPi.GPIO as GPIO
from config import CONFIG

if __name__ == "__main__":
    cam = Camera()  # starts the camera
    display = Display(resolution=(300, 200))

    posSystem = Scanner(CONFIG)

    from pos_ui.frame_order import Frame_Order

    frame_order_info = Frame_Order(None, Tk())

    try:
        while display.isNotDone():
            img = cam.getImage()  # gets image from the camera
            barcode = img.findBarcode()  # finds barcode data from image

            if barcode is not None:  # if there is some data processed
                posSystem.process_scan_event(str(barcode[0].data))
                barcode = []  # reset barcode data
            img.save(display)  # shows the image on the screen
    except (KeyboardInterrupt, SystemExit):
        print 'Received terminate signal from user or system.'
        print 'Cleaning up.'
        GPIO.cleanup()
Example #59
from SimpleCV import Camera, Display, Color
scaler = 0.5
cam = Camera()
disp = Display((640, 480))
last = cam.getImage().scale(scaler)
sz = last.width / 10
while disp.isNotDone():
    img = cam.getImage().scale(scaler)
    motion = img.findMotion(last, sz, method='HS')
    motion.draw(color=Color.RED, width=3)
    img.save(disp)
    last = img
Example #60
#! /usr/bin/python
#ALL THE PHOTOS MUST HAVE THE SAME WRITTEN TEXT; ONLY THE SURFACE
#OF THE TEXT WILL CHANGE
from SimpleCV  import Image, Camera, Display, time 
import matplotlib.pyplot as plt
cam = Camera()
asdf=cam.getImage()
time.sleep(2)
prueba=cam.getImage()
prueba.save("prueba1.jpg")
escalagris=prueba.grayscale()
escalagris.save("gray.jpg")
histograma=escalagris.histogram()
plt.subplot(4,1,1)
plt.plot(histograma)
plt.grid()
plt.title("Histograma Grayscale")
#once the grayscale filter is done, proceed to do the same in RGB (RED GREEN BLUE)
(red,green,blue)=prueba.splitChannels(False)
red_histogram=red.histogram(255)
plt.subplot(4,1,2)
plt.plot(red_histogram)
plt.grid()
plt.title("Histograma red")
green_histogram=green.histogram(255)
plt.subplot(4,1,3)
plt.plot(green_histogram)
plt.grid()
plt.title("Histograma green")
blue_histogram=blue.histogram(255)
plt.subplot(4,1,4)