Example #1
def main():

    x = 0
    cam = Camera(prop_set={'width': 640, 'height': 480})
    disp = Display(resolution=(320, 240))
    while disp.isNotDone():
        img = cam.getImage()
        img = img.scale(0.5)
        faces = img.findHaarFeatures("eye.xml")
        #print "not Detected"
        if faces:
            for face in faces:
                face.draw()
                print "eyes Detected"
            # x = 0
        else:

            # x += 1

            print "close eyes"
            #print (x)
            #if x > 10:
            #  print "HOY GISING"

            # return main()
        img.save(disp)
Example #2
def simpleDiff():
    cam = Camera()
    img = cam.getImage().scale(.20)
    disp = Display(img.size())
    img.save(disp)
    X = range(100)
    Y = [0 for i in range(100)]
    count = 0
    imgA = cam.getImage().scale(0.20).grayscale()
    while not disp.isDone():
        ax.clear()
        count += 1
        time.sleep(0.1)
        imgB = cam.getImage().scale(0.20).grayscale()
        #imgB.save(disp)
        motion = (imgB - imgA).binarize().invert().erode(1).dilate(1)
        motion.save(disp)
        s = diff(motion)
        imgA = imgB
        if count < 100:
            Y[count] = s
        else:
            Y.append(s)
            Y = Y[1:]
            X.append(count)
            X = X[1:]
        ax.bar(X, Y)
        plt.xlim(X[0], X[-1])
        plt.draw()
        imgA = imgB
Example #3
def main(cameraNumber, camWidth, camHeight, outputFile):
    BUFFER_NAME = 'motiontest.avi'

    # create the video stream for saving the video file
    #vs = VideoStream(fps=24, filename=fname, framefill=True)
    vs = VideoStream(fps=24, filename=BUFFER_NAME, framefill=True)

    # create a display with size (width, height)
    disp = Display((camWidth, camHeight))

    # Initialize Camera
    cam = Camera(cameraNumber, prop_set={"width": camWidth, "height": camHeight})

    # while the user does not press 'esc'
    while disp.isNotDone():
        # KISS: just get the image... don't get fancy
        img = cam.getImage()

        #img.show()

        # write the frame to videostream
        vs.writeFrame(img)

        # show the image on the display
        img.save(disp)

    # Finished the acquisition of images now Transform into a film
    #self.makefilmProcess = Process(target=self.saveFilmToDisk, args=(BUFFER_NAME, outputFile))
    #self.makefilmProcess.start()
    saveFilmToDisk(BUFFER_NAME, outputFile)
Example #4
def main(cameraNumber, camWidth, camHeight, outputFile):

    BUFFER_NAME = 'cloud3.avi'
    vs = VideoStream(fps=24, filename=BUFFER_NAME, framefill=True)

    disp = Display((camWidth, camHeight))
    cam = Camera(cameraNumber, prop_set={"width": camWidth, "height": camHeight})

    # while the user does not press 'esc'
    start_time = time()
    count = 0
    while disp.isNotDone():
        # KISS: just get the image... don't get fancy
        img = cam.getImage()
        print type(img)

        skimage.io.push(img)

        #img.show()

        # write the frame to videostream
        vs.writeFrame(img)

        # show the image on the display
        img.save(disp)

        current_time = time()
        if current_time-start_time>=5:
            outputFile = "testing_chunk_%d.mp4" % (count)
            print "Saving %s" % (outputFile)
            saveFilmToDisk(BUFFER_NAME, outputFile)
            start_time = time()
            count += 1
Example #5
def control_by_cam():
    scale_amount = (200, 150)
    d = Display(scale_amount)
    cam = Camera(0)
    prev = cam.getImage().flipHorizontal().scale(scale_amount[0],
                                                 scale_amount[1])
    time.sleep(0.5)
    t = 0.5
    buffer = 20
    count = 0
    motionStr = ""  # default value until motion has been measured
    while d.isNotDone():
        current = cam.getImage().flipHorizontal()
        current = current.scale(scale_amount[0], scale_amount[1])
        if (count < buffer):
            count = count + 1
        else:
            fs = current.findMotion(prev, window=15, method="BM")
            lengthOfFs = len(fs)
            if fs:
                dx = 0
                for f in fs:
                    dx = dx + f.dx
                dx = (dx / lengthOfFs)
                motionStr = movement_check(dx, t)
                current.drawText(motionStr, 10, 10)
        prev = current
        time.sleep(0.01)
        current.save(d)
    return motionStr  # last measured motion direction, once the display loop ends
Example #6
def main():

    x = 0
    cam = Camera(prop_set={'width': 640, 'height': 480})
    disp = Display(resolution=(320, 240))
    while disp.isNotDone():
        img = cam.getImage()
        img = img.scale(0.5)
        faces = img.findHaarFeatures("eye.xml")
        #print "not Detected"
        if faces:
            for face in faces:
                face.draw()
                print "eyes Detected"
            # x = 0
        else:

            # x += 1

            print "close eyes"
            #print (x)
            #if x > 10:
            #  print "HOY GISING"

            # return main()
        img.save(disp)
Example #7
def main(cameraNumber, camWidth, camHeight, outputFile):
    BUFFER_NAME = 'buffer.avi'

    # create the video stream for saving the video file
    vs = VideoStream(fps=24, filename=BUFFER_NAME, framefill=True)
    
    # create a display with size (width, height)
    disp = Display((camWidth, camHeight))
    
    # Initialize Camera
    cam = Camera(cameraNumber, prop_set={"width": camWidth, "height": camHeight})
    
    # while the user does not press 'esc'
    while disp.isNotDone():
        # KISS: just get the image... don't get fancy
        img = cam.getImage()
        
        # write the frame to videostream
        vs.writeFrame(img)
        
        # show the image on the display
        img.save(disp)
    
    # Finished the acquisition of images; now transform the buffer into a film
    makefilmProcess = Process(target=saveFilmToDisk, args=(BUFFER_NAME, outputFile))
    makefilmProcess.start()


def saveFilmToDisk(bufferName, outname):
    # construct the encoding arguments
    params = " -i {0} -c:v mpeg4 -b:v 700k -r 24 {1}".format(bufferName, outname)

    # run avconv to compress the video (ffmpeg was slated for deprecation on some distributions at the time)
    call('avconv' + params, shell=True)
Example #8
def opticalFlow():
    cam = Camera()
    img = cam.getImage().scale(.20)
    disp = Display(img.size())
    img.save(disp)
    X = range(100)
    Y = [0 for i in range(100)]
    flag = 0
    count = 0
    while not disp.isDone():
        ax.clear()
        count += 1
        if flag == 0:
            imgA = cam.getImage().scale(0.20)
            flag += 1
        else:
            imgB = cam.getImage().scale(0.20)
            imgB.save(disp)
            motion = imgB.findMotion(imgA)
            s = sum([i.magnitude() for i in motion])
            imgA = imgB
            if count < 100:
                Y[count] = s
            else:
                Y.append(s)
                Y = Y[1:]
                X.append(count)
                X = X[1:]
            ax.bar(X, Y)
            plt.xlim(X[0], X[-1])
            plt.draw()
Example #9
class VideoModule:
    videoTitle =  time.strftime("%Y_%m_%d_%H_%M_%S")

    topic = ''

    continueRecord = True

    width = 300
    height = 300

    makefilmProcess = Process()

    disp = 0

    def getVideoTitle(self):
        return self.videoTitle

    def getVideoDisplay(self):
        return self.disp

    def recordVideo(self, cb, topic, length=5):
        global BUFFER_NAME

        BUFFER_NAME = topic + '_' + time.strftime("%Y_%m_%d_%H_%M_%S") + '.avi'
        vs = VideoStream(fps=24, filename=BUFFER_NAME, framefill=True)
        self.disp = Display((self.width, self.height))
        cam = Camera(1, prop_set={"width":self.width,"height":self.height})

        while self.continueRecord:
            gen = (i for i in range(0, 30 * length) if self.continueRecord)
            for i in gen:
                img = cam.getImage()
                vs.writeFrame(img)
                img.save(self.disp)
            self.continueRecord = False
        print "Broke capture loop"
        self.disp.quit()

        print "Saving video"

        # This is to run this process asynchronously - we will skip that
        # self.makefilmProcess = Process(target=saveFilmToDisk, args=(BUFFER_NAME, self.videoTitle))
        # self.makefilmProcess.start()

        # Callback function
        cb()

    def getBufferName(self):
        global BUFFER_NAME
        return BUFFER_NAME

    def endCapture(self):
        self.continueRecord = False
        self.disp.quit()
        print "Set variable to false"

    def __init__(self, appendTitle):
        self.topic = appendTitle
        self.videoTitle += appendTitle + ".mp4"
Example #10
def show_video():
    dis = Display()
    try:
        while not dis.isDone():
            capture_image().save(dis)
            sleep(1 / 25.0)
    except pygame.error:
        return
Example #11
    def run(self):
        m = alsaaudio.Mixer()   # defined alsaaudio.Mixer to change volume
        scale = (300,250)    # increased from (200,150). works well
        d = Display(scale)
        cam = Camera()
        prev = cam.getImage().scale(scale[0],scale[1])
        sleep(0.5)
        buffer = 20
        count = 0
        prev_t = time()    # Note initial time
        while d.isNotDone():
            current = cam.getImage()
            current = current.scale(scale[0],scale[1])
            if( count < buffer ):
                count = count + 1
            else:
                fs = current.findMotion(prev, method="LK")   # find motion
                # Tried BM, and LK, LK is better. need to learn more about LK
                if fs:      # if featureset found
                    dx = 0
                    dy = 0
                    for f in fs:
                        dx = dx + f.dx      # add all the optical flow detected
                        dy = dy + f.dy
                
                    dx = (dx / len(fs))     # Taking average
                    dy = (dy / len(fs))

                    prev = current
                    sleep(0.01)
                    current.save(d)
                    
                    if dy > 2 or dy < -2:
                        vol = int(m.getvolume()[0]) # getting master volume
                        if dy < 0:
                            vol = vol + (-dy*3)
                        else:
                            vol = vol + (-dy*3)
                        if vol > 100:
                            vol = 100
                        elif vol < 0:
                            vol = 0
                        print vol
                        m.setvolume(int(vol))   # setting master volume
                        
                    if dx > 3:
                        cur_t = time()
                        if cur_t > 5 + prev_t:  # adding some time delay
                            self.play("next")   # changing next
                            prev_t = cur_t
                        
                    if dx < -3:
                        cur_t = time()
                        if cur_t > 5 + prev_t:
                            prev_t = cur_t
                        self.play("previous")   # changing previous
Example #12
    def run(self):
        m = alsaaudio.Mixer()  # defined alsaaudio.Mixer to change volume
        scale = (300, 250)  # increased from (200,150). works well
        d = Display(scale)
        cam = Camera()
        prev = cam.getImage().scale(scale[0], scale[1])
        sleep(0.5)
        buffer = 20
        count = 0
        prev_t = time()  # Note initial time
        while d.isNotDone():
            current = cam.getImage()
            current = current.scale(scale[0], scale[1])
            if (count < buffer):
                count = count + 1
            else:
                fs = current.findMotion(prev, method="LK")  # find motion
                # Tried BM, and LK, LK is better. need to learn more about LK
                if fs:  # if featureset found
                    dx = 0
                    dy = 0
                    for f in fs:
                        dx = dx + f.dx  # add all the optical flow detected
                        dy = dy + f.dy

                    dx = (dx / len(fs))  # Taking average
                    dy = (dy / len(fs))

                    prev = current
                    sleep(0.01)
                    current.save(d)

                    if dy > 2 or dy < -2:
                        vol = int(m.getvolume()[0])  # getting master volume
                        if dy < 0:
                            vol = vol + (-dy * 3)
                        else:
                            vol = vol + (-dy * 3)
                        if vol > 100:
                            vol = 100
                        elif vol < 0:
                            vol = 0
                        print vol
                        m.setvolume(int(vol))  # setting master volume

                    if dx > 3:
                        cur_t = time()
                        if cur_t > 5 + prev_t:  # adding some time delay
                            self.play("next")  # changing next
                            prev_t = cur_t

                    if dx < -3:
                        cur_t = time()
                        if cur_t > 5 + prev_t:
                            prev_t = cur_t
                        self.play("previous")  # changing previous
Example #13
def calibrate():
    winsize = (640, 480)
    display = Display(winsize)
    bg_img = get_image()
    bg_img.save(display)
    while not display.isDone():
        img = get_image()
        img.save(display)
        if display.mouseLeft:
            return img.getPixel(display.mouseX, display.mouseY), bg_img, img
Example #14
def calibrate():
    winsize = (640, 480)
    display = Display(winsize)
    bg_img = get_image()
    bg_img.save(display)
    while not display.isDone():
        img = get_image()
        img.save(display)
        if display.mouseLeft:
            return img.getPixel(display.mouseX, display.mouseY), bg_img, img
Example #15
def interactiveTranslation():
	cam = Camera()
	disp = Display()
	current = " "
	while disp.isNotDone():
		image = cam.getImage()
		if disp.mouseLeft: break
		if disp.mouseRight:
			text = image.readText()
			text = cleanText(text)
			translated = trans.translate(text, langpair)
			if translated: current = translated
		image.drawText(current, 0, 0, color=Color.BLACK, fontsize=40)
		image.save(disp)
Example #16
    def recordVideo(self, cb, topic, length=5):
        global BUFFER_NAME

        BUFFER_NAME = topic + '_' + time.strftime("%Y_%m_%d_%H_%M_%S") + '.avi'
        vs = VideoStream(fps=24, filename=BUFFER_NAME, framefill=True)
        self.disp = Display((self.width, self.height))
        cam = Camera(1, prop_set={"width":self.width,"height":self.height})

        while self.continueRecord:
            gen = (i for i in range(0, 30 * length) if self.continueRecord)
            for i in gen:
                img = cam.getImage()
                vs.writeFrame(img)
                img.save(self.disp)
            self.continueRecord = False
        print "Broke capture loop"
        self.disp.quit()

        print "Saving video"

        # This is to run this process asynchronously - we will skip that
        # self.makefilmProcess = Process(target=saveFilmToDisk, args=(BUFFER_NAME, self.videoTitle))
        # self.makefilmProcess.start()

        # Callback function
        cb()
Example #17
def main(cameraNumber, camWidth, camHeight):

    img = None

    # create a display with size (width, height)
    disp = Display((camWidth, camHeight))

    # Initialize Camera
    cam = Camera(cameraNumber,
                 prop_set={
                     "width": camWidth,
                     "height": camHeight
                 })

    prev = cam.getImage()

    while 1:
        # Finally, let's get started
        # KISS: just get the image... don't get fancy

        img = cam.getImage()

        diff = img - prev

        diff.show()

        prev = img
Example #18
class ThreadingObject(object):
    """ Threading object class

    The run() method will be started and it will run in the background
    until the application exits.
    """
 
    def __init__(self, interval=1, video=False):
        """ Constructor

        :type interval: int
        :param interval: Check interval, in seconds
        """
        self.interval = interval
        self.url = "http://192.168.10.222:1201/videostream.cgi?user=admin&pwd="
        self.ipCam = JpegStreamCamera(self.url)
        self.display = Display()

        thread = threading.Thread(target=self.run, args=(video,))
        thread.daemon = True                            # Daemonize thread
        thread.start()                                  # Start the execution
 
    def run(self, video):
        """ Method that runs forever """
        while not self.display.isDone():
            if video:
                imagen = self.ipCam.live()
            else:
                imagen = self.ipCam.getImage().show()
            time.sleep(self.interval)
        imagen.quit()
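
# Usage sketch (an assumption, not part of the original snippet): the constructor
# above starts the daemon thread itself, so a caller only needs to keep the main
# process alive while the stream is shown. `threading` and `time` are the same
# module-level imports the class already relies on.
if __name__ == "__main__":
    viewer = ThreadingObject(interval=1, video=False)
    while True:
        time.sleep(1)  # main thread idles; the daemon thread pulls and shows frames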
Example #19
    def __init__(self, size=(720, 540)):
        self._layers = {
                # Base layers
                'raw': None,
                'threshY': None,
                'threshB': None,
                'threshR': None,

                # Overlay layers
                'yellow': None,
                'blue': None,
                'ball' : None,
                }

        # These layers are drawn regardless of the current layerset
        self._persistentLayers = {
                'mouse': None
        }

        self._currentLayerset = self.layersets['default']
        self._display = Display(size)
        self._eventHandler = Gui.EventHandler()
        self._lastMouseState = 0
        self._showMouse = True
        self._lastFrame = None
        self._lastFrameTime = time.time()
Example #20
def ajusteFoto(filename, brillo=50, resolucion=(1024, 768), modoExposicion='auto'):
	""" Takes photos in a user-supervised trial-and-error process until the
	right one is captured, then returns the resulting image object """

	disp = Display(resolucion)

	try:
		while not disp.isDone():
			img = tomaFoto(filename, brillo, resolucion, modoExposicion, altaVelocidad=False)
			img.save(disp)
	except:
		pass

	return img
Example #21
class GUIManager:

  def init(self):
    self.dis = Display(title='FRC Team 3341 Targeting')  # creates a window
    
  def setImage(self, image):
    self.img = image
    
  def setImageText(self, imageText):
    self.img.drawText(imageText, self.img.height/2, self.img.width/2)
    # the height and width may need to be switched around
    # it could also be that the image is bigger than the display
    
  def setFeatures(self, blobs=None, ls=None):  #draws the features like blobs and Ls
    if blobs:
      for b in blobs:
        b.draw()

    if ls:
        for l in ls:
          l.draw()  # the L draw function draws the Ls upside down for some reason
    
  def show(self):  
    # I took out the isFile parameter because a video is not displayed differently than an image in simplecv
    # the thing where it waits for a key to be pressed was taken out too because it made the program crash
    self.img.save(self.dis)
    
  def disIsNotDone(self):
    # this is used to find if the window has been exited out of
    return self.dis.isNotDone()
Example #22
    def recordVideo(self, length=5):
        BUFFER_NAME = 'buffer.avi'
        vs = VideoStream(fps=24, filename=BUFFER_NAME, framefill=True)
        self.disp = Display((self.width, self.height))
        cam = Camera(prop_set={"width":self.width,"height":self.height})

        while self.continueRecord:
            gen = (i for i in range(0, 30 * length) if self.continueRecord)
            for i in gen:
                img = cam.getImage()
                vs.writeFrame(img)
                img.save(self.disp)
            self.continueRecord = False
        print "Broke capture loop"
        self.disp.quit()

        print "Saving video"
Example #23
class VideoModule:
    videoTitle =  time.strftime("%Y_%m_%d_%H_%M_%S_")

    continueRecord = True

    width = 300
    height = 300

    makefilmProcess = Process()

    disp = 0

    def getVideoTitle(self):
        return self.videoTitle

    def getVideoDisplay(self):
        return self.disp

    def recordVideo(self, length=5):
        BUFFER_NAME = 'buffer.avi'
        vs = VideoStream(fps=24, filename=BUFFER_NAME, framefill=True)
        self.disp = Display((self.width, self.height))
        cam = Camera(prop_set={"width":self.width,"height":self.height})

        while self.continueRecord:
            gen = (i for i in range(0, 30 * length) if self.continueRecord)
            for i in gen:
                img = cam.getImage()
                vs.writeFrame(img)
                img.save(self.disp)
            self.continueRecord = False
        print "Broke capture loop"
        self.disp.quit()

        print "Saving video"

        # This is to run this process asynchronously - we will skip that
        # self.makefilmProcess = Process(target=saveFilmToDisk, args=(BUFFER_NAME, self.videoTitle))
        # self.makefilmProcess.start()

    def endCapture(self):
        self.continueRecord = False
        print "Set variable to false"

    def __init__(self, appendTitle):
        self.videoTitle += appendTitle + ".mp4"
Example #24
    def __init__(self, size=(720, 540)):

        self._current_layer_set = self._layer_sets['default']
        self._display = Display(size)
        self._event_handler = Gui.EventHandler()
        self._last_mouse_state = 0
        self._show_mouse = True
        self._last_frame = None
        self._last_frame_time = time.time()
Example #25
def Run(cmdPipe):
    steadyStateFPS = 10
    desiredBuffer = 60*60  # 60 minutes * 60 seconds of buffered footage
    numberOfFrames = steadyStateFPS*desiredBuffer
    fmt = '%Y-%m-%d %H:%M:%S'

    disp = Display()

    filelist = []
    frameCounter = 101
    sleepTime = .1

    while disp.isNotDone():
        # check command
        if cmdPipe.poll():
            cmd = cmdPipe.recv()
            if cmd=='shutdown':
                print('player', 0, "Shutting down.")
                break

        if frameCounter > 100 or len(filelist) == 0:
            frameCounter = 0
            filelist = glob("images/*.jpg")
            if len(filelist)>numberOfFrames:
                sleepTime = 1.0/steadyStateFPS
                print("player", 0, "number of frames in buffer="+str(len(filelist))+" desired="+str(numberOfFrames)+" setting sleeptime to "+str(sleepTime))
            else:
                sleepTime = (1.0/steadyStateFPS)+.01
                print("player", 0, "number of frames in buffer="+str(len(filelist))+" desired="+str(numberOfFrames)+" setting sleeptime to "+str(sleepTime))


        filename = filelist.pop(0)
        img = Image(filename)
        matchObj = re.search(r'[0-9- :]+', filename)

        d1_ts = time.mktime(datetime.strptime(matchObj.group(), fmt).timetuple())
        d2_ts = time.mktime(datetime.utcnow().timetuple())
        offset = int(d1_ts-d2_ts)/60
        img.drawText(str(offset),  x=600, y=470)
        img.save(disp)
        os.remove(filename)
        frameCounter = frameCounter+1
        time.sleep(sleepTime)
Example #26
def main():
    global ON_CIRCLE

    colour = Color.RED
    cam  = Camera()
    disp = Display()
    obj_x = 150
    obj_y = 75
    radius = 25
    normaldisplay = True
    while disp.isNotDone():
        if disp.mouseRight:
            normaldisplay = not(normaldisplay)
            print "Display Mode:", "Normal" if normaldisplay else "Segmented"

        img = cam.getImage()
        img = img.scale(0.5).flipHorizontal()
        dist = img.colorDistance(Color.BLACK).dilate(2)
        img.dl().circle((obj_x, obj_y), radius, colour, filled = True)
        segmented = dist.stretch(200,255)
        palm = img.findHaarFeatures('/home/malithsen/downloads/palm.xml')
        fist = img.findHaarFeatures('/home/malithsen/downloads/agest.xml')
        if palm:
            # palm = palm.sortArea()
            palm = palm[-1]
            colour = parm_on_obj(obj_x, obj_y, radius, palm)
            palm.draw()
        elif fist:
            # fist = fist.sortArea()
            fist = fist[-1]
            fist.draw()
            if ON_CIRCLE:
                colour = Color.GREEN
                obj_x, obj_y = fist.x, fist.y
        if normaldisplay:
            img.show()
        else:
            segmented.show()
Example #27
    def __init__(self, interval=1, video=False):
        """ Constructor

        :type interval: int
        :param interval: Check interval, in seconds
        """
        self.interval = interval
        self.url = "http://192.168.10.222:1201/videostream.cgi?user=admin&pwd="
        self.ipCam = JpegStreamCamera(self.url)
        self.display = Display()

        thread = threading.Thread(target=self.run, args=(video,))
        thread.daemon = True                            # Daemonize thread
        thread.start()                                  # Start the execution
Example #28
def getImage(video):
    url = "http://192.168.10.222:1201/videostream.cgi?user=admin&pwd="
    ipCam = JpegStreamCamera(url)
    display = Display()
    if video==False:
        imagen = ipCam.getImage().show()
        while not display.isDone():
            pass

    else:
        while not display.isDone():

            imagen = ipCam.getImage()
            #faces = imagen.findHaarFeatures('face')
            #if faces is not None:
            #    faces = faces.sortArea()
            #    bigFace = faces[-1]
                # Draw a green box around the face
            #    bigFace.draw()
            #imagen = ipCam.live()
            imagen.save(display)

    imagen.quit()
Example #29
def record(filename):
    from SimpleCV import Camera, Display
    import time
    neg_dir = "rawdata/%s" % filename
    if not os.path.exists(neg_dir):
        os.makedirs(neg_dir)
    cam = Camera()
    dis = Display()
    time.sleep(2)
    targetFps = 15.0
    fps = targetFps
    sleepTime = 1/targetFps
    start = time.time()
    prevTime = None
    count = 0
    try:
        print "Recording... [keyboard interrupt to quit]"
        while dis.isNotDone():
            img = cam.getImage()
            img = scaleDown(img)
            if fps > targetFps + .5:
                sleepTime += 0.005
            elif fps < targetFps:
                sleepTime = max(sleepTime - 0.005, 0.01)
            if prevTime is not None:
                fps = 1.0 / (time.time() - prevTime)
            prevTime = time.time()
            img.save("%s/%05d.jpg" % (neg_dir, count + 1600))
            count += 1
            img.dl().ezViewText("{0:.3f} fps".format(fps), (0, 0))
            img.save(dis)
            if dis.mouseRight:
                dis.quit()
            time.sleep(sleepTime)

    except KeyboardInterrupt:
        print "Done recording"
Example #30
def main():
    # Get file name
    if len(sys.argv) < 2:
        print "<%s> usage : <%s> <directory name>" %(sys.argv[0], sys.argv[0])
        sys.exit()
    else:
        filename = sys.argv[1]

    config = ConfigParser.RawConfigParser()
    try:
        config.readfp(open("./experiments/" + filename + "/config.ini"))
    except IOError: 
        print "Error: can\'t find file or read data"
        sys.exit()

    # Display
    display = Display()

    # Set of slide images
    slideSet = ImageSet("./experiments/" + filename)

    lineList = config.items('sequence')
    length = len(lineList)
    sleepList = list()

    # Sleep times
    for line in lineList:
        sleepList.append(int(line[1])/1000)

    # Main loop    
    while display.isNotDone():
        for index, img in enumerate(slideSet):
            img.show()
            time.sleep(sleepList[index])

    sys.exit()
Example #31
    def run_frame(self, ti, img):
        """Run the algorithm for one frame

        :param TrackerIn ti: TrackerIn object to send events to
        :return: True if I should be called with the next frame
        """
        img = self.crop_img(img)

        if self.display is None:
            # Consume one frame for the initialization
            self.display = Display(img.size())
            self.prev_img = img
            self.bg_img = None
            self.count = 20
            self.last_time = time.time()
            return True
        elif self.display.isDone():
            return False

        if self.bg_img is None and img:
            self.bg_img = img

        positions = self.find_fingers3(img, self.bg_img)
        if self.count > 0:
            self.bg_img = img
            self.count -= 1
            print "SETTING BG IMAGE"

        di = img  # (bg_img.grayscale() - img.grayscale()).binarize(40)
        for x, y in positions:
            di.dl().circle((int(x), int(y)), 15, color=Color.RED, width=3)
        self.add_positions(ti, positions)

        fps = 1.0 / (time.time() - self.last_time)
        di.dl().ezViewText("{0:.3f} fps".format(fps), (0, 0))
        di.save(self.display)

        self.last_time = time.time()
        self.last_img = True

        if self.display.mouseLeft or self.display.mouseRight:
            self.display.done = True
            return False
        else:
            return True
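
    # Hypothetical driver loop (assumed, not shown in this snippet): keep feeding
    # frames until run_frame() reports it no longer wants the next one. `tracker`,
    # `ti` and `cam` stand in for the surrounding application's tracker instance,
    # TrackerIn event sink and SimpleCV Camera.
    #
    #     while tracker.run_frame(ti, cam.getImage()):
    #         pass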
Example #32
def show_img(img):
    display = Display()
    img.show()

    # Wait for user to close the window or break out of it.
    while display.isNotDone():
        try:
            pass
        except KeyboardInterrupt:
            display.done = True
        if display.mouseRight:
            display.done = True
    display.quit()
Example #33
    def __init__(self, window_size=(640, 480), **kwargs):
        while True:  # Initialize the Camera
            try:
                cam = Camera()
                cam.getImage().flipHorizontal()
            except:
                continue
            else:
                break
        self.cam = cam
        self.image = None
        self.window_size = window_size
        self.display = Display(self.window_size)
        self.__window_center = (
            338, 377)  # (self.window_size[0]/2, self.window_size[1]/2)
        self.__distance = None
        self.__blobs = None
        self.__segmented = None
        self.__circles = None
        self.__scope_layer = None
        self.initialize_scope_layer()
Example #34
from SimpleCV import Camera, Color, Display, Image

cam = Camera()
original_background = Image('weather.png')

disp = Display()

while not disp.isDone():
    img = cam.getImage()
    img = img.flipHorizontal()
    
    bgcolor = img.getPixel(10, 10)
    dist = img.colorDistance(bgcolor)
    mask = dist.binarize(50)
    
    foreground = img - mask

    background = original_background - mask.invert()

    combined = background + foreground

    combined.save(disp)
Example #35
from SimpleCV import Color, Camera, Display, RunningSegmentation
import time

cam = Camera()
rs = RunningSegmentation(0.9, (99, 99, 99))

size = (cam.getImage().size())
disp = Display(size)

# Start the crosshairs in the center of the screen
center = (size[0] / 2, size[1] / 2)

while disp.isNotDone():
    input = cam.getImage()
    # Assume using monitor mounted camera, so flip to create mirror image
    input = input.flipHorizontal()
    rs.addImage(input)  #

    if (rs.isReady()):
        # Get the object that moved
        img = rs.getSegmentedImage(False)  #
        blobs = img.dilate(10).findBlobs()

        # If an object in motion was found
        if (blobs is not None):
            blobs = blobs.sortArea()
            # Update the crosshairs onto the object in motion
            center = (int(blobs[-1].minRectX()), int(blobs[-1].minRectY()))

        # Inside circle
        input.dl().circle(center, 50, Color.BLACK, width=3)  #
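    # The listing appears to be cut off here; presumably the loop ends by pushing
    # the annotated frame to the display each iteration, e.g. (assumed):
    #     input.save(disp)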
Example #36
#!/usr/bin/env python
#
# Released under the BSD license. See LICENSE file for details.
"""
All this example does is find a face and replace it with another image. The
image should auto scale to match the size of the face.
"""
print __doc__

from SimpleCV import Camera, Display, HaarCascade, Image

#initialize the camera
cam = Camera()
# Create the display to show the image
display = Display()

# Load the new face image
troll_face = Image('troll_face.png', sample=True)

# Haar Cascade face detection, only faces
haarcascade = HaarCascade("face")

# Loop forever
while display.isNotDone():
    # Get image, flip it so it looks mirrored, scale to speed things up
    img = cam.getImage().flipHorizontal().scale(0.5)
    # load in trained face file
    faces = img.findHaarFeatures(haarcascade)
    # If there were faces found do something
    if faces:
        face = faces[-1]
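        # Assumed completion (the snippet ends abruptly here): scale the replacement
        # image to the detected face and paste it in, using the SimpleCV Feature/Image
        # API (width()/height(), topLeftCorner(), blit()):
        #     troll = troll_face.scale(face.width(), face.height())
        #     x, y = face.topLeftCorner()
        #     img = img.blit(troll, pos=(int(x), int(y)))
        # ...and finally push the frame to the display with img.save(display)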
Example #37
from SimpleCV import Color, Camera, Display
import os
import webbrowser

cam = Camera()  #starts the camera
display = Display()

while (display.isNotDone()):

    img = cam.getImage()  #gets image from the camera

    barcode = img.findBarcode()  #finds barcode data from image
    if (barcode is not None):  #if there is some data processed
        barcode = barcode[0]
        result = str(barcode.data)
        print result  #prints result of barcode in python shell
        #Above line launches a browser to link scanned. Returns error if not a URL
        barcode = []  #reset barcode data to empty set
        img.save(display)  #shows the image on the screen
Example #38
    def new_dewarp(self):
        vidpath = self.iVidPath  #get input video path

        # isInROI is deprecated and not used in this program
        def isInROI(x, y, R1, R2, Cx, Cy):
            isInOuter = False
            isInInner = False
            xv = x - Cx
            yv = y - Cy
            rt = (xv * xv) + (yv * yv)
            if (rt < R2 * R2):
                isInOuter = True
                if (rt < R1 * R1):
                    isInInner = True
            return isInOuter and not isInInner

        """ ws = width of input video
            hs = height of input video
            wd = width of destination/output video
            Hd = height of destinaton/output video
          
        """

        def buildMap(Ws, Hs, Wd, Hd, R1, R2, Cx, Cy):
            #the function throws type error, if Wd and Hd are not converted to integers
            Hd = int(Hd)
            Wd = int(Wd)
            map_x = np.zeros((Hd, Wd), np.float32)
            map_y = np.zeros((Hd, Wd), np.float32)
            rMap = np.linspace(R1, R1 + (R2 - R1), Hd)
            thetaMap = np.linspace(0, 0 + float(Wd) * 2.0 * np.pi, Wd)
            sinMap = np.sin(thetaMap)
            cosMap = np.cos(thetaMap)

            for y in xrange(0, int(Hd - 1)):
                map_x[y] = Cx + rMap[y] * sinMap
                map_y[y] = Cy + rMap[y] * cosMap

            return map_x, map_y

        # do the unwarping
        def unwarp(img, xmap, ymap):
            output = cv2.remap(img.getNumpyCv2(), xmap, ymap, cv2.INTER_LINEAR)
            result = Image(output, cv2image=True)
            # return result
            return result

        disp = Display(
            (800, 600))  #initialise a 800x600 simplecv display to show preview
        #disp = Display((1296,972))
        vals = []
        last = (0, 0)
        # Load the video
        vc = VirtualCamera(vidpath, "video")
        # Sometimes there is crud at the begining, buffer it out
        for i in range(0, 10):
            img = vc.getImage()
            img.save(disp)
        # Show the user a frame let them left click the center
        #    of the "donut" and the right inner and outer edge
        # in that order. Press esc to exit the display
        while not disp.isDone():
            test = disp.leftButtonDownPosition()
            if test != last and test is not None:
                last = test
                print "[360fy]------- center = {0}\n".format(last)

                vals.append(test)
        print "[360fy]------- Dewarping video and generating frames using center, offset1, offset2\n"

        Cx = vals[0][0]
        Cy = vals[0][1]
        #print str(Cx) + " " + str(Cy)
        # Inner donut radius
        R1x = vals[1][0]
        R1y = vals[1][1]
        R1 = R1x - Cx
        #print str(R1)
        # outer donut radius
        R2x = vals[2][0]
        R2y = vals[2][1]
        R2 = R2x - Cx
        #print str(R2)
        # our input and output image siZes
        Wd = round(float(max(R1, R2)) * 2.0 * np.pi)
        #Wd = 2.0*((R2+R1)/2)*np.pi
        #Hd = (2.0*((R2+R1)/2)*np.pi) * (90/360)
        Hd = (R2 - R1)
        Ws = img.width
        Hs = img.height
        # build the pixel map, this could be sped up
        print "BUILDING MAP"

        xmap, ymap = buildMap(Ws, Hs, Wd, Hd, R1, R2, Cx, Cy)
        print "MAP DONE"

        result = unwarp(img, xmap, ymap)

        result.save(disp)

        print "[360fy]------- Storing frames into ../temp_data/frames\n"
        i = 0
        while img is not None:
            print bcolors.OKBLUE + "\rFrame Number: {0}".format(
                i) + bcolors.ENDC,

            sys.stdout.flush(
            )  #flushes stdout so that frame numbers print continually without skipping
            #print " percent complete         \r",
            result = unwarp(img, xmap, ymap)
            result.save(disp)
            # Save to file
            fname = "../temp_data/frames/FY{num:06d}.png".format(num=i)
            result.save(fname)

            img = vc.getImage()
            i = i + 1
        print " \n"

        if img is None:
            self.statusText.setText(str("Status: Done"))
            disp.quit()
Example #39
from SimpleCV import Image, Display, DrawingLayer, Color
from time import sleep

myDisplay = Display()

raspberryImage = Image("test.jpg")

myDrawingLayer = DrawingLayer((raspberryImage.width, raspberryImage.height))
myDrawingLayer.rectangle((50,20),(250,60),filled=True)
myDrawingLayer.setFontSize(45)
myDrawingLayer.text("Raspberries!",(50,20),color=Color.WHITE)
raspberryImage.addDrawingLayer(myDrawingLayer)
raspberryImage.applyLayers()
raspberryImage.save(myDisplay)
while not myDisplay.isDone():
  sleep(0.1)
Example #40
            xS = Cx + r * np.sin(theta)
            yS = Cy + r * np.cos(theta)
            map_x.itemset((y, x), int(xS))
            map_y.itemset((y, x), int(yS))

    return map_x, map_y


# do the unwarping
def unwarp(img, xmap, ymap):
    output = cv2.remap(img.getNumpyCv2(), xmap, ymap, cv2.INTER_LINEAR)
    result = Image(output, cv2image=True)
    return result


disp = Display((800, 600))
vals = []
last = (0, 0)
# Load the video from the rpi
vc = VirtualCamera("video.h264", "video")
# Sometimes there is crud at the begining, buffer it out
for i in range(0, 10):
    img = vc.getImage()
    img.save(disp)
# Show the user a frame let them left click the center
# of the "donut" and the right inner and outer edge
# in that order. Press esc to exit the display
while not disp.isDone():
    test = disp.leftButtonDownPosition()
    if (test != last and test is not None):
        last = test
Example #41
from SimpleCV import Camera, Display, Image
from time import sleep

myCamera = Camera(prop_set={'width': 320, 'height': 240})
myDisplay = Display(resolution=(320, 240))
stache = Image("mustache-small.bmp")
stacheMask = stache.createBinaryMask(color1=(0, 0, 0), color2=(254, 254, 254))
stacheMask = stacheMask.invert()
while not myDisplay.isDone():
    frame = myCamera.getImage()
    faces = frame.findHaarFeatures('face')
    if faces:
        for face in faces:
            print "Face at: " + str(face.coordinates())
            myFace = face.crop()
            noses = myFace.findHaarFeatures('nose')
            if noses:
                nose = noses.sortArea()[-1]
                print "Nose at: " + str(nose.coordinates())
                xmust = face.points[0][0] + nose.x - (stache.width / 2)
                ymust = face.points[0][1] + nose.y + (stache.height / 3)
                frame = frame.blit(stache, pos=(xmust, ymust), mask=stacheMask)
                frame.save(myDisplay)
    else:
        print "No faces detected."
    sleep(1)
Example #42
from SimpleCV import Camera, Display
from time import sleep

myCamera = Camera(prop_set={'width': 320, 'height': 240})

myDisplay = Display(resolution=(320, 240))

while not myDisplay.isDone():
   myCamera.getImage().save(myDisplay)
   sleep(.1)
Example #43
from SimpleCV import Image, Display, Color, Camera
cam = Camera(0)  # Get the first camera
disp = Display((640, 480))  # Create a 640x480 display
while (disp.isNotDone()):  # While we don't exit the display
    img = cam.getImage().binarize()  # Get an image and make it black and white
    # Draw the text "Hello World" at (40,40) in red.
    img.drawText("Hello World!", 40, 40, fontsize=60, color=Color.RED)
    img.save(disp)  # Save it to the screen
Example #44
def get_bounding_box(keyword, url, filename):
    # get the image
    img = Image(url)

    # resize the image so things aren't so slow, if necessary
    w, h = img.size()
    if w > 1200 or h > 1200:
        maxdim = max(w, h)
        ratio = math.ceil(maxdim/800.0)
        print "   resizing..."
        img = img.resize(w=int(w/ratio), h=int(h/ratio))
    else:
        ratio = 1

    # get the canvas
    disp = Display((800, 800))
    # text overlay
    textlayer = DrawingLayer(img.size())
    textlayer.setFontSize(30)
    cx, cy = 10, 10
    for xoff in range(-2, 3):
        for yoff in range(-2, 3):
            textlayer.text(keyword, (cx + xoff, cy + yoff), color=Color.BLACK)
    textlayer.text(keyword, (cx, cy), color=Color.WHITE)

    # two points to declare a bounding box
    point1 = None
    point2 = None
    while disp.isNotDone():
        cursor = (disp.mouseX, disp.mouseY)
        if disp.leftButtonUp:
            if point1 and point2:
                point1 = None
                point2 = None
            if point1:
                point2 = disp.leftButtonUpPosition()
            else:
                point1 = disp.leftButtonUpPosition()
        bb = None
        if point1 and point2:
            bb = disp.pointsToBoundingBox(point1, point2)
        elif point1 and not point2:
            bb = disp.pointsToBoundingBox(point1, cursor)

        img.clearLayers()
        drawlayer = DrawingLayer(img.size())
        if bb:
            drawlayer.rectangle((bb[0], bb[1]), (bb[2], bb[3]), color=Color.RED)

        # keyboard commands
        if pygame.key.get_pressed()[pygame.K_s]:
            # skip for now
            raise Skip()
        elif pygame.key.get_pressed()[pygame.K_b]:
            # mark it as an invalid picture
            raise BadImage()
        elif pygame.key.get_pressed()[pygame.K_RETURN]:
            if point1 and point2:
                bb = disp.pointsToBoundingBox(scale(ratio, point1), scale(ratio, point2))
                return bb
            elif not point1 and not point2:
                bb = disp.pointsToBoundingBox((0, 0), (w, h))
                return bb


        drawlayer.line((cursor[0], 0), (cursor[0], img.height), color=Color.BLUE)
        drawlayer.line((0, cursor[1]), (img.width, cursor[1]), color=Color.BLUE)
        #drawlayer.circle(cursor, 2, color=Color.BLUE, filled=True)
        img.addDrawingLayer(textlayer)
        img.addDrawingLayer(drawlayer)
        img.save(disp)
Example #45
#!/usr/bin/env python
# Original Author: Patrick Benz / @Pa_trick17
# Check out the video here:
# http://www.youtube.com/watch?v=cAL6u6Q0Xuc
from SimpleCV import Image, Display
import time
 
#webcam-URLs
marktplatz = 'http://www.tuebingen.de/camera/webcam...'
marktgasse = 'http://leuchtengalerie.com/webcam/leu...'
neckarbruecke1 = 'http://www.tagblatt.de/cms_media/webc...'
neckarbruecke2 = 'http://tuebingen-info.de/fileadmin/we...'
 
display = Display((1240, 960))
 
counter = 0
 
while display.isNotDone():
    img1 = Image(marktplatz)
    img1 = img1.adaptiveScale((640, 480))
    img2 = Image(marktgasse)
    img2 = img2.adaptiveScale((640, 480))
    img3 = Image(neckarbruecke1)
    img3 = img3.adaptiveScale((640, 480))
    img4 = Image(neckarbruecke2)
    img4 = img4.adaptiveScale((640, 480))
    top = img1.sideBySide(img2)
    bottom = img3.sideBySide(img4)
    combined = top.sideBySide(bottom, side="bottom")
    combined.save(display)
    combined.save("webcam" + str(counter).zfill(4) + ".jpg")
    counter += 1  # advance the frame counter used in the snapshot filename
Example #46
# -*- coding: utf-8 -*-

from SimpleCV import Display, Image, Color

winsize = (640, 480)
display = Display(winsize)
img = Image(winsize)
img.save(display)

while not display.isDone():
    if display.mouseLeft:
        img.dl().circle((display.mouseX, display.mouseY),
                        4,
                        Color.WHITE,
                        filled=True)
        img.save(display)
        img.save("ex7.png")
Example #47
# coding: utf-8

# # Hello World

# In[ ]:

from SimpleCV import Camera,Color,Display,Image
camera = Camera()
disp = Display()
while disp.isNotDone():
    image = camera.getImage()
    image.save(disp)
print("Done")
exit()


# # Detect Yellow Object

# In[1]:

from SimpleCV import Camera,Color,Display,Image
camera = Camera()
disp = Display()
while disp.isNotDone():
    image = camera.getImage()
    yellow = image.colorDistance(Color.YELLOW).binarize(140).invert()
    onlyYellow = image-yellow
    onlyYellow.save(disp)
    
print("Done")
Example #48
'''
This program superimposes the camera feed onto the television in the picture
'''
print __doc__

from SimpleCV import Camera, Image, Display

tv_original = Image("family_watching_television_1958.jpg", sample=True)

tv_coordinates = [(353, 379), (433, 380), (432, 448), (354, 446)]
tv_mask = Image(tv_original.size()).invert().warp(tv_coordinates)
tv = tv_original - tv_mask

c = Camera()
d = Display(tv.size())

while d.isNotDone():
    bwimage = c.getImage().grayscale().resize(tv.width, tv.height)
    on_tv = tv + bwimage.warp(tv_coordinates)
    on_tv.save(d)
Example #49
	def getDisplay(self):
		self.__setupDisplayProperties()
		return Display(self.fullscreen_size, pygame.RESIZABLE, self.title)
Example #50
from base64 import b64encode
import sys

import pygame
import requests
from PIL import (Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont,
                 ImageOps)
from SimpleCV import Image as Image2
from SimpleCV import Camera, Display

# when running this project, pass in the key and secret via command line.
# Little more secure.
REKOGNITION_KEY = sys.argv[1]
REKOGNITION_SECRET = sys.argv[2]
URL = "http://rekognition.com/func/api/"
WEBCAM = Camera(0)
VIDEO_DISPLAY = Display()


def play_video(image_file):
    """
    Get video feed from camera and save frame on mouse click.

    :param image_file: - The image file location to save to.
    :type image_file: str
    """
    while VIDEO_DISPLAY.isNotDone():
        webcam_image = WEBCAM.getImage().scale(800, 450).show()
        if VIDEO_DISPLAY.mouseLeft:
            webcam_image.save(image_file)
            break
    return
Example #51
from SimpleCV import ColorSegmentation, Image, Display, ImageSet
import time

redBlock = Image('redblock.png')
greenBlock = Image('greenblock.png')
blueBlock = Image('blueblock.png')

cs = ColorSegmentation()
cs.addToModel(redBlock)
cs.addToModel(greenBlock)
cs.addToModel(blueBlock)

cards = ImageSet('cards')
card = None

disp = Display((320, 240))

score = 0
isPrimary = False

while (cards or card) and disp.isNotDone():

	if card is None:
		card = cards.pop()
		
		cs.addImage(card)
		res = cs.getSegmentedImage()
		
		color = res.meanColor()
		if ((color[0] < 254) and (color[1] < 254) and (color[2] < 254)):
			isPrimary = True
Example #52
GPIO.setmode(GPIO.BOARD)
GPIO.setup(PULneg, GPIO.OUT)
GPIO.setup(DIRpos, GPIO.OUT)
GPIO.setup(DIRneg, GPIO.OUT)
GPIO.setup(enblPin, GPIO.OUT)

GPIO.output(PULneg, False)
GPIO.output(DIRpos, False)
GPIO.output(DIRneg, False)
GPIO.output(enblPin, True)

########################################################################################################################

# CV Initialization
winsize = (640, 480)
display = Display(winsize)
normaldisplay = True

# SERVO INITIALIZATION
pwm = Adafruit_PCA9685.PCA9685(0x40)  # PCA
servo_initial = 375
circle_x = 0
circle_y = 0
servo_min = 125  # Min pulse length out of 4096
servo_max = 625  # Max pulse length out of 4096

# Set frequency to 60hz, good for servos.
pwm.set_pwm_freq(60)

pwm.set_pwm(0, 0, servo_initial)
time.sleep(0.0166667)
Example #53
cap.set(cv.CV_CAP_PROP_POS_FRAMES, 10010)
params = cv2.SimpleBlobDetector_Params()
params.minDistBetweenBlobs = 1.0
params.filterByInertia = False
params.filterByConvexity = False
params.filterByColor = False
params.filterByCircularity = False
params.filterByArea = True
params.minArea = 5.0
params.maxArea = 200.0
params.minThreshold = 15
params.maxThreshold = 255

b = cv2.SimpleBlobDetector(params)

display = Display()
counter = 0
box_dim = 48

while display.isNotDone():

    # Capture frame-by-frame
    ret, frame = cap.read()
    blob = b.detect(frame)

    fcount = 0
    for beest in blob:

        if fcount > 100:
            continue
        tmpImg = Image(frame, cv2image=True).crop(int(beest.pt[0]),
Example #54
    def turk(self,
             saveOriginal=False,
             disp_size=(800, 600),
             showKeys=True,
             font_size=16,
             color=Color.RED,
             spacing=10):
        """
        **SUMMARY**

        This function does the turking of the data. The method goes through each image,
        applies the preprocessing (which can return multiple images), and displays each image
        with an optional display of the key mapping. The user then selects the key that describes
        the class of the image. The image is then post-processed and saved to the directory.
        The escape key kills the turking; the space key skips an image.

        **PARAMETERS**

        * *saveOriginal* - if true save the original image versus the preprocessed image.
        * *disp_size* - size of the display to create.
        * *showKeys* - Show the key mapping for the turking. Note that on small images this may not render correctly.
        * *font_size* - the font size for the turking display.
        * *color* - the font color.
        * *spacing* - the spacing between each line of text on the display.

        **RETURNS**

        Nothing but stores each image in the directory. The image sets are also available
        via the getClass method.

        **EXAMPLE**

        >>>> def GetBlobs(img):
        >>>>     blobs = img.findBlobs()
        >>>>     return [b.mMask for b in blobs]

        >>>> def ScaleInv(img):
        >>>>     return img.resize(100,100).invert()

        >>>> turker = TurkingModule(['./data/'],['./turked/'],['apple','banana','cherry'],['a','b','c'],preProcess=GetBlobs,postProcess=ScaleInv)
        >>>> turker.turk()
        >>>> # ~~~ stuff ~~~
        >>>> turker.save('./derp.pkl')

        ** TODO **
        TODO: fix the display so that it renders correctly no matter what the image size.
        TODO: Make it so you can stop and start turking at any given spot in the process
        """
        disp = Display(disp_size)
        bail = False
        for img in self.srcImgs:
            print img.filename
            samples = self.preProcess(img)
            for sample in samples:
                if (showKeys):
                    sample = self._drawControls(sample, font_size, color,
                                                spacing)

                sample.save(disp)
                gotKey = False
                while (not gotKey):
                    keys = disp.checkEvents(True)
                    for k in keys:
                        if k in self.keyMap:
                            if saveOriginal:
                                self._saveIt(img, self.keyMap[k])
                            else:
                                self._saveIt(sample, self.keyMap[k])
                            gotKey = True
                        if k == 'space':
                            gotKey = True  # skip
                        if k == 'escape':
                            return
Example #55
				p = self.articulaciones.pop()
				img.dl().line(puntoInicial, p, Color.BLUE, width=5)
				img.dl().circle(p, 10, Color.BLUE, width=5)
				img.applyLayers()
				self.angulosHuesos.append(aux.anguloLineaEntreDosPuntos(p, puntoInicial))
				puntoInicial = p	
		
	def depuracion(self):
		self.enDepuracion = True
		print " ---------------------"
		print "Areas: "
		print self.AreaBlobs  
		print "Numero de blobs candidatos por area: "
		print self.numBlobsCandidatosPorArea
		print "Tiempo de tratamiento de imagen: "
		print self.tiempoTratamiento
		print "Numero Articulaciones detectadas: "
		print len(self.articulaciones)
		print " ---------------------"
		time.sleep(1)
		
if __name__ == '__main__':
	
	display = Display() 
	imgT = ImagenTratada()
	
	while not display.isDone():
		img = imgT.capturaYTrataLaImagen(150)
		img.save(display)

Example #56
from SimpleCV import ColorSegmentation, Image, Camera, VirtualCamera, Display, Color

# Open reference video
cam = VirtualCamera(
    '../../Recording/Videos/kiteFlying from Zenith Wind Power-jn9RrUCiWKM.mp4',
    'video')
# Select reference image
img = cam.getFrame(50)
modelImage = img.crop(255, 180, 70, 20)
modelImage = Image('../kite_detail.jpg')
ts = []
disp = Display()
for i in range(0, 50):
    img = cam.getImage()
while (disp.isNotDone()):
    img = cam.getImage()
    bb = (255, 180, 70, 20)
    ts = img.track("camshift", ts, modelImage, bb, num_frames=1)
    modelImage = Image('../kite_detail.jpg')
    # now here in first loop iteration since ts is empty,
    # img0 and bb will be considered.
    # New tracking object will be created and added in ts (TrackSet)
    # After first iteration, ts is not empty and hence the previous
    # image frames and bounding box will be taken from ts and img0
    # and bb will be ignored.
    ts.draw()
    ts.drawBB()
    ts.showCoordinates()
    img.show()
Example #57
            except KeyboardInterrupt:
                print "User exit"
                done.value = True

    def cleanup_completely(self):
        map(lambda f: os.remove(os.path.join(self._dir, f)), self.get_list())
        os.removedirs(self._dir)

    def getImage(self):
        return Image(self.get_current_image_path())

    def __del__(self):
        self.kill_mplayer()


if __name__ == "__main__":
    cam = MplayerCamera()
    disp = Display()
    done = False
    while not done:
        try:
            cam.getImage().save(disp)
        except KeyboardInterrupt:
            print "User exit"
            done = True
        except Exception, e:
            print e
            done = True
    cam.kill_mplayer()
    time.sleep(0.1)
Example #58
from SimpleCV import Image, Display
from time import sleep

Display1 = Display()
Image1 = Image("raspberrypi.png")
Image1.save(Display1)
while not Display1.isDone():
    sleep(1)