Example #1
def control_by_cam():
    scale_amount = (200, 150)
    d = Display(scale_amount)
    cam = Camera(0)
    prev = cam.getImage().flipHorizontal().scale(scale_amount[0],
                                                 scale_amount[1])
    time.sleep(0.5)
    t = 0.5
    buffer = 20
    count = 0
    motionStr = ""  # default if no motion is ever measured
    while d.isNotDone():
        current = cam.getImage().flipHorizontal()
        current = current.scale(scale_amount[0], scale_amount[1])
        if (count < buffer):
            count = count + 1
        else:
            fs = current.findMotion(prev, window=15, method="BM")
            lengthOfFs = len(fs)
            if fs:
                dx = 0
                for f in fs:
                    dx = dx + f.dx
                dx = (dx / lengthOfFs)
                motionStr = movement_check(dx, t)
                current.drawText(motionStr, 10, 10)
        prev = current
        time.sleep(0.01)
        current.save(d)
    return motionStr
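movement_check() is not defined in this snippet; a minimal sketch of what it presumably does, mapping the averaged horizontal flow dx to a direction string using the threshold t (the helper's body and the returned labels are assumptions):

def movement_check(dx, t):
    # dx: mean horizontal optical flow; t: dead-zone threshold (hypothetical helper)
    if dx > t:
        return "right"
    elif dx < -t:
        return "left"
    else:
        return "still"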
Example #2
def main():

    x = 0
    cam = Camera(prop_set={'width': 640, 'height': 480})
    disp = Display(resolution=(320, 240))
    while disp.isNotDone():
        img = cam.getImage()
        img = img.scale(0.5)
        eyes = img.findHaarFeatures("eye.xml")
        if eyes:
            for eye in eyes:
                eye.draw()
                print "eyes Detected"
            # x = 0
        else:

            # x += 1

            print "close eyes"
            #print (x)
            #if x > 10:
            #  print "HOY GISING"

            # return main()
        img.save(disp)
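The commented-out lines above sketch a drowsiness alarm: count consecutive frames with no eyes detected and warn past a threshold. A minimal working version of that idea, reusing the threshold of 10 and the messages from the comments:

def main_with_alarm():
    x = 0  # consecutive frames without detected eyes
    cam = Camera(prop_set={'width': 640, 'height': 480})
    disp = Display(resolution=(320, 240))
    while disp.isNotDone():
        img = cam.getImage().scale(0.5)
        eyes = img.findHaarFeatures("eye.xml")
        if eyes:
            x = 0  # eyes open: reset the counter
            for eye in eyes:
                eye.draw()
        else:
            x += 1
            if x > 10:  # eyes not seen for ~10 frames in a row
                print "HOY GISING"  # the wake-up alarm from the comments
        img.save(disp)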
Example #3
def main(cameraNumber, camWidth, camHeight):

    img = None

    # create a display with size (width, height)
    disp = Display((camWidth, camHeight))

    # Initialize Camera
    cam = Camera(cameraNumber,
                 prop_set={
                     "width": camWidth,
                     "height": camHeight
                 })

    prev = cam.getImage()

    while disp.isNotDone():
        # Finally, let's get started.
        # KISS: just get the image... don't get fancy

        img = cam.getImage()

        # Difference with the previous frame highlights what moved
        diff = img - prev

        diff.save(disp)

        prev = img
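The raw difference image only visualizes motion. A common next step is to threshold it and group the changed pixels into blobs; a minimal sketch along the lines of the loop above (the threshold of 40 and minsize of 100 are arbitrary guesses):

        motion = (img - prev).threshold(40)  # keep only pixels that changed noticeably
        blobs = motion.findBlobs(minsize=100)  # group changed pixels into regions
        if blobs:
            blobs[-1].draw()  # outline the largest moving region
        motion.save(disp)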
Example #4
    def run(self):
        m = alsaaudio.Mixer()  # defined alsaaudio.Mixer to change volume
        scale = (300, 250)  # increased from (200,150). works well
        d = Display(scale)
        cam = Camera()
        prev = cam.getImage().scale(scale[0], scale[1])
        sleep(0.5)
        buffer = 20
        count = 0
        prev_t = time()  # Note initial time
        while d.isNotDone():
            current = cam.getImage()
            current = current.scale(scale[0], scale[1])
            if (count < buffer):
                count = count + 1
            else:
                fs = current.findMotion(prev, method="LK")  # find motion
                # Tried BM and LK; LK works better. Need to learn more about LK.
                if fs:  # if a featureset was found
                    dx = 0
                    dy = 0
                    for f in fs:
                        dx = dx + f.dx  # accumulate the detected optical flow
                        dy = dy + f.dy

                    dx = (dx / len(fs))  # take the average
                    dy = (dy / len(fs))

                    if dy > 2 or dy < -2:
                        vol = int(m.getvolume()[0])  # getting master volume
                        vol = vol + (-dy * 3)  # the original if/else branches were identical
                        if vol > 100:
                            vol = 100
                        elif vol < 0:
                            vol = 0
                        print vol
                        m.setvolume(int(vol))  # setting master volume

                    if dx > 3:
                        cur_t = time()
                        if cur_t > 5 + prev_t:  # adding some time delay
                            self.play("next")  # next track
                            prev_t = cur_t

                    if dx < -3:
                        cur_t = time()
                        if cur_t > 5 + prev_t:  # same delay before going back
                            self.play("previous")  # previous track
                            prev_t = cur_t

            prev = current
            sleep(0.01)
            current.save(d)
Example #5
def calibrate():
    winsize = (640, 480)
    display = Display(winsize)
    bg_img = get_image()
    bg_img.save(display)
    while not display.isDone():
        img = get_image()
        img.save(display)
        if display.mouseLeft:
            return img.getPixel(display.mouseX, display.mouseY), bg_img, img
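get_image() is not defined in this snippet; a minimal sketch, assuming a module-level SimpleCV Camera (the camera setup itself is an assumption):

from SimpleCV import Camera

cam = Camera()  # hypothetical module-level camera shared by calibrate()

def get_image():
    # grab a frame and scale it to the 640x480 window used above
    return cam.getImage().scale(640, 480)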
Example #6
    def recordVideo(self, length=5):
        BUFFER_NAME = 'buffer.avi'
        vs = VideoStream(fps=24, filename=BUFFER_NAME, framefill=True)
        self.disp = Display((self.width, self.height))
        cam = Camera(prop_set={"width": self.width, "height": self.height})

        while self.continueRecord:
            gen = (i for i in range(0, 30 * length) if self.continueRecord)
            for i in gen:
                img = cam.getImage()
                vs.writeFrame(img)
                img.save(self.disp)
            self.continueRecord = False
        print "Broke capture loop"
        self.disp.quit()

        print "Saving video"
Example #7
    def __init__(self, window_size=(640, 480), **kwargs):
        while True:  # Initialize the Camera
            try:
                cam = Camera()
                cam.getImage().flipHorizontal()
            except:
                continue
            else:
                break
        self.cam = cam
        self.image = None
        self.window_size = window_size
        self.display = Display(self.window_size)
        self.__window_center = (
            338, 377)  # (self.window_size[0]/2, self.window_size[1]/2)
        self.__distance = None
        self.__blobs = None
        self.__segmented = None
        self.__circles = None
        self.__scope_layer = None
        self.initialize_scope_layer()
Example #8
from SimpleCV import Image, Display
from time import sleep

Display1 = Display()
Image1 = Image("raspberrypi.png")
Image1.save(Display1)
while not Display1.isDone():
    sleep(1)
Example #9
    def turk(self,
             saveOriginal=False,
             disp_size=(800, 600),
             showKeys=True,
             font_size=16,
             color=Color.RED,
             spacing=10):
        """
        **SUMMARY**

        This function does the turking of the data. The method goes through each image,
        applies the preprocessing (which can return multiple images), and displays each image
        with an optional display of the key mapping. The user then selects the key that describes
        the class of the image. The image is then post-processed and saved to the directory.
        The escape key kills the turking; the space key skips an image.

        **PARAMETERS**

        * *saveOriginal* - if True, save the original image rather than the preprocessed image.
        * *disp_size* - size of the display to create.
        * *showKeys* - Show the key mapping for the turking. Note that on small images this may not render correctly.
        * *font_size* - the font size for the turking display.
        * *color* - the font color.
        * *spacing* - the spacing between each line of text on the display.

        **RETURNS**

        Nothing but stores each image in the directory. The image sets are also available
        via the getClass method.

        **EXAMPLE**

        >>> def GetBlobs(img):
        >>>     blobs = img.findBlobs()
        >>>     return [b.mMask for b in blobs]

        >>> def ScaleInv(img):
        >>>     return img.resize(100,100).invert()

        >>> turker = TurkingModule(['./data/'],['./turked/'],['apple','banana','cherry'],['a','b','c'],preProcess=GetBlobs,postProcess=ScaleInv)
        >>> turker.turk()
        >>> # ~~~ stuff ~~~
        >>> turker.save('./derp.pkl')

        ** TODO **
        TODO: fix the display so that it renders correctly no matter what the image size.
        TODO: Make it so you can stop and start turking at any given spot in the process
        """
        disp = Display(disp_size)
        bail = False
        for img in self.srcImgs:
            print img.filename
            samples = self.preProcess(img)
            for sample in samples:
                if (showKeys):
                    sample = self._drawControls(sample, font_size, color,
                                                spacing)

                sample.save(disp)
                gotKey = False
                while (not gotKey):
                    keys = disp.checkEvents(True)
                    for k in keys:
                        if k in self.keyMap:
                            if saveOriginal:
                                self._saveIt(img, self.keyMap[k])
                            else:
                                self._saveIt(sample, self.keyMap[k])
                            gotKey = True
                        if k == 'space':
                            gotKey = True  # skip
                        if k == 'escape':
                            return
Example #10
from SimpleCV import Image, Display, DrawingLayer, Color
from time import sleep

myDisplay = Display()

raspberryImage = Image("test.jpg")

myDrawingLayer = DrawingLayer((raspberryImage.width, raspberryImage.height))
myDrawingLayer.rectangle((50,20),(250,60),filled=True)
myDrawingLayer.setFontSize(45)
myDrawingLayer.text("Raspberries!",(50,20),color=Color.WHITE)
raspberryImage.addDrawingLayer(myDrawingLayer)
raspberryImage.applyLayers()
raspberryImage.save(myDisplay)
while not myDisplay.isDone():
  sleep(0.1)
Example #11
def get_bounding_box(keyword, url, filename):
    # get the image
    img = Image(url)

    # resize the image so things aren't so slow, if necessary
    w, h = img.size()
    if w > 1200 or h > 1200:
        maxdim = max(w, h)
        ratio = math.ceil(maxdim/800.0)
        print "   resizing..."
        img = img.resize(w=int(w/ratio), h=int(h/ratio))
    else:
        ratio = 1

    # get the canvas
    disp = Display((800, 800))
    # text overlay
    textlayer = DrawingLayer(img.size())
    textlayer.setFontSize(30)
    cx, cy = 10, 10
    for xoff in range(-2, 3):
        for yoff in range(-2, 3):
            textlayer.text(keyword, (cx + xoff, cy + yoff), color=Color.BLACK)
    textlayer.text(keyword, (cx, cy), color=Color.WHITE)

    # two points to declare a bounding box
    point1 = None
    point2 = None
    while disp.isNotDone():
        cursor = (disp.mouseX, disp.mouseY)
        if disp.leftButtonUp:
            if point1 and point2:
                point1 = None
                point2 = None
            if point1:
                point2 = disp.leftButtonUpPosition()
            else:
                point1 = disp.leftButtonUpPosition()
        bb = None
        if point1 and point2:
            bb = disp.pointsToBoundingBox(point1, point2)
        elif point1 and not point2:
            bb = disp.pointsToBoundingBox(point1, cursor)

        img.clearLayers()
        drawlayer = DrawingLayer(img.size())
        if bb:
            drawlayer.rectangle((bb[0], bb[1]), (bb[2], bb[3]), color=Color.RED)

        # keyboard commands
        if pygame.key.get_pressed()[pygame.K_s]:
            # skip for now
            raise Skip()
        elif pygame.key.get_pressed()[pygame.K_b]:
            # mark it as an invalid picture
            raise BadImage()
        elif pygame.key.get_pressed()[pygame.K_RETURN]:
            if point1 and point2:
                bb = disp.pointsToBoundingBox(scale(ratio, point1), scale(ratio, point2))
                return bb
            elif not point1 and not point2:
                bb = disp.pointsToBoundingBox((0, 0), (w, h))
                return bb


        drawlayer.line((cursor[0], 0), (cursor[0], img.height), color=Color.BLUE)
        drawlayer.line((0, cursor[1]), (img.width, cursor[1]), color=Color.BLUE)
        #drawlayer.circle(cursor, 2, color=Color.BLUE, filled=True)
        img.addDrawingLayer(textlayer)
        img.addDrawingLayer(drawlayer)
        img.save(disp)
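The scale() helper used in the Enter-key branch is not defined in this snippet (nor are the Skip and BadImage exceptions, which presumably live elsewhere); it most likely maps a point on the resized image back to original-image coordinates. A minimal sketch under that assumption:

def scale(ratio, point):
    # undo the earlier img.resize(): display coords -> original-image coords
    return (int(point[0] * ratio), int(point[1] * ratio))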
Example #12
GPIO.setmode(GPIO.BOARD)
GPIO.setup(PULneg, GPIO.OUT)
GPIO.setup(DIRpos, GPIO.OUT)
GPIO.setup(DIRneg, GPIO.OUT)
GPIO.setup(enblPin, GPIO.OUT)

GPIO.output(PULneg, False)
GPIO.output(DIRpos, False)
GPIO.output(DIRneg, False)
GPIO.output(enblPin, True)

########################################################################################################################

# CV Initialization
winsize = (640, 480)
display = Display(winsize)
normaldisplay = True

# SERVO INITIALIZATION
pwm = Adafruit_PCA9685.PCA9685(0x40)  # PCA
servo_initial = 375
circle_x = 0
circle_y = 0
servo_min = 125  # Min pulse length out of 4096
servo_max = 625  # Max pulse length out of 4096

# Set frequency to 60hz, good for servos.
pwm.set_pwm_freq(60)

pwm.set_pwm(0, 0, servo_initial)
time.sleep(0.0166667)
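The variables above (circle_x, servo_min, servo_max) suggest that a detected circle's position is later mapped onto the servo's pulse range. A minimal sketch of such a proportional mapping; the 640-pixel frame width matches winsize, but the mapping itself is an assumption:

def circle_to_pulse(circle_x, frame_width=640):
    # clamp, then map [0, frame_width] linearly onto [servo_min, servo_max]
    x = max(0, min(frame_width, circle_x))
    return servo_min + int((servo_max - servo_min) * x / float(frame_width))

# e.g. pwm.set_pwm(0, 0, circle_to_pulse(circle_x))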
Example #13
	def getDisplay(self):
		self.__setupDisplayProperties()
		return Display(self.fullscreen_size, pygame.RESIZABLE, self.title)
Example #14
'''
This program superimposes the camera feed onto the television in the picture.
'''
print __doc__

from SimpleCV import Camera, Image, Display

tv_original = Image("family_watching_television_1958.jpg", sample=True)

tv_coordinates = [(353, 379), (433, 380), (432, 448), (354, 446)]
tv_mask = Image(tv_original.size()).invert().warp(tv_coordinates)
tv = tv_original - tv_mask

c = Camera()
d = Display(tv.size())

while d.isNotDone():
    bwimage = c.getImage().grayscale().resize(tv.width, tv.height)
    on_tv = tv + bwimage.warp(tv_coordinates)
    on_tv.save(d)
Example #15
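# (fragment) The lines below are the tail of a per-pixel buildMap(Ws, Hs, Wd, Hd,
# R1, R2, Cx, Cy) helper that fills polar lookup tables for the unwarp below;
# a complete, vectorized version of the same helper appears in Example #25.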
            xS = Cx + r * np.sin(theta)
            yS = Cy + r * np.cos(theta)
            map_x.itemset((y, x), int(xS))
            map_y.itemset((y, x), int(yS))

    return map_x, map_y


# do the unwarping
def unwarp(img, xmap, ymap):
    output = cv2.remap(img.getNumpyCv2(), xmap, ymap, cv2.INTER_LINEAR)
    result = Image(output, cv2image=True)
    return result


disp = Display((800, 600))
vals = []
last = (0, 0)
# Load the video from the rpi
vc = VirtualCamera("video.h264", "video")
# Sometimes there is crud at the beginning, buffer it out
for i in range(0, 10):
    img = vc.getImage()
    img.save(disp)
# Show the user a frame and let them left-click the center
# of the "donut", then the right inner and outer edges,
# in that order. Press Esc to exit the display.
while not disp.isDone():
    test = disp.leftButtonDownPosition()
    if (test != last and test is not None):
        last = test
Example #16
    def track(self):
        print "Press right mouse button to pause or play"
        print "Use left mouse button to select target"
        print "Target color must be different from background"
        print "Target must have width larger than height"
        print "Target can be upside down"

        #Parameters
        isUDPConnection = False  # Currently switched manually in the code
        display = True
        displayDebug = True
        useBasemap = False
        maxRelativeMotionPerFrame = 2  # How far the target can move between two successive frames
        pixelPerRadians = 320
        radius = pixelPerRadians
        referenceImage = '../ObjectTracking/kite_detail.jpg'
        scaleFactor = 0.5
        isVirtualCamera = True
        useHDF5 = False

        # Open reference image: this is used at initialisation
        target_detail = Image(referenceImage)

        # Get RGB color palette of target (was found to work better than using hue)
        pal = target_detail.getPalette(bins=2, hue=False)

        # Open video to analyse or live stream
        #cam = JpegStreamCamera('http://192.168.1.29:8080/videofeed')#640 * 480
        if isVirtualCamera:
            #cam = VirtualCamera('../../zenith-wind-power-read-only/KiteControl-Qt/videos/kiteFlying.avi','video')
            #cam = VirtualCamera('/media/bat/DATA/Baptiste/Nautilab/kite_project/robokite/ObjectTracking/00095.MTS', 'video')
            #cam = VirtualCamera('output.avi', 'video')
            cam = VirtualCamera(
                '../Recording/Videos/Flying kite images (for kite steering unit development)-YTMgX1bvrTo.mp4',
                'video')
            virtualCameraFPS = 25
        else:
            cam = JpegStreamCamera(
                'http://192.168.43.1:8080/videofeed')  #640 * 480
            #cam = Camera()

        # Get a sample image to initialize the display at the same size
        img = cam.getImage().scale(scaleFactor)
        print img.width, img.height
        # Create a pygame display
        if display:
            if img.width > img.height:
                disp = Display(
                    (27 * 640 / 10, 25 * 400 / 10)
                )  #(int(2*img.width/scaleFactor), int(2*img.height/scaleFactor)))
            else:
                disp = Display((810, 1080))
        #js = JpegStreamer()

        # Initialize variables
        previous_angle = 0  # target has to be upright when starting. Target width has to be larger than target height.
        previous_coord_px = (
            0, 0)  # Initialized to top left corner, which always exists
        previous_dCoord = previous_coord_px
        previous_dAngle = previous_angle
        angles = []
        coords_px = []
        coord_px = [0, 0]
        angle = 0
        target_elevations = []
        target_bearings = []
        times = []
        wasTargetFoundInPreviousFrame = False
        i_frame = 0
        isPaused = False
        selectionInProgress = False
        th = [100, 100, 100]
        skycolor = Color.BLUE
        timeLastTarget = 0

        # Prepare recording
        recordFilename = datetime.datetime.utcnow().strftime(
            "%Y%m%d_%Hh%M_") + 'simpleTrack'
        if useHDF5:
            try:
                os.remove(recordFilename + '.hdf5')
            except:
                print('Creating file ' + recordFilename + '.hdf5')
            """ The following line is used to silence the following error (according to http://stackoverflow.com/questions/15117128/h5py-in-memory-file-and-multiprocessing-error)
    #000: ../../../src/H5F.c line 1526 in H5Fopen(): unable to open file
    major: File accessability
    minor: Unable to open file"""
            h5py._errors.silence_errors()
            recordFile = h5py.File(
                os.path.join(os.getcwd(), 'log', recordFilename + '.hdf5'),
                'a')
            hdfSize = 0
            dset = recordFile.create_dataset('kite', (2, 2),
                                             maxshape=(None, 7))
            imset = recordFile.create_dataset('image',
                                              (2, img.width, img.height, 3),
                                              maxshape=(None, img.width,
                                                        img.height, 3))
        else:
            try:
                os.remove(recordFilename + '.csv')
            except:
                print('Creating file ' + recordFilename + '.csv')
            recordFile = open(
                os.path.join(os.getcwd(), 'log', recordFilename + '.csv'), 'a')
            csv_writer = csv.writer(recordFile)
            csv_writer.writerow([
                'Time (s)', 'x (px)', 'y (px)', 'Orientation (rad)',
                'Elevation (rad)', 'Bearing (rad)', 'ROT (rad/s)'
            ])

        # Launch a thread to get UDP message with orientation of the camera
        mobile = mobileState.mobileState()
        if isUDPConnection:
            mobile.open()
        # Loop while not canceled by user
        t0 = time.time()
        previousTime = t0
        while not (display) or disp.isNotDone():
            t = time.time()
            deltaT = (t - previousTime)
            FPS = 1.0 / deltaT
            #print 'FPS =', FPS
            if isVirtualCamera:
                deltaT = 1.0 / virtualCameraFPS
            previousTime = t
            i_frame = i_frame + 1
            timestamp = datetime.datetime.utcnow()

            # Receive orientation of the camera
            if isUDPConnection:
                mobile.computeRPY([2, 0, 1], [-1, 1, 1])
            ctm = np.array([[sp.cos(mobile.roll), -sp.sin(mobile.roll)], \
                    [sp.sin(mobile.roll), sp.cos(mobile.roll)]]) # Coordinate transform matrix

            if useBasemap:
                # Warning this really slows down the computation
                m = Basemap(width=img.width,
                            height=img.height,
                            projection='aeqd',
                            lat_0=sp.rad2deg(mobile.pitch),
                            lon_0=sp.rad2deg(mobile.yaw),
                            rsphere=radius)

            # Get an image from camera
            if not isPaused:
                img = cam.getImage()
                img = img.resize(int(scaleFactor * img.width),
                                 int(scaleFactor * img.height))

            if display:
                # Pause image when right button is pressed
                dwn = disp.rightButtonDownPosition()
                if dwn is not None:
                    isPaused = not (isPaused)
                    dwn = None

            if display:
                # Create a layer to enable user to make a selection of the target
                selectionLayer = DrawingLayer((img.width, img.height))

            if img:
                if display:
                    # Create a new layer to host information retrieved from video
                    layer = DrawingLayer((img.width, img.height))
                    # Selection is a rectangle drawn while holding mouse left button down
                    if disp.leftButtonDown:
                        corner1 = (disp.mouseX, disp.mouseY)
                        selectionInProgress = True
                    if selectionInProgress:
                        corner2 = (disp.mouseX, disp.mouseY)
                        bb = disp.pointsToBoundingBox(
                            corner1,
                            corner2)  # Display the temporary selection
                        if disp.leftButtonUp:  # User has finished his selection
                            selectionInProgress = False
                            selection = img.crop(bb[0], bb[1], bb[2], bb[3])
                            if selection != None:
                                # The 3 main colors in the area selected are considered.
                                # Note that the selection should be included in the target and not contain background
                                try:
                                    selection.save('../ObjectTracking/' +
                                                   'kite_detail_tmp.jpg')
                                    img0 = Image(
                                        "kite_detail_tmp.jpg"
                                    )  # For unknown reason I have to reload the image...
                                    pal = img0.getPalette(bins=2, hue=False)
                                except:  # getPalette sometimes bugs out and raises LinAlgError because the matrix is not positive definite
                                    pal = pal
                                wasTargetFoundInPreviousFrame = False
                                previous_coord_px = (bb[0] + bb[2] / 2,
                                                     bb[1] + bb[3] / 2)
                        if corner1 != corner2:
                            selectionLayer.rectangle((bb[0], bb[1]),
                                                     (bb[2], bb[3]),
                                                     width=5,
                                                     color=Color.YELLOW)

                # If the target was already found, we can save computation time by
                # reducing the Region Of Interest around predicted position
                if wasTargetFoundInPreviousFrame:
                    ROITopLeftCorner = (max(0, previous_coord_px[0]-maxRelativeMotionPerFrame/2*width), \
                              max(0, previous_coord_px[1] -height*maxRelativeMotionPerFrame/2))
                    ROI = img.crop(ROITopLeftCorner[0], ROITopLeftCorner[1],                          \
                                         maxRelativeMotionPerFrame*width, maxRelativeMotionPerFrame*height, \
                             centered = False)
                    if display:
                        # Draw the rectangle corresponding to the ROI on the complete image
                        layer.rectangle((previous_coord_px[0]-maxRelativeMotionPerFrame/2*width,  \
                                                 previous_coord_px[1]-maxRelativeMotionPerFrame/2*height), \
                                              (maxRelativeMotionPerFrame*width, maxRelativeMotionPerFrame*height), \
                               color = Color.GREEN, width = 2)
                else:
                    # Search the whole image if there is no clue where the target is
                    ROITopLeftCorner = (0, 0)
                    ROI = img
                    '''#Option 1
        target_part0 = ROI.hueDistance(color=(142,50,65)).invert().threshold(150)
        target_part1 = ROI.hueDistance(color=(93,16,28)).invert().threshold(150)
        target_part2 = ROI.hueDistance(color=(223,135,170)).invert().threshold(150)
        target_raw_img = target_part0+target_part1+target_part2
        target_img = target_raw_img.erode(5).dilate(5)

        #Option 2
        target_img = ROI.hueDistance(imgModel.getPixel(10,10)).binarize().invert().erode(2).dilate(2)'''

                # Find sky color
                sky = (img - img.binarize()).findBlobs(minsize=10000)
                if sky:
                    skycolor = sky[0].meanColor()
                # Option 3
                target_img = ROI - ROI  # Black image

                # Loop through palette of target colors
                if display and displayDebug:
                    decomposition = []
                i_col = 0
                for col in pal:
                    c = tuple([int(col[i]) for i in range(0, 3)])
                    # Search the target based on color
                    ROI.save('../ObjectTracking/' + 'ROI_tmp.jpg')
                    img1 = Image('../ObjectTracking/' + 'ROI_tmp.jpg')
                    filter_img = img1.colorDistance(color=c)
                    h = filter_img.histogram(numbins=256)
                    cs = np.cumsum(h)
                    thmax = np.argmin(
                        abs(cs - 0.02 * img.width * img.height)
                    )  # find the threshold that keeps 2% of the pixels in the expected color
                    thmin = np.argmin(
                        abs(cs - 0.005 * img.width * img.height)
                    )  # find the threshold that keeps 0.5% of the pixels in the expected color
                    if thmin == thmax:
                        newth = thmin
                    else:
                        newth = np.argmin(h[thmin:thmax]) + thmin
                    alpha = 0.5
                    th[i_col] = alpha * th[i_col] + (1 - alpha) * newth
                    filter_img = filter_img.threshold(
                        max(40, min(200, th[i_col]))).invert()
                    target_img = target_img + filter_img
                    #print th
                    i_col = i_col + 1
                    if display and displayDebug:
                        [R, G, B] = filter_img.splitChannels()
                        white = (R - R).invert()
                        r = R * 1.0 / 255 * c[0]
                        g = G * 1.0 / 255 * c[1]
                        b = B * 1.0 / 255 * c[2]
                        tmp = white.mergeChannels(r, g, b)
                        decomposition.append(tmp)

                # Get a black background with white target foreground
                target_img = target_img.threshold(150)

                target_img = target_img - ROI.colorDistance(
                    color=skycolor).threshold(80).invert()

                if display and displayDebug:
                    small_ini = target_img.resize(
                        int(img.width / (len(pal) + 1)),
                        int(img.height / (len(pal) + 1)))
                    for tmp in decomposition:
                        small_ini = small_ini.sideBySide(tmp.resize(
                            int(img.width / (len(pal) + 1)),
                            int(img.height / (len(pal) + 1))),
                                                         side='bottom')
                    small_ini = small_ini.adaptiveScale(
                        (int(img.width), int(img.height)))
                    toDisplay = img.sideBySide(small_ini)
                else:
                    toDisplay = img
                    #target_img = ROI.hueDistance(color = Color.RED).threshold(10).invert()

                # Search for binary large objects representing potential target
                target = target_img.findBlobs(minsize=500)

                if target:  # If a target was found

                    if wasTargetFoundInPreviousFrame:
                        predictedTargetPosition = (
                            width * maxRelativeMotionPerFrame / 2,
                            height * maxRelativeMotionPerFrame / 2
                        )  # Target will most likely be close to the center of the ROI
                    else:
                        predictedTargetPosition = previous_coord_px
                        # If there are several targets in the image, take the one closest to the predicted position
                    target = target.sortDistance(predictedTargetPosition)

                    # Get target coordinates according to minimal bounding rectangle or centroid.
                    coordMinRect = ROITopLeftCorner + np.array(
                        (target[0].minRectX(), target[0].minRectY()))
                    coord_px = ROITopLeftCorner + np.array(
                        target[0].centroid())

                    # Rotate the coordinates of roll angle around the middle of the screen
                    rot_coord_px = np.dot(
                        ctm, coord_px -
                        np.array([img.width / 2, img.height / 2])) + np.array(
                            [img.width / 2, img.height / 2])
                    if useBasemap:
                        coord = sp.deg2rad(
                            m(rot_coord_px[0],
                              img.height - rot_coord_px[1],
                              inverse=True))
                    else:
                        coord = localProjection(
                            rot_coord_px[0] - img.width / 2,
                            img.height / 2 - rot_coord_px[1],
                            radius,
                            mobile.yaw,
                            mobile.pitch,
                            inverse=True)
                    target_bearing, target_elevation = coord

                    # Get minimum bounding rectangle for display purpose
                    minR = ROITopLeftCorner + np.array(target[0].minRect())

                    contours = target[0].contour()

                    contours = [
                        ROITopLeftCorner + np.array(contour)
                        for contour in contours
                    ]

                    # Get target features
                    angle = sp.deg2rad(target[0].angle()) + mobile.roll
                    angle = sp.deg2rad(
                        unwrap180(sp.rad2deg(angle),
                                  sp.rad2deg(previous_angle)))
                    width = target[0].width()
                    height = target[0].height()

                    # Check if the kite is upside down
                    # First rotate the kite
                    ctm2 = np.array([[sp.cos(-angle+mobile.roll), -sp.sin(-angle+mobile.roll)], \
                        [sp.sin(-angle+mobile.roll), sp.cos(-angle+mobile.roll)]]) # Coordinate transform matrix
                    rotated_contours = [
                        np.dot(ctm2, contour - coordMinRect)
                        for contour in contours
                    ]
                    y = [-tmp[1] for tmp in rotated_contours]
                    itop = np.argmax(y)  # Then looks at the points at the top
                    ibottom = np.argmin(y)  # and the point at the bottom
                    # The most off-center point is at the bottom
                    if abs(rotated_contours[itop][0]) > abs(
                            rotated_contours[ibottom][0]):
                        isInverted = True
                    else:
                        isInverted = False

                    if isInverted:
                        angle = angle + sp.pi

                        # Filter the data
                    alpha = 1 - sp.exp(-deltaT / self.filterTimeConstant)
                    if not (isPaused):
                        dCoord = np.array(previous_dCoord) * (
                            1 - alpha) + alpha * (
                                np.array(coord_px) - previous_coord_px
                            )  # related to the speed only if cam is fixed
                        dAngle = np.array(previous_dAngle) * (
                            1 - alpha) + alpha * (np.array(angle) -
                                                  previous_angle)
                    else:
                        dCoord = np.array([0, 0])
                        dAngle = np.array([0])


                    #print coord_px, angle, width, height, dCoord

                    # Record important data
                    times.append(timestamp)
                    coords_px.append(coord_px)
                    angles.append(angle)
                    target_elevations.append(target_elevation)
                    target_bearings.append(target_bearing)

                    # Export data to controller
                    self.elevation = target_elevation
                    self.bearing = target_bearing
                    self.orientation = angle
                    dt = time.time() - timeLastTarget
                    self.ROT = dAngle / dt
                    self.lastUpdateTime = t

                    # Save for initialisation of next step
                    previous_dCoord = dCoord
                    previous_angle = angle
                    previous_coord_px = (int(coord_px[0]), int(coord_px[1]))
                    wasTargetFoundInPreviousFrame = True
                    timeLastTarget = time.time()

                else:
                    wasTargetFoundInPreviousFrame = False

                if useHDF5:
                    hdfSize = hdfSize + 1
                    dset.resize((hdfSize, 7))
                    imset.resize((hdfSize, img.width, img.height, 3))
                    dset[hdfSize - 1, :] = [
                        time.time(), coord_px[0], coord_px[1], angle,
                        self.elevation, self.bearing, self.ROT
                    ]
                    imset[hdfSize - 1, :, :, :] = img.getNumpy()
                    recordFile.flush()
                else:
                    csv_writer.writerow([
                        time.time(), coord_px[0], coord_px[1], angle,
                        self.elevation, self.bearing, self.ROT
                    ])

                if display:
                    if target:
                        # Add target features to layer
                        # Minimal rectangle and its center in RED
                        layer.polygon(minR[(0, 1, 3, 2), :],
                                      color=Color.RED,
                                      width=5)
                        layer.circle(
                            (int(coordMinRect[0]), int(coordMinRect[1])),
                            10,
                            filled=True,
                            color=Color.RED)

                        # Target contour and centroid in BLUE
                        layer.circle((int(coord_px[0]), int(coord_px[1])),
                                     10,
                                     filled=True,
                                     color=Color.BLUE)
                        layer.polygon(contours, color=Color.BLUE, width=5)

                        # Speed vector in BLACK
                        layer.line((int(coord_px[0]), int(coord_px[1])),
                                   (int(coord_px[0] + 20 * dCoord[0]),
                                    int(coord_px[1] + 20 * dCoord[1])),
                                   width=3)

                        # Line giving angle
                        layer.line((int(coord_px[0] + 200 * sp.cos(angle)),
                                    int(coord_px[1] + 200 * sp.sin(angle))),
                                   (int(coord_px[0] - 200 * sp.cos(angle)),
                                    int(coord_px[1] - 200 * sp.sin(angle))),
                                   color=Color.RED)

                    # Line giving rate of turn
                    #layer.line((int(coord_px[0]+200*sp.cos(angle+dAngle*10)), int(coord_px[1]+200*sp.sin(angle+dAngle*10))), (int(coord_px[0]-200*sp.cos(angle + dAngle*10)), int(coord_px[1]-200*sp.sin(angle+dAngle*10))))

                    # Add the layer to the raw image
                    toDisplay.addDrawingLayer(layer)
                    toDisplay.addDrawingLayer(selectionLayer)

                    # Add time metadata
                    toDisplay.drawText(str(i_frame) + " " + str(timestamp),
                                       x=0,
                                       y=0,
                                       fontsize=20)

                    # Add Line giving horizon
                    #layer.line((0, int(img.height/2 + mobile.pitch*pixelPerRadians)),(img.width, int(img.height/2 + mobile.pitch*pixelPerRadians)), width = 3, color = Color.RED)

                    # Plot parallels
                    for lat in range(-90, 90, 15):
                        r = range(0, 361, 10)
                        if useBasemap:
                            # \todo improve for high roll
                            l = m(r, [lat] * len(r))
                            pix = [np.array(l[0]), img.height - np.array(l[1])]
                        else:
                            l = localProjection(sp.deg2rad(r), \
                                    sp.deg2rad([lat]*len(r)), \
                                    radius, \
                                    lon_0 = mobile.yaw, \
                                    lat_0 = mobile.pitch, \
                                    inverse = False)
                            l = np.dot(ctm, l)
                            pix = [
                                np.array(l[0]) + img.width / 2,
                                img.height / 2 - np.array(l[1])
                            ]

                        for i in range(len(r) - 1):
                            if isPixelInImage(
                                (pix[0][i], pix[1][i]), img) or isPixelInImage(
                                    (pix[0][i + 1], pix[1][i + 1]), img):
                                layer.line((pix[0][i], pix[1][i]),
                                           (pix[0][i + 1], pix[1][i + 1]),
                                           color=Color.WHITE,
                                           width=2)

                    # Plot meridians
                    for lon in range(0, 360, 15):
                        r = range(-90, 91, 10)
                        if useBasemap:
                            # \todo improve for high roll
                            l = m([lon] * len(r), r)
                            pix = [np.array(l[0]), img.height - np.array(l[1])]
                        else:
                            l= localProjection(sp.deg2rad([lon]*len(r)), \
                                    sp.deg2rad(r), \
                                    radius, \
                                    lon_0 = mobile.yaw, \
                                    lat_0 = mobile.pitch, \
                                    inverse = False)
                            l = np.dot(ctm, l)
                            pix = [
                                np.array(l[0]) + img.width / 2,
                                img.height / 2 - np.array(l[1])
                            ]

                        for i in range(len(r) - 1):
                            if isPixelInImage(
                                (pix[0][i], pix[1][i]), img) or isPixelInImage(
                                    (pix[0][i + 1], pix[1][i + 1]), img):
                                layer.line((pix[0][i], pix[1][i]),
                                           (pix[0][i + 1], pix[1][i + 1]),
                                           color=Color.WHITE,
                                           width=2)

                    # Text giving bearing
                    # \todo improve for high roll
                    for bearing_deg in range(0, 360, 30):
                        l = localProjection(sp.deg2rad(bearing_deg),
                                            sp.deg2rad(0),
                                            radius,
                                            lon_0=mobile.yaw,
                                            lat_0=mobile.pitch,
                                            inverse=False)
                        l = np.dot(ctm, l)
                        layer.text(
                            str(bearing_deg),
                            (img.width / 2 + int(l[0]), img.height - 20),
                            color=Color.RED)

                    # Text giving elevation
                    # \todo improve for high roll
                    for elevation_deg in range(-60, 91, 30):
                        l = localProjection(0,
                                            sp.deg2rad(elevation_deg),
                                            radius,
                                            lon_0=mobile.yaw,
                                            lat_0=mobile.pitch,
                                            inverse=False)
                        l = np.dot(ctm, l)
                        layer.text(str(elevation_deg),
                                   (img.width / 2, img.height / 2 - int(l[1])),
                                   color=Color.RED)

                    #toDisplay.save(js)
                    toDisplay.save(disp)
            if display:
                toDisplay.removeDrawingLayer(1)
                toDisplay.removeDrawingLayer(0)
        recordFile.close()
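isPixelInImage() is called when plotting the parallels and meridians but is not defined in this snippet; a minimal sketch of the bounds check it presumably performs (the signature mirrors the call sites above):

def isPixelInImage((x, y), img):  # Python 2 tuple-parameter style, matching the calls above
    return (0 <= x < img.width) and (0 <= y < img.height)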
Example #17
def doface(aa, f1, cc, f2, ee):

    camera = PiCamera()
    #imgg = Image('img1.jpg')
    #disp = Display(imgg.size())
    dsize = (640, 480)
    disp = Display(dsize)
    #drawing = Image('mustache.png')
    #maskk = drawing.createAlphaMask()

    #camera.start_preview()
    #sleep(2)

    #['right_eye.xml', 'lefteye.xml', 'face3.xml', 'glasses.xml',
    # 'right_ear.xml', 'fullbody.xml', 'profile.xml', 'upper_body2.xml',
    # 'face.xml', 'face4.xml', 'two_eyes_big.xml', 'right_eye2.xml',
    # 'left_ear.xml', 'nose.xml', 'upper_body.xml', 'left_eye2.xml',
    # 'two_eyes_small.xml', 'face2.xml', 'eye.xml', 'face_cv2.xml',
    # 'mouth.xml', 'lower_body.xml']

    while disp.isNotDone():
        camera.capture('img2.png')
        img = Image('img2.png')
        img = img.resize(640, 480)
        #whatt = img.listHaarFeatures()
        faces = img.findHaarFeatures('face.xml')
        print 'faces:', faces
        if faces:  #is not None:
            face = faces.sortArea()[-1]
            #print 'size:',face.size
            if aa == 'none':
                break
            elif aa == 'block':
                face.draw()
            else:
                f0draw = aa + '.png'
                draw0 = Image('use/' + f0draw)
                face = face.blit(draw0, pos=(100, 200))
            #bigFace = face[-1]

            myface = face.crop()
            if f1 and cc is not None:
                feature01 = f1 + '.xml'
                f1draw = cc + '.png'
                draw1 = Image('/home/pi/cv/use/' + f1draw)

                feature1s = myface.findHaarFeatures(feature01)
                if feature1s is not None:
                    feature1 = feature1s.sortArea()[-1]
                    xpos1 = face.points[0][0] + feature1.x - (draw1.width / 2)
                    ypos1 = face.points[0][
                        1] + feature1.y  #+ (2*draw1.height/3)
                    #pos = (xmust,ymust)
                    img = img.blit(draw1, pos=(xpos1, ypos1))  #mask=maskk)

            if f2 and ee is not None:
                feature02 = f2 + '.xml'
                f2draw = ee + '.png'
                draw2 = Image('/home/pi/cv/use/' + f2draw)

                feature2s = myface.findHaarFeatures(feature02)
                if feature2s is not None:
                    feature2 = feature2s.sortArea()[-1]
                    xpos2 = face.points[0][0] + feature2.x - (draw2.width / 2)
                    ypos2 = face.points[0][
                        1] + feature2.y  #+ (2*draw2.height/3)
                    #pos = (xmust,ymust)
                    img = img.blit(draw2, pos=(xpos2, ypos2))  #mask=maskk)

            img.save(disp)
        else:
            print 'no face~~'
Example #18
from SimpleCV import Camera, Display
from time import sleep

myCamera = Camera(prop_set={'width': 640, 'height': 480})

myDisplay = Display(resolution=(640, 480))

while not myDisplay.isDone():
    frame = myCamera.getImage()
    frame = frame.scale(0.5)
    faces = frame.findHaarFeatures("face.xml")

    if faces:
        faces = faces.sortArea()
        biggestFace = faces[-1]  # picking the largest face
        biggestFace.draw()
        # biggestFace.crop() would extract the face region from the frame
    else:
        print "No Faces detected."

    frame.save(myDisplay)
    sleep(.1)
Example #19
from SimpleCV import Camera, Display, Image
from time import sleep

myCamera = Camera(prop_set={'width': 320, 'height': 240})
myDisplay = Display(resolution=(320, 240))
stache = Image("mustache-small.bmp")
stacheMask = stache.createBinaryMask(color1=(0, 0, 0), color2=(254, 254, 254))
stacheMask = stacheMask.invert()
while not myDisplay.isDone():
    frame = myCamera.getImage()
    faces = frame.findHaarFeatures('face')
    if faces:
        for face in faces:
            print "Face at: " + str(face.coordinates())
            myFace = face.crop()
            noses = myFace.findHaarFeatures('nose')
            if noses:
                nose = noses.sortArea()[-1]
                print "Nose at: " + str(nose.coordinates())
                xmust = face.points[0][0] + nose.x - (stache.width / 2)
                ymust = face.points[0][1] + nose.y + (stache.height / 3)
                frame = frame.blit(stache, pos=(xmust, ymust), mask=stacheMask)
                frame.save(myDisplay)
    else:
        print "No faces detected."
    sleep(1)
Example #20
from SimpleCV import Display, Camera
import time

#Motion Detection using SimpleCV

cap = Camera()

t0 = cap.getImage()

disp = Display(t0.size())

while not disp.isDone():
    t1 = cap.getImage()
    dist = t1 - t0

    mat = dist.getNumpy()
    mean = mat.mean()

    dist.save(disp)

    if mean >= 3:
        print 'Something is crawling'

    time.sleep(0.5)
    t0 = t1
Example #21
import sys
from base64 import b64encode

import pygame
import requests
from PIL import (Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont,
                 ImageOps)
from SimpleCV import Image as Image2
from SimpleCV import Camera, Display

# When running this project, pass in the key and secret via the command line.
# A little more secure.
REKOGNITION_KEY = sys.argv[1]
REKOGNITION_SECRET = sys.argv[2]
URL = "http://rekognition.com/func/api/"
WEBCAM = Camera(0)
VIDEO_DISPLAY = Display()


def play_video(image_file):
    """
    Get video feed from camera and save frame on mouse click.

    :param image_file: - The image file location to save to.
    :type image_file: str
    """
    while VIDEO_DISPLAY.isNotDone():
        webcam_image = WEBCAM.getImage().scale(800, 450).show()
        if VIDEO_DISPLAY.mouseLeft:
            webcam_image.save(image_file)
            break
    return
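The b64encode and requests imports suggest the saved frame is then posted to the Rekognition endpoint. A hedged sketch of that call; the payload field names are assumptions, not documented API parameters:

def recognize(image_file):
    # Hypothetical request sketch -- the field names are assumptions.
    with open(image_file, 'rb') as f:
        encoded = b64encode(f.read())
    response = requests.post(URL, data={
        'api_key': REKOGNITION_KEY,
        'api_secret': REKOGNITION_SECRET,
        'jobs': 'face',      # hypothetical job name
        'base64': encoded,
    })
    return response.json()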
Example #22
from SimpleCV import Camera, Display
import time

cam = Camera()

disp = Display(cam.getImage().size())

while disp.isNotDone():
    img = cam.getImage()

    # Look for a face
    faces = img.findHaarFeatures('face')

    if faces is not None:
        # Get the largest face
        faces = faces.sortArea()
        biggestFace = faces[-1]

        # Draw a green box around the face
        biggestFace.draw()

        #print bigFace

        noses = img.findHaarFeatures('nose')
        if noses is not None:
            noses = noses.sortArea()
            theNose = noses[-1]

        #	theNose.draw()

        eyes = img.findHaarFeatures('eye')
        # (the original snippet ends here; save so the loop actually displays)

    img.save(disp)
Example #23
from SimpleCV import Color, Camera, Display, RunningSegmentation
import time

cam = Camera()
rs = RunningSegmentation(0.9, (99, 99, 99))

size = (cam.getImage().size())
disp = Display(size)

# Start the crosshairs in the center of the screen
center = (size[0] / 2, size[1] / 2)

while disp.isNotDone():
    input = cam.getImage()
    # Assume using monitor mounted camera, so flip to create mirror image
    input = input.flipHorizontal()
    rs.addImage(input)  # feed the frame to the running segmentation

    if (rs.isReady()):
        # Get the object that moved
        img = rs.getSegmentedImage(False)  # image of what changed
        blobs = img.dilate(10).findBlobs()

        # If an object in motion was found
        if (blobs is not None):
            blobs = blobs.sortArea()
            # Update the crosshairs onto the object in motion
            center = (int(blobs[-1].minRectX()), int(blobs[-1].minRectY()))

        # Inside circle
    input.dl().circle(center, 50, Color.BLACK, width=3)

    input.save(disp)
Example #24
from SimpleCV import Image, Color, Display
import time

car_in_lot = Image("parking-car.png")

car = car_in_lot.crop(470, 200, 200, 200)

yellow_car = car.colorDistance(Color.YELLOW)

only_car = car - yellow_car
only_car = only_car.toRGB()

displayObject = Display()

print only_car.meanColor()

# Show the results
only_car.save(displayObject)

while displayObject.isNotDone():
    time.sleep(0.5)
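In the parking-lot tutorial this example comes from, the mean color is typically compared against a threshold to decide whether the yellow car occupies the spot; a minimal sketch of that check (the channel index and the threshold of 30 are assumptions):

mean = only_car.meanColor()
if mean[1] > 30:  # arbitrary threshold on one channel of the masked crop
    print "The car is in the lot."
else:
    print "The car is not in the lot."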
Example #25
    def new_dewarp(self):
        vidpath = self.iVidPath  #get input video path

        # isInROI is deprecated and not used in this program
        def isInROI(x, y, R1, R2, Cx, Cy):
            isInOuter = False
            isInInner = False
            xv = x - Cx
            yv = y - Cy
            rt = (xv * xv) + (yv * yv)
            if (rt < R2 * R2):
                isInOuter = True
                if (rt < R1 * R1):
                    isInInner = True
            return isInOuter and not isInInner

        """ ws = width of input video
            hs = height of input video
            wd = width of destination/output video
            Hd = height of destinaton/output video
          
        """

        def buildMap(Ws, Hs, Wd, Hd, R1, R2, Cx, Cy):
            #the function throws type error, if Wd and Hd are not converted to integers
            Hd = int(Hd)
            Wd = int(Wd)
            map_x = np.zeros((Hd, Wd), np.float32)
            map_y = np.zeros((Hd, Wd), np.float32)
            rMap = np.linspace(R1, R1 + (R2 - R1), Hd)
            # Each column advances by ~2*pi plus the desired per-column step;
            # because sin/cos are 2*pi-periodic this still sweeps one full turn.
            thetaMap = np.linspace(0, 0 + float(Wd) * 2.0 * np.pi, Wd)
            sinMap = np.sin(thetaMap)
            cosMap = np.cos(thetaMap)

            for y in xrange(0, int(Hd - 1)):
                map_x[y] = Cx + rMap[y] * sinMap
                map_y[y] = Cy + rMap[y] * cosMap

            return map_x, map_y

        # do the unwarping
        def unwarp(img, xmap, ymap):
            output = cv2.remap(img.getNumpyCv2(), xmap, ymap, cv2.INTER_LINEAR)
            result = Image(output, cv2image=True)
            # return result
            return result

        disp = Display(
            (800, 600))  #initialise a 800x600 simplecv display to show preview
        #disp = Display((1296,972))
        vals = []
        last = (0, 0)
        # Load the video
        vc = VirtualCamera(vidpath, "video")
        # Sometimes there is crud at the beginning, buffer it out
        for i in range(0, 10):
            img = vc.getImage()
            img.save(disp)
        # Show the user a frame and let them left-click the center
        # of the "donut", then the right inner and outer edges,
        # in that order. Press Esc to exit the display.
        while not disp.isDone():
            test = disp.leftButtonDownPosition()
            if test != last and test is not None:
                last = test
                print "[360fy]------- center = {0}\n".format(last)

                vals.append(test)
        print "[360fy]------- Dewarping video and generating frames using center, offset1, offset2\n"

        Cx = vals[0][0]
        Cy = vals[0][1]
        #print str(Cx) + " " + str(Cy)
        # Inner donut radius
        R1x = vals[1][0]
        R1y = vals[1][1]
        R1 = R1x - Cx
        #print str(R1)
        # outer donut radius
        R2x = vals[2][0]
        R2y = vals[2][1]
        R2 = R2x - Cx
        #print str(R2)
        # our input and output image sizes
        Wd = round(float(max(R1, R2)) * 2.0 * np.pi)
        #Wd = 2.0*((R2+R1)/2)*np.pi
        #Hd = (2.0*((R2+R1)/2)*np.pi) * (90/360)
        Hd = (R2 - R1)
        Ws = img.width
        Hs = img.height
        # build the pixel map, this could be sped up
        print "BUILDING MAP"

        xmap, ymap = buildMap(Ws, Hs, Wd, Hd, R1, R2, Cx, Cy)
        print "MAP DONE"

        result = unwarp(img, xmap, ymap)

        result.save(disp)

        print "[360fy]------- Storing frames into ../temp_data/frames\n"
        i = 0
        while img is not None:
            print bcolors.OKBLUE + "\rFrame Number: {0}".format(
                i) + bcolors.ENDC,

            sys.stdout.flush(
            )  #flushes stdout so that frame numbers print continually without skipping
            #print " percent complete         \r",
            result = unwarp(img, xmap, ymap)
            result.save(disp)
            # Save to file
            fname = "../temp_data/frames/FY{num:06d}.png".format(num=i)
            result.save(fname)

            img = vc.getImage()
            i = i + 1
        print " \n"

        if img is None:
            self.statusText.setText(str("Status: Done"))
            disp.quit()
Example #26
from SimpleCV import Image, Display, Camera, Color
import glob, os
import pygame as pg
from CardUtil import SUITS, RANKS, MISC
#SUITS = ('c', 'd', 'h', 's')
#RANKS = ('2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K', 'A')
#MISC = ( 'none','bad','joker')

disp = Display((640, 480))
cam = Camera()

path = "./data/"
ext = ".png"
suit_ptr = 0
rank_ptr = 0
current_dir = ""
allDone = False
for s in SUITS:
    for r in RANKS:
        directory = path + s + "/" + r + "/"
        if not os.path.exists(directory):
            os.makedirs(directory)
print "Current Data: " + str(RANKS[rank_ptr]) + str(SUITS[suit_ptr])
while not allDone:
    keys = disp.checkEvents()
    img = cam.getImage()
    for k in keys:
        if (k == pg.K_SPACE):
            directory = path + SUITS[suit_ptr] + "/" + RANKS[rank_ptr] + "/"
            files = glob.glob(directory + "*.*")
            count = len(files)
Example #27
# (fragment) 'cap' and the imports are not defined in this snippet; a plausible
# setup is sketched here -- the video filename is hypothetical.
import cv2
import cv2.cv as cv
from SimpleCV import Image, Display

cap = cv2.VideoCapture("video.avi")  # hypothetical source video
cap.set(cv.CV_CAP_PROP_POS_FRAMES, 10010)  # skip ahead to frame 10010
params = cv2.SimpleBlobDetector_Params()
params.minDistBetweenBlobs = 1.0
params.filterByInertia = False
params.filterByConvexity = False
params.filterByColor = False
params.filterByCircularity = False
params.filterByArea = True
params.minArea = 5.0
params.maxArea = 200.0
params.minThreshold = 15
params.maxThreshold = 255

b = cv2.SimpleBlobDetector(params)

display = Display()
counter = 0
box_dim = 48

while display.isNotDone():

    # Capture frame-by-frame
    ret, frame = cap.read()
    blob = b.detect(frame)

    fcount = 0
    for beest in blob:

        if fcount > 100:
            continue
        fcount = fcount + 1  # the original never incremented this counter
        # The original snippet is truncated mid-call; a plausible completion
        # crops a box_dim x box_dim patch centered on the detected keypoint.
        tmpImg = Image(frame, cv2image=True).crop(int(beest.pt[0]),
                                                  int(beest.pt[1]),
                                                  box_dim, box_dim,
                                                  centered=True)
Example #28
from SimpleCV import Image, Display, Color, Camera
cam = Camera(0)  # Get the first camera
disp = Display((640, 480))  # Create a 640x480 display
while (disp.isNotDone()):  # While we don't exit the display
    img = cam.getImage().binarize()  # Get an image and make it black and white
    # Draw the text "Hello World" at (40,40) in red.
    img.drawText("Hello World!", 40, 40, fontsize=60, color=Color.RED)
    img.save(disp)  # Save it to the screen
Example #29
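# (fragment) The except block below is the tail of a capture loop inside what
# appears to be an MplayerCamera class; 'done' looks like a shared
# multiprocessing.Value flag, hence 'done.value = True'.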
            except KeyboardInterrupt:
                print "User exit"
                done.value = True

    def cleanup_completely(self):
        map(lambda f: os.remove(os.path.join(self._dir, f)), self.get_list())
        os.removedirs(self._dir)

    def getImage(self):
        return Image(self.get_current_image_path())

    def __del__(self):
        self.kill_mplayer()


if __name__ == "__main__":
    cam = MplayerCamera()
    disp = Display()
    done = False
    while not done:
        try:
            cam.getImage().save(disp)
        except KeyboardInterrupt:
            print "User exit"
            done = True
        except Exception, e:
            print e
            done = True
    cam.kill_mplayer()
    time.sleep(0.1)
Example #30
#!/usr/bin/env python
# Original Author: Patrick Benz / @Pa_trick17
# Check out the video here:
# http://www.youtube.com/watch?v=cAL6u6Q0Xuc
from SimpleCV import Image, Display
import time
 
#webcam-URLs
marktplatz = 'http://www.tuebingen.de/camera/webcam...'
marktgasse = 'http://leuchtengalerie.com/webcam/leu...'
neckarbruecke1 = 'http://www.tagblatt.de/cms_media/webc...'
neckarbruecke2 = 'http://tuebingen-info.de/fileadmin/we...'
 
display = Display((1240, 960))
 
counter = 0
 
while display.isNotDone():
    img1 = Image(marktplatz)
    img1 = img1.adaptiveScale((640, 480))
    img2 = Image(marktgasse)
    img2 = img2.adaptiveScale((640, 480))
    img3 = Image(neckarbruecke1)
    img3 = img3.adaptiveScale((640, 480))
    img4 = Image(neckarbruecke2)
    img4 = img4.adaptiveScale((640, 480))
    top = img1.sideBySide(img2)
    bottom = img3.sideBySide(img4)
    combined = top.sideBySide(bottom, side="bottom")
    combined.save(display)
    combined.save("webcam" + str(counter).zfill(4) + ".jpg")
    counter += 1