Example #1
import time

# NOTE: Camera is confirmed by the other examples; the import path for the
# Motion feature class is an assumption for this SimpleCV fork.
from simplecv.api import Camera, Motion


def main():
    scale_amount = (200, 150)
    cam = Camera(0)
    prev = cam.get_image().resize(scale_amount[0], scale_amount[1])
    time.sleep(0.5)
    t = 0.5  # motion threshold passed to movement_check()
    buffer = 20  # number of frames to skip while the camera settles
    count = 0

    while True:
        current = cam.get_image()
        current = current.resize(scale_amount[0], scale_amount[1])
        if count < buffer:
            count += 1
        else:
            # Block-matching optical flow against the previous frame
            fs = current.find(Motion, prev, window=15, method="BM")
            if fs:
                # Average the per-feature motion vectors
                dx = sum(f.dx for f in fs) / len(fs)
                dy = sum(f.dy for f in fs) / len(fs)
                motion_str = movement_check(dx, dy, t)
                current.dl().text(motion_str, (10, 10))

        prev = current
        time.sleep(0.01)
        current.show()
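
# movement_check() is called above but not defined in this snippet; a minimal
# sketch, assuming it maps the averaged flow vector to a direction label once
# it exceeds the threshold t:
def movement_check(dx, dy, t):
    if abs(dx) < t and abs(dy) < t:
        return "no motion"
    if abs(dx) > abs(dy):
        return "moving right" if dx > 0 else "moving left"
    return "moving down" if dy > 0 else "moving up"


if __name__ == '__main__':
    main()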
Example #4
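# Assumed context for this snippet: Window and points_to_roi come from the
# example's GUI helper module (its exact path is not shown in the source);
# beyond those it needs at least:
import cv2
from simplecv.api import Camera, Color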
class ColorSegmentationWindow(Window):
    def __init__(self):
        super(ColorSegmentationWindow,
              self).__init__('Color Segmentation Example')
        self.cam = Camera()
        self.template_img = None
        self.point1 = (0, 0)
        self.point2 = (0, 0)
        self.mouse_down = False

    def on_mouse(self, event, x, y, mouse_key, data=None):
        """ Callback for mouse events

            event - int - see cv2.EVENT_* constants
            x, y - int, int - position of the cursor
            mouse_key - int - mouse key
        """
        if event == cv2.EVENT_LBUTTONDOWN:
            self.point1 = (x, y)
            self.point2 = (x, y)
            self.mouse_down = True
        elif event == cv2.EVENT_MOUSEMOVE:
            if self.mouse_down:
                self.point2 = (x, y)
        elif event == cv2.EVENT_LBUTTONUP:
            self.mouse_down = False
            self.point2 = (x, y)

            x1, y1, w, h = points_to_roi(self.point1, self.point2)
            if w > 0 and h > 0:
                self.template_img = self.cam.get_image().crop(x1, y1, w, h)
                print "Mode:", "Detecting features..."

    def on_key(self, key):
        if key == 32:  # Space bar to select template
            self.template_img = None
            print "Mode:", "Selecting template..."

    def on_update(self):
        """ Callback for periodic update.
        """
        img = self.cam.get_image()

        if self.mouse_down:
            img.dl().rectangle_to_pts(self.point1,
                                      self.point2,
                                      color=Color.RED)
            img = img.apply_layers()
        elif self.template_img is not None:
            # Detect and draw key points
            img = img.draw_keypoint_matches(self.template_img)
            img = img.apply_layers()

        self.show(img)
Example #7
class BallTrackWindow(Window):

    def __init__(self):
        super(BallTrackWindow, self).__init__('Ball Track Example')
        self.cam = Camera()  # initialize the camera
        self.normal = True  # mode toggle for segment detection

    def on_key(self, key):
        if key == 32:  # Space bar to switch between modes
            self.normal = not self.normal
            print "Display Mode:", "Normal" if self.normal else "Segmented"

    def on_update(self):
        """ Callback for periodic update.
        """
        img = self.cam.get_image().flip_horizontal()  # grab image from camera
        dist = img.color_distance(Color.BLACK).dilate(2)  # try to separate colors in image
        segmented = dist.stretch(200, 255)  # really try to push out white colors
        if not self.normal:
            img = segmented
        blobs = segmented.find_blobs()  # search the image for blob objects
        if blobs:  # if blobs are found
            circles = blobs.filter([b.is_circle(0.2) for b in blobs])  # filter out only circle shaped blobs
            if circles:
                # draw the circle on the main image
                img.dl().circle((circles[-1].x, circles[-1].y),
                                circles[-1].radius(), Color.BLUE, width=3)
        self.show(img.apply_layers())
Example #9
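# Assumed context for this snippet: Ui_Dialog is a Qt Designer-generated
# dialog class from the example's own module; the rest of the code uses the
# PyQt4-era API, so it needs roughly:
from PyQt4 import QtCore, QtGui
from simplecv.api import Camera, Color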
class Webcam(QtGui.QMainWindow):
    def __init__(self, parent=None):
        QtGui.QMainWindow.__init__(self, parent)
        self.MainWindow = Ui_Dialog()
        self.MainWindow.setupUi(self)
        self.webcam = Camera(0, {"width": 640, "height": 480})

        # Poll the camera on a 1 ms timer (old-style PyQt4 signal syntax)
        self.timer = QtCore.QTimer()
        self.connect(self.timer, QtCore.SIGNAL('timeout()'), self.show_frame)
        self.timer.start(1)

    def show_frame(self):
        ipl_image = self.webcam.get_image()
        ipl_image.dl().circle((150, 75), 50, Color.RED, filled=True)
        # Flatten the drawing layer into the pixels before exporting bytes
        ipl_image = ipl_image.apply_layers()
        data = ipl_image.tostring()
        image = QtGui.QImage(data, ipl_image.width, ipl_image.height,
                             3 * ipl_image.width, QtGui.QImage.Format_RGB888)
        pixmap = QtGui.QPixmap()
        pixmap.convertFromImage(image.rgbSwapped())
        self.MainWindow.label.setPixmap(pixmap)
Example #10
#!/usr/bin/python
"""
This program basically simulates some kind of 80's music video.
"""
print __doc__


from simplecv.api import Camera, Blob


cam = Camera()

# settings for the project
min_size = 0.1 * cam.get_property("width") * cam.get_property("height")  # minimum size for a change to count
thresh = 10  # frame difference threshold

last_img = cam.get_image()
last_img.dl().text("Move around to get the party started!", (5, 5))
last_img.show()

while True:
    new_img = cam.get_image()
    track_img = new_img - last_img  # difference the images
    blobs = track_img.find(Blob, -1, threshblocksize=99)  # use adaptive blob detection
    if blobs:
        blobs.draw(autocolor=True)
        track_img.show()
    last_img = new_img  # update the image
Example #11
import time
from simplecv.api import Camera
from pyfirmata import Arduino, util


board = Arduino('/dev/ttyUSB0')  # The location of the Arduino
analog_pin_1 = board.get_pin('a:1:i')  # Use pin 1 as input
analog_pin_2 = board.get_pin('a:2:i')  # Use pin 2 as input
button_13 = board.get_pin('d:13:i')  # Use pin 13 for button input

it = util.Iterator(board)  # Initialize the pin monitor for the Arduino
it.start()  # Start the pin monitor loop

multiplier = 400.0  # A value to adjust the edge threshold by
cam = Camera()  # Initialize the camera

while True:
    t1 = analog_pin_1.read()  # Read the value from pin 1
    t2 = analog_pin_2.read()  # Read the value from pin 2
    b13 = button_13.read()  # Read if the button has been pressed.

    if not t1:  # Set a default if no value read
        t1 = 50 
    else:
        t1 *= multiplier 

    if not t2:  # Set a default if no value read
        t2 = 100
    else:
        t2 *= multiplier
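
    # The source example stops here; a minimal sketch of the rest of the
    # loop, assuming the two analog values drive the Canny edge thresholds:
    if b13:  # reset to the defaults while the button is pressed (assumption)
        t1, t2 = 50, 100
    cam.get_image().edges(int(t1), int(t2)).show()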
Example #12
#!/usr/bin/python
"""
This SimpleCV example uses a technique called frame differencing to determine
whether motion has occurred.  You take an initial image, then another, and
subtract the two; what is left over is what has changed between those two
images.  The changes typically show up as blobs, so we run a blob search and,
if any blobs are found, motion has occurred.
"""
import time
from simplecv.api import Camera, Blob

cam = Camera()  # setup the camera

# settings for the project
# make the threshold adaptable for various camera sizes
min_size = 0.1 * cam.get_property("width") * cam.get_property("height")
thresh = 10  # frame diff threshold
show_message_for = 2  # the amount of seconds to show the motion detected message
motion_timestamp = int(time.time())
message_text = "Motion detected"
draw_message = False

last_img = cam.get_image()
last_img.show()

while True:
    new_img = cam.get_image()
    track_img = new_img - last_img  # diff the images
    blobs = track_img.find(Blob)  # search the diff image for blobs
    now = int(time.time())
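    # The source example stops here; a plausible remainder, assuming motion
    # counts once the differenced blobs cover enough area (Blob.area as a
    # property is an assumption of this sketch):
    if blobs and sum(b.area for b in blobs) > min_size:
        motion_timestamp = now
        draw_message = True
    elif now - motion_timestamp > show_message_for:
        draw_message = False
    if draw_message:
        new_img.dl().text(message_text, (5, 5))
    new_img.show()
    last_img = new_img  # update the reference frame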
Example #13
from simplecv.api import Camera
from simplecv.color import Color
from simplecv.segmentation.mog_segmentation import MOGSegmentation

mog = MOGSegmentation(history=200, mixtures=5, bg_ratio=0.3, noise_sigma=16,
                      learning_rate=0.3)

cam = Camera()

while True:
    mog.add_image(cam.get_image())

    segmented_image = mog.get_segmented_image()

    blobs = mog.get_segmented_blobs()
    for blob in blobs:
        segmented_image.dl().circle((blob.x, blob.y), 10, Color.RED)

    segmented_image.show()
Example #14
from simplecv.api import Camera

c = Camera()

while True:
    img = c.get_image()
    split = img.split(2, 1)
    left = split[0][0]
    mirrored = img.blit(left.flip_horizontal(), (left.width + 1, 0))
    mirrored.show()
Example #15
#!/usr/bin/python
"""
This program is basically the hello world in SimpleCV
all it does is grab an image from the camera and display it
"""
print __doc__

from simplecv.api import Camera

cam = Camera()

while True:
    img = cam.get_image()
    img.show()
Example #16
#!/usr/bin/python
"""
This example takes an image, finds the edges, and draws them.  The threshold
is used for the edge detection; it is swept from max_threshold down to zero
in steps of threshold_step, so if you run the program you will see the
output change over time.
"""
print __doc__

from simplecv.api import Camera, Color

cam = Camera()  # initialize the camera
max_threshold = 255  # this is used for the edge detection
threshold_step = 0.5  # the amount to adjust the threshold by on each update
threshold = max_threshold

while True:
    image = cam.get_image() # get image (or frame) from camera
    flipped_image = image.flip_horizontal() # flip it so it looks mirrored
    edged_image = flipped_image.edges(threshold) # get the image edges

    # This just automatically cycles through threshold levels
    if threshold <= 0:
        threshold = max_threshold
    else:
        threshold -= threshold_step

    edged_image.dl().text("Current Edge Threshold: " + str(threshold),
                          (10, 20), color=Color.GREEN)
    edged_image.show()
Example #17
"""
This program super imposes the camera onto the television in the picture
"""
print __doc__

from simplecv.api import Camera, Image

tv_original = Image("family_watching_television_1958.jpg", sample=True)

tv_coordinates = [(353, 379), (433, 380), (432, 448), (354, 446)]
tv_mask = Image(tv_original.size_tuple).invert().warp(tv_coordinates)
tv = tv_original - tv_mask

c = Camera()

while True:
    bwimage = c.get_image().to_gray().resize(tv.width, tv.height).to_bgr()
    on_tv = tv + bwimage.warp(tv_coordinates)
    on_tv.show()
Example #19
#!/usr/bin/python
"""
This program does basic motion blurring.  It averages the last maxframes
frames using some basic image math.
"""
print __doc__

from operator import add
from simplecv.api import Camera


cam = Camera()  # initialize the camera

# the number of frames
maxframes = 3
frames = []
frames.append(cam.get_image())
frames[0].show()

while True:
    frames.append(cam.get_image())  # add the next frame to the end of the set

    if len(frames) > maxframes:
        frames.pop(0)  # remove the earliest frame if we're at max

    pic = reduce(add, [i / float(len(frames)) for i in frames])
    # add the frames in the array, weighted by 1 / number of frames

    pic.show()
"""
This program super imposes the camera onto the television in the picture
"""
print __doc__

from simplecv.api import Camera, Image


tv_original = Image("family_watching_television_1958.jpg", sample=True)

tv_coordinates = [(353, 379), (433, 380), (432, 448), (354, 446)]
tv_mask = Image(tv_original.size_tuple).invert().warp(tv_coordinates)
tv = tv_original - tv_mask

c = Camera()

while True:
    bwimage = c.get_image().to_gray().resize(tv.width, tv.height).to_bgr()
    on_tv = tv + bwimage.warp(tv_coordinates)
    on_tv.show()
Example #22
#!/usr/bin/python

from simplecv.api import Camera
from simplecv.stream import VideoStream
import time

c = Camera()
vs = VideoStream("foo.avi")

for i in range(0, 500):
    c.get_image().edges().invert().save(vs)
    time.sleep(0.05)
Example #24
#!/usr/bin/python
"""
This program uses a Color model to try and do segmentation based on color
"""

print __doc__

import time
from simplecv.color_model import ColorModel
from simplecv.api import Camera

c = Camera()
i = c.get_image()
cm = ColorModel(i)
i.show()
t = int(time.time())
ticks = 0

while True:
    cm.threshold(c.get_image()).show()
    time.sleep(0.01)
    ticks = ticks + 1
    if int(time.time()) > t:
        print str(ticks) + " fps"
        ticks = 0
        t = int(time.time())
Example #27
#!/usr/bin/env python
#
# Released under the BSD license. See LICENSE file for details.
"""
This program basically overlays an edge detector window that gives the
illusion of X-ray vision.  It is merely meant to show how to perform a
basic image operation and overlay the result back onto the original image.
"""
print __doc__

from simplecv.api import Camera

# Initialize the camera
cam = Camera()

# Loop forever
while True:
    # Grab image from camera and flip it
    img = cam.get_image().flip_horizontal()

    # Set the x and the y location to scale
    crop_x = 50
    crop_y = 50
    crop_width = img.width - 100
    crop_height = img.height - 100

    # Crop out the section of image we want
    cropped_img = img.crop(crop_x, crop_y, crop_width, crop_height)
    # Get the edges of cropped region
    xray_img = cropped_img.edges().smooth()
    # Draw the cropped image onto the current image
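    # (The source stops after the comment above; a minimal completion,
    # assuming the edge view is pasted back over the frame and displayed.)
    img = img.blit(xray_img, pos=(crop_x, crop_y))
    img.show()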
Example #29
"""
A Flask web interface to the camera.  (The opening of this docstring is
truncated in the source.)  Optionally run the bundled GTK viewer:

>>> python webkit-gtk.py

*Note: You are not required to run webkit-gtk.py; you can also
visit http://localhost:5000
"""
print __doc__

from flask import Flask, jsonify, render_template, request
from werkzeug import SharedDataMiddleware
import tempfile
import simplejson as json
from simplecv.api import Camera

app = Flask(__name__)
cam = Camera()


@app.route('/')
def show(name=None):
    img = cam.get_image()
    tf = tempfile.NamedTemporaryFile(suffix=".png")
    loc = 'static/' + tf.name.split('/')[-1]
    tf.close()
    img.save(loc)
    return render_template('index.html', img=loc)


@app.route('/_snapshot')
def snapshot():
    """
Example #30
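# The top of this snippet is truncated in the source; the opening below is
# reconstructed from the call to threedee_me() further down and from the
# symmetric right-channel code:
import time
from simplecv.api import Camera


def threedee_me(left, right, offset):
    # Build a blue/red anaglyph from a stereo pair
    (r, g, b) = left.split_channels()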
    left_blue = left.merge_channels(None, b, g)
    # left_blue.save("blue.png", sample=True)
    (r, g, b) = right.split_channels()
    right_red = right.merge_channels(r, None, None)
    # right_red.save("red.png", sample=True)
    sz = (left.width + offset[0], left.height + offset[1])
    output = left_blue.embiggen(size=sz, pos=(0, 0))
    output = output.blit(right_red, alpha=0.5, pos=offset)
    output = output.crop(offset[0], y=offset[1], w=left.width - offset[0],
                         h=left.height - offset[1])
    return output


print "Taking pictures. Please move your camera slightly to its right"
print "after every picture."

c = Camera()
time.sleep(1)
images = []

for i in range(5):
    images.append(c.get_image())
    print "Picture %d taken" % (i + 1)
    time.sleep(1)

offset = (0, 0)

for i in range(4):
    left = images[i]
    right = images[i + 1]
    output = threedee_me(left, right, offset)
    print output.save(temp=True)