Example #1
import time

from simplecv.api import Camera, Motion


def main():
    scale_amount = (200, 150)
    cam = Camera(0)
    prev = cam.get_image().resize(scale_amount[0], scale_amount[1])
    time.sleep(0.5)
    t = 0.5  # motion threshold passed to movement_check
    buffer = 20  # number of frames to skip before detection starts
    count = 0

    while True:
        current = cam.get_image()
        current = current.resize(scale_amount[0], scale_amount[1])
        if count < buffer:
            count = count + 1
        else:
            fs = current.find(Motion, prev, window=15, method="BM")
            length_of_fs = len(fs)
            if fs:
                dx = 0
                dy = 0
                for f in fs:
                    dx = dx + f.dx
                    dy = dy + f.dy

                dx = dx / length_of_fs
                dy = dy / length_of_fs
                motion_str = movement_check(dx, dy, t)
                current.dl().text(motion_str, (10, 10))

        prev = current
        time.sleep(0.01)
        current.show()
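
The example relies on a movement_check helper that the snippet never
defines. Below is a minimal sketch of what it might look like, assuming
it compares the averaged motion vector against the threshold t and
returns a string for the overlay; the name comes from the call site, but
the body is a guess.

def movement_check(dx, dy, t):
    # Translate the mean per-feature displacement into a display string.
    # This helper is an assumption; only its call signature is known.
    direction = []
    if dx > t:
        direction.append("right")
    elif dx < -t:
        direction.append("left")
    if dy > t:
        direction.append("down")
    elif dy < -t:
        direction.append("up")
    if direction:
        return "Motion: " + " and ".join(direction)
    return "No motion"


if __name__ == "__main__":
    main()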
Example #2
def __init__(self):
    super(ColorSegmentationWindow,
          self).__init__('Color Segmentation Example')
    self.cam = Camera()
    self.template_img = None
    self.point1 = (0, 0)
    self.point2 = (0, 0)
    self.mouse_down = False

def __init__(self, parent=None):
    QtGui.QWidget.__init__(self, parent)
    self.MainWindow = Ui_Dialog()
    self.MainWindow.setupUi(self)
    self.webcam = Camera(0, {"width": 640, "height": 480})

    self.timer = QtCore.QTimer()
    self.connect(self.timer, QtCore.SIGNAL('timeout()'), self.show_frame)
    self.timer.start(1)
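
The timer is wired to a show_frame slot that the snippet does not
include. A rough sketch of what it might do, assuming the generated
Ui_Dialog exposes a QLabel (called video_label here, an invented name)
and that the image's raw RGB bytes are available via to_string(); both
are assumptions, not part of the original example.

def show_frame(self):
    # Grab a frame from the webcam and hand it to the UI.
    # video_label and to_string() are assumptions (see above).
    img = self.webcam.get_image()
    qimg = QtGui.QImage(img.to_string(), img.width, img.height,
                        QtGui.QImage.Format_RGB888)
    self.MainWindow.video_label.setPixmap(QtGui.QPixmap.fromImage(qimg))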

import time

from simplecv.api import Camera


def threedee_me(left, right, offset):
    # Reconstructed opening: the signature comes from the call below,
    # and the left image must be split so b and g exist for the merge.
    (r, g, b) = left.split_channels()
    left_blue = left.merge_channels(None, b, g)
    # left_blue.save("blue.png", sample=True)
    (r, g, b) = right.split_channels()
    right_red = right.merge_channels(r, None, None)
    # right_red.save("red.png", sample=True)
    sz = (left.width + offset[0], left.height + offset[1])
    output = left_blue.embiggen(size=sz, pos=(0, 0))
    output = output.blit(right_red, alpha=0.5, pos=offset)
    output = output.crop(offset[0], y=offset[1],
                         w=left.width - offset[0],
                         h=left.height - offset[1])
    return output


print "Taking pictures. Please move your camera slightly to its right"
print "after every picture."

c = Camera()
time.sleep(1)
images = []

for i in range(5):
    images.append(c.get_image())
    print "Picture %d taken" % (i + 1)
    time.sleep(1)

offset = (0, 0)

for i in range(4):
    left = images[i]
    right = images[i + 1]
    output = threedee_me(left, right, offset)
    print output.save(temp=True)
Example #5
from simplecv.api import Camera
from simplecv.color import Color
from simplecv.segmentation.mog_segmentation import MOGSegmentation
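
# The constructor arguments below mirror OpenCV's MOG background
# subtractor: history is the number of frames used to build the
# background model, mixtures the number of Gaussian components per
# pixel, bg_ratio the portion of each mixture treated as background,
# noise_sigma the expected noise level, and learning_rate how quickly
# the model adapts to scene changes.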

mog = MOGSegmentation(history=200, mixtures=5, bg_ratio=0.3, noise_sigma=16,
                      learning_rate=0.3)

cam = Camera()

while True:
    mog.add_image(cam.get_image())  # feed the model the latest frame

    segmented_image = mog.get_segmented_image()

    # mark the center of each detected foreground blob
    blobs = mog.get_segmented_blobs()
    for blob in blobs:
        segmented_image.dl().circle((blob.x, blob.y), 10, Color.RED)

    segmented_image.show()
Example #6
"""
This example reads two analog values and a button from an Arduino via
pyfirmata and uses the analog readings to adjust edge-detection
thresholds for the camera image.
"""
print __doc__

import time
from simplecv.api import Camera
from pyfirmata import Arduino, util

board = Arduino('/dev/ttyUSB0')  # The location of the Arduino
analog_pin_1 = board.get_pin('a:1:i')  # Use pin 1 as input
analog_pin_2 = board.get_pin('a:2:i')  # Use pin 2 as input
button_13 = board.get_pin('d:13:i')  # Use pin 13 for button input

it = util.Iterator(board)  # Initialize the pin monitor for the Arduino
it.start()  # Start the pin monitor loop

multiplier = 400.0  # A value to adjust the edge threshold by
cam = Camera()  # Initialize the camera

while True:
    t1 = analog_pin_1.read()  # Read the value from pin 1
    t2 = analog_pin_2.read()  # Read the value from pin 2
    b13 = button_13.read()  # Read if the button has been pressed.

    if not t1:  # Set a default if no value read
        t1 = 50
    else:
        t1 *= multiplier

    if not t2:  # Set a default if no value read
        t2 = 100
    else:
        t2 *= multiplier
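
    # --- The original snippet ends above. A guessed continuation,
    # assuming the fork's Image.edges() takes the two Canny thresholds
    # positionally (the 50/100 defaults above match SimpleCV's usual
    # t1/t2 defaults); this is a sketch, not the original code. b13 is
    # read above, but its role is not recoverable from the snippet.
    img = cam.get_image()  # grab a frame from the camera
    edged = img.edges(int(t1), int(t2))  # edge detect with the knob values
    edged.show()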
Example #7
#!/usr/bin/python
"""
This example takes an image, finds the edges, and draws them. The
threshold is used for the edge detection; if you adjust the
max_threshold and threshold_step values and run the program, you will
see the result change over time.
"""
print __doc__

from simplecv.api import Camera, Color

cam = Camera()  # initialize the camera
max_threshold = 255  # this is used for the edge detection
threshold_step = 0.5  # how much to adjust the threshold on each update
threshold = max_threshold

while True:
    image = cam.get_image()  # get image (or frame) from camera
    flipped_image = image.flip_horizontal()  # flip it so it looks mirrored
    edged_image = flipped_image.edges(threshold)  # get the image edges

    # This just automatically cycles through threshold levels
    if threshold <= 0:
        threshold = max_threshold
    else:
        threshold -= threshold_step

    edged_image.dl().text("Current Edge Threshold: " + str(threshold),
                          (10, 20), color=Color.GREEN)
    edged_image.show()

def __init__(self):
    super(BallTrackWindow, self).__init__('Ball Track Example')
    self.cam = Camera()  # initialize the camera
    self.normal = True  # mode toggle for segment detection

#!/usr/bin/python
"""
This SimpleCV example uses a technique called frame differencing to
determine whether motion has occurred. You take an initial image, then
another, and subtract the two; what is left over is what changed between
the frames. The changes typically show up as blobs in the image, so we
run a blob search to count them; if any blobs exist, motion has
occurred.
import time
from simplecv.api import Camera, Blob

cam = Camera()  # setup the camera

# settings for the project
# scale the minimum blob size to the camera resolution
min_size = 0.1 * cam.get_property("width") * cam.get_property("height")
thresh = 10  # frame diff threshold
show_message_for = 2  # seconds to show the motion detected message
motion_timestamp = int(time.time())
message_text = "Motion detected"
draw_message = False

last_img = cam.get_image()
last_img.show()

while True:
    new_img = cam.get_image()
    track_img = new_img - last_img  # diff the images
    blobs = track_img.find(Blob)  # use adaptive blob detection
    now = int(time.time())
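    # --- The example is truncated here. One plausible ending, using the
    # variables defined above (a sketch, not the original source); the
    # exact use of min_size and thresh is not recoverable from the text.
    if blobs:  # any difference blobs means something moved
        motion_timestamp = now
        draw_message = True
    elif now - motion_timestamp > show_message_for:
        draw_message = False  # the message has been shown long enough
    if draw_message:
        new_img.dl().text(message_text, (10, 10))
    new_img.show()
    last_img = new_img  # the current frame becomes the next baseline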