Code Example #1
def main():
    cs = CameraServer.getInstance()
    cs.enableLogging()

    camera = cs.startAutomaticCapture()
    camera.setResolution(HORIZONTAL_RES, VERTICAL_RES)

    NetworkTables.initialize(server='10.56.54.2')

    pipeline = GripPipeline()

    cvSink = cs.getVideo()

    outputStream = cs.putVideo('Shooter', HORIZONTAL_RES, VERTICAL_RES)

    img = np.zeros(shape=(VERTICAL_RES, HORIZONTAL_RES, 3), dtype=np.uint8)

    while True:

        time, img = cvSink.grabFrame(img)

        if time == 0:
            outputStream.notifyError(cvSink.getError())

            continue

        pipeline.process(img)

        if len(pipeline.find_contours_output) > 0:
            extra_process(
                max(pipeline.find_contours_output, key=cv2.contourArea))

        outputStream.putFrame(pipeline.find_contours_image)
Code Example #2
def main(system_arguments):
    # initialize the camera and grab a reference to the raw camera capture
    with PiCamera() as camera:

        camera_setup(camera)
        raw_capture = PiRGBArray(camera, size=(WINDOW_WIDTH, WINDOW_HEIGHT))

        gripped = GripPipeline()

        # allow the camera to warm up
        time.sleep(0.1)

        # capture frames from the camera
        for frame in camera.capture_continuous(raw_capture, format="bgr", use_video_port=True):
            # grab the raw NumPy array representing the image, then initialize the timestamp
            # and occupied/unoccupied text
            image = frame.array
            gripped.process(image)

            c = gripped.filter_contours_output
            draw(c, image)

            # show the frame
            cv2.imshow("Frame", image)

            # clear the stream in preparation for the next frame
            raw_capture.truncate(0)

            # if the `q` key was pressed, break from the loop
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break

    cv2.destroyAllWindows()
    return 0
Code Example #3
def main():
    '''
    print('Initializing NetworkTables')
    NetworkTable.setIPAddress("10.52.55.98")
    NetworkTable.setClientMode()
    NetworkTable.initialize()


    print('Creating video capture')
    '''
    cap = cv2.VideoCapture(0)
    print("here")
    bytes = ''
    first = False

    print('Creating pipeline')
    pipeline = GripPipeline()

    print('Running pipeline')

    while cap.isOpened():
        have_frame, frame = cap.read()
        if have_frame:
            pipeline.process(frame)
            extra_processing(pipeline)
            print(x)
            '''
            
    plt.grid(True)
    plt.legend(loc='best')
    plt.show()
'''
    print('Capture closed')
Code Example #4
def main():
    if len(sys.argv) != 2:
        print("Error: specify an IP to connect to!")
        sys.exit(1)

    ip = sys.argv[1]

    print(ip)

    print('Initializing NetworkTables')
    NetworkTables.setClientMode()
    #NetworkTables.setIPAddress('roboRIO-6355-FRC')
    NetworkTables.setTeam(6355)
    # NetworkTables.initialize()
    NetworkTables.initialize(server=ip)

    print('Creating video capture')
    cap = cv2.VideoCapture(0)

    print('Creating pipeline')
    pipeline = GripPipeline()

    print('Running pipeline')
    while cap.isOpened():
        have_frame, frame = cap.read()
        if have_frame:
            print('have frame')
            cv2.imshow('Unprocessed', cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
            # Poll for 1 ms; waitKey() with no argument would block on every frame
            cv2.waitKey(1)
            pipeline.process(frame)
            extra_processing(pipeline)

    print('Capture closed')
Code Example #5
def main():
    NetworkTable.setTeam(5587)  # TODO set your team number
    NetworkTable.setClientMode()
    # TODO switch to RIO IP, or IP of laptop running OutlineViewer for setup
    NetworkTable.setIPAddress('10.55.87.2')
    NetworkTable.initialize()

    # TODO find what v4l2-ctl settings you need. Pass each commandline option
    # through this array. REQUIRES v4l2-utils to be installed.
    call([
        "v4l2-ctl", "--set-ctrl=exposure_auto=1",
        "--set-ctrl=exposure_absolute=9",
        "--set-ctrl=white_balance_temperature_auto=0"
    ],
         shell=False)

    cap = cv2.VideoCapture(0)
    pipeline = GripPipeline()
    while True:
        ret, frame = cap.read()
        if ret:
            # TODO add extra parameters if the pipeline takes more than just a
            # single image
            pipeline.process(frame)
            extra_processing(pipeline)
Code Example #6
File: R2.py Project: ianwinson/RobotArmQuantumIOT
def getXY():
    """ Determines the x and y values of the cubes, in pixels.
        No parameters.
        Returns the X coordinate and the Y coordinate.
    """
    cap = cv2.VideoCapture(0)  # Connects to the camera
    pipeline = GripPipeline()
    # Set the frame variable equal to the image the video puts off
    have_frame, frame = cap.read()
    if have_frame:
        pipeline.process(frame)  # Isolates the contours
        center_x_positions = []
        center_y_positions = []
        widths = []
        heights = []
        # Find the bounding boxes of the contours to get x, y, width, and height
        for contour in pipeline.filter_contours_output:
            # x and y are the coordinates of the top-left corner of the bounding
            # box; adding half the width/height gives the center of the box
            x, y, w, h = cv2.boundingRect(contour)
            center_x_positions.append(x + w / 2)
            center_y_positions.append(y + h / 2)
            widths.append(w)
            heights.append(h)
        if center_x_positions:
            print(center_x_positions[0])
            print(center_y_positions[0])
            # Return the X and Y values of the first contour found
            return center_x_positions[0], center_y_positions[0]
Code Example #7
def main():
    print('Initializing post-processor')
    post.init()

    print('Creating video capture')
    # From robot stream
    stream = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    while True:  # Loop until we successfully connected
        try:
            stream.settimeout(4.5)
            stream.connect(("roborio-3647-frc.local", 1180))
            print('Connected to video source')
            break  # Get out of retry loop, as we were successful
        except (socket.timeout, OSError):
            print('Failed to connect to video source, retrying')
            stream = socket.socket(socket.AF_INET,
                                   socket.SOCK_STREAM)  # Recreate new socket
            time.sleep(1.5)
    stream.send(struct.pack('!i', 30))  # FPS
    stream.send(struct.pack('!i', -1))  # HW_COMPRESSION
    stream.send(struct.pack('!i', 1))  # framesize

    # From webcam
    """
    cap = cv2.VideoCapture(0)
    print('Connected to video source')
    """

    print('Creating pipeline')
    pipeline = GripPipeline()

    print('Running pipeline')
    # From robot stream
    sbytes = bytearray()
    while True:
        # TODO: Insert Try Except, and restart on error
        newb = stream.recv(1024)
        sbytes += newb
        a = sbytes.find(b'\xff\xd8')
        b = sbytes.find(b'\xff\xd9')
        if a != -1 and b != -1:
            jpg = sbytes[a:b + 2]
            sbytes = sbytes[b + 2:]
            if len(jpg) == 0:
                continue
            # np.fromstring is deprecated; np.frombuffer reads the JPEG bytes directly
            frame = cv2.imdecode(np.frombuffer(bytes(jpg), dtype=np.uint8),
                                 cv2.IMREAD_COLOR)
            pipeline.process(frame)
            post.process(frame, pipeline)

    # From webcam
    """
    while cap.isOpened():
        have_frame, frame = cap.read()
        if have_frame:
            pipeline.process(frame)
            extra_processing(frame, pipeline)
    """

    print('Capture closed')
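The byte-scanning loop above splits an MJPEG stream on the JPEG start-of-image (FF D8) and end-of-image (FF D9) markers before handing each frame to cv2.imdecode. Below is a minimal, self-contained sketch of the same technique as a reusable generator; the read_frames name and the no-argument recv callable are illustrative assumptions, not part of the original project:

import cv2
import numpy as np


def read_frames(recv):
    """Yield decoded BGR frames from a callable returning raw MJPEG bytes.

    `recv` is any no-argument callable that returns the next chunk of
    bytes, e.g. `lambda: stream.recv(1024)` for the socket above.
    (Hypothetical helper, shown only to isolate the frame-splitting idea.)
    """
    buf = bytearray()
    while True:
        buf += recv()
        start = buf.find(b'\xff\xd8')  # JPEG start-of-image marker
        end = buf.find(b'\xff\xd9')    # JPEG end-of-image marker
        if start != -1 and end != -1 and end > start:
            jpg = bytes(buf[start:end + 2])  # one complete JPEG frame
            del buf[:end + 2]                # drop the consumed bytes
            frame = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8),
                                 cv2.IMREAD_COLOR)
            if frame is not None:
                yield frame

With it, the processing loop reduces to `for frame in read_frames(lambda: stream.recv(1024)): pipeline.process(frame)`.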
Code Example #8
def main():
    """Run this file to process vision code.
    Please go to README.md to learn how to use this properly
    By Grant Perkins, 2018
    """
    # Initialize pipeline, image, camera server
    pipe = GripPipeline()
    img = np.zeros(shape=(480, 640, 3), dtype=np.uint8)

    cs = CameraServer.getInstance()
    camera = cs.startAutomaticCapture()
    camera.setResolution(640, 480)
    camera.setFPS(20)
    camera.setExposureManual(40)

    # Get a CvSink. This will capture images from the camera
    cvsink = cs.getVideo()
    # Wait on vision processing until connected to Network Tables
    cond = threading.Condition()
    notified = [False]

    def listener(connected, info):
        print(info, '; Connected=%s' % connected)
        with cond:
            notified[0] = True
            cond.notify()

    NetworkTables.initialize(server='roborio-1100-frc.local')
    NetworkTables.addConnectionListener(listener, immediateNotify=True)

    with cond:
        print("Waiting")
        if not notified[0]:
            # Wait until connected. cond.wait() is exited once listener is called by ConnectionListener
            cond.wait()
    print("Connected!")
    table = NetworkTablesInstance.getDefault().getTable("Pi")

    imagetaken = False
    last = -1
    while True:
        time.sleep(.4)
        # Tell the CvSink to grab a frame from the camera and put it
        # in the source image.  If there is an error notify the output.
        t, img = cvsink.grabFrame(img)
        if t == 0:
            continue
        pipe.process(img)
        cx, cy = pipe.get_centeroid()
        area = pipe.get_area()
        print(cx)

        if cx != last and cx != -1:
            print(cx)
            last = cx

        table.getEntry("centerx").setDouble(cx)
        table.getEntry("centery").setDouble(cy)
        table.getEntry("area").setDouble(area)
Code Example #9
def main():
    print('Initializing NetworkTables...')
    NetworkTables.initialize(server=server)

    print('Creating Video Capture...')
    cap = cv2.VideoCapture(videoDevice)

    # Change Exposure (***change video device as needed***):
    os.system(
        'v4l2-ctl -d /dev/video0 -c exposure_auto=1 -c exposure_absolute=0')

    print('Creating Pipeline...')
    pipeline = GripPipeline()

    print('Running Pipeline.')
    while cap.isOpened():
        have_frame, frame = cap.read()

        if have_frame:
            pipeline.process(frame)
            publishData(pipeline)

        # Only draw when a frame was actually captured; otherwise `frame` is None
        if enableGUI and have_frame:
            # Set thickness of lines
            thickness = 2

            # Draw the normal contours (Green):
            cv2.drawContours(frame, pipeline.filter_contours_output, -1,
                             (0, 255, 0), thickness)

            # Draw the proper rectangles:
            for contour in pipeline.filter_contours_output:

                # Minimum Bounding Rectangle:
                minAreaRect_FULL = cv2.minAreaRect(contour)
                boxCorners = cv2.boxPoints(minAreaRect_FULL)

                # Numpy Conversion:
                box = numpy.array(boxCorners).reshape(
                    (-1, 1, 2)).astype(numpy.int32)

                # Draw rotated rectangle (Red):
                cv2.drawContours(frame, [box], -1, (0, 0, 255), thickness)

                # Normal Bounding Rectangle:
                x, y, w, h = cv2.boundingRect(contour)
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0),
                              thickness)

            # Show all data in a window:
            cv2.imshow('FRC Vision', frame)

            # Kill if 'q' is pressed:
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

    print('Capture closed.')
Code Example #10
class PipelineWrapper:
    def __init__(self, image):
        self.pw_image = image
        self.gp = GripPipeline()
        self.gp.process(self.pw_image)

    #Use the GripPipeline class to get contours
    def getContours(self):
        cnts = self.gp.filter_contours_output
        return cnts
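A minimal usage sketch for the wrapper above, assuming the usual cv2 import; the "target.jpg" path is a placeholder:

import cv2

# Load a BGR image and run it through the wrapper (the path is illustrative)
image = cv2.imread("target.jpg")
wrapper = PipelineWrapper(image)
print("contours found:", len(wrapper.getContours()))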
Code Example #11
File: GripRunner.py Project: rjbohinski/StormCV2017
def main():
    NetworkTable.setTeam('0000')  # TODO set your team number
    NetworkTable.initialize()
    cap = cv2.VideoCapture(0)
    pipeline = GripPipeline()
    while True:
        ret, frame = cap.read()
        if ret:
            # TODO add extra parameters if the pipeline takes more than just a single image
            pipeline.process(frame)
            extra_processing(pipeline)
Code Example #12
File: main.py Project: yuhao1118/GripExample
def main():
    # For usb/built-in camera
    camera = cv2.VideoCapture(1)
    # For ip camera (MJPEG Stream)
    # camera = cv2.VideoCapture("/url-to-your-stream/video.mjpg")
    mypipeline = GripPipeline()

    while camera.isOpened():
        have_frame, frame = camera.read()
        # read() returns a boolean, so test it directly;
        # `have_frame is not None` would be true even when the read failed
        if have_frame:
            mypipeline.process(frame)
            extra_process(mypipeline)
            sleep(0.5)

    print("Capture closed")
Code Example #13
def main(cameraPort, CAMERA_INSTANCE):
    print('Waiting on network tables')
    wait_for_port_open(networktableHostname, networktablePort)
    print('Network tables available')
    print('Waiting on camera')
    wait_for_port_open(cameraHostname, cameraPort)
    print('Camera available')

    print('Initializing NetworkTables')
    NetworkTables.setClientMode()
    NetworkTables.setIPAddress(networktableHostname)
    NetworkTables.initialize()
    table = NetworkTables.getTable('/vision/diagnostic_%s' % CAMERA_INSTANCE)

    print('Creating video capture')
    stream = urllib.request.urlopen('http://%s:%d/?action=stream' %
                                    (cameraHostname, cameraPort))

    print('Creating pipeline')
    pipeline = GripPipeline()

    if not stream.isclosed():
        print('opened capture')
    print('Running pipeline')

    i = 0
    previous_frame_time = time.time()

    extraBytes = bytes()

    while True:
        frame, extraBytes = getFrame(stream, extraBytes)
        frame_start = time.time()
        pipeline.process(frame)
        extra_processing(pipeline, CAMERA_INSTANCE)
        frame_end = time.time()
        frame_interval = frame_start - previous_frame_time
        frame_rate = 1 / (frame_interval)
        frame_time = frame_end - frame_start
        previous_frame_time = frame_start
        i += 1
        table.putNumber('count', i)
        table.putNumber('frameRate', frame_rate)
        table.putNumber('frameTime', frame_time)
        table.putNumber('frameInterval', frame_interval)

    print('Capture closed')
Code Example #14
def main():
    #Establishes a connection to NetworkTable
    print("Establishing a connection to the NetworkTable")
    NetworkTables.setClientMode()
    NetworkTables.setIPAddress("roboRIO-540-frc.local")
    NetworkTables.initialize()
    print("Connected to NetworkTable")

    #Captures frames and runs them through the GRIP Pipeline
    print("Starting vision tracking and GRIP Pipeline")
    cp = cv2.VideoCapture(0)
    pipeline = GripPipeline()
    while cp.isOpened():
        have_frame, frame = cp.read()
        if have_frame:
            pipeline.process(frame)
            extra_processing(pipeline)
Code Example #15
File: setAccess.py Project: 2643/2017-Vision-Code
class VisionCalls:
    def __init__(self):
        self.grip = GripPipeline()

        self.ocv_vision = cv.VideoCapture()
        self.ocv_vision_on = False

        self.mjpg_process = None
        self.mjpg_on = False

    def stream(self):
        """
        Turns on streamer and closes opencv vision
        """
        if self.ocv_vision_on:
            self.ocv_vision.release()
            self.ocv_vision_on = False

        if not self.mjpg_on:
            # Popen is already non-blocking, so no shell-style "&" argument is needed
            self.mjpg_process = subprocess.Popen(
                [
                    "mjpg_streamer",
                    "-i",
                    "input_uvc.so -d /dev/video0 -f 30 -r 640x480 -y -n",
                    "-o",
                    "output_http.so -w ./www -p 1180",
                ])
            self.mjpg_on = True

    def vision(self):
        """
        Enables opencv and runs the process
        """
        if self.mjpg_on:
            self.mjpg_process.kill()
            time.sleep(2.5)

            self.mjpg_on = False
            self.ocv_vision_on = False

        if not self.ocv_vision_on:
            self.ocv_vision.open(0)
            self.ocv_vision_on = True

        self.grip.process(self.ocv_vision.read()[1])
Code Example #16
def main():
    print('Initializing NetworkTables')
    NetworkTables.setClientMode()
    NetworkTables.setIPAddress('localhost')
    NetworkTables.initialize()

    print('Creating video capture')
    cap = cv2.VideoCapture(0)

    print('Creating pipeline')
    pipeline = GripPipeline()

    print('Running pipeline')
    while cap.isOpened():
        have_frame, frame = cap.read()
        if have_frame:
            pipeline.process(frame)
            extra_processing(pipeline)

    print('Capture closed')
Code Example #17
def start_process(cv_sink, nt_instance):
    data_entry = nt_instance.getTable("Vision").getEntry("data")
    data_entry.setDefaultString("-1 -1")
    pipeline = GripPipeline()

    # Preallocate the frame buffer once; allocating a new array on every
    # iteration is expensive
    img = np.zeros((480, 640, 3), dtype=np.uint8)
    while True:
        _, frame = cv_sink.grabFrame(img)
        pipeline.process(frame)
        contours = pipeline.find_contours_output
        if len(contours) > 0:
            contour = get_largest_contour(contours)
            moments = cv2.moments(contour)
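            # m10/m00 is the x coordinate of the contour's centroid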
            if moments["m00"] != 0:
                center_x = int(moments["m10"] / moments["m00"])
                x, y, width, height = cv2.boundingRect(contour)
                data_entry.setString("{} {}".format(center_x, width))
                print("Contour:", center_x, width)
            else:
                data_entry.setString("-1 -1")
        else:
            data_entry.setString("-1 -1")
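get_largest_contour is defined elsewhere in that project; the following is a minimal sketch consistent with its use here (and with the max(..., key=cv2.contourArea) idiom in Code Example #1), offered as an assumption rather than the project's actual implementation:

import cv2


def get_largest_contour(contours):
    # Assumed behavior: pick the contour enclosing the greatest area
    return max(contours, key=cv2.contourArea)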
Code Example #18
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--img", required=True)
    args = parser.parse_args()
    image_name = args.img

    # Load the image from disk (the original snippet used `image` without defining it)
    image = cv2.imread(image_name)
    image2 = image.copy()
    gp = GripPipeline()
    gp.process(image2)
    contours = gp.find_contours_output
    print(len(contours))  # Number of contours found

    # Left target
    e = 0.05 * cv2.arcLength(contours[0], True)  # 6
    # Find the 4 corners of the target
    approx = cv2.approxPolyDP(contours[0], e, True)
    bottomLeft = findBottomLeft(approx)
    print(bottomLeft)
    upperLeft = findUpperLeft(approx)
    print(upperLeft)
    # Coordinates of all corners, for checking that bottomLeft is correct
    print(approx)

    # Right target
    e2 = 0.1 * cv2.arcLength(contours[1], True)  # 4
    # Find the 4 corners of the other target
    approx2 = cv2.approxPolyDP(contours[1], e2, True)
    bottomRight = findBottomRight(approx2)
    print(bottomRight)
    upperRight = findUpperRight(approx2)
    print(upperRight)
    print(approx2)

    cv2.drawContours(image2, approx, -1, (0, 255, 0), 2)
    cv2.drawContours(image2, approx2, -1, (0, 255, 0), 2)

    cv2.imshow("contours", image2)

    cv2.waitKey(0)
Code Example #19
File: Main.py Project: ianwinson/RobotArmFinal
def getXY():
    cap = cv2.VideoCapture(0)
    pipeline = GripPipeline()
    have_frame, frame = cap.read()
    if have_frame:
        print(frame.shape)
        pipeline.process(frame)
        center_x_positions = []
        center_y_positions = []
        widths = []
        heights = []
        # Find the bounding boxes of the contours to get x, y, width, and height
        for contour in pipeline.filter_contours_output:
            # x and y are the coordinates of the top-left corner of the bounding
            # box; adding half the width/height gives the center of the box
            x, y, w, h = cv2.boundingRect(contour)
            center_x_positions.append(x + w / 2)
            center_y_positions.append(y + h / 2)
            widths.append(w)
            heights.append(h)
        if center_x_positions:
            print(center_x_positions[0])
            print(center_y_positions[0])
            # Return the X and Y values of the first contour found
            return center_x_positions[0], center_y_positions[0]
Code Example #20
File: rungrip.py Project: n00shE/TX1Vision2018
"""

while streamRunning:
    bytes += stream.read(1024)
    a = bytes.find('\xff\xd8')
    b = bytes.find('\xff\xd9')
    if a != -1 and b != -1:
        jpg = bytes[a:b + 2]
        bytes = bytes[b + 2:]

        color = cv2.CV_LOAD_IMAGE_COLOR if version == 2 else cv2.IMREAD_COLOR  #name better
        frame = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), color)

        cv2.imshow('Image', frame)

        pipeline.process(frame)
        #print pipeline.boundingRects
        #print pipeline.center
        #print pipeline.filter_contours_output
        #print pipeline.rects
        #print pipeline.largestRect
        if (pipeline.largestRect) != None:
            """
		    #xtwo = pipeline.rects[0][0] + pipeline.rects[0][2]
	    	    #ytwo = pipeline.rects[0][1] + pipeline.rects[0][3]
	    	    #cv2.rectangle(frame, (pipeline.rects[0][0],pipeline.rects[0][1]), (xtwo,ytwo), (255,0,0), thickness=3, lineType=8, shift=0)
	    	    #cv2.imshow("Rectangle", frame)
		    """
            xtwo = pipeline.largestRect[0] + pipeline.largestRect[2]
            ytwo = pipeline.largestRect[1] + pipeline.largestRect[3]
Code Example #21
File: capture.py Project: FRC4048/VisionTesting
# Set camera resolution (properties 3 and 4 are frame width and height)
cam.set(cv2.CAP_PROP_FRAME_WIDTH, HRES)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, VRES)
cnt1 = Contour()
cnt2 = Contour()
count = 0
t_end = time.time() + 10
while time.time() <= t_end:
    count = count + 1
    captureStartTime = cv2.getTickCount()
    s, im = cam.read()  # captures image
    captureEndTime = cv2.getTickCount()
    captureTime = (captureEndTime - captureStartTime) / cv2.getTickFrequency()
    cv2.imshow("Test Picture", im)  # displays captured image
    processStartTime = cv2.getTickCount()
    pipeline.process(im)
    processEndTime = cv2.getTickCount()
    processTime = (processEndTime - processStartTime) / cv2.getTickFrequency()
    find_target(cnt1, cnt2)
    distance, distanceRC = vision_math.find_distance(cnt1, cnt2, M_HFOV)
    angle, angleRC = vision_math.find_angle(cnt1, cnt2, M_HFOV)
    if printTime:
        print("Frame processing time:" + str(processTime) +
              "\nFrame Capture Time:" + str(captureTime))

    event = cv2.waitKey(25) & 0xFF
    if event == ord('c'):
        cv2.imwrite("./microsoft" + str(i) + ".jpg", im)
        print("microsoft" + str(i))
        i = i + 1
    elif event == ord('p'):
Code Example #22
    x3 = int((x1 + x2) / 2)

    cv2.line(image, (x1, y1), (x2, y2), (0, 255, 0))
    cv2.line(image, (x3, 0), (x3, 480), (0, 255, 0))

    return x3

#i =0

# capture frames from the camera
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    # grab the raw NumPy array representing the image, then initialize the timestamp
    # and occupied/unoccupied text
    image = frame.array
    gripped.process(image)

    #if i % 100 == 0:
    #    cv2.imwrite("image{}{}".format(i, ".jpeg"), image)
    #i = i +1

    cv2.drawContours(image, gripped.filter_contours_output, -1, (255, 0, 0), 3)

    if len(gripped.filter_contours_output) == 2:
        c = gripped.filter_contours_output
        draw(c[0], c[1])

    # show the frame
    cv2.imshow("Frame", image)
    key = cv2.waitKey(1) & 0xFF
Code Example #23
        cond.wait()
print("Connected!")
# loop forever
table = NetworkTables.getTable('VisionData')
while True:

    # grab the frame from the sink, call it img
    time, img = cvsink.grabFrame(img)

    # If there's an error or no frame, lets skip this loop iteration
    if time == 0:
        # skip the rest of this iteration (no point in processing an image that doesnt exist)
        continue

    # Process image through pipeline
    pipeline.process(img)

    # Get all center coordinates for blobs and put them in a list
    blobs = []
    for keypoint in pipeline.find_blobs_output:
        blobs.append(keypoint.pt)
    blobs.sort()

    # get the difference in X values for the 2 first leftmost blobs if they exist
    try:
        diffx1 = blobs[1][0] - blobs[0][0]
    except IndexError:
        diffx1 = False
        continue
    # get the difference in X values for the 2nd and 3rd blobs if they exist
    try:
Code Example #24
#!/usr/bin/env python
# Capture frame-by-frame

from grip import GripPipeline
import cv2
from pprint import PrettyPrinter

cap = cv2.VideoCapture(0)
gp = GripPipeline()
pp = PrettyPrinter()
while True:
    ret, frame = cap.read()
    if not ret:
        continue  # skip iterations where no frame was captured
    gp.process(frame)
    # pp.pprint(gp.find_contours_output)
Code Example #25
for p in sys.argv:
    pS = str(p)
    if pS == "-e":  # sets exposure manually (exposure_absolute=0)
        os.system("v4l2-ctl -d /dev/video0 -c exposure_auto=1 -c exposure_absolute=0")
        print("Setting exposure to 0 . . . ")

    if pS == "-b":
        numOfPics = 20
        delay = "5"
        print("Now bursting photos!")
    if pS == "-g":
        willBeProcessed = True

currTime = str(int(time.time()))
for i in range(numOfPics):
    imgName = "img" + currTime + "_" + str(i) + ".jpg"
    os.system("fswebcam --no-banner -S " + delay + " --save ~/img/" + imgName)
    print("recharging my camera!")
    print("+================================")
    if willBeProcessed:
        img = cv2.imread("/home/pi/img/" + imgName, cv2.IMREAD_COLOR)
        if type(img) is numpy.ndarray:
            print("Processing . . . ")
            gp.process(img)
        else:
            print("ERROR: cv2 img is empty! || " + imgName)
    time.sleep(1)

print("Done!!! :3")
Code Example #26
File: main.py Project: Team1437/FRC_2018
for key in table.getKeys():
    table.delete(key)

grip = GripPipeline()

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    if not ret:
        raise Exception("could not load image!")
    # Our operations on the frame come here
    #height, width, channels = frame.shape
    #print(width)
    img = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    grip.process(img)
    contour, idx = grip.publish()
    table.putNumberArray('Contour', contour)

    cv2.drawContours(frame, grip.convex_hulls_output, -1, (0, 255, 255), 3)
    cv2.drawContours(frame, grip.convex_hulls_output, idx, (255, 255, 0), 3)

    # Display the resulting frame
    cv2.imshow('frame', frame)
    #cv2.imshow('frame', grip.hsv_threshold_output)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
Code Example #27
File: fg.py Project: CRRobotics/2019-VisionCode
import cv2
import numpy as np
from grip import GripPipeline
import argparse

cap = cv2.VideoCapture(1)
pipeline = GripPipeline()
while True:
    # Capture frame-by-frame
    ret, frame_in = cap.read()
    pipeline.process(frame_in)
    # print(frame_in)
    frame = np.zeros(frame_in.shape[:-1], frame_in.dtype)
    # print(frame)
    # cv2.drawContours(frame, pipeline.find_contours_output, -1, 255, 2)
    cv2.fillPoly(frame, pts=pipeline.find_contours_output, color=255)

    # frame = frame_in
    # Our operations on the frame come here
    circles = cv2.HoughCircles(frame, cv2.HOUGH_GRADIENT, 1.6, 100)

    # ensure at least some circles were found
    if circles is not None:
        # convert the (x, y) coordinates and radius of the circles to integers
        circles = np.round(circles[0, :]).astype("int")

        # loop over the (x, y) coordinates and radius of the circles
        for (x, y, r) in circles:
            # draw the circle in the output image, then draw a rectangle
            # corresponding to the center of the circle
            cv2.circle(frame, (x, y), r, (200, 15, 15), 4)
Code Example #28
def maps(old_value, old_min, old_max, new_min, new_max):
    # Linearly rescale old_value from [old_min, old_max] to [new_min, new_max]
    # (signature inferred from the call sites in the vision loop below)
    return ((old_value - old_min) / (old_max - old_min)) * (new_max - new_min) + new_min


def myround(x, base=5):
    return base * round(x/base)
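

# A quick sanity check of the two helpers, using the signature reconstructed above:
# maps() rescales linearly: 160 is halfway through [0, 320], so it maps to 50
assert maps(160, 0, 320, 0, 100) == 50.0
# myround() snaps to the nearest multiple of its base (default 5)
assert myround(52) == 50
assert myround(53) == 55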


# VISION LOOP
while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    # print(frame.shape) 480 x 640
    # Our operations on the frame come here

    # Grip Pipeline
    points = grip.process(frame)
    motorInput = 0
    if points:
        if points[0][0] > 320:
            temp = (maps(points[0][0] - 320, 0, 320, 0, 100))
            if temp > 10:
                motorInput = myround(temp) * 2
        else:
            temp = (maps(points[0][0], 0, 320, -100, 0))
            if temp < -10:
                motorInput = myround(temp) * 2
        print(motorInput)

        ser.write(str(70).encode() + b"\n")
        ser.write(str(motorInput).encode() + b"\n")
    else:
Code Example #29
class Main():

    def __init__(self):
        self.grip = GripPipeline()

        self.cap = cv2.VideoCapture(0)

        self.blank_image = np.zeros((480, 640, 3), np.uint8)

        self.contoursImg = None
        self.newVar = None

        logging.basicConfig(level=logging.DEBUG)

        NetworkTables.initialize(server='10.62.39.2')

        self.rp = NetworkTables.getTable("RaspberryPi")

    def drawRectangle(self, contour_output, contour_hierarchy, source0):

        height, width, _ = source0.shape
        min_x, min_y = width, height
        max_x = max_y = 0
        center = None  # stays None if no contour is found
        # computes the bounding box for the contour, and draws it on the frame,
        for contour, hier in zip(contour_output, contour_hierarchy):
            (x,y,w,h) = cv2.boundingRect(contour)
            min_x, max_x = min(x, min_x), max(x+w, max_x)
            min_y, max_y = min(y, min_y), max(y+h, max_y)
            center = x+(w/2)
            if w > 80 and h > 80:
                cv2.rectangle(source0, (x,y), (x+w,y+h), (255, 0, 0), 2)
                cv2.circle(source0, (int(x+(w/2)), int(y+(h/2))), 10, (0, 255, 0))

            if max_x - min_x > 0 and max_y - min_y > 0:
                cv2.rectangle(source0, (min_x, min_y), (max_x, max_y), (255, 0, 0), 2)
                cv2.circle(source0, (int(x+(w/2)), int(y+(h/2))), 10, (0, 255, 0))

        return center

    def drawRectangleBetter(self, cnt, img):
        # Compute the minimum-area rotated rectangle of the contour, not the image
        rect = cv2.minAreaRect(cnt)
        box = cv2.boxPoints(rect)  # cv2.boxPoints(rect) for OpenCV 3.x
        box = np.int0(box)
        cv2.drawContours(img, [box], 0, (0, 0, 255), 2)

    def run(self):

        while True:
            ret, frame = self.cap.read()
            contoursImg = frame.copy()
            self.grip.process(frame)

            cv2.drawContours(self.blank_image, self.grip.find_contours_output, cv2.FILLED, [0, 0, 255], 2)

            circleXCenter = self.drawRectangle(self.grip.filter_contours_output, self.grip.contour_hierarchy, contoursImg)

            #cv2.imshow("source", frame)
            #cv2.imshow("rectangle", contoursImg)
            #cv2.imshow("frame", blank_image)
            circleXCenter = (circleXCenter - 320) / 10
            self.rp.putNumber("distance", circleXCenter)
Code Example #30
    #Get the first camera
    cameraServer = streams[0]
    # Get a CvSink. This will capture images from the camera
    cvSink = cameraServer.getVideo()

    # (optional) Setup a CvSource. This will send images back to the Dashboard
    outputStream = cameraServer.putVideo("stream", image_width, image_height)
    # Allocating new images is very expensive, always try to preallocate
    img = np.zeros(shape=(image_height, image_width, 3), dtype=np.uint8)
    grip = GripPipeline()

    # loop forever
    while True:
        # Tell the CvSink to grab a frame from the camera and put it
        # in the source image.  If there is an error notify the output.
        timestamp, img = cvSink.grabFrame(img)
        frame = img
        #frame = flipImage(img)
        if timestamp == 0:
            # Send the output the error.
            outputStream.notifyError(cvSink.getError())
            # skip the rest of the current iteration
            continue

        grip.process(frame)
        processed = findTargets(grip.filter_contours_output, frame)
        # threshold = threshold_video(frame)
        # processed = findContours(frame, threshold)
        # (optional) send some image back to the dashboard
        outputStream.putFrame(processed)