Example #1
def main():
    print('Initializing post-processor')
    post.init()

    print('Creating video capture')
    # From robot stream
    stream = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    while True:  # Loop until we successfully connect
        try:
            stream.settimeout(4.5)
            stream.connect(("roborio-3647-frc.local", 1180))
            print('Connected to video source')
            break  # Get out of retry loop, as we were successful
        except (socket.timeout, OSError):
            print('Failed to connect to video source, retrying')
            stream = socket.socket(socket.AF_INET,
                                   socket.SOCK_STREAM)  # Recreate new socket
            time.sleep(1.5)
    stream.send(struct.pack('!i', 30))  # FPS
    stream.send(struct.pack('!i', -1))  # HW_COMPRESSION
    stream.send(struct.pack('!i', 1))  # framesize

    # From webcam
    """
    cap = cv2.VideoCapture(0)
    print('Connected to video source')
    """

    print('Creating pipeline')
    pipeline = GripPipeline()

    print('Running pipeline')
    # From robot stream
    sbytes = bytearray()
    while True:
        # TODO: Insert Try Except, and restart on error
        newb = stream.recv(1024)
        sbytes += newb
        a = sbytes.find(b'\xff\xd8')
        b = sbytes.find(b'\xff\xd9')
        if a != -1 and b != -1:
            jpg = sbytes[a:b + 2]
            sbytes = sbytes[b + 2:]
            if len(jpg) == 0:
                continue
            frame = cv2.imdecode(np.frombuffer(bytes(jpg), dtype=np.uint8),
                                 cv2.IMREAD_COLOR)
            pipeline.process(frame)
            post.process(frame, pipeline)

    # From webcam
    """
    while cap.isOpened():
        have_frame, frame = cap.read()
        if have_frame:
            pipeline.process(frame)
            extra_processing(frame, pipeline)
    """

    print('Capture closed')
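
Example #1 relies on a separate post module (post.init() and post.process(frame, pipeline)) that is not included above. A minimal sketch of what such a post-processor might look like, assuming it publishes the largest contour's center and area to NetworkTables; the server address, table name, and the pipeline's filter_contours_output attribute are assumptions, not the original code:

# post.py -- hypothetical post-processor assumed by Example #1 (a sketch, not the original)
import cv2
from networktables import NetworkTables

_table = None


def init():
    # Server address and table name are placeholders
    NetworkTables.initialize(server='roborio-3647-frc.local')
    global _table
    _table = NetworkTables.getTable('Vision')


def process(frame, pipeline):
    # frame is accepted to match the call in main() but is not needed here
    contours = pipeline.filter_contours_output
    if not contours:
        _table.putBoolean('targetFound', False)
        return
    largest = max(contours, key=cv2.contourArea)
    x, y, w, h = cv2.boundingRect(largest)
    _table.putBoolean('targetFound', True)
    _table.putNumber('centerX', x + w / 2)
    _table.putNumber('centerY', y + h / 2)
    _table.putNumber('area', cv2.contourArea(largest))
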
Example #2
def getXY():
    """ Code that determines the x any y values of the cubes in pixels
        No Parameters
        Returns the X coordinate and the Y coordinate
    """
    cap = cv2.VideoCapture(0)  # Connects to the Camera
    pipeline = GripPipeline()
    have_frame, frame = cap.read(
    )  # Sets the frame variable equal to the image the video puts off.
    if have_frame:
        pipeline.process(frame)  # Isolates the Contours
        center_x_positions = []
        center_y_positions = []
        widths = []
        heights = []
        # Find the bounding boxes of the contours to get x, y, width, and height
        for contour in pipeline.filter_contours_output:
            x, y, w, h = cv2.boundingRect(contour)
            center_x_positions.append(
                x + w / 2
            )  # X and Y are coordinates of the top-left corner of the bounding box
            center_y_positions.append(
                y + h / 2
            )  # Gets the center of the box by adding half of the height
            widths.append(w)
            heights.append(h)
            print(center_x_positions[0])
            print(center_y_positions[0])
            return center_x_positions[0], center_y_positions[0]  # Returns the X and Y values
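
A short usage sketch for getXY(): converting the returned pixel coordinates into a horizontal angle from the camera axis. The 640-pixel frame width and the 60-degree horizontal field of view are assumptions that depend on the actual camera.

import math

FRAME_WIDTH = 640          # assumed capture width in pixels
HORIZONTAL_FOV_DEG = 60.0  # assumed horizontal field of view


def pixel_to_angle(center_x):
    # Focal length in pixels, derived from the assumed field of view
    focal_px = (FRAME_WIDTH / 2) / math.tan(math.radians(HORIZONTAL_FOV_DEG / 2))
    return math.degrees(math.atan((center_x - FRAME_WIDTH / 2) / focal_px))


result = getXY()
if result is not None:
    cx, cy = result
    print('target is %.1f degrees off center' % pixel_to_angle(cx))
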
Example #3
def main():
    '''
    print('Initializing NetworkTables')
    NetworkTable.setIPAddress("10.52.55.98")
    NetworkTable.setClientMode()
    NetworkTable.initialize()


    print('Creating video capture')
    '''
    cap = cv2.VideoCapture(0)
    print("here")
    bytes = ''
    first = False

    print('Creating pipeline')
    pipeline = GripPipeline()

    print('Running pipeline')

    while cap.isOpened():
        have_frame, frame = cap.read()
        if have_frame:
            pipeline.process(frame)
            extra_processing(pipeline)
            # print(x)  # 'x' is not defined in this snippet
            '''
            plt.grid(True)
            plt.legend(loc='best')
            plt.show()
            '''
    print('Capture closed')
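
Example #3 calls an extra_processing(pipeline) helper that is not shown (several later examples do the same). A sketch in the spirit of the GRIP-generated template, assuming the pipeline exposes a filter_contours_output list; the table path and entry names are assumptions:

import cv2
from networktables import NetworkTables


def extra_processing(pipeline):
    # Collect the bounding-box centers and sizes of every contour the pipeline kept
    center_x, center_y, widths, heights = [], [], [], []
    for contour in pipeline.filter_contours_output:
        x, y, w, h = cv2.boundingRect(contour)
        center_x.append(x + w / 2)
        center_y.append(y + h / 2)
        widths.append(w)
        heights.append(h)
    # Publish everything as parallel arrays (table path is an assumption)
    table = NetworkTables.getTable('/vision/contours')
    table.putNumberArray('centerX', center_x)
    table.putNumberArray('centerY', center_y)
    table.putNumberArray('width', widths)
    table.putNumberArray('height', heights)
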
Example #4
def main():
    cs = CameraServer.getInstance()
    cs.enableLogging()

    camera = cs.startAutomaticCapture()
    camera.setResolution(HORIZONTAL_RES, VERTICAL_RES)

    NetworkTables.initialize(server='10.56.54.2')

    pipeline = GripPipeline()

    cvSink = cs.getVideo()

    outputStream = cs.putVideo('Shooter', HORIZONTAL_RES, VERTICAL_RES)

    img = np.zeros(shape=(VERTICAL_RES, HORIZONTAL_RES, 3), dtype=np.uint8)

    while True:

        frame_time, img = cvSink.grabFrame(img)

        if frame_time == 0:
            outputStream.notifyError(cvSink.getError())

            continue

        pipeline.process(img)

        if len(pipeline.find_contours_output) > 0:
            extra_process(
                max(pipeline.find_contours_output, key=cv2.contourArea))

        outputStream.putFrame(pipeline.find_contours_image)
def main():
    NetworkTable.setTeam(5587)  # TODO set your team number
    NetworkTable.setClientMode()
    # TODO switch to RIO IP, or IP of laptop running OutlineViewer for setup
    NetworkTable.setIPAddress('10.55.87.2')
    NetworkTable.initialize()

    # TODO find what v4l2-ctl settings you need. Pass each commandline option
    # through this array. REQUIRES v4l2-utils to be installed.
    call([
        "v4l2-ctl", "--set-ctrl=exposure_auto=1",
        "--set-ctrl=exposure_absolute=9",
        "--set-ctrl=white_balance_temperature_auto=0"
    ],
         shell=False)

    cap = cv2.VideoCapture(0)
    pipeline = GripPipeline()
    while True:
        ret, frame = cap.read()
        if ret:
            # TODO add extra parameters if the pipeline takes more than just a
            # single image
            pipeline.process(frame)
            extra_processing(pipeline)
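
The v4l2-ctl call above fixes exposure and white balance before capturing so the retro-reflective target stays consistent between frames. A small wrapper for that step; it assumes v4l-utils is installed and the camera is /dev/video0, and the control names vary between cameras:

import subprocess


def apply_camera_controls(device='/dev/video0'):
    # Disable automatic exposure and white balance so thresholds stay stable
    controls = [
        'exposure_auto=1',                   # manual exposure mode
        'exposure_absolute=9',               # short exposure for a bright target
        'white_balance_temperature_auto=0',  # fixed white balance
    ]
    cmd = ['v4l2-ctl', '-d', device] + ['--set-ctrl=' + c for c in controls]
    try:
        subprocess.check_call(cmd)
    except (OSError, subprocess.CalledProcessError) as err:
        print('Failed to configure camera controls:', err)
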
Example #6
  def __init__(self, webcam_device, webcam_name, local_ip="127.0.0.1", local_port=8087, calc="peg"):
    self.__calc = calc
    
    # start web cam and set dimensions
    self.__camera = cv2.VideoCapture(webcam_device)
    self.__camera.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    self.__camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    # setup cv source to write frames to for mjpeg server
    self.__cvsource = cs.CvSource(webcam_name, cs.VideoMode.PixelFormat.kMJPEG, 320, 240, 30)

    # setup mjpeg server
    self.__mjpeg_server = cs.MjpegServer("httpserver", local_port)
    self.__mjpeg_server.setSource(self.__cvsource)

    # setup grip pipeline from grip.py
    self.__grip_pipeline = GripPipeline()

    # write camera info
    table = NetworkTable.getTable("CameraPublisher")
    cam = table.getSubTable(webcam_name)

    cam.putString("source", "usb:IMAQdx/")
    cam.putStringArray("streams", ["mjpg:http://" + local_ip + ":" + str(local_port) + "/stream.mjpg?name="+webcam_name])

    if self.__debug:
      self.__webcam_name = webcam_name
      cv2.namedWindow(self.__webcam_name)
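
Example #6 only shows the constructor; frames still have to be read from the camera, run through the pipeline, and pushed to the CvSource for the MjpegServer to serve them. A hypothetical run loop under those assumptions (it is a sketch, not part of the original class; the resize matches the 320x240 CvSource video mode declared above):

  def run(self):
    # Sketch of a main loop: grab, process, and republish frames
    while True:
      ok, frame = self.__camera.read()
      if not ok:
        continue
      self.__grip_pipeline.process(frame)
      # Downscale to the CvSource resolution before streaming it out
      self.__cvsource.putFrame(cv2.resize(frame, (320, 240)))
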
Example #7
def main(system_arguments):
    # initialize the camera and grab a reference to the raw camera capture
    with PiCamera() as camera:

        camera_setup(camera)
        raw_capture = PiRGBArray(camera, size=(WINDOW_WIDTH, WINDOW_HEIGHT))

        gripped = GripPipeline()

        # allow the camera to warm up
        time.sleep(0.1)

        # capture frames from the camera
        for frame in camera.capture_continuous(raw_capture, format="bgr", use_video_port=True):
            # grab the raw NumPy array representing the image, then initialize the timestamp
            # and occupied/unoccupied text
            image = frame.array
            gripped.process(image)

            c = gripped.filter_contours_output
            draw(c, image)

            # show the frame
            cv2.imshow("Frame", image)

            # clear the stream in preparation for the next frame
            raw_capture.truncate(0)

            # if the `q` key was pressed, break from the loop
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break

    cv2.destroyAllWindows()
    return 0
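
Example #7 uses camera_setup(camera) and draw(c, image) helpers that are not included. A plausible sketch of the drawing helper, assuming it just outlines each contour's bounding box on the preview image:

import cv2


def draw(contours, image):
    # Outline every contour in green for the "Frame" preview window
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
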
def main():
    if len(sys.argv) != 2:
        print("Error: specify an IP to connect to!")
        exit(0)

    ip = sys.argv[1]

    print(ip)

    print('Initializing NetworkTables')
    NetworkTables.setClientMode()
    #NetworkTables.setIPAddress('roboRIO-6355-FRC')
    NetworkTables.setTeam(6355)
    # NetworkTables.initialize()
    NetworkTables.initialize(server=ip)

    print('Creating video capture')
    cap = cv2.VideoCapture(0)

    print('Creating pipeline')
    pipeline = GripPipeline()

    print('Running pipeline')
    while cap.isOpened():
        have_frame, frame = cap.read()
        if have_frame:
            print('have frame')
            cv2.imshow('Unprocessed', cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
            cv2.waitKey()
            pipeline.process(frame)
            extra_processing(pipeline)

    print('Capture closed')
Example #9
def main():
    """Run this file to process vision code.
    Please go to README.md to learn how to use this properly
    By Grant Perkins, 2018
    """
    # Initialize pipeline, image, camera server
    pipe = GripPipeline()
    img = np.zeros(shape=(480, 640, 3), dtype=np.uint8)

    cs = CameraServer.getInstance()
    camera = cs.startAutomaticCapture()
    camera.setResolution(640, 480)
    camera.setFPS(20)
    camera.setExposureManual(40)

    # Get a CvSink. This will capture images from the camera
    cvsink = cs.getVideo()
    # Wait on vision processing until connected to Network Tables
    cond = threading.Condition()
    notified = [False]

    def listener(connected, info):
        print(info, '; Connected=%s' % connected)
        with cond:
            notified[0] = True
            cond.notify()

    NetworkTables.initialize(server='roborio-1100-frc.local')
    NetworkTables.addConnectionListener(listener, immediateNotify=True)

    with cond:
        print("Waiting")
        if not notified[0]:
            # Wait until connected. cond.wait() is exited once listener is called by ConnectionListener
            cond.wait()
    print("Connected!")
    table = NetworkTablesInstance.getDefault().getTable("Pi")

    imagetaken = False
    last = -1
    while True:
        time.sleep(.4)
        # Tell the CvSink to grab a frame from the camera and put it
        # in the source image.  If there is an error notify the output.
        t, img = cvsink.grabFrame(img)
        if t == 0:
            continue
        pipe.process(img)
        cx, cy = pipe.get_centeroid()
        area = pipe.get_area()
        print(cx)

        if cx != last and cx != -1:
            print(cx)
            last = cx

        table.getEntry("centerx").setDouble(cx)
        table.getEntry("centery").setDouble(cy)
        table.getEntry("area").setDouble(area)
Example #10
    def __init__(self):
        self.grip = GripPipeline()

        self.ocv_vision = cv.VideoCapture()
        self.ocv_vision_on = False

        self.mjpg_process = None
        self.mjpg_on = False
def main():
    print('Initializing NetworkTables...')
    NetworkTables.initialize(server=server)

    print('Creating Video Capture...')
    cap = cv2.VideoCapture(videoDevice)

    # Change Exposure (***change video device as needed***):
    os.system(
        'v4l2-ctl -d /dev/video0 -c exposure_auto=1 -c exposure_absolute=0')

    print('Creating Pipeline...')
    pipeline = GripPipeline()

    print('Running Pipeline.')
    while cap.isOpened():
        have_frame, frame = cap.read()

        if have_frame:
            pipeline.process(frame)
            publishData(pipeline)

        if enableGUI:
            # Set thickness of lines
            thickness = 2

            # Draw the normal contours (Green):
            cv2.drawContours(frame, pipeline.filter_contours_output, -1,
                             (0, 255, 0), thickness)

            # Draw the proper rectangles:
            for contour in pipeline.filter_contours_output:

                # Minimum Bounding Rectangle:
                minAreaRect_FULL = cv2.minAreaRect(contour)
                boxCorners = cv2.boxPoints(minAreaRect_FULL)

                # Numpy Conversion:
                box = numpy.array(boxCorners).reshape(
                    (-1, 1, 2)).astype(numpy.int32)

                # Draw rotated rectangle (Red):
                cv2.drawContours(frame, [box], -1, (0, 0, 255), thickness)

                # Normal Bounding Rectangle:
                x, y, w, h = cv2.boundingRect(contour)
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0),
                              thickness)

            # Show all data in a window:
            cv2.imshow('FRC Vision', frame)

            # Kill if 'q' is pressed:
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

    print('Capture closed.')
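
The loop above refers to several module-level names (server, videoDevice, enableGUI) and a publishData(pipeline) helper that are defined elsewhere in that project. A hedged sketch of the configuration and helper it appears to assume; the addresses, table name, and entry names are placeholders:

from networktables import NetworkTables

server = '10.0.0.2'   # roboRIO address (placeholder; use your team's address)
videoDevice = 0       # index of the camera, e.g. /dev/video0
enableGUI = False     # only enable on a machine with a display attached


def publishData(pipeline):
    # Sketch: report how many contours survived filtering
    table = NetworkTables.getTable('Vision')
    table.putNumber('contourCount', len(pipeline.filter_contours_output))
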
Example #12
def main():

    global idNum
    plDone = False
    didStreamSucceed = False

    while didStreamSucceed is False:

        if plDone is False:
            try:       
                print "Creating pipeline"
                pipeline = GripPipeline.GripPipeline()
                print "Pipeline created"
                plDone = True

            except Exception as err:
                print "Error creating pipeline: " + str(err)

        if plDone is True:
            try:
                print "Opening Stream"
                stream = urllib.urlopen('http://10.42.37.2:1181/?action=stream')
                print "Stream Opened"
                didStreamSucceed = True

            except Exception as err:
                print "Error creating stream: " + str(err)

    print "Initialization Complete"
    bytes = ''

    isRunning = True

    while isRunning is True:

        try:
            bytes += stream.read(16384)
            b = bytes.rfind('\xff\xd9')
            a = bytes.rfind('\xff\xd8', 0, b-1)

            if a != -1 and b != -1:
                jpg = bytes[a:b+2]
                bytes = bytes[b+2:]
                img = cv2.imdecode(np.fromstring(jpg, dtype = np.uint8), cv2.IMREAD_COLOR)
                #print "Writing /mnt/usb/pre" + str(idNum) + ".jpg"
                #cv2.imwrite("/mnt/usb/pre" + str(idNum) + ".jpg", img)
                #print "Running"
                pipeline.process(img, idNum)
                extra_processing(pipeline)

        except KeyboardInterrupt:
            print "Ending"
            isRunning = False

        except Exception as err:
            print "Error in main loop, continuing: " + str(err)
Example #13
def main():
    NetworkTable.setTeam('0000')  # TODO set your team number
    NetworkTable.initialize()
    cap = cv2.VideoCapture(0)
    pipeline = GripPipeline()
    while True:
        ret, frame = cap.read()
        if ret:
            pipeline.process(
                frame
            )  # TODO add extra parameters if the pipeline takes more than just a single image
            extra_processing(pipeline)
Example #14
def main():
    global idNum
    while True:
        plDone = False
        didStreamSucceed = False
        while didStreamSucceed is False:
            if plDone is False:
                try:
                    print "Creating pipeline"
                    pipeline = GripPipeline.GripPipeline()
                    print "Pipeline created"
                    plDone = True

                except Exception as err:
                    print "Error creating pipeline: " + str(err)

            if plDone is True:
                try:
                    print "Opening Stream"
                    camTable = NetworkTables.getTable(
                        'CameraPublisher/USB Camera 0')
                    cameras = camTable.getStringArray('streams')
                    print cameras
                    return
                    #                    for item in cameras:
                    #                        print item
                    stream = cv2.VideoCapture(
                        'http://10.42.37.2:1181/stream.mjpg')
                    print "Stream Opened"
                    didStreamSucceed = True

                except Exception as err:
                    print "Error creating stream: " + str(err)

        print "Initialization Complete"

        while stream.isOpened():
            ret, img = stream.read()
            if ret is True:
                pipeline.process(img)
                if len(pipeline.filter_contours_output) >= 1 and idNum % 10 == 0:
                    print "Writing image to usb"
                    cv2.imwrite("/mnt/usb/" + str(idNum) + "raw.jpg", img)
                    cv2.imwrite("/mnt/usb/" + str(idNum) + "filtered.jpg",
                                pipeline.hsl_threshold_output)
                extra_processing(pipeline)
            else:
                print "Error reading from camera"
                break
Example #15
def main():
    # For usb/built-in camera
    camera = cv2.VideoCapture(1)
    # For ip camera (MJPEG Stream)
    # camera = cv2.VideoCapture("/url-to-your-stream/video.mjpg")
    mypipeline = GripPipeline()

    while camera.isOpened():
        have_frame, frame = camera.read()
        if have_frame:
            mypipeline.process(frame)
            extra_process(mypipeline)
            sleep(0.5)

    print("Capture closed")
    def __init__(self):
        self.grip = GripPipeline()

        self.cap = cv2.VideoCapture(0)

        self.blank_image = np.zeros((480, 640, 3), np.uint8)

        self.contoursImg = None
        self.newVar = None

        logging.basicConfig(level=logging.DEBUG)

        NetworkTables.initialize(server='10.62.39.2')

        self.rp = NetworkTables.getTable("RaspberryPi")
Example #17
def main(cameraPort, CAMERA_INSTANCE):
    print('Waiting on network tables')
    wait_for_port_open(networktableHostname, networktablePort)
    print('Network tables available')
    print('Waiting on camera')
    wait_for_port_open(cameraHostname, cameraPort)
    print('Camera available')

    print('Initializing NetworkTables')
    NetworkTables.setClientMode()
    NetworkTables.setIPAddress(networktableHostname)
    NetworkTables.initialize()
    table = NetworkTables.getTable('/vision/diagnostic_%s' % CAMERA_INSTANCE)

    print('Creating video capture')
    stream = urllib.request.urlopen('http://%s:%d/?action=stream' %
                                    (cameraHostname, cameraPort))

    print('Creating pipeline')
    pipeline = GripPipeline()

    if not stream.isclosed():
        print('opened capture')
    print('Running pipeline')

    i = 0
    previous_frame_time = time.time()

    extraBytes = bytes()

    while True:
        frame, extraBytes = getFrame(stream, extraBytes)
        frame_start = time.time()
        pipeline.process(frame)
        extra_processing(pipeline, CAMERA_INSTANCE)
        frame_end = time.time()
        frame_interval = frame_start - previous_frame_time
        frame_rate = 1 / (frame_interval)
        frame_time = frame_end - frame_start
        previous_frame_time = frame_start
        i += 1
        table.putNumber('count', i)
        table.putNumber('frameRate', frame_rate)
        table.putNumber('frameTime', frame_time)
        table.putNumber('frameInterval', frame_interval)

    print('Capture closed')
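
Example #17 depends on a getFrame(stream, extraBytes) helper that is not shown. One way such a helper could pull a single JPEG out of the MJPEG HTTP stream is to scan for the JPEG start (FF D8) and end (FF D9) markers and carry any leftover bytes into the next call; the read size and exact behavior here are assumptions:

import cv2
import numpy as np


def getFrame(stream, extraBytes):
    # Accumulate bytes until one complete JPEG is available, then decode it
    buf = extraBytes
    while True:
        buf += stream.read(16384)
        start = buf.find(b'\xff\xd8')
        end = buf.find(b'\xff\xd9', start + 2)
        if start != -1 and end != -1:
            jpg = buf[start:end + 2]
            frame = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
            # Return the decoded frame plus the bytes that belong to the next frame
            return frame, buf[end + 2:]
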
Example #18
def main():
    #Establishes a connection to NetworkTable
    print("Establishing a connection to the NetworkTable")
    NetworkTables.setClientMode()
    NetworkTables.setIPAddress("roboRIO-540-frc.local")
    NetworkTables.initialize()
    print("Connected to NetworkTable")

    #Captures frames and runs them through the GRIP Pipeline
    print("Starting vision tracking and GRIP Pipeline")
    cp = cv2.VideoCapture(0)
    pipeline = GripPipeline()
    while cp.isOpened():
        have_frame, frame = cp.read()
        if have_frame:
            pipeline.process(frame)
            extra_processing(pipeline)
class Vision:
    gp = GripPipeline()
    #initialize network tables comms
    sd = NetworkTables.getTable("SmartDashboard")
    angle = 0
    dist = 0

    def pImg(self):

        img = self.grab()
        # img = cv2.imread("C:\Users\KnightVision\Pictures\RPI vision testing\ledPicx\img1487521546_0.jpg", cv2.IMREAD_COLOR)  # temporary test code
        if isinstance(img, numpy.ndarray):  # checks that a frame was actually captured
            print("Processing . . . ")
            cx, self.dist = self.gp.process(img)
            self.calc_angle(cx)
        else:
            print("ERROR: cv2 img is empty! || " + "testImg.jpg")
            return 2


    def calc_angle(self, u):
        print("Calculating Angle . . . \n")
        raw_angle = math.degrees(math.atan((u-176)/298.7))
        init_angle = math.degrees(math.atan(1.25/self.dist))
        self.angle = init_angle + raw_angle
        print(init_angle, raw_angle, self.angle)
        print("DONE!")

    def grab(self):
        result, im = cap.read()
        return im

    def reset_exposure(self):
        os.system("v4l2-ctl -d /dev/video0 -c exposure_auto=1 -c exposure_absolute=0")
        print("Setting exposure setting to 5 . . . ")
        time.sleep(1)
        print("Done!")

    def push_data(self):
        if self.angle > 0.1 or self.angle < -0.1:
            self.sd.putNumber("pegAngle", self.angle)
        if self.dist > 0:
            self.sd.putNumber("distance", self.dist)
Example #20
def main():
    global idNum
    
    while True:
        plDone = False
        didStreamSucceed = False
        while didStreamSucceed is False:
            if plDone is False:
                try:       
                    print "Creating pipeline"
                    pipeline = GripPipeline.GripPipeline()
                    print "Pipeline created"
                    plDone = True

                except Exception as err:
                    print "Error creating pipeline: " + str(err)

            if plDone is True:
                try:
                    print "Opening Stream"
                    stream = cv2.VideoCapture('http://roborio-4237-frc.local:1181/stream.mjpg')
                    #stream = cv2.VideoCapture(1)
                    print "Stream Opened"
                    didStreamSucceed = True

                except Exception as err:
                    print "Error creating stream: " + str(err)

        print "Initialization Complete"

        while stream.isOpened():
            ret, img = stream.read()
            if ret is True:
                pipeline.process(img)
                if len(pipeline.filter_contours_output) >= 1 and (idNum % 10) == 0:
                    pass
                    #print "Writing image to usb"
                    #cv2.imwrite("/mnt/usb/" + str(idNum) + "raw.jpg", img)
                    #cv2.imwrite("/mnt/usb/" + str(idNum) + "filtered.jpg", pipeline.hsl_threshold_output)
                extra_processing(pipeline)
            else:
                print "Error reading from camera"
                break
Example #21
def main():
    print('Initializing NetworkTables')
    NetworkTables.setClientMode()
    NetworkTables.setIPAddress('localhost')
    NetworkTables.initialize()

    print('Creating video capture')
    cap = cv2.VideoCapture(0)

    print('Creating pipeline')
    pipeline = GripPipeline()

    print('Running pipeline')
    while cap.isOpened():
        have_frame, frame = cap.read()
        if have_frame:
            pipeline.process(frame)
            extra_processing(pipeline)

    print('Capture closed')
Example #22
def start_process(cv_sink, nt_instance):
    data_entry = nt_instance.getTable("Vision").getEntry("data")
    data_entry.setDefaultString("-1 -1")
    pipeline = GripPipeline()

    while True:
        _, frame = cv_sink.grabFrame(np.zeros((480, 640, 3), dtype=np.uint8))
        pipeline.process(frame)
        contours = pipeline.find_contours_output
        if len(contours) > 0:
            contour = get_largest_contour(contours)
            moments = cv2.moments(contour)
            if moments["m00"] != 0:
                center_x = int(moments["m10"] / moments["m00"])
                x, y, width, height = cv2.boundingRect(contour)
                data_entry.setString("{} {}".format(center_x, width))
                print("Contour:", center_x, width)
            else:
                data_entry.setString("-1 -1")
        else:
            data_entry.setString("-1 -1")
Example #23
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--img", required=True)
    args = parser.parse_args()
    image_name = args.img

    image = cv2.imread(image_name)  # load the image named on the command line
    image2 = image.copy()
    gp = GripPipeline()
    gp.process(image2)
    contours = gp.find_contours_output
    print(len(contours))  #Number of contours found

    #Left Target
    e = 0.05 * cv2.arcLength(contours[0], True)  #6
    approx = cv2.approxPolyDP(contours[0], e,
                              True)  #Finds 4 corners of a target
    bottomLeft = findBottomLeft(approx)
    print(bottomLeft)
    upperLeft = findUpperLeft(approx)
    print(upperLeft)
    print(approx)  # coordinates of all corners, for checking that bottomLeft is correct

    #Right Target
    e2 = 0.1 * cv2.arcLength(contours[1], True)  #4
    approx2 = cv2.approxPolyDP(contours[1], e2,
                               True)  #Finds 4 corners of other target
    bottomRight = findBottomRight(approx2)
    print(bottomRight)
    upperRight = findUpperRight(approx2)
    print(upperRight)
    print(approx2)

    cv2.drawContours(image2, approx, -1, (0, 255, 0), 2)
    cv2.drawContours(image2, approx2, -1, (0, 255, 0), 2)

    cv2.imshow("contours", image2)

    cv2.waitKey(0)
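
The corner helpers in Example #23 (findBottomLeft, findUpperLeft, findBottomRight, findUpperRight) are not shown. One plausible implementation for the bottom-left case, assuming approx is the usual OpenCV (N, 1, 2) point array with the origin at the image's top-left corner, so the bottom-left vertex minimizes x - y:

import numpy as np


def findBottomLeft(approx):
    # Flatten (N, 1, 2) into (N, 2) and pick the vertex with the smallest x - y
    points = approx.reshape(-1, 2)
    return tuple(points[np.argmin(points[:, 0] - points[:, 1])])

The other three helpers would follow the same pattern, flipping the sign on x or y to select the other corners.
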
Example #24
def getXY():
    cap = cv2.VideoCapture(0)
    pipeline = GripPipeline()
    have_frame, frame = cap.read()
    print(frame.shape)
    if have_frame:
        pipeline.process(frame)
        center_x_positions = []
        center_y_positions = []
        widths = []
        heights = []
        # Find the bounding boxes of the contours to get x, y, width, and height
        for contour in pipeline.filter_contours_output:
            x, y, w, h = cv2.boundingRect(contour)
            center_x_positions.append(
                x + w / 2
            )  # X and Y are coordinates of the top-left corner of the bounding box
            center_y_positions.append(y + h / 2)
            widths.append(w)
            heights.append(h)
            print(center_x_positions[0])
            print(center_y_positions[0])
            return center_x_positions[0], center_y_positions[0]
Example #25
File: surf.py  Project: Team1437/FRC_2018
import cv2
from grip import GripPipeline
from networktables import NetworkTables

cap = cv2.VideoCapture(0)
NetworkTables.initialize(server='roboRIO-1437-FRC.local')
table = NetworkTables.getTable('Vision')
#Clear the table
for key in table.getKeys():
    table.delete(key)

grip = GripPipeline()

while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    if not ret:
        raise Exception("could not load image !")
    # Our operations on the frame come here
    #height, width, channels = frame.shape
    #print(width)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    sift = cv2.xfeatures2d.SIFT_create()
    kp = sift.detect(gray, None)
    cv2.drawKeypoints(gray, kp, frame, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    # Display the resulting frame
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
Example #26
import datetime
import os

import cv2
from networktables import NetworkTable
from grip import GripPipeline

now = datetime.datetime.now()
print("Vision Log for " + str(now.day) + "/" + str(now.month) + "/" +
      str(now.year) + "  ~    " + str(now.hour) + ":" + str(now.minute) + ":" +
      str(now.second))
print("OpenCV version: " + str(cv2.__version__))
print("Starting Vision...")

bytes = ''
version = int(cv2.__version__[:1])
streamRunning = True
pipeline = GripPipeline()

try:
    NetworkTable.setTeam(2551)
    NetworkTable.setIPAddress("roborio-2551-frc.local")
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    print("Initializing Network Tables...")
except:
    print("Network Tables already initialized")
    pass

#NetworkTable.setTeam(2551)
#NetworkTable.setIPAddress("roborio-2551-frc.local")
#NetworkTable.setClientMode()
#NetworkTable.initialize()
Example #27
import cv2
import numpy as np
from grip import GripPipeline


def processImg(dir, gripCode):
    img = cv2.imread(dir, cv2.IMREAD_COLOR)
    img = cv2.resize(img, (320, 240))
    out = gripCode.process(img)
    scale = 2
    out = cv2.resize(out, (320 * scale, 240 * scale))
    cv2.imshow(dir, out)


dirBegin = '../resources/'

gripCode = GripPipeline()

processImg(dirBegin + 'cargoship.png', gripCode)
processImg(dirBegin + 'cargoship2.png', gripCode)
processImg(dirBegin + 'cargoshipNoise.png', gripCode)
processImg(dirBegin + 'confusingTarget.png', gripCode)
processImg(dirBegin + 'missingPart.png', gripCode)
processImg(dirBegin + 'multipleTargets.png', gripCode)
processImg(dirBegin + 'target.png', gripCode)

cv2.waitKey(0)
cv2.destroyAllWindows()
Example #28
 def __init__(self,image):
     self.pw_image = image
     self.gp = GripPipeline()
     self.gp.process(self.pw_image)
Example #29
def init_filter():
    global _filter
    _filter = GripPipeline()
    return do_process
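
init_filter returning a do_process callback looks like the mjpg-streamer OpenCV input-plugin filter convention, but the callback itself is not shown. A minimal sketch, assuming the callback receives one BGR frame and must return a frame for the stream:

def do_process(img):
    # Run the GRIP pipeline for its side effects and pass the frame through unchanged
    _filter.process(img)
    return img
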
Example #30
 def __init__(self, cam, system, arduino):
     self.cam = cam
     self.grip = GripPipeline()
     self.line = LineAnalysis(False)
     self.system = system
     self.arduino = arduino