Example #1
import cv2 as cv
import numpy as np
import grip
from GRIPget import WebcamVideoStream
import time

# Benchmark the GRIP pipeline: run it on a threaded webcam stream and print
# the average frames per second when the loop is stopped with Ctrl+C.
vid1 = WebcamVideoStream(src=0).start()
another = grip.GripPipeline()
counter = 0
start = time.time()
try:
    while True:
        img1 = vid1.read()
        another.process(img1)
        counter += 1
        # waitKey only registers keys when an OpenCV window has focus;
        # Ctrl+C is the usual way to stop this headless loop.
        k = cv.waitKey(5) & 0xFF
        if k == 27:  # ESC
            break
except KeyboardInterrupt:
    print(counter / (time.time() - start))  # average FPS over the run
    vid1.release()
Example #2
# import the necessary packages
print("beginning execution")
import grip
import time
import cv2 as cv
import pyrealsense2 as rs
import numpy as np
import altusi.visualizer as vis
import robot
import ikpy
print("libraries imported")

gp = grip.GripPipeline()
print("grip instantiated")

# Stages of the pick-up state machine
SEARCH_LR = 0
ALIGN_UD = 1
IK = 2
GRAB = 3
HOME = 4

stage = SEARCH_LR  # start in the first stage
tracking = 0

# Kinematic chain for the Niryo One arm, loaded from its URDF description
my_chain = ikpy.chain.Chain.from_urdf_file("./niryo_one.urdf")

#object_detector = ObjectDetector()

home_pin = 13  # GPIO pin connected to the home switch
delay = 1.2  # seconds
angle = 2  # degrees
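The snippet cuts off before the state machine runs. As a rough sketch of what the IK stage could do with the chain loaded above (the target coordinates below are made up for illustration; the real code presumably derives them from the RealSense depth data), ikpy's standard calls would look like this:

# Hedged sketch only: solve the arm pose for a hypothetical target position.
import ikpy.chain

chain = ikpy.chain.Chain.from_urdf_file("./niryo_one.urdf")

target = [0.2, 0.0, 0.15]  # hypothetical XYZ target (metres, robot base frame)
joint_angles = chain.inverse_kinematics(target)
print("IK joint angles:", joint_angles)

# Forward kinematics gives the 4x4 end-effector frame reached by that solution,
# useful for checking how close the solver got to the target.
end_frame = chain.forward_kinematics(joint_angles)
print("End effector position:", end_frame[:3, 3])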
Example #3
File: vision.py  Project: frc2503/r2019
        ntinst.startServer()
    else:
        print("Setting up NetworkTables client for team {}".format(team))
        ntinst.startClientTeam(team)

    # start cameras
    cameras = []
    for cameraConfig in cameraConfigs:
        cameras.append(startCamera(cameraConfig))

    tapeSink = CameraServer.getInstance().getVideo(name="Tape Camera")

    # preallocated frame buffer: (height, width, channels)
    img = np.zeros(shape=(240, 320, 3), dtype=np.uint8)

    # GRIP pipeline
    pipe = grip.GripPipeline()

    # horizontal centre of the 320-pixel-wide image
    cameraWidth = 320 / 2

    # NetworkTable
    table = ntinst.getTable("TapeFinder")

    # Run forever
    while True:
        time, img = tapeSink.grabFrame(img)

        # a timestamp of 0 means the frame grab failed; skip it
        if time == 0:
            continue

        # Process
        pipe.process(img)
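The loop ends at pipe.process(img) in this excerpt. A plausible continuation, assuming the generated pipeline exposes its Filter Contours step as filter_contours_output (the conventional GRIP attribute name, not confirmed by this snippet) and that cv2 is imported elsewhere in vision.py, would publish the widest contour's offset from cameraWidth to the TapeFinder table:

        # Hedged continuation sketch (not from the original file): read the
        # contours from the pipeline and publish the widest target's offset.
        # filter_contours_output and the cv2 import are assumptions here.
        contours = pipe.filter_contours_output
        if contours:
            widest = max(contours, key=lambda c: cv2.boundingRect(c)[2])
            x, y, w, h = cv2.boundingRect(widest)
            centerX = x + w / 2
            table.putNumber("targetOffset", centerX - cameraWidth)
            table.putBoolean("targetVisible", True)
        else:
            table.putBoolean("targetVisible", False)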
Example #4
from picamera import PiCamera
from picamera.array import PiRGBArray
from networktables import NetworkTables
from time import clock

import cv2
import grip

NetworkTables.initialize(server='roborio-167-frc.local')
table = NetworkTables.getTable('myContoursReport')

camera = PiCamera()
camera.resolution = (416, 320)
camera.framerate = 24
camera.exposure_compensation = 0
rawCapture = PiRGBArray(camera, size=(416, 320))
processor = grip.GripPipeline()
for frame in camera.capture_continuous(rawCapture,
                                       format='bgr',
                                       use_video_port=True):
    contours = processor.process(frame.array)
    # publish the bounding rectangle of every contour to NetworkTables
    datax, datay, dataw, datah = [], [], [], []
    for contour in contours:
        data = cv2.boundingRect(contour)
        datax.append(data[0])
        datay.append(data[1])
        dataw.append(data[2])
        datah.append(data[3])
    table.putNumberArray('x', datax)
    table.putNumberArray('y', datay)
    table.putNumberArray('w', dataw)
    table.putNumberArray('h', datah)
    # clear the stream buffer so capture_continuous can write the next frame
    rawCapture.truncate(0)
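One caveat: a pipeline generated by GRIP normally returns None from process() and stores each step's result on the instance, so iterating over the return value as above implies the pipeline was hand-edited to return its contours. With an unmodified pipeline, the per-frame block would read the attribute instead (filter_contours_output is the conventional name and is an assumption about this particular pipeline):

# Hedged alternative for a stock GRIP-generated pipeline
processor.process(frame.array)
datax, datay, dataw, datah = [], [], [], []
for contour in processor.filter_contours_output:
    x, y, w, h = cv2.boundingRect(contour)
    datax.append(x)
    datay.append(y)
    dataw.append(w)
    datah.append(h)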
Example #5
import cv2
import numpy as np
import grip

vc = cv2.VideoCapture(1)
imagePipeline = grip.GripPipeline()

if vc.isOpened():  # try to get the first frame
    rval, frame = vc.read()
else:
    rval = False

# process each frame
while rval:

    cv2.imshow("image", frame)
    # only works if process() was edited to return an image; a stock
    # GRIP-generated pipeline returns None and stores results in attributes
    cv2.imshow("hull", imagePipeline.process(frame))

    # get the next frame
    rval, frame = vc.read()

    # break on ESC
    key = cv2.waitKey(20)
    if key == 27:
        break

cv2.destroyWindow("image")
cv2.destroyWindow("hull")
Example #6
File: pp.py  Project: Jzw-WG/p
import cv2
import numpy as np
from matplotlib import pyplot as plt
import grip as gp

# read the test image as grayscale
img = cv2.imread('0.jpg', 0)
# f = np.fft.fft2(img)
# ff = np.fft.fftshift(f)
# res = (20*np.log(np.abs(ff))).astype(np.uint8)
a = gp.GripPipeline()
a.process(img)
a.cv_threshold_output  # intermediate result of the threshold step (unused here)
# display the Laplacian step's output
cv2.imshow('1', a.cv_laplacian_output)
cv2.waitKey()
# plt.subplot(121), plt.imshow(img, 'gray'), plt.title('Original Image')
# plt.axis('off')
# plt.subplot(122), plt.imshow(res, 'gray'), plt.title('Fourier Image')
# plt.axis('off')
# plt.show()