Example #1
    def __init__(self):
        self.nt = NetworkTable.getTable('/camera')

        #Cameras
        self.piston_cam = cs.UsbCamera('Piston Cam', 0)
        self.piston_cam.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, 160, 120,
                                     35)  #160 vs. 120

        self.piston_cam.setExposureAuto()
        self.piston_cam.getProperty('backlight_compensation').set(5)

        #self.piston_cam.setExposureManual(35)
        #self.piston_cam.setBrightness(65)
        #test = self.piston_cam.getProperty("exposure_absolute")
        #print(self.piston_cam.getProperty("exposure_absolute"))

        self.piston_server = cs.MjpegServer('httpserver', 1181)
        self.piston_server.setSource(self.piston_cam)

        if self.secondary_cam:
            self.light_ring_cam = cs.UsbCamera('Light Ring Cam', 0)
            self.light_ring_cam.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG,
                                             320, 240, 20)

            # This only seems to affect automatic exposure mode
            # -> higher value means it lets more light in when facing a big light
            self.light_ring_cam.getProperty('backlight_compensation').set(5)

            #Image Processing
            self.cvsink = cs.CvSink('cvsink')
            self.cvsink.setSource(self.light_ring_cam)

            self.cvsource = cs.CvSource('cvsource',
                                        cs.VideoMode.PixelFormat.kMJPEG, 320,
                                        240, 20)

            #Streaming Servers

            #self.ring_stream = cs.MjpegServer("ring server", 1182)
            #self.ring_stream.setSource(self.light_ring_cam)

            if self.STREAM_CV:
                self.cv_stream = cs.MjpegServer('cv stream', 1183)
                self.cv_stream.setSource(self.cvsource)

            #Blank mat
            self.img = np.zeros(shape=(320, 240, 3), dtype=np.uint8)

            self.processor = ImageProcessor()
Example #2
  def __init__(self, webcam_device, webcam_name, local_ip="127.0.0.1", local_port=8087, calc="peg"):
    self.__calc = calc
    
    # start web cam and set dimensions
    self.__camera = cv2.VideoCapture(webcam_device)
    self.__camera.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    self.__camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    # setup cv source to write frames to for mjpeg server
    self.__cvsource = cs.CvSource(webcam_name, cs.VideoMode.PixelFormat.kMJPEG, 320, 240, 30)

    # setup mjpeg server
    self.__mjpeg_server = cs.MjpegServer("httpserver", local_port)
    self.__mjpeg_server.setSource(self.__cvsource)

    # setup grip pipeline from grip.py
    self.__grip_pipeline = GripPipeline()

    # write camera info
    table = NetworkTable.getTable("CameraPublisher")
    cam = table.getSubTable(webcam_name)

    cam.putString("source", "usb:IMAQdx/")
    cam.putStringArray("streams", ["mjpg:http://" + local_ip + ":" + str(local_port) + "/stream.mjpg?name="+webcam_name])

    if self.__debug:
      self.__webcam_name = webcam_name
      cv2.namedWindow(self.__webcam_name)
Example #3
def main():
    camera = cs.HttpCamera("httpcam", "http://localhost:8081/?action=stream")
    camera.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, 320, 240, 30)

    cvsink = cs.CvSink("cvsink")
    cvsink.setSource(camera)

    cvSource = cs.CvSource("cvsource", cs.VideoMode.PixelFormat.kMJPEG, 320,
                           240, 30)
    cvMjpegServer = cs.MjpegServer("cvhttpserver", 8083)
    cvMjpegServer.setSource(cvSource)

    print("OpenCV output mjpg server listening at http://0.0.0.0:8083")

    test = np.zeros(shape=(240, 320, 3), dtype=np.uint8)
    flip = np.zeros(shape=(240, 320, 3), dtype=np.uint8)

    while True:

        time, test = cvsink.grabFrame(test)
        if time == 0:
            print("error:", cvsink.getError())
            continue

        print("got frame at time", time, test.shape)

        cv2.flip(test, flipCode=0, dst=flip)
        cvSource.putFrame(flip)
Example #4
def main():
    logger.info(
        "The camera server started! you made it this far i'm proud of you")
    try:
        cam = cscore.UsbCamera("usbcam0", 0)
        cam.setVideoMode(cscore.VideoMode.PixelFormat.kMJPEG, 320, 240, 30)
        cam.setBrightness(40)
        mjpegServer = cscore.MjpegServer(name="httpserver0", port=5806)
        mjpegServer.setSource(cam)
        while True:
            time.sleep(0.1)
    except Exception as e:
        print("Camera server is dead :(", e)
Example #5
    def addServer(self, *, name=None, port=None, server=None):
        """Adds a MJPEG server
        
        :param name: Server name
        :param port: Port of server (if None, use next available port)
        :param server: Existing server (sink) to add; may not be combined with name or port
        
        :returns: server object
        :rtype: :class:`cscore.VideoSink`
        
        All arguments must be specified as keyword arguments. The following
        combinations are accepted:
        
        * name
        * name, port
        * server
        
        """

        with self._mutex:
            if server is not None:
                assert name is None and port is None

            else:
                assert name is not None

                if port is None:
                    port = self._nextPort
                    self._nextPort += 1

                server = cscore.MjpegServer(name, port)

            sname = server.getName()
            sport = server.getPort()

            logger.info("CameraServer '%s' listening on port %s", sname, sport)

            self._sinks[sname] = server
            return server
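
The docstring above describes three accepted keyword combinations. A minimal usage sketch, assuming an instance of the surrounding class named camera_server (the instance name is hypothetical; cscore is imported as in the example):

# Hypothetical usage of addServer()
auto_server = camera_server.addServer(name="auto stream")                 # next free port
pinned_server = camera_server.addServer(name="pinned stream", port=1181)  # explicit port

# Register an MjpegServer constructed elsewhere
external = cscore.MjpegServer("external stream", 1182)
camera_server.addServer(server=external)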
Example #6
hsvGroundUpper = (255, 70, 255)
# set the configFile
configFile = "/boot/frc.json"
cameraConfigs = []

# config the camera
ballCamera = cs.UsbCamera("ballcam", "/dev/video0")
ballCamera.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, width, height, 30)

groundCamera = cs.UsbCamera("groundcam", "/dev/video1")
groundCamera.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, width, height, 30)

# set the mjpegServer
# Server initial

ballServer = cs.MjpegServer("ballSource", 1181)
ballServer.setSource(ballCamera)

groundServer = cs.MjpegServer("groundSource", 1183)
groundServer.setSource(groundCamera)

print("ball server listening at http://10.72.80.12:1181")
print("ground server listening at http://10.72.80.12:1183")

ballSink = cs.CvSink("ballSink")
ballSink.setSource(ballCamera)

groundSink = cs.CvSink("groundSink")
groundSink.setSource(groundCamera)

cvBallSource = cs.CvSource("cvballsource", cs.VideoMode.PixelFormat.kMJPEG,
Example #7
ret, camframe1 = visionCam.read()

VisonCamera = 0

vision = Processing.VisionThread(debug=False)
vision.setCameraCharacteristics(Constants.camera2Matrix,Constants.camera2DistortionCoefficients)
if(ret):
	vision.run(camframe1)

logging.basicConfig(level=logging.DEBUG)
NetworkTables.initialize(server='roborio-3373-frc.local')
sd = NetworkTables.getTable("VisionData")
#cameraPre = NetworkTables.getTable("PreLoad")

cvSource = cs.CvSource("Vision Camera", cs.VideoMode.PixelFormat.kMJPEG, 480, 320, 10)
cvMjpegServer = cs.MjpegServer("httpCamera1", port=5801)
cvMjpegServer.setSource(cvSource)

csTable = NetworkTables.getDefault()
csTable.getEntry("/CameraPublisher/PiCam/streams").setStringArray(["mjpeg:http://10.33.73.20:5801/?action=stream"])
#cvSource2 = cs.CvSource("Camera2", cs.VideoMode.PixelFormat.kMJPEG, 320, 240, 15)
#cvMjpegServer2 = cs.MjpegServer("httpCamera2", port=5802)
#cvMjpegServer2.setSource(cvSource2)

cameras=[{"camera":visionCam,"stream":cvSource,"id":VisonCamera,"timeout":0}]

count = 0
#NetworkTables.initialize()

def switchCamera(camobject,deviceNum):
	#subprocess.call(shlex.split("v4l2-ctl -d "+str(deviceNum)+" -c exposure_auto=3"))
Example #8
import cscore as cs
import time

gear_cam = cs.UsbCamera("gear","/dev/gear_camera")
shooter_cam = cs.UsbCamera("shooter","/dev/shooter_camera")

gear_cam.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, 320, 240, 15)
shooter_cam.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, 320, 240, 15)

gear_server = cs.MjpegServer("gear_http_server", 5800)
shooter_server = cs.MjpegServer("shooter_http_server", 5801)

gear_server.setSource(gear_cam)
shooter_server.setSource(shooter_cam)

print("Server started on ports 5800 and 5801")
while True:
	time.sleep(0.1)
Example #9
# Connect NetworkTables
# Note:  actual IP address should be robot IP address
NetworkTables.initialize(server="192.168.0.113")

width = 320
height = 240
frames_per_sec = 15

# Open connection to USB Camera (video device 0 [/dev/video0]) */
camera = cs.UsbCamera("usbcam", 0)
camera.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, width, height,
                    frames_per_sec)

# Start raw Video Streaming Server
mjpegServer = cs.MjpegServer("httpserver", 8081)
mjpegServer.setSource(camera)

cvsink = cs.CvSink("cvsink")
cvsink.setSource(camera)

# Start processed Video server
cvsource = cs.CvSource("cvsource", cs.VideoMode.PixelFormat.kMJPEG, width,
                       height, frames_per_sec)

cvMjpegServer = cs.MjpegServer("cvhttpserver", 8082)
cvMjpegServer.setSource(cvsource)

# Create Video Writer, if enabled
if outputVideoFilePath is not None:
    frameSize = (width, height)
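
The snippet is cut off before the writer itself is constructed. A minimal sketch of the continuation, assuming OpenCV is imported as cv2 and that outputVideoFilePath holds the target file name; processed_frame is a placeholder (Example #14 below uses the same XVID pattern):

    # Hypothetical continuation: write processed frames to disk with OpenCV
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    videoWriter = cv2.VideoWriter(outputVideoFilePath, fourcc, frames_per_sec, frameSize)
    # later, inside the capture loop: videoWriter.write(processed_frame)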
Example #10
hsvGroundUpper = (255, 70, 255)
# set the configFile
configFile = "/boot/frc.json"
cameraConfigs = []

# config the camera
ballCamera = cs.UsbCamera("ballcam", "/dev/video0")
ballCamera.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, width, height, 30)

groundCamera = cs.UsbCamera("groundcam", "/dev/video1")
groundCamera.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, width, height, 30)

# set the mjpegServer
# Server initial

ballServer = cs.MjpegServer("ballSource", 1181)
ballServer.setSource(ballCamera)

groundServer = cs.MjpegServer("groundSource", 8181)
groundServer.setSource(groundCamera)

print("ball server listening at http://0.0.0.0:1181")
print("ground server listening at http://0.0.0.0:8181")

ballSink = cs.CvSink("ballSink")
ballSink.setSource(ballCamera)

groundSink = cs.CvSink("groundSink")
groundSink.setSource(groundCamera)

# cvBallSource = cs.CvSource("cvballsource", cs.VideoMode.PixelFormat.kMJPEG, width, height, 30)
Example #11
 def __init__(self, resolution = (1280, 720), framerate = 30, port = 5800):
     self.cvSource = cs.CvSource("cvsource", cs.VideoMode.PixelFormat.kMJPEG,
                                 resolution[0], resolution[1], framerate)
     mjpegServer = cs.MjpegServer("cvhttpserver", port)
     mjpegServer.setSource(self.cvSource)
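
The class above only wires a CvSource to an MjpegServer; nothing pushes frames yet. A minimal sketch of feeding it, assuming the class is named Streamer and the images come from an OpenCV capture (both names are assumptions):

import cv2
import numpy as np

streamer = Streamer(resolution=(1280, 720), framerate=30, port=5800)  # hypothetical class name

cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)

frame = np.zeros(shape=(720, 1280, 3), dtype=np.uint8)  # preallocate, as the other examples do
while True:
    ok, frame = cap.read(frame)
    if ok:
        streamer.cvSource.putFrame(frame)  # push the frame to the MJPEG stream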
Example #12
                    VisionConfig.usb_resolution[0],
                    VisionConfig.usb_resolution[1], VisionConfig.usb_framerate)

# create a cv sink, which will grab images from the camera
cvsink = cscore.CvSink("cvsink")
cvsink.setSource(picam)

# create Pipeline Object
pipeline = GripPipeline()

# preallocate memory for images so that we don't allocate it every loop
img = np.zeros(shape=(240, 320, 3), dtype=np.uint8)

# set up mjpeg server, the ip for this is 0.0.0.0:1180 and 0.0.0.0:1181
# Comment this out before competition, or change ports to allowed port numbers
mjpegServer1 = cscore.MjpegServer("httpserver", 1180)
mjpegServer1.setSource(picam)
mjpegServer2 = cscore.MjpegServer("httpserver", 1181)
mjpegServer2.setSource(usbcam)

# initialize the networktable and wait for connection
cond = threading.Condition()
notified = [False]


def connectionlistener(connected, info):
    print(info, '; Connected=%s' % connected)
    with cond:
        notified[0] = True
        cond.notify()
Example #13
try:
    import cscore
    camera = cscore.UsbCamera("usbcam", 0)
    camera.setVideoMode(cscore.VideoMode.PixelFormat.kMJPEG, 320, 240, 30)

    mjpegServer = cscore.MjpegServer("httpserver", 8081)
    mjpegServer.setSource(camera)
except BaseException as e:
    print("cscore error")
    print(e)
Example #14
def mainloop():

    #Get current time as a string
    currentTime = time.localtime(time.time())
    timeString = str(currentTime.tm_year) + str(currentTime.tm_mon) + str(currentTime.tm_mday) + str(currentTime.tm_hour) + str(currentTime.tm_min)
    
    #Open a log file
    logFilename = '/data/Logs/Run_Log_' + timeString + '.txt'
    log_file = open(logFilename, 'w')
    log_file.write('Run started on %s.\n' % datetime.datetime.now())
    log_file.write('')

    #Load VMX module
    vmxpi = imp.load_source('vmxpi_hal_python', '/usr/local/lib/vmxpi/vmxpi_hal_python.py')
    vmx = vmxpi.VMXPi(False,50)
    if vmx.IsOpen() is False:
        log_file.write('Error:  Unable to open VMX Client.\n')
        log_file.write('\n')
        log_file.write('        - Is pigpio (or the system resources it requires) in use by another process?\n')
        log_file.write('        - Does this application have root privileges?')
        log_file.close()
        sys.exit(0)
        
    #Open connection to USB camera (video device 0)
    camera = cs.UsbCamera("usbcam", 0)
    camera.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, imgwidth, imgheight, frames_per_sec)

    #Start raw Video Streaming Server
    mjpegServer = cs.MjpegServer("httpserver", 8081)
    mjpegServer.setSource(camera)

    #Define video sink
    cvsink = cs.CvSink("cvsink")
    cvsink.setSource(camera)

    #Define video source
    cvsource = cs.CvSource("cvsource", cs.VideoMode.PixelFormat.kMJPEG, imgwidth, imgheight, frames_per_sec)

    #Start processed Video Streaming Server
    cvMjpegServer = cs.MjpegServer("cvhttpserver", 8082)
    cvMjpegServer.setSource(cvsource)

    #Create blank image
    img = np.zeros(shape=(imgheight, imgwidth, 3), dtype=np.uint8)

    #Set video codec and create VideoWriter
    fourcc = cv.VideoWriter_fourcc(*'XVID')
    videoFilename = '/data/Match Videos/robotcam-' + timeString + '.avi'
    imgout = cv.VideoWriter(videoFilename,fourcc,20.0,(320,240))

    #Connect NetworkTables
    NetworkTables.initialize(server="10.41.21.2")
    navxTable = NetworkTables.getTable("navx")
    visionTable = NetworkTables.getTable("vision")
    smartDash = NetworkTables.getTable("SmartDashboard")

    
    #Reset yaw gyro
    if not vmx.getAHRS().IsCalibrating():
        vmx.getAHRS().ZeroYaw()

    #Reset displacement
    vmx.getAHRS().ResetDisplacement()

    #Start main processing loop
    while (True):

        #Grab a frame from video feed
        video_timestamp, img = cvsink.grabFrame(img)

        #Check for frame error
        if video_timestamp == 0:
            log_file.write('Video error: \n')
            log_file.write(cvsink.getError())
            log_file.write('\n')
            time.sleep(float(frames_per_sec * 2) / 1000.0)
            continue

        #Find contours in image
        img_contours, cubeDistance, cubeAngle, cubeHeight = detect_contours(img)

        #Put contour info on image
        if cubeDistance != -1:
            distanceString = 'Distance: %d' % cubeDistance
            cv.putText(img_contours, distanceString, (10,15), cv.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1,cv.LINE_AA)
            angleString = 'Offset angle: %d' % cubeAngle
            cv.putText(img_contours, angleString, (10,30), cv.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1,cv.LINE_AA)

        #Put IMU info on image
        angleString = 'Angle: %.2f' % vmx.getAHRS().GetAngle()
        cv.putText (img, angleString, (30,70), cv.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1,cv.LINE_AA)
        yVelocityString = 'Y Vel:  %.2f' % vmx.getAHRS().GetVelocityY()
        cv.putText (img, yVelocityString, (30,90), cv.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1,cv.LINE_AA)
        xVelocityString = 'X Vel: %.2f' % vmx.getAHRS().GetVelocityX()
        cv.putText (img, xVelocityString, (30,110), cv.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1,cv.LINE_AA)
        yDispString = 'Y Disp: %.2f' % vmx.getAHRS().GetDisplacementY()
        cv.putText (img, yDispString, (30,130), cv.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1,cv.LINE_AA)
        xDispString = 'X Disp: %.2f' % vmx.getAHRS().GetDisplacementX()
        cv.putText (img, xDispString, (30,150), cv.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1,cv.LINE_AA)

        #Update network tables
        navxTable.putNumber("SensorTimestamp", vmx.getAHRS().GetLastSensorTimestamp())
        navxTable.putNumber("DriveAngle", vmx.getAHRS().GetAngle())
        navxTable.putNumber("YVelocity", vmx.getAHRS().GetVelocityY())
        navxTable.putNumber("XVelocity", vmx.getAHRS().GetVelocityX())
        navxTable.putNumber("YDisplacement", vmx.getAHRS().GetDisplacementY())
        navxTable.putNumber("XDisplacement", vmx.getAHRS().GetDisplacementX())
        visionTable.putNumber("CubeAngle", cubeAngle)
        visionTable.putNumber("CubeDistance", cubeDistance)
        smartDash.putNumber("DriveAngle", vmx.getAHRS().GetAngle())
        smartDash.putNumber("CubeAngle", cubeAngle)
        smartDash.putNumber("CubeDistance", cubeDistance)
        
        #Show image
        cv.imshow('My Webcam', img_contours)

        #Put frame in video stream
        cvsource.putFrame(img_contours)

        #Write image to file
        if writeVideo:
            imgout.write(img_contours)

        #Check for stop code from robot
        if cv.waitKey(1) == 27:
            break
        #robotStop = visionTable.getNumber("RobotStop", 0)
        #if robotStop == 1:
        #    break

    #Close all open windows
    cv.destroyAllWindows()

    #Close video file
    imgout.release()

    #Close the log file
    log_file.write('Run stopped on %s.' % datetime.datetime.now())
Example #15
threads = Threads()
thread1 = Thread(target=threads.thread1)

thread1.start()
logging.basicConfig(level=logging.DEBUG)
NetworkTables.initialize(server='roborio-3373-frc.local')
sd = NetworkTables.getTable("VisionData")
#cameraPre = NetworkTables.getTable("PreLoad")

cvSource = cs.CvSource("Camera1", cs.VideoMode.PixelFormat.kMJPEG, 320, 240, 15)
cvMjpegServer = cs.MjpegServer("httpCamera1", port=5801)
cvMjpegServer.setSource(cvSource)

cs = NetworkTables.getDefault()
cs.getEntry("/CameraPublisher/PiCam/streams").setStringArray(["mjpeg:http://10.33.73.18:5801/?action=stream"])
#cvSource2 = cs.CvSource("Camera2", cs.VideoMode.PixelFormat.kMJPEG, 320, 240, 15)
#cvMjpegServer2 = cs.MjpegServer("httpCamera2", port=5802)
#cvMjpegServer2.setSource(cvSource2)

test = np.zeros(shape=(240, 640, 3), dtype=np.uint8)
count = 0
#NetworkTables.initialize()
CAMFOCALLENGTH = CAM1FOCALLENGTH
print("running")
if __name__ == '__main__':
    try:
Example #16
import cscore as cs

from cvsink_thread import CvSinkThread, crop
from detect_target import detect_target

front_cam = cs.UsbCamera("front_cam", 0)
front_cam.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, 480, 270, 30)
front_cam.setBrightness(40)
front_cam.setExposureManual(2)

front_cam_sink = cs.CvSink("front_cam_sink")
front_cam_sink.setSource(front_cam)

mjpeg_source = cs.CvSource("mjpeg_source", cs.VideoMode.PixelFormat.kMJPEG,
                           320, 240, 8)
mjpeg_server = cs.MjpegServer("mjpeg_server", 8083)
mjpeg_server.setSource(mjpeg_source)
mjpeg_server.setDefaultCompression(50)
mjpeg_server.setCompression(50)

thread = CvSinkThread(front_cam_sink, (270, 480, 3))


def main_loop():
    # grab frame
    frame = thread.get_frame()

    detect_target(frame)

    cv2.line(frame, (frame.shape[1] // 2, 0),
             (frame.shape[1] // 2, frame.shape[0]), (0, 180, 180), 5)
Example #17
def __get_ports__():
    ports = []
    for i in range(0, 7):
        threading.Thread(target=findPortRange, args=(ports, 15*i, 15*(i+1))).start()
    return ports
def __get_cap__(id,index):
  _camera_ = camera_server.startAutomaticCapture(dev=id, return_server=False)
  try:
    with open(__config_path__ + '/cameras/' + str(index) + '.json') as json_file:
      _camera_.setConfigJson("".join(i for i in json_file.readlines()))
  except:
    print("no json file for camera number " + str(id))
  _camera_.setResolution(width, height)
  _camera_.setFPS(fps)
  _camera_.setPixelFormat(cscore.VideoMode.PixelFormat.kYUYV)
  return _camera_
black_img = numpy.zeros(shape=(320, 240, 3), dtype=numpy.uint8)  
def get_video(index, image):
  obj = camera_server.getVideo(camera=cameras[0]).grabFrame(image)
  if obj[0] == 0:
    return black_img
  return obj[1]
 
def process_Init():
    global prev,nex,conf,pt,done

    s_time = time.time()
    counter = 1
    source = cscore.CvSource("conf_stream",cscore.VideoMode(cscore.VideoMode.PixelFormat(cscore.VideoMode.PixelFormat.kMJPEG),width,height,fps))
    server = cscore.MjpegServer(name="conf_stream",port=1191)
    server.setSource(source)
    with open("/root/.hawk/pipelines/grip_i.py","r") as grip_file:
          exec("".join(i for i in grip_file.readlines()))
          grip_pipe = eval('GripPipeline()')
    while not done:
        try:
          #update vars
          try:
            data = s.recvfrom(1024)[0] 
            encoding = 'utf-8'
            data = data.decode(encoding)
            changed_vals(data)
          except Exception as e:
            print("no data recieved",end='\r')
          finally:
            if not conf:
              conf = False
            #adjust prev/next
              if prev:
                prev = False
                counter -=1
                if counter <= 0:
                  counter = 1
              if nex:
                nex = False
                counter += 1
                if counter > 4:
                  counter = 4
              #TODO: add pt add 
              if prev or nex:
                  with open("/root/.hawk/pipelines/grip_" + (counter * 'i') + '.py') as grip_file:
                    exec("".join(i for i in grip_file.readlines()))
                    grip_pipe = eval('GripPipeline()')
                  os.system("reboot")
            ##get image from original 
            pic = cap1.read()[1]
            cv2.cvtColor(pic,84)
            ##process and get rgb threshold
            grip_pipe.process(pic)
            pic = grip_pipe.rgb_threshold_output
            ##publish to a new stream
            source.putFrame(pic)
            if prev or nex or conf or pt or done:
              print(str(prev) + ' ' + str(nex) + ' ' + str(conf) + ' ' + str(pt) + ' ' + str(done))
        except Exception as e:
          print(e)
def changed_vals(val):
  print('\n' + val)
  if not val: return
  print(val)
  global prev,nex,conf,pt,done
  if val == "PREV":
    prev = True
    return
  if val == "NEX":
    nex = True
    return
  if val == "SET_CONF":
    conf = True
    return
  if val == "ADD_PT":
    pt = True
    return
  if val == "DONE":
    done = True
    return
def checkProcessInit():
    try:
      s.settimeout(0.0001)
      data = s.recvfrom(1024)[0] 
      encoding = 'utf-8'
      data = data.decode(encoding)
      if data == "START":
        process_Init()
    except: 
      pass

    
def error(e, pipeline):
  print(e)
  pipeline.putBoolean("valid", False)
#endregion

#region init
__pipe_name__ = "vision"
team_number = 0
width = 320
height = 240
fps = 75
cameras = []
prev,nex,conf,pt,done = False,False,False,False,False
os.system("rm -f ~/output.txt") # delete!
#paths
__config_path__ = os.path.expanduser('~') + '/.hawk'
# PSEYE camera settings
os.system("v4l2-ctl -c exposure=6")
os.system("v4l2-ctl --set-fmt-video=width=160,height=120,pixelformat=BGR")
#networking
__instance__ = NetworkTablesInstance.getDefault()
__instance__.startClientTeam(team_number)
__pipeline__ = NetworkTables.getTable(__pipe_name__)
NetworkTables.initialize()
camera_server = CameraServer.getInstance()
''' uncomment if you use the HawkEYE client
HOST = socket.gethostname()
TCP_IP = socket.gethostbyname(HOST)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((TCP_IP, 5000))
(Thread(checkProcessInit())).start()
'''
#endregion
    
def main():
    try: #load JSON settings file
        with open(__config_path__ + '/config.json', 'r') as file:
            config = json.loads(file.read())
            team_number = config['team_number']
            width = config['width']
            height = config['height']
            fps = config['fps']
            __pipe_name__ = config['pipe_name']
    except Exception as e:
        print("Error loading config")
        print(e)
    
    for index, id in enumerate(__get_ports__()): #load Camera, can take a while!
        cameras.append(__get_cap__(id, index))
    cameras[0].setPixelFormat(cscore.VideoMode.PixelFormat.kYUYV)

    __index__ = int(__pipeline__.getNumber('cap_number', 1))
    #black picture array
    blackIMG = numpy.zeros(shape=(width, height, 3), dtype=numpy.uint8)
    __index__ = int(__pipeline__.getNumber('cap_number', 1)) #when connected to a robot, change 1 to -1
    #load GRIP
    gripScript = mlgrip.GripPipeline()
    while True:
        t = 0
        try:
            __image__ = blackIMG
            t = time.process_time()
            if __index__ != -1:
                __image__ = get_video(__index__, __image__)
                gripScript.process(__image__)
            processor(__image__, gripScript.filter_contours_output or [], __pipeline__)

        except Exception as e:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            print(exc_type, fname, exc_tb.tb_lineno)
            error(e, __pipeline__)
        finally:
            print(time.process_time() - t)
                    


    
if __name__ == "__main__":
  main()
Example #18
import cscore as cs
import numpy as np

output_source = cs.CvSource('camera-feed', cs.VideoMode.PixelFormat.kMJPEG, 640, 480, 30)
mjpeg_server = cs.MjpegServer('httpserver', 8081)
mjpeg_server.setSource(output_source)
print('mjpeg server listening on http://0.0.0.0:8081')

def send(feed_img):
    global output_source
    output_source.putFrame(feed_img)
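
A usage sketch for send(): grab frames (here from an OpenCV capture, which is an assumption; any 640x480 BGR numpy image works) and push them into the stream:

import cv2

cap = cv2.VideoCapture(0)  # assumed frame source
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

while True:
    ok, frame = cap.read()
    if ok:
        send(frame)  # forwards the frame to the MJPEG server above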
Example #19
def main():
    NetworkTables.initialize(server=k_NETWORKTABLES_SERVER_IP)

    vs.camera.brightness = 50
    vs.camera.contrast = 0
    vs.camera.saturation = 0

    cs = CameraServer.getInstance()
    ps = CameraServer.getInstance()
    cs.enableLogging()

    # Capture from the first USB Camera on the system
    #camera0 = cs.startAutomaticCapture()
    #camera1 = cs.startAutomaticCapture()
    #camera = cs.startAutomaticCapture(name="picam",path="/dev/v4l/by-path/platform-bcm2835-codec-video-index0")
    #camera = cs.startAutomaticCapture(name="usbcam",path="/dev/v4l/by-path/platform-fd500000.pcie-pci-0000:01:00.0-usb-0:1.2:1.0-video-index0")
    #camera0.setResolution(k_CONVEYOR_CAM_REZ_WIDTH, k_CONVEYOR_CAM_REZ_HEIGHT)
    camerausb = cscore.UsbCamera(
        name="usbcam",
        path=
        "/dev/v4l/by-path/platform-fd500000.pcie-pci-0000:01:00.0-usb-0:1.2:1.0-video-index0"
    )
    camerapi = cscore.CvSource("cvsource", cscore.VideoMode.PixelFormat.kMJPEG,
                               k_BALL_CAM_REZ_WIDTH, k_BALL_CAM_REZ_HEIGHT,
                               k_BALL_CAM_FPS)

    # setup MjpegServers
    usbMjpegServer = cscore.MjpegServer("ConveyorCam", 8081)
    piMjpegServer = cscore.MjpegServer("BallCam", 8082)

    # connect MjpegServers to their cameras
    usbMjpegServer.setSource(camerausb)
    piMjpegServer.setSource(camerapi)

    # Get a CvSink. This will capture images from the camera
    #cvSink = cs.getVideo()

    # (optional) Setup a CvSource. This will send images back to the Dashboard
    #outputStream = cs.putVideo("Name", k_BALL_CAM_REZ_WIDTH, k_BALL_CAM_REZ_HEIGHT)

    # Allocating new images is very expensive, always try to preallocate
    img = np.zeros(shape=(k_BALL_CAM_REZ_WIDTH, k_BALL_CAM_REZ_HEIGHT, 3),
                   dtype=np.uint8)

    while True:
        frame = vs.read()
        camerapi.putFrame(frame)
        # Tell the CvSink to grab a frame from the camera and put it
        # in the source image.  If there is an error notify the output.
        #time, img = cvSink.grabFrame(img)
        #if time == 0:
        #    # Send the output the error.
        #    outputStream.notifyError(cvSink.getError());
        #    # skip the rest of the current iteration
        #    continue

        #
        # Insert image processing logic here
        #

        # (optional) send some image back to the dashboard
        #outputStream.putFrame(frame)
        #outputStream.putFrame(frame)
        print("putting Frame")
Example #20
    camera = cs.UsbCamera("usbcam", 0)
    camera.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, 320, 240, 30)
else:
    import cv2
    import threading

    camera = cs.CvSource("cvsource", cs.VideoMode.PixelFormat.kMJPEG, 320, 240, 30)

    # tell OpenCV to capture video for us
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)

    # Do it in another thread
    def _thread():
        img = None
        while True:
            retval, img = cap.read(img)
            if retval:
                camera.putFrame(img)

    th = threading.Thread(target=_thread, daemon=True)
    th.start()


mjpegServer = cs.MjpegServer("httpserver", 8081)
mjpegServer.setSource(camera)

print("mjpg server listening at http://0.0.0.0:8081")
input("Press enter to exit...")
Example #21
front_linecam = cs.UsbCamera("front_linecam", FRONT_LINECAM)
front_linecam.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, 320, 240, 30)
front_linecam_sink = cs.CvSink("front_linecam_sink")
front_linecam_sink.setSource(front_linecam)

rear_cam = cs.UsbCamera("rear_cam", REAR_CAM)
rear_cam.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, 320, 240, 30)
rear_cam_sink = cs.CvSink("rear_cam_sink")
rear_cam_sink.setSource(rear_cam)

table = NetworkTables.getTable("VisionTable")

mjpeg_source = cs.CvSource("mjpeg_source", cs.VideoMode.PixelFormat.kMJPEG,
                           320, 240, 6)
mjpeg_server = cs.MjpegServer("mjpeg_server", 5801)
mjpeg_server.setSource(mjpeg_source)
mjpeg_server.setDefaultCompression(30)
mjpeg_server.setCompression(30)

thread = CvSinkThread(front_cam_sink, (240, 320, 3))

current_camera = 0  # 0 = front, 1 = front line, 2 = rear
vision_enabled = False

pin_time = 0


def main_loop():
    global current_camera, vision_enabled, frame, pin_time