Пример #1
0
def main():
    """Relay an MJPEG HTTP stream: grab frames, flip them vertically,
    and re-serve the processed result on port 8083."""
    http_cam = cs.HttpCamera("httpcam", "http://localhost:8081/?action=stream")
    http_cam.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, 320, 240, 30)

    # Sink that lets us pull OpenCV frames out of the camera stream.
    sink = cs.CvSink("cvsink")
    sink.setSource(http_cam)

    # Source + server pair that publishes the processed frames.
    out_source = cs.CvSource("cvsource", cs.VideoMode.PixelFormat.kMJPEG, 320,
                             240, 30)
    out_server = cs.MjpegServer("cvhttpserver", 8083)
    out_server.setSource(out_source)

    print("OpenCV output mjpg server listening at http://0.0.0.0:8083")

    # Preallocate buffers once; grabFrame/flip write into them in place.
    frame_buf = np.zeros(shape=(240, 320, 3), dtype=np.uint8)
    flipped = np.zeros(shape=(240, 320, 3), dtype=np.uint8)

    while True:

        ts, frame_buf = sink.grabFrame(frame_buf)
        if ts == 0:
            # A zero timestamp signals a grab failure; report and retry.
            print("error:", sink.getError())
            continue

        print("got frame at time", ts, frame_buf.shape)

        # flipCode=0 flips around the x-axis (vertical flip).
        cv2.flip(frame_buf, flipCode=0, dst=flipped)
        out_source.putFrame(flipped)
Пример #2
0
  def __init__(self, webcam_device, webcam_name, local_ip="127.0.0.1", local_port=8087, calc="peg"):
    """Capture from a webcam, process frames with GRIP, and serve MJPEG.

    :param webcam_device: device index/path handed to cv2.VideoCapture
    :param webcam_name: name used for the CvSource and the CameraPublisher
        NetworkTables entry
    :param local_ip: IP advertised to dashboards in the stream URL
    :param local_port: TCP port the MJPEG server listens on
    :param calc: processing-mode selector stored for later use (default "peg")
    """
    self.__calc = calc
    
    # start web cam and set dimensions
    self.__camera = cv2.VideoCapture(webcam_device)
    self.__camera.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    self.__camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    # setup cv source to write frames to for mjpeg server
    # NOTE(review): capture is 640x480 but the source advertises 320x240 --
    # confirm frames are downscaled before putFrame.
    self.__cvsource = cs.CvSource(webcam_name, cs.VideoMode.PixelFormat.kMJPEG, 320, 240, 30)

    # setup mjpeg server
    self.__mjpeg_server = cs.MjpegServer("httpserver", local_port)
    self.__mjpeg_server.setSource(self.__cvsource);

    # setup grip pipeline from grip.py
    self.__grip_pipeline = GripPipeline()

    # write camera info so dashboards can discover the stream
    table = NetworkTable.getTable("CameraPublisher")
    cam = table.getSubTable(webcam_name)

    cam.putString("source", "usb:IMAQdx/")
    cam.putStringArray("streams", ["mjpg:http://" + local_ip + ":" + str(local_port) + "/stream.mjpg?name="+webcam_name])

    # NOTE(review): self.__debug is read but never assigned in this method --
    # presumably a class attribute; confirm it exists, otherwise this raises
    # AttributeError.
    if self.__debug:
      self.__webcam_name = webcam_name
      cv2.namedWindow(self.__webcam_name)
Пример #3
0
def loop():
    """Stream two USB cameras and run target detection on the front one.

    Serves the raw front/back camera feeds on ports 8082/8081 and the
    processed OpenCV output on 8083.  Detection results are published to the
    /components/vision NetworkTable; while vision_mode is off, the front
    camera is returned to auto-exposure so the driver can see.
    """
    import cscore as cs
    from networktables import NetworkTables
    from time import sleep

    nt = NetworkTables.getTable("/components/vision")

    width = 320
    height = 240
    fps = 25
    # Common (pixel format, width, height, fps) tuple, unpacked below.
    videomode = cs.VideoMode.PixelFormat.kMJPEG, width, height, fps

    front_camera = cs.UsbCamera("frontcam", 0)
    front_camera.setVideoMode(*videomode)

    front_cam_server = cs.MjpegServer("frontcamserver", 8082)
    front_cam_server.setSource(front_camera)

    back_camera = cs.UsbCamera("backcam", 1)
    back_camera.setVideoMode(*videomode)

    back_cam_server = cs.MjpegServer("backcamserver", 8081)
    back_cam_server.setSource(back_camera)

    # Sink used to pull frames from the front camera for processing.
    cvsink = cs.CvSink("cvsink")
    cvsink.setSource(front_camera)

    cvSource = cs.CvSource("cvsource", *videomode)
    cvmjpegServer = cs.MjpegServer("cvhttpserver", 8083)
    cvmjpegServer.setSource(cvSource)

    # Set the initial exposure of the front camera.
    # It may take a while for the exposure setting to have an effect.
    front_camera.setExposureManual(0)

    # Images are big. Preallocate an array to fill the image with.
    frame = np.zeros(shape=(height, width, 3), dtype=np.uint8)

    while True:
        if nt.getBoolean("vision_mode", True):
            front_camera.setExposureManual(0)
            time, frame = cvsink.grabFrame(frame)
            if time == 0:
                # We got an error; report it through the output stream.
                cvSource.notifyError(cvsink.getError())
            else:
                # find_target is defined elsewhere in this module.
                x, img, num_targets = find_target(frame)
                if num_targets > 0:
                    nt.putNumber("x", x)
                    nt.putNumber("time", time)
                    nt.putNumber("num_targets", num_targets)
                cvSource.putFrame(img)
        else:
            # Let the driver use the front camera to see.
            front_camera.setExposureAuto()
            # Avoid pegging the CPU.
            sleep(1)
Пример #4
0
def loop():  # pragma: no cover
    """Stream the front camera and publish target-detection results.

    Serves the raw feed on port 5803 and the processed output on 5804.
    Results go to the /components/vision NetworkTable and are flushed
    immediately so the robot sees fresh data.
    """
    import cscore as cs
    from networktables import NetworkTables
    from time import sleep, time as now

    nt = NetworkTables.getTable("/components/vision")

    width = 320
    height = 240
    fps = 25
    # Common (pixel format, width, height, fps) tuple, unpacked below.
    videomode = cs.VideoMode.PixelFormat.kMJPEG, width, height, fps

    #front_cam_id = "/dev/v4l/by-id/usb-046d_C922_Pro_Stream_Webcam_5FC7BADF-video-index0"
    front_cam_id = "/dev/v4l/by-id/usb-046d_HD_Pro_Webcam_C920_332E8C9F-video-index0"

    front_camera = cs.UsbCamera("frontcam", front_cam_id)
    front_camera.setVideoMode(*videomode)

    front_cam_server = cs.MjpegServer("frontcamserver", 5803)
    front_cam_server.setSource(front_camera)

    cvsink = cs.CvSink("cvsink")
    cvsink.setSource(front_camera)

    cvSource = cs.CvSource("cvsource", *videomode)
    cvmjpegServer = cs.MjpegServer("cvhttpserver", 5804)
    cvmjpegServer.setSource(cvSource)

    # Images are big. Preallocate an array to fill the image with.
    frame = np.zeros(shape=(height, width, 3), dtype=np.uint8)

    # Set the exposure to something bogus, in an attempt to work around a kernel bug when restarting the vision code?
    front_camera.setExposureAuto()
    sleep(1)
    cvsink.grabFrame(frame)

    # Set the exposure of the front camera to something usable.
    front_camera.setExposureManual(0)
    sleep(1)

    while True:
        time, frame = cvsink.grabFrame(frame)
        if time == 0:
            # We got an error; report it through the output stream.
            cvSource.notifyError(cvsink.getError())
        else:
            t = now()
            # find_target is defined elsewhere in this module.
            x, img, num_targets, target_sep = find_target(frame)
            if num_targets > 0:
                nt.putNumber("x", x)
                nt.putNumber("time", t)
                nt.putNumber("num_targets", num_targets)
                nt.putNumber("target_sep", target_sep)
                # dt = processing latency for this frame.
                nt.putNumber("dt", now() - t)
                NetworkTables.flush()
            cvSource.putFrame(img)
Пример #5
0
    def __init__(self):
        """Create a numbered MJPEG CvSource for this stream instance.

        Sets self.streamFailure: True when source creation raised, False on
        success (in which case the VideoStream instance counter is advanced
        via _iterInstances).
        """
        try:
            logging.basicConfig(level=logging.DEBUG)
            self.streamFailure = None
            # Source is named "video<N>" from the class-level instance count.
            self._source = cs.CvSource(("video%d" % VideoStream.Instances),
                                       cs.VideoMode.PixelFormat.kMJPEG)

        except Exception:
            self.streamFailure = True

        else:
            self.streamFailure = False
            VideoStream._iterInstances()
    def create_output_stream(self):
        """Publish the main annotated-image stream via an MJPEG server.

        Creates a CvSource (FPS capped at the configured output limit),
        registers it with the CameraServer, and binds a server to the
        explicit output TCP port.
        """
        capped_fps = min(self.camera_fps, self.output_fps_limit)
        self.output_stream = cscore.CvSource(
            'camera', cscore.VideoMode.PixelFormat.kMJPEG,
            self.image_width, self.image_height, capped_fps)

        # Register the source, then attach a server on the explicit port
        # (the "hard way", needed to control the TCP port ourselves).
        self.camera_server.addCamera(self.output_stream)
        mjpeg_server = self.camera_server.addServer(name='camera',
                                                    port=self.output_port)
        mjpeg_server.setSource(self.output_stream)
Пример #7
0
    def __init__(self):
        """Set up the driver camera, the optional vision camera, their MJPEG
        servers, and the image-processing pipeline.

        NOTE(review): self.secondary_cam and self.STREAM_CV are read but not
        assigned here -- presumably class attributes; confirm they exist.
        """
        self.nt = NetworkTable.getTable('/camera')

        #Cameras
        self.piston_cam = cs.UsbCamera('Piston Cam', 0)
        self.piston_cam.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, 160, 120,
                                     35)  #160 vs. 120

        self.piston_cam.setExposureAuto()
        self.piston_cam.getProperty('backlight_compensation').set(5)

        #self.piston_cam.setExposureManual(35)
        #self.piston_cam.setBrightness(65)
        #test = self.piston_cam.getProperty("exposure_absolute")
        #print(self.piston_cam.getProperty("exposure_absolute"))

        self.piston_server = cs.MjpegServer('httpserver', 1181)
        self.piston_server.setSource(self.piston_cam)

        if self.secondary_cam:
            self.light_ring_cam = cs.UsbCamera('Light Ring Cam', 0)
            self.light_ring_cam.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG,
                                             320, 240, 20)

            # This only seems to affect automatic exposure mode
            # -> higher value means it lets more light in when facing a big light
            self.light_ring_cam.getProperty('backlight_compensation').set(5)

            #Image Processing
            self.cvsink = cs.CvSink('cvsink')
            self.cvsink.setSource(self.light_ring_cam)

            self.cvsource = cs.CvSource('cvsource',
                                        cs.VideoMode.PixelFormat.kMJPEG, 320,
                                        240, 20)

            #Streaming Servers

            #self.ring_stream = cs.MjpegServer("ring server", 1182)
            #self.ring_stream.setSource(self.light_ring_cam)

            if self.STREAM_CV:
                self.cv_stream = cs.MjpegServer('cv stream', 1183)
                self.cv_stream.setSource(self.cvsource)

            #Blank mat.  Bug fix: numpy images are (rows=height, cols=width,
            #channels); the original (320, 240, 3) transposed the 320x240
            #stream's dimensions.
            self.img = np.zeros(shape=(240, 320, 3), dtype=np.uint8)

            self.processor = ImageProcessor()
Пример #8
0
 def putVideo(self, name, width, height):
     """Create a MJPEG stream with OpenCV input. This can be called to pass custom
     annotated images to the dashboard.
     
     :param name: Name to give the stream
     :param width: Width of the image being sent
     :param height: Height of the image being sent
     
     :returns: CvSource object that you can publish images to
     :rtype: :class:`cscore.CvSource`
     """
     # Build the source at a fixed 30 fps and register it for capture.
     pixel_format = cscore.VideoMode.PixelFormat.kMJPEG
     cv_source = cscore.CvSource(name, pixel_format, width, height, 30)
     self.startAutomaticCapture(camera=cv_source)
     return cv_source
Пример #9
0
    def createOutputStream(self):
        """Create and return the MJPEG server for the processed stream.

        A blank CvSource is registered first purely so the server carries
        the configured name, resolution, and FPS.
        """
        #Create the MJPEG server

        #give the stream a blank stream to give it the name we want
        blankFrame = cscore.CvSource(const.STREAM_NAME,
                                     cscore.VideoMode.PixelFormat.kMJPEG,
                                     const.STREAM_RESOLUTION[0],
                                     const.STREAM_RESOLUTION[1],
                                     const.STREAM_FPS)

        self.cameraServer.addCamera(blankFrame)
        server = self.cameraServer.addServer(name="serve_" +
                                             blankFrame.getName())
        # Reaches into CameraServer internals to keep the source alive.
        self.cameraServer._fixedSources[server.getHandle(
        )] = blankFrame  #! The only thing that actualy makes this work and it is stupid and wrong. DO NOT DO UNLESS NECESSARY! (Remove if addSwitchedCamera() works) Breaks PEP 8 guidelines
        server.setSource(blankFrame)

        return server
def mainloop():
    """Main vision loop for the VMX-pi coprocessor.

    Opens the USB camera, serves raw (8081) and processed (8082) MJPEG
    streams, records the annotated video to disk, publishes IMU and cube
    detection data to NetworkTables, and runs until ESC is pressed.
    Depends on module-level imgwidth/imgheight/frames_per_sec/writeVideo
    and detect_contours defined elsewhere in this file.
    """

    #Get current time as a string
    currentTime = time.localtime(time.time())
    timeString = str(currentTime.tm_year) + str(currentTime.tm_mon) + str(currentTime.tm_mday) + str(currentTime.tm_hour) + str(currentTime.tm_min)
    
    #Open a log file
    logFilename = '/data/Logs/Run_Log_' + timeString + '.txt'
    log_file = open(logFilename, 'w')
    log_file.write('Run started on %s.\n' % datetime.datetime.now())
    log_file.write('')

    #Load VMX module
    vmxpi = imp.load_source('vmxpi_hal_python', '/usr/local/lib/vmxpi/vmxpi_hal_python.py')
    vmx = vmxpi.VMXPi(False,50)
    if vmx.IsOpen() is False:
        log_file.write('Error:  Unable to open VMX Client.\n')
        log_file.write('\n')
        log_file.write('        - Is pigpio (or the system resources it requires) in use by another process?\n')
        log_file.write('        - Does this application have root privileges?')
        log_file.close()
        sys.exit(0)
        
    #Open connection to USB camera (video device 0)
    camera = cs.UsbCamera("usbcam", 0)
    camera.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, imgwidth, imgheight, frames_per_sec)

    #Start raw Video Streaming Server
    mjpegServer = cs.MjpegServer("httpserver", 8081)
    mjpegServer.setSource(camera)

    #Define video sink
    cvsink = cs.CvSink("cvsink")
    cvsink.setSource(camera)

    #Define video source
    cvsource = cs.CvSource("cvsource", cs.VideoMode.PixelFormat.kMJPEG, imgwidth, imgheight, frames_per_sec)

    #Start processed Video Streaming Server
    cvMjpegServer = cs.MjpegServer("cvhttpserver", 8082)
    cvMjpegServer.setSource(cvsource)

    #Create blank image (rows=height, cols=width)
    img = np.zeros(shape=(imgheight, imgwidth, 3), dtype=np.uint8)

    #Set video codec and create VideoWriter
    fourcc = cv.VideoWriter_fourcc(*'XVID')
    videoFilename = '/data/Match Videos/robotcam-' + timeString + '.avi'
    imgout = cv.VideoWriter(videoFilename,fourcc,20.0,(320,240))

    #Connect NetworkTables
    NetworkTables.initialize(server="10.41.21.2")
    navxTable = NetworkTables.getTable("navx")
    visionTable = NetworkTables.getTable("vision")
    smartDash = NetworkTables.getTable("SmartDashboard")

    
    #Reset yaw gyro
    #NOTE(review): IsCalibrating is not called here -- if it is a method,
    #"not <bound method>" is always False and ZeroYaw() never runs; confirm.
    if not vmx.getAHRS().IsCalibrating:
        vmx.getAHRS().ZeroYaw()

    #Reset displacement
    vmx.getAHRS().ResetDisplacement()

    #Start main processing loop
    while (True):

        #Grab a frame from video feed
        video_timestamp, img = cvsink.grabFrame(img)

        #Check for frame error (timestamp 0 = grab failed)
        if video_timestamp == 0:
            log_file.write('Video error: \n')
            log_file.write(cvsink.getError())
            log_file.write('\n')
            sleep (float(frames_per_sec * 2) / 1000.0)
            continue

        #Find contours in image
        img_contours, cubeDistance, cubeAngle, cubeHeight = detect_contours(img)

        #Put contour info on image (distance -1 means no cube was found)
        if cubeDistance != -1:
            distanceString = 'Distance: %d' % cubeDistance
            cv.putText(img_contours, distanceString, (10,15), cv.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1,cv.LINE_AA)
            angleString = 'Offset angle: %d' % cubeAngle
            cv.putText(img_contours, angleString, (10,30), cv.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1,cv.LINE_AA)

        #Put IMU info on image
        angleString = 'Angle: %.2f' % vmx.getAHRS().GetAngle()
        cv.putText (img, angleString, (30,70), cv.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1,cv.LINE_AA)
        yVelocityString = 'Y Vel:  %.2f' % vmx.getAHRS().GetVelocityY()
        cv.putText (img, yVelocityString, (30,90), cv.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1,cv.LINE_AA)
        xVelocityString = 'X Vel: %.2f' % vmx.getAHRS().GetVelocityX()
        cv.putText (img, xVelocityString, (30,110), cv.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1,cv.LINE_AA)
        yDispString = 'Y Disp: %.2f' % vmx.getAHRS().GetDisplacementY()
        cv.putText (img, yDispString, (30,130), cv.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1,cv.LINE_AA)
        xDispString = 'X Disp: %.2f' % vmx.getAHRS().GetDisplacementX()
        cv.putText (img, xDispString, (30,150), cv.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1,cv.LINE_AA)

        #Update network tables
        navxTable.putNumber("SensorTimestamp", vmx.getAHRS().GetLastSensorTimestamp())
        navxTable.putNumber("DriveAngle", vmx.getAHRS().GetAngle())
        navxTable.putNumber("YVelocity", vmx.getAHRS().GetVelocityY())
        navxTable.putNumber("XVelocity", vmx.getAHRS().GetVelocityX())
        navxTable.putNumber("YDisplacement", vmx.getAHRS().GetDisplacementY())
        navxTable.putNumber("XDisplacement", vmx.getAHRS().GetDisplacementX())
        visionTable.putNumber("CubeAngle", cubeAngle)
        visionTable.putNumber("CubeDistance", cubeDistance)
        smartDash.putNumber("DriveAngle", vmx.getAHRS().GetAngle())
        smartDash.putNumber("CubeAngle", cubeAngle)
        smartDash.putNumber("CubeDistance", cubeDistance)
        
        #Show image (requires a display; not headless-safe)
        cv.imshow('My Webcam', img_contours)

        #Put frame in video stream
        cvsource.putFrame(img_contours)

        #Write image to file
        if writeVideo:
            imgout.write(img_contours)

        #Check for stop code from robot (ESC key)
        if cv.waitKey(1) == 27:
            break
        #robotStop = visionTable.getNumber("RobotStop", 0)
        #if robotStop == 1:
        #    break

    #Close all open windows
    cv.destroyAllWindows()

    #Close video file
    imgout.release()

    #Close the log file
    #NOTE(review): log_file is never close()d after this write -- confirm
    #flushing is acceptable via interpreter exit.
    log_file.write('Run stopped on %s.' % datetime.datetime.now())
Пример #11
0
# Grab one frame to prime/verify the vision camera before looping.
ret, camframe1 = visionCam.read()

# Index of the vision camera in the cameras list below.
VisonCamera = 0

vision = Processing.VisionThread(debug=False)
vision.setCameraCharacteristics(Constants.camera2Matrix,Constants.camera2DistortionCoefficients)
if(ret):
	vision.run(camframe1)

logging.basicConfig(level=logging.DEBUG)
NetworkTables.initialize(server='roborio-3373-frc.local')
sd = NetworkTables.getTable("VisionData")
#cameraPre = NetworkTables.getTable("PreLoad")

# Processed-output stream served on port 5801.
cvSource = cs.CvSource("Vision Camera", cs.VideoMode.PixelFormat.kMJPEG, 480, 320, 10)
cvMjpegServer = cs.MjpegServer("httpCamera1", port=5801)
cvMjpegServer.setSource(cvSource)

# Advertise the stream URL so dashboards can discover it.
csTable = NetworkTables.getDefault()
csTable.getEntry("/CameraPublisher/PiCam/streams").setStringArray(["mjpeg:http://10.33.73.20:5801/?action=stream"])
#cvSource2 = cs.CvSource("Camera2", cs.VideoMode.PixelFormat.kMJPEG, 320, 240, 15)
#cvMjpegServer2 = cs.MjpegServer("httpCamera2", port=5802)
#cvMjpegServer2.setSource(cvSource2)

# Per-camera bookkeeping consumed by the camera-switching logic below.
cameras=[{"camera":visionCam,"stream":cvSource,"id":VisonCamera,"timeout":0}]

count = 0
#NetworkTables.initialize()
def switchCamera(camobject,deviceNum):
Пример #12
0
def __get_ports__():
	"""Scan candidate camera ports in parallel and return them.

	Spawns one thread per 15-port range (0..105); each findPortRange call
	appends discovered ports to the shared list.  Threads are joined before
	returning so the result is complete.

	Bug fix: the original never closed the Thread(...) call -- a
	SyntaxError -- and called .start() on the args tuple instead of the
	thread; it also returned before any scan finished.
	"""
	ports = []
	workers = []
	for i in range(0, 7):
		worker = threading.Thread(target=findPortRange, args=(ports, 15 * i, 15 * (i + 1)))
		worker.start()
		workers.append(worker)
	for worker in workers:
		worker.join()
	return ports
def __get_cap__(id, index, server=None):
  """Start capture on camera *id* and apply its stored settings.

  :param id: device id passed to startAutomaticCapture
  :param index: index of the camera's JSON config under __config_path__
  :param server: CameraServer to use; defaults to the module-level
      camera_server.  (Added so the existing three-argument call in main()
      no longer raises TypeError.)
  :returns: the configured camera object
  """
  if server is None:
    server = camera_server
  _camera_ = server.startAutomaticCapture(dev=id, return_server=False)
  try:
    # Apply the saved JSON configuration for this camera, if present.
    with open(__config_path__ + '/cameras/' + str(index) + '.json') as json_file:
      _camera_.setConfigJson(json_file.read())
  except Exception:
    # Narrowed from a bare except; missing/invalid config is non-fatal.
    print("no json file for camera number " + str(id))
  _camera_.setResolution(width, height)
  _camera_.setFPS(fps)
  _camera_.setPixelFormat(cscore.VideoMode.PixelFormat.kYUYV)
  return _camera_
# Fallback frame returned when a grab fails.
# NOTE(review): shape is (320, 240, 3) while width/height are 320x240 --
# numpy image convention is (height, width, 3); confirm intended orientation.
black_img = numpy.zeros(shape=(320, 240, 3), dtype=numpy.uint8)  
def get_video(index, image):
  """Grab one frame from the first camera; return black_img on failure.

  grabFrame returns (timestamp, frame); a timestamp of 0 signals an error.
  NOTE(review): *index* is currently unused -- this always reads cameras[0].
  """
  obj = camera_server.getVideo(camera=cameras[0]).grabFrame(image)
  if obj[0] == 0:
    return black_img
  return obj[1]
 
def process_Init():
    """Interactive pipeline-tuning loop driven by UDP commands.

    Streams the current GRIP pipeline's threshold output on port 1191 and
    reacts to PREV/NEX/SET_CONF/ADD_PT/DONE commands (see changed_vals)
    until the global done flag is set.
    """
    global prev,nex,conf,pt,done

    s_time = time.time()
    counter = 1
    source = cscore.CvSource("conf_stream",cscore.VideoMode(cscore.VideoMode.PixelFormat(cscore.VideoMode.PixelFormat.kMJPEG),width,height,fps))
    server = cscore.MjpegServer(name="conf_stream",port=1191)
    server.setSource(source)
    # SECURITY NOTE: exec/eval on a pipeline file from disk; acceptable only
    # because /root/.hawk is locally provisioned, never untrusted input.
    with open("/root/.hawk/pipelines/grip_i.py","r") as grip_file:
          exec("".join(i for i in grip_file.readlines()))
          grip_pipe = eval('GripPipeline()')
    while not done:
        try:
          #update vars
          try:
            data = s.recvfrom(1024)[0] 
            encoding = 'utf-8'
            data = data.decode(encoding)
            changed_vals(data)
          except Exception as e:
            print("no data recieved",end='\r')
          finally:
            # NOTE(review): "conf = False" is a no-op (only runs when conf is
            # already falsy); the extra-indented block below runs in that case.
            if not conf:
              conf = False
            #adjust prev/next
              if prev:
                prev = False
                counter -=1
                if counter <= 0:
                  counter = 1
              if nex:
                nex = False
                counter += 1
                if counter > 4:
                  counter = 4
              #TODO: add pt add 
              # NOTE(review): prev/nex were both cleared just above, so this
              # reload-and-reboot branch appears unreachable -- confirm intent.
              if prev or nex:
                  with open("/root/.hawk/pipelines/grip_" + (counter * 'i') + '.py') as grip_file:
                    exec("".join(i for i in grip_file.readlines()))
                    grip_pipe = eval('GripPipeline()')
                  os.system("reboot")
            ##get image from original 
            pic = cap1.read()[1]
            # NOTE(review): cvtColor's converted image is discarded (return
            # value unused; 84 is a numeric conversion code) -- confirm this
            # call is needed at all.
            cv2.cvtColor(pic,84)
            ##process and get rgb threshold
            grip_pipe.process(pic)
            pic = grip_pipe.rgb_threshold_output
            ##publish to a new stream
            source.putFrame(pic)
            if prev or nex or conf or pt or done:
              print(str(prev) + ' ' + str(nex) + ' ' + str(conf) + ' ' + str(pt) + ' ' + str(done))
        except Exception as e:
          print(e)
def changed_vals(val):
  """Update the module-level control flags from a command string.

  Recognized commands: PREV, NEX, SET_CONF, ADD_PT, DONE.  Empty or
  unknown values leave all flags untouched.
  """
  print('\n' + val)
  if not val: return
  print(val)
  global prev,nex,conf,pt,done
  if val == "PREV":
    prev = True
    return
  if val == "NEX":
    nex = True
    return
  if val == "SET_CONF":
    conf = True
    return
  if val == "ADD_PT":
    pt = True
    return
  if val == "DONE":
    # Bug fix: the original assigned to a local name "Done", so the global
    # done flag was never set and process_Init's loop could not exit.
    done = True
    return
def checkProcessInit():
    """Poll the UDP socket briefly; enter process_Init on a START command.

    Uses a very short timeout so this can be called frequently without
    blocking; timeouts and socket errors are deliberately ignored
    (best-effort polling).
    """
    try:
      s.settimeout(0.0001)
      data = s.recvfrom(1024)[0] 
      encoding = 'utf-8'
      data = data.decode(encoding)
      if data == "START":
        process_Init()
    except: 
      pass

    
def error(e, pipeline):
  """Log a pipeline exception and mark the published result invalid."""
  print(e)
  pipeline.putBoolean("valid", False)
#endregion

#region init
# Defaults; main() attempts to overwrite these from config.json at startup.
__pipe_name__ = "vision"
team_number = 0
width = 320
height = 240
fps = 75
cameras = []
# Control flags driven by UDP commands (see changed_vals).
prev,nex,conf,pt,done = False,False,False,False,False
os.system("rm -f ~/output.txt") # delete!
#paths
__config_path__ = os.path.expanduser('~') + '/.hawk'
# PSEYE camera settings
os.system("v4l2-ctl -c exposure=6")
os.system("v4l2-ctl --set-fmt-video=width=160,height=120,pixelformat=BGR")
#networking
__instance__ = NetworkTablesInstance.getDefault()
__instance__.startClientTeam(team_number)
__pipeline__ = NetworkTables.getTable(__pipe_name__)
NetworkTables.initialize()
camera_server = CameraServer.getInstance()
''' uncomment if you use the HawkEYE client
HOST = socket.gethostname()
TCP_IP = socket.gethostbyname(HOST)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((TCP_IP, 5000))
(Thread(checkProcessInit())).start()
'''
#endregion
    
def main():
    """Load config, open all cameras, and run the GRIP processing loop."""
    # Bug fix: the config values below are consumed at module scope by the
    # camera helpers -- the original assigned plain locals here, so the
    # loaded config silently never took effect.
    global team_number, width, height, fps, __pipe_name__
    try: #load JSON settings file
        with open(__config_path__ + '/config.json', 'r') as file:
            config = json.loads(file.read())
            team_number = config['team_number']
            width = config['width']
            height = config['height']
            fps = config['fps']
            __pipe_name__ = config['pipe_name']
    except Exception as e:
        print("Error loading config")
        print(e)

    # Load cameras -- can take a while!  Bug fix: __get_cap__ takes
    # (id, index); the original passed camera_server as a third positional
    # argument, raising TypeError.
    for index, id in enumerate(__get_ports__()):
        cameras.append(__get_cap__(id, index))
    cameras[0].setPixelFormat(cscore.VideoMode.PixelFormat.kYUYV)

    __index__ = int(__pipeline__.getNumber('cap_number', 1))
    # Black fallback picture array.
    blackIMG = numpy.zeros(shape=(width, height, 3), dtype=numpy.uint8)
    # When connected to a robot, change the default 1 to -1.
    __index__ = int(__pipeline__.getNumber('cap_number', 1))
    # Load the GRIP pipeline.
    gripScript = mlgrip.GripPipeline()
    while True:
        t = 0
        try:
            __image__ = blackIMG
            t = time.process_time()
            if __index__ != -1:
                __image__ = get_video(__index__, __image__)
                gripScript.process(__image__)
            processor(__image__, gripScript.filter_contours_output or [], __pipeline__)
        except Exception as e:
            # Report where the failure happened, then flag it on the table.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            print(exc_type, fname, exc_tb.tb_lineno)
            error(e, __pipeline__)
        finally:
            # Per-frame processing time, for profiling.
            print((time.process_time() - t))


if __name__ == "__main__":
  main()
Пример #13
0
# Target frame rate for capture and both streaming servers.
frames_per_sec = 15

# Open connection to USB Camera (video device 0 [/dev/video0]) */
camera = cs.UsbCamera("usbcam", 0)
camera.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, width, height,
                    frames_per_sec)

# Start raw Video Streaming Server
mjpegServer = cs.MjpegServer("httpserver", 8081)
mjpegServer.setSource(camera)

# Sink used to pull frames for processing.
cvsink = cs.CvSink("cvsink")
cvsink.setSource(camera)

# Start processed Video server
cvsource = cs.CvSource("cvsource", cs.VideoMode.PixelFormat.kMJPEG, width,
                       height, frames_per_sec)

cvMjpegServer = cs.MjpegServer("cvhttpserver", 8082)
cvMjpegServer.setSource(cvsource)

# Create Video Writer, if enabled
# NOTE(review): writer FPS is hard-coded at 15.0 -- matches frames_per_sec
# today, but will silently diverge if frames_per_sec changes.
if outputVideoFilePath is not None:
    frameSize = (width, height)
    videoWriter = cv.VideoWriter(outputVideoFilePath,
                                 cv.VideoWriter.fourcc('M', 'J', 'P', 'G'),
                                 15.0, frameSize, True)

# Preallocated image buffer (rows=height, cols=width).
img = np.zeros(shape=(height, width, 3), dtype=np.uint8)

# NetworkTables-backed properties for publishing timestamps.
ntVideoOsTimestamp = ntproperty('/vmx/videoOSTimestamp', 0)
ntNavxSensorTimestamp = ntproperty('/vmx/navxSensorTimestamp', 0)
Пример #14
0
 def __init__(self, resolution = (1280, 720), framerate = 30, port = 5800):
     """Create a CvSource and an MJPEG server bound to *port*.

     :param resolution: (width, height) of the published stream
     :param framerate: stream frames per second
     :param port: TCP port for the MJPEG server
     """
     self.cvSource = cs.CvSource("cvsource", cs.VideoMode.PixelFormat.kMJPEG,
                                 resolution[0], resolution[1], framerate)
     # Bug fix: keep a reference on the instance -- the original bound the
     # server to a local, so it could be garbage-collected after __init__
     # and the stream shut down.
     self.mjpegServer = cs.MjpegServer("cvhttpserver", port)
     self.mjpegServer.setSource(self.cvSource)
Пример #15
0
from networktables import NetworkTables
import cscore as cs

from cvsink_thread import CvSinkThread, crop
from detect_target import detect_target

# Vision camera: 480x270 @ 30fps with fixed low exposure for detection.
front_cam = cs.UsbCamera("front_cam", 0)
front_cam.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, 480, 270, 30)
front_cam.setBrightness(40)
front_cam.setExposureManual(2)

front_cam_sink = cs.CvSink("front_cam_sink")
front_cam_sink.setSource(front_cam)

# Downscaled (320x240 @ 8fps), compressed output stream on port 8083.
mjpeg_source = cs.CvSource("mjpeg_source", cs.VideoMode.PixelFormat.kMJPEG,
                           320, 240, 8)
mjpeg_server = cs.MjpegServer("mjpeg_server", 8083)
mjpeg_server.setSource(mjpeg_source)
mjpeg_server.setDefaultCompression(50)
mjpeg_server.setCompression(50)

# Background grabber thread; buffer shape is (rows=270, cols=480, 3).
thread = CvSinkThread(front_cam_sink, (270, 480, 3))


def main_loop():
    # grab frame
    frame = thread.get_frame()

    detect_target(frame)

    cv2.line(frame, (frame.shape[1] // 2, 0),
Пример #16
0
import cscore as cs
import numpy as np

# 640x480 @ 30fps MJPEG source, served on port 8081 (used by send() below).
output_source = cs.CvSource('camera-feed', cs.VideoMode.PixelFormat.kMJPEG, 640, 480, 30)
mjpeg_server = cs.MjpegServer('httpserver', 8081)
mjpeg_server.setSource(output_source)
print('mjpeg server listening on http://0.0.0.0:8081')

def send(feed_img):
    """Publish one frame to the camera-feed MJPEG stream.

    :param feed_img: image array pushed to the module-level output_source
        (expected to match its 640x480 mode)
    """
    # No ``global`` statement needed: output_source is only read here,
    # never rebound.
    output_source.putFrame(feed_img)
Пример #17
0
# Raw streams for the ball and ground cameras (cameras defined earlier).
ballServer = cs.MjpegServer("ballSource", 1181)
ballServer.setSource(ballCamera)

groundServer = cs.MjpegServer("groundSource", 1183)
groundServer.setSource(groundCamera)

print("ball server listening at http://10.72.80.12:1181")
print("ground server listening at http://10.72.80.12:1183")

# Sinks used to pull frames from each camera for processing.
ballSink = cs.CvSink("ballSink")
ballSink.setSource(ballCamera)

groundSink = cs.CvSink("groundSink")
groundSink.setSource(groundCamera)

# Processed ball-vision output stream on port 1182.
cvBallSource = cs.CvSource("cvballsource", cs.VideoMode.PixelFormat.kMJPEG,
                           width, height, 30)
cvBallServer = cs.MjpegServer("vision", 1182)
cvBallServer.setSource(cvBallSource)
print("OpenCV output ball server listening at http://0.0.0.0:1182")

# cvGroundSource = cs.CvSource("cvgroundsource", cs.VideoMode.PixelFormat.kMJPEG, width, height, 30)
# cvGroundServer = cs.MjpegServer("vision", 8182)
# cvGroundServer.setSource(cvGroundSource)
# print("OpenCV output ground server listening at http://0.0.0.0:8182")
while True:
    # Ball Part

    # DO NOT TRY TO USE def
    # Will cause serious decreases in FPS
Пример #18
0
            #cv2.waitKey(10)
        
   
            

    
# Start the background worker thread before networking is configured.
threads = Threads()
thread1 = Thread(target=threads.thread1)

thread1.start()
logging.basicConfig(level=logging.DEBUG)
NetworkTables.initialize(server='roborio-3373-frc.local')
sd = NetworkTables.getTable("VisionData")
#cameraPre = NetworkTables.getTable("PreLoad")

# Processed-output stream served on port 5801.
cvSource = cs.CvSource("Camera1", cs.VideoMode.PixelFormat.kMJPEG, 320, 240, 15)
cvMjpegServer = cs.MjpegServer("httpCamera1", port=5801)
cvMjpegServer.setSource(cvSource)

# NOTE(review): this rebinds the name "cs" from the cscore module to a
# NetworkTables instance -- any later cs.CvSource/cs.MjpegServer call would
# fail; consider renaming this variable.
cs = NetworkTables.getDefault()
cs.getEntry("/CameraPublisher/PiCam/streams").setStringArray(["mjpeg:http://10.33.73.18:5801/?action=stream"])
#cvSource2 = cs.CvSource("Camera2", cs.VideoMode.PixelFormat.kMJPEG, 320, 240, 15)
#cvMjpegServer2 = cs.MjpegServer("httpCamera2", port=5802)
#cvMjpegServer2.setSource(cvSource2)

# 640-wide buffer -- presumably two 320-wide frames side by side; confirm.
test = np.zeros(shape=(240, 640, 3), dtype=np.uint8)
count = 0
#NetworkTables.initialize()
CAMFOCALLENGTH = CAM1FOCALLENGTH
print("running")
if __name__ == '__main__':
Пример #19
0
#!/usr/bin/env python3
#
# WARNING: You should only use this approach for testing cscore on platforms that
#          it doesn't support using UsbCamera (Windows or OSX).
#

import cscore as cs

if hasattr(cs, "UsbCamera"):
    # cscore has native USB camera support on this platform.
    camera = cs.UsbCamera("usbcam", 0)
    camera.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, 320, 240, 30)
else:
    # Fallback (Windows/OSX): capture via OpenCV and feed a CvSource.
    import cv2
    import threading

    camera = cs.CvSource("cvsource", cs.VideoMode.PixelFormat.kMJPEG, 320, 240, 30)

    # tell OpenCV to capture video for us
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)

    # Do it in another thread
    def _thread():
        # Reuse one buffer per read; push every successfully read frame.
        img = None
        while True:
            retval, img = cap.read(img)
            if retval:
                camera.putFrame(img)

    th = threading.Thread(target=_thread, daemon=True)
    # NOTE(review): th.start() is expected to follow -- not visible in
    # this excerpt; confirm the thread is actually started.
Пример #20
0
def main():
    """Publish the USB and Pi camera streams and loop pushing frames.

    NOTE(review): vs (a camera wrapper), CameraServer, cscore and the k_*
    constants come from elsewhere in this module; their semantics are
    inferred from usage here only.
    """
    NetworkTables.initialize(server=k_NETWORKTABLES_SERVER_IP)

    vs.camera.brightness = 50
    vs.camera.contrast = 0
    vs.camera.saturation = 0

    # NOTE(review): cs and ps are both obtained via getInstance() and ps is
    # never used below -- presumably the same instance; confirm and remove.
    cs = CameraServer.getInstance()
    ps = CameraServer.getInstance()
    cs.enableLogging()

    # Capture from the first USB Camera on the system
    #camera0 = cs.startAutomaticCapture()
    #camera1 = cs.startAutomaticCapture()
    #camera = cs.startAutomaticCapture(name="picam",path="/dev/v4l/by-path/platform-bcm2835-codec-video-index0")
    #camera = cs.startAutomaticCapture(name="usbcam",path="/dev/v4l/by-path/platform-fd500000.pcie-pci-0000:01:00.0-usb-0:1.2:1.0-video-index0")
    #camera0.setResolution(k_CONVEYOR_CAM_REZ_WIDTH, k_CONVEYOR_CAM_REZ_HEIGHT)
    camerausb = cscore.UsbCamera(
        name="usbcam",
        path=
        "/dev/v4l/by-path/platform-fd500000.pcie-pci-0000:01:00.0-usb-0:1.2:1.0-video-index0"
    )
    camerapi = cscore.CvSource("cvsource", cscore.VideoMode.PixelFormat.kMJPEG,
                               k_BALL_CAM_REZ_WIDTH, k_BALL_CAM_REZ_HEIGHT,
                               k_BALL_CAM_FPS)

    # setup MjpegServers
    usbMjpegServer = cscore.MjpegServer("ConveyorCam", 8081)
    piMjpegServer = cscore.MjpegServer("BallCam", 8082)

    # connect MjpegServers to their cameras
    usbMjpegServer.setSource(camerausb)
    piMjpegServer.setSource(camerapi)

    # Get a CvSink. This will capture images from the camera
    #cvSink = cs.getVideo()

    # (optional) Setup a CvSource. This will send images back to the Dashboard
    #outputStream = cs.putVideo("Name", k_BALL_CAM_REZ_WIDTH, k_BALL_CAM_REZ_HEIGHT)

    # Allocating new images is very expensive, always try to preallocate
    # NOTE(review): shape is (width, height, 3); numpy image convention is
    # (height, width, 3).  Only used by the commented-out grabFrame path,
    # but fix before re-enabling it.
    img = np.zeros(shape=(k_BALL_CAM_REZ_WIDTH, k_BALL_CAM_REZ_HEIGHT, 3),
                   dtype=np.uint8)

    while True:
        frame = vs.read()
        camerapi.putFrame(frame)
        # Tell the CvSink to grab a frame from the camera and put it
        # in the source image.  If there is an error notify the output.
        #time, img = cvSink.grabFrame(img)
        #if time == 0:
        #    # Send the output the error.
        #    outputStream.notifyError(cvSink.getError());
        #    # skip the rest of the current iteration
        #    continue

        #
        # Insert image processing logic here
        #

        # (optional) send some image back to the dashboard
        #outputStream.putFrame(frame)
        #outputStream.putFrame(frame)
        print("putting Frame")