示例#1
0
 def __init__(self, config):
     """Initialize and scan device indices until a readable capture is found.

     Starts at the configured default input index and keeps opening the
     next device until one actually yields a frame.
     """
     super().__init__(config)
     self.input_file = int(config["input"]["default_input"])
     # Probe successive device indices; stop at the first one that delivers a frame.
     while True:
         self.cap = acapture.open(self.input_file)
         _, frame = self.cap.read()
         if frame is not None:
             break
         self.input_file += 1
示例#2
0
    def start(self, device):
        """Open *device* for capture, transition state, and start the viewer.

        When the 'read' config flag is set, previously stored camera
        parameters are loaded and the calibrated state is entered;
        otherwise the initial state is entered.
        """
        self._cap = acapture.open(device)

        if not self.config['read']:
            self.transition_to(state.StateInitial())
        else:
            params = self.read_params()
            self.transition_to(state.StateCalibrated(), {'camera_params': params})

        self._viewer.start()
示例#3
0
def main():
    """Continuously grab frames from camera 0 and save each as ``test.jpg``.

    Runs forever; each successful read overwrites the previous snapshot.
    """
    print("Hello World!")

    cap = acapture.open(0)  # /dev/video0
    print("successfully opened camera device!")

    while True:
        check, frame = cap.read()  # non-blocking
        if check:
            # Fixed typo in the original message ("sucessfully").
            print("successfully read a frame")
            # NOTE(review): conversion assumes the source frame is BGR —
            # confirm acapture's channel order before relying on the colors.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            cv2.imwrite('test.jpg', frame)
示例#4
0
def generate():
    """Video streaming generator function.

    Yields camera frames encoded as JPEG multipart/x-mixed-replace chunks
    (MJPEG over HTTP). Runs forever.
    """
    cap = acapture.open(0)
    time.sleep(2.0)  # let the camera warm up before the first read
    while True:
        start = time.time()
        check, frame = cap.read()
        if check:
            # Fix: the original referenced a possibly-unbound `end` (NameError
            # when the very first read fails) and printed a stale/negative delta.
            print("capture time: {}".format(time.time() - start))

            start = time.time()
            frame = imutils.resize(frame, width=720)  # To save bandwidth?
            #cv2.imshow("frame", frame)
            # tobytes() replaces the deprecated (and now removed) ndarray.tostring().
            by = cv2.imencode('.jpg', frame)[1].tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + by + b'\r\n')

            # Measured after the yield so the consumer's handling is included.
            print("Process and display time: {}".format(time.time() - start))
    def __init__(self):
        """ROS2 node that captures camera frames and publishes them
        as CompressedImage messages on ``image/compressed``."""
        super().__init__('acapture_camera_node',
                         allow_undeclared_parameters=True,
                         automatically_declare_parameters_from_overrides=True)

        self.node_name = self.get_name()
        self.log = self.get_logger()

        # Open camera device 0 via acapture.
        self.cap = acapture.open(0)  #/dev/video0

        # Capture properties left at defaults; uncomment to constrain them.
        #self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        #self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        #self.cap.set(cv2.CAP_PROP_FPS, 5)

        self.log.info("successfully opened camera device!")

        # init ROS opencv bridge
        self.bridge = CvBridge()
        # NOTE(review): "sucessfully" typo in the log message below.
        self.log.info("sucessfully created CvBridge.")

        # Publisher with queue depth 1: always favor the newest frame.
        self.pub_img = self.create_publisher(CompressedImage,
                                             "image/compressed", 1)
        self.image_msg = CompressedImage()
示例#6
0
import acapture
import sys
import numpy as np
import colorsys

sys.path.append('./python_glview') 

import pyglview
from OpenGL.GLUT import *
from logging import warning

# Load and scale the background image to the viewer resolution.
# NOTE(review): cv2 is used here but this example never imports it — confirm
# `import cv2` belongs in the import block above.
bg = cv2.imread('bg.jpg', cv2.IMREAD_COLOR)
bg = cv2.resize(bg, dsize=(1280, 720), interpolation=cv2.INTER_CUBIC)

viewer = pyglview.Viewer()
cap = acapture.open(0) # Camera 0,  /dev/video0


# Color-sampling state — presumably consumed by processing code past this chunk.
got_sample = False
sample_range = None
sample_size = (10, 10)
threshold = 20

# 10x10 all-ones structuring element (morphology kernel).
kernel = np.ones((10,10), np.uint8)

def restrict(color):
    # https://docs.opencv.org/trunk/df/d9d/tutorial_py_colorspaces.html
    # Max and min values of H, S, V in opencv
    # NOTE(review): this example is truncated here — min/max are defined but
    # never used in the visible code; they also shadow the builtins min/max.
    min = (0, 0, 0)
    max = (179, 255, 255)
示例#7
0
import os
import cv2
import acapture
# Play back ~/test.mp4: read frames non-stop and show each one.
cap = acapture.open(os.path.join(os.path.expanduser('~'), "test.mp4"))
while True:
    check, frame = cap.read()
    if not check:
        continue
    # Convert channel order, then display and give the GUI ~1 ms to refresh.
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    cv2.imshow("test", frame)
    cv2.waitKey(1)
示例#8
0
#!/usr/bin/env python3
import os
import sys

import acapture
import pyglview

import evaluator

basepath = os.getcwd()
sys.path.append(basepath)

# NOTE(review): np.array() is used below but numpy was never imported in this
# example — imported here so the script can run standalone.
import numpy as np

# A video source argument is required: fail with a usage message instead of
# the original NameError (`f` was undefined when sys.argv[1] was absent).
if len(sys.argv) > 1:
    f = sys.argv[1]
else:
    sys.exit("usage: {} <video-file-or-device>".format(sys.argv[0]))

cap = acapture.open(f)
view = pyglview.Viewer(keyboard_listener=cap.keyboard_listener)
model = evaluator.Evaluator()


def loop():
    """Read one frame, run the evaluator on it, and hand the result to the viewer."""
    try:
        check, frame = cap.read()
        if check:
            frame = model.render(frame)
            view.set_image(np.array(frame))
    except Exception as e:
        # Any failure aborts the stream; exit code 9 marks an abnormal stop.
        print(e)
        exit(9)


view.set_loop(loop)
示例#9
0
import cv2
import os
import numpy as np
import socket
import sys
import pickle
import struct
import acapture

# Open camera 0 (acapture used as a drop-in for cv2.VideoCapture).
cap = acapture.open(0)  #cap=cv2.VideoCapture(0)
frames_per_seconds = 60.0  # NOTE(review): unused in the visible code
clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientsocket.connect(("localhost", 8080))

# Stream frames forever: pickle each frame and prefix it with a 4-byte
# little-endian length so the receiver can re-frame the byte stream.
while True:
    ret, frame = cap.read()
    # NOTE(review): frame is sent even when ret is falsy — confirm the
    # receiver tolerates a pickled None frame.
    data = pickle.dumps(frame)
    message_size = struct.pack("<L", len(data))
    clientsocket.sendall(message_size + data)

# NOTE(review): unreachable — the loop above never exits, and main() is not
# defined anywhere in this example.
if __name__ == '__main__':
    main()
示例#10
0

def gamma(img, g):
    """Apply gamma correction with exponent *g* to an 8-bit image.

    Each channel value v is mapped to 255 * (v / 255) ** g via a 256-entry
    lookup table applied with cv2.LUT.
    """
    # Build the table in one vectorized pass instead of a 256-iteration
    # Python loop; clipping and uint8 truncation match the original.
    lookUpTable = np.clip(
        np.power(np.arange(256) / 255.0, g) * 255.0, 0, 255
    ).astype(np.uint8).reshape(1, 256)
    return cv2.LUT(img, lookUpTable)


if __name__ == '__main__':
    import acapture
    import pyglview
    import sys
    # NOTE(review): os and traceback are used below but were never imported
    # in this example — added here; drop if the enclosing file already has them.
    import os
    import traceback

    # Source: argv[1] if given, otherwise ~/test.mp4.
    cap = acapture.open(sys.argv[1] if len(sys.argv) > 1 else os.path.
                        join(os.path.expanduser('~'), "test.mp4"))

    view = pyglview.Viewer(keyboard_listener=cap.keyboard_listener,
                           fullscreen=False)

    def loop():
        """Feed the next frame to the viewer; abort the process on any error."""
        try:
            check, frame = cap.read()
            if check:
                view.set_image(frame)
                # view.set_image(gamma(frame, 0.6))
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # still propagate; exit code 9 marks an abnormal stop.
            traceback.print_exc()
            exit(9)
示例#11
0
import acapture
import cv2

# Preview camera 0: poll the non-blocking reader and show every frame.
cap = acapture.open(0)  # /dev/video0
while True:
    ok, img = cap.read()  # non-blocking
    if not ok:
        continue
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    cv2.imshow("test", img)
    cv2.waitKey(1)