예제 #1
0
def main():
    """Capture frames from CSI sensor 0, rotate them 90° CCW, JPEG-encode
    them and push them to the TCP relay until 'q' is pressed or an error
    occurs.

    NOTE(review): this file defines `main` several times (scraped example
    collection); only the last definition survives import.
    """
    vs1 = WebcamVideoStream(src=gstreamer_pipeline(sensor_id=0),
                            device=cv2.CAP_GSTREAMER).start()

    client = imagiz.TCP_Client(server_ip="10.42.0.1",
                               server_port=5555,
                               client_name="cc1")
    # JPEG quality 90: small payload, visually near-lossless.
    encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]

    fps = FPS().start()
    try:
        while True:
            frame1 = vs1.read()
            frame1 = cv2.rotate(frame1, cv2.ROTATE_90_COUNTERCLOCKWISE)
            _, image = cv2.imencode('.jpg', frame1, encode_param)
            response = client.send(image)
            print(response)
            # waitKey only delivers key events while a HighGUI window is
            # shown; kept for parity with the commented-out imshow.
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            fps.update()
    except Exception as e:
        # Best-effort streaming loop: log the failure and fall through to
        # the shared cleanup (previously destroyAllWindows()/vs1.stop()
        # both ran twice on this path).
        print(e)
    finally:
        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
        cv2.destroyAllWindows()
        vs1.stop()
def main():
    """Stream perspective-rectified, 256x256-resized frames from CSI
    sensor 1 to the TCP relay on port 5551.

    NOTE(review): this file defines `main` several times (scraped example
    collection); only the last definition survives import.
    """
    # Enable memory growth so TF does not reserve all GPU memory up front.
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        try:
            # Currently, memory growth needs to be the same across GPUs.
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
            logical_gpus = tf.config.experimental.list_logical_devices('GPU')
            print(len(gpus), "Physical GPUs,", len(logical_gpus),
                  "Logical GPUs")
        except RuntimeError as e:
            # Memory growth must be set before GPUs have been initialized.
            print(e)

    # Source-image corners of the region to rectify (pixel coordinates on
    # the rotated frame — presumably measured on the camera rig; verify).
    pts = np.array([[85, 520], [1330, 520], [80, 1680], [1245, 1775]])
    vs1 = WebcamVideoStream(src=gstreamer_pipeline(sensor_id=1),
                            device=cv2.CAP_GSTREAMER).start()

    client = imagiz.TCP_Client(server_ip="10.42.0.1",
                               server_port=5551,
                               client_name="cc1")
    encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]

    # Build the homography once; it is constant for the whole run.
    rect = order_points(pts)
    dst, maxWidth, maxHeight = four_point_transform(rect)
    M = cv2.getPerspectiveTransform(rect, dst)
    fps = FPS().start()

    try:
        while True:
            frame1 = vs1.read()
            frame1 = cv2.rotate(frame1, cv2.ROTATE_90_COUNTERCLOCKWISE)
            image_np = cv2.warpPerspective(frame1, M, (maxWidth, maxHeight))
            image_tf = tf.convert_to_tensor(image_np)
            resize_tf = tf.image.resize(image_tf, (256, 256))
            _, image = cv2.imencode('.jpg', resize_tf.numpy(), encode_param)
            response = client.send(image)
            print(response)
            fps.update()
    except KeyboardInterrupt:
        print("Keyboard stopped")
    except Exception as e:
        # Best-effort loop: log and fall through to the shared cleanup
        # (previously vs1.stop() ran twice on this path).
        print(e)
    finally:
        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
        vs1.stop()
        print("exit main")
예제 #3
0
 def __init__(self, serverIp="10.42.0.1", serverPort=5555,
              clientName="client"):
     """Worker thread that owns a TCP client for streaming JPEG frames.

     Bug fix: the connection settings were passed as keyword arguments to
     threading.Thread.__init__ (which rejects them, raising TypeError) and
     then read as unbound locals. They are now regular keyword parameters
     with the same defaults, so existing no-argument callers still work.
     """
     threading.Thread.__init__(self)
     # Keep the client and encode settings on the instance so run() can
     # use them (previously they were discarded locals).
     self.client = imagiz.TCP_Client(server_ip=serverIp,
                                     server_port=serverPort,
                                     client_name=clientName)
     self.encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
예제 #4
0
def main():
    """Stream center-cropped, 256x256-resized frames from CSI sensor 3 to
    the TCP relay on port 5553.

    NOTE(review): this file defines `main` several times (scraped example
    collection); only the last definition survives import.
    """
    # Enable memory growth so TF does not reserve all GPU memory up front.
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        try:
            # Currently, memory growth needs to be the same across GPUs.
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
            logical_gpus = tf.config.experimental.list_logical_devices('GPU')
            print(len(gpus), "Physical GPUs,", len(logical_gpus),
                  "Logical GPUs")
        except RuntimeError as e:
            # Memory growth must be set before GPUs have been initialized.
            print(e)

    vs1 = WebcamVideoStream(src=gstreamer_pipeline(sensor_id=3),
                            device=cv2.CAP_GSTREAMER).start()

    client = imagiz.TCP_Client(server_ip="10.42.0.1",
                               server_port=5553,
                               client_name="cc1")
    encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]

    fps = FPS().start()
    try:
        while True:
            frame1 = vs1.read()
            image_tf = tf.convert_to_tensor(frame1)
            # Crop a 900x900 window starting at (100, 100); written as
            # 1000-100 to make the intended 1000-pixel extent explicit.
            crop_tf = tf.image.crop_to_bounding_box(image_tf, 100, 100,
                                                    1000 - 100, 1000 - 100)
            resize_tf = tf.image.resize(crop_tf, (256, 256))
            _, image = cv2.imencode('.jpg', resize_tf.numpy(), encode_param)
            response = client.send(image)
            print(response)
            fps.update()
    except KeyboardInterrupt:
        print("Keyboard stopped")
    except Exception as e:
        # Best-effort loop: log and fall through to the shared cleanup
        # (previously vs1.stop() ran twice on this path).
        print(e)
    finally:
        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
        vs1.stop()
        print("exit main")
def main():
    """Stream perspective-rectified, 512x512 frames from CSI sensor 1 to
    the TCP relay on port 5551."""
    # Corner points of the region of interest in the rotated frame.
    corner_pts = np.array([[85, 520], [1330, 520], [80, 1680], [1245, 1775]])
    stream = WebcamVideoStream(src=gstreamer_pipeline(sensor_id=1),
                               device=cv2.CAP_GSTREAMER).start()

    tcp = imagiz.TCP_Client(server_ip="10.42.0.1",
                            server_port=5551,
                            client_name="cc1")
    jpeg_opts = [int(cv2.IMWRITE_JPEG_QUALITY), 90]

    # Precompute the homography once; it stays constant for the whole run.
    ordered = order_points(corner_pts)
    target, out_w, out_h = four_point_transform(ordered)
    homography = cv2.getPerspectiveTransform(ordered, target)
    meter = FPS().start()

    while True:
        try:
            rotated = cv2.rotate(stream.read(),
                                 cv2.ROTATE_90_COUNTERCLOCKWISE)
            flattened = cv2.warpPerspective(rotated, homography,
                                            (out_w, out_h))
            shrunk = cv2.resize(flattened, (512, 512),
                                interpolation=cv2.INTER_AREA)
            _, payload = cv2.imencode('.jpg', shrunk, jpeg_opts)
            print(tcp.send(payload))
            meter.update()
        except Exception as err:
            print(err)
            stream.stop()
            break
        except KeyboardInterrupt:
            print("Keyboard stopped")
            stream.stop()
            break

    meter.stop()
    print("[INFO] elasped time: {:.2f}".format(meter.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(meter.fps()))

    stream.stop()
    print("exit main")
import imagiz
import time
import cv2

# Demo script: grab a frame every 2 s from the default camera, convert it
# to grayscale, JPEG-encode it, and send it to the imagiz server.
vid = cv2.VideoCapture(0)
client = imagiz.TCP_Client(server_port=8095, client_name="cc1")
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]

if not vid.isOpened():
    print("Cannot open camera")
    exit()

try:
    while True:
        # Throttle to one frame every 2 seconds; this demo is not real-time.
        time.sleep(2)
        r, frame = vid.read()
        if r:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            r, image = cv2.imencode('.jpg', gray, encode_param)
            response = client.send(image)
finally:
    # Release the camera even when the loop is interrupted (Ctrl-C);
    # previously the capture device was never released.
    vid.release()
예제 #7
0

# -*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
# Pix2Pix variables declaration
# <==================================================================>
# generator = tf.saved_model.load("./model/pix2pixTF-TRT512")
# <==================================================================>
# -*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
# Flag the workers poll to decide when to exit (0 = keep running).
exitFlag = 0
# Lock guarding shared state between the worker processes/threads.
queueLock = threading.Lock()
# Bounded hand-off queues (depth 3) between the capture, pix2pix and
# sender stages — one input/output pair per pipeline.
inputPix2PixQueue1 = Queue(3)
outputPix2PixQueue1 = Queue(3)
inputPix2PixQueue2 = Queue(3)
outputPix2PixQueue2 = Queue(3)
# Two TCP clients to the same relay server, told apart by client_name.
client1 = imagiz.TCP_Client(server_ip='10.42.0.1',
                            server_port=5555,
                            client_name='cc1',
                            request_retries=100)
client2 = imagiz.TCP_Client(server_ip='10.42.0.1',
                            server_port=5555,
                            client_name='cc2',
                            request_retries=100)
# Model input resolution (pixels) and normalisation divisor — presumably
# used to scale pixel values into the generator's range; TODO confirm 255.5.
SIZE = 512
NORM = 255.5
# -*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*


class myProcess(Process):
    def __init__(self, name, function, iq, oq=None):
        Process.__init__(self)
        self.name = name
        self.function = function
예제 #8
0
import imagiz
import cv2

# Demo script: continuously grab frames from the default camera,
# JPEG-encode them and send them to the imagiz server on port 9990.
vid = cv2.VideoCapture(0)
client = imagiz.TCP_Client(server_port=9990, client_name="cc1")
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]

# Fail fast when the camera cannot be opened (consistent with the other
# capture script in this file) instead of looping on failed reads forever.
if not vid.isOpened():
    print("Cannot open camera")
    exit()

try:
    while True:
        r, frame = vid.read()
        if r:
            r, image = cv2.imencode('.jpg', frame, encode_param)
            response = client.send(image)
            print(response)
finally:
    # Release the camera even when the loop is interrupted (Ctrl-C);
    # previously the capture device was never released.
    vid.release()
예제 #9
0
    # ANSI SGR escape sequences for coloured console output.
    BLACK = '\033[30m'
    RED = '\033[31m'
    GREEN = '\033[32m'
    YELLOW = '\033[33m'
    BLUE = '\033[34m'
    MAGENTA = '\033[35m'
    CYAN = '\033[36m'
    WHITE = '\033[37m'
    # Text style modifier (underline), not a colour.
    UNDERLINE = '\033[4m'
    # Restores the terminal's default attributes.
    RESET = '\033[0m'


# -*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
# Flag the worker threads poll to decide when to exit (0 = keep running).
exitFlag = 0
# Two TCP clients to the same local relay, told apart by client_name.
client1 = imagiz.TCP_Client(server_ip='localhost',
                            server_port=5550,
                            client_name='cc1')
client2 = imagiz.TCP_Client(server_ip='localhost',
                            server_port=5550,
                            client_name='cc2')
# Bounded frame hand-off queues, one per client (depths 3 and 5).
clientQueue1 = queue.Queue(3)
clientQueue2 = queue.Queue(5)
# -*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*


class myThread(threading.Thread):
    def __init__(self, name, function, iq, oq=None):
        threading.Thread.__init__(self)
        self.name = name
        self.function = function
        self.iq = iq
예제 #10
0
# Bounded hand-off queues (depth 3) between the pipeline stages: one raw
# input and one resized queue per camera, plus a queue for joint results.
input1Queue = queue.Queue(3)
input2Queue = queue.Queue(3)
input3Queue = queue.Queue(3)
resize1Queue = queue.Queue(3)
resize2Queue = queue.Queue(3)
resize3Queue = queue.Queue(3)
jointsQueue = queue.Queue(3)
# Top-left corner and extent of the crop window applied to incoming
# frames — presumably a 1080x1080 square from the frame origin; verify.
x0 = 0
y0 = 0
WIDTH = 1080
HEIGHT = 1080

# imagiz Config
# <==================================================================>
# Three TCP clients to the same relay server, told apart by client_name.
client1 = imagiz.TCP_Client(server_ip='10.42.0.1',
                            server_port=5550,
                            client_name='cc1')
client2 = imagiz.TCP_Client(server_ip='10.42.0.1',
                            server_port=5550,
                            client_name='cc2')
client3 = imagiz.TCP_Client(server_ip='10.42.0.1',
                            server_port=5550,
                            client_name='cc3')

# Socketio Config
# <==================================================================>
# sio = socketio.Client()
# sio.connect('http://10.42.0.1:3000', namespaces=['/'])

# @sio.event
# def connect():
예제 #11
-1
def main():
    """Parse CLI options and stream (optionally cropped, resized and/or
    pix2pix-translated) camera frames to a TCP relay.

    Fixes over the original:
    * argparse: supplying ``const=`` to a 'store' action requires
      ``nargs='?'``; without it ``add_argument`` raises ValueError while
      the parser is still being built, so the script could never run.
    * ``--model`` and ``--sensor`` were parsed but ignored (a hard-coded
      model path and sensor id were used instead).
    """
    parser = argparse.ArgumentParser(
        description='pix2pix checkpoint to SavedModel.')
    parser.add_argument('--size',
                        dest='size',
                        help='size of model',
                        type=int,
                        default=256)
    parser.add_argument('--cropSize',
                        dest='cropSize',
                        help='',
                        type=int,
                        default=1080)
    parser.add_argument('--sensor',
                        dest='sensor',
                        help='',
                        type=int,
                        default=0)
    parser.add_argument('--clientName',
                        dest='clientName',
                        help='',
                        type=str,
                        default='cc0')
    parser.add_argument('--serverIP',
                        dest='serverIP',
                        help='',
                        type=str,
                        default='10.42.0.1')
    parser.add_argument('--serverPORT',
                        dest='serverPORT',
                        help='',
                        type=int,
                        default=5550)
    parser.add_argument('--model',
                        dest='model',
                        help='',
                        type=str,
                        default='./model/pix2pixTF-TRT')
    # nargs='?' lets a bare flag (e.g. `--resize`) take the const value,
    # while `--resize true/false` still goes through str2bool.
    parser.add_argument('--resize',
                        dest='resize',
                        help='',
                        type=str2bool,
                        nargs='?',
                        const=True,
                        default=False)
    parser.add_argument('--crop',
                        dest='crop',
                        help='',
                        type=str2bool,
                        nargs='?',
                        const=True,
                        default=False)
    parser.add_argument('--pix2pix',
                        dest='pix2pix',
                        help='',
                        type=str2bool,
                        nargs='?',
                        const=True,
                        default=False)
    args = parser.parse_args()
    print(args)

    if args.pix2pix:
        # Honour --model (was hard-coded to "./model/pix2pixTF-TRT").
        generator = tf.saved_model.load(args.model)
        # Divisor used when mapping pixels into the generator's range.
        norm = (args.size / 2) - 0.5

    # Honour --sensor (was hard-coded to sensor 0).
    vs = WebcamVideoStream(src=gstreamer_pipeline(sensor_id=args.sensor),
                           device=cv2.CAP_GSTREAMER).start()

    client = imagiz.TCP_Client(server_ip=args.serverIP,
                               server_port=args.serverPORT,
                               client_name=args.clientName)
    encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]

    fps = FPS().start()
    try:
        while True:
            image = vs.read()
            image_tf = tf.convert_to_tensor(image)
            if args.crop:
                image_tf = crop(image_tf, 0, 0, args.cropSize, args.cropSize)
                image = image_tf.numpy()
            if args.resize:
                image_tf = resize(image_tf, args.size)
                image = image_tf.numpy()
            if args.pix2pix:
                image = pix2pix(image_tf, args.size, norm, generator)
            # _, image = cv2.imencode('.jpg', image, encode_param)
            response = client.send(image)
            # print(response)
            fps.update()
    except KeyboardInterrupt:
        print("Keyboard stopped")
    except Exception as e:
        # Best-effort loop: log and fall through to the shared cleanup
        # (previously vs.stop() ran twice on this path).
        print(e)
    finally:
        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
        vs.stop()
        print("exit main")