Example #1
def init(self):
    self.camera = cv2.VideoCapture(
        gstreamer_pipeline(1280, 720, 1280 // 2, 720 // 2, 20, 0),
        cv2.CAP_GSTREAMER)
    # Grab frames continuously; Space and self.nameImage come from the
    # surrounding class, which is not part of this snippet.
    while True:
        hasFrame, frame = self.camera.read()
        if hasFrame:
            Space.write(self.nameImage, frame, 0.15)
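
Every example here passes a GStreamer pipeline string to cv2.VideoCapture, but none of them includes the gstreamer_pipeline helper itself. The sketch below is the commonly used Jetson Nano CSI pipeline builder and matches the positional arguments in Example #1 (capture width/height, display width/height, framerate, flip method); treat it as an assumption about what these projects use, not as their exact code.

def gstreamer_pipeline(capture_width=1280, capture_height=720,
                       display_width=640, display_height=360,
                       framerate=21, flip_method=0):
    # Build an nvarguscamerasrc pipeline string ending in an appsink that OpenCV can read.
    return ("nvarguscamerasrc ! "
            "video/x-raw(memory:NVMM), width=(int)%d, height=(int)%d, "
            "framerate=(fraction)%d/1 ! "
            "nvvidconv flip-method=%d ! "
            "video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! "
            "videoconvert ! "
            "video/x-raw, format=(string)BGR ! appsink" %
            (capture_width, capture_height, framerate, flip_method,
             display_width, display_height))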
Example #2
    def __init__(self):
        self.cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=2),
                                    cv2.CAP_GSTREAMER)

        self.frame = None
        self.stopped = False
        self.success = False
        print("Initializing Camera Thread")
Example #3
speed = 70


def updateSpeed(*args):
    global speed
    speed = args[0]
    robot.setSpeed(speed)


cv2.namedWindow("Circle")
# threshold and updateThreshold are defined elsewhere in the original script
# (not shown in this snippet), analogous to speed and updateSpeed above.
cv2.createTrackbar("threshold", "Circle", threshold, 512, updateThreshold)
cv2.createTrackbar("speed", "Circle", speed, 255, updateSpeed)

camera = cv2.VideoCapture(
    gstreamer_pipeline(1280, 720, 1280 // 2, 720 // 2, 20, 0),
    cv2.CAP_GSTREAMER)

trials = 0
while True:
    hasFrame, frame = camera.read()
    if not hasFrame:
        break

    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    frame = cv2.blur(frame, (5, 5))

    circles = cv2.HoughCircles(frame,
                               cv2.HOUGH_GRADIENT,
                               2,     # dp: inverse ratio of the accumulator resolution
                               100)   # minDist between circle centres (assumed value)
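    # Hedged continuation (not part of the original snippet): HoughCircles returns
    # None or an array of (x, y, radius) triples; needs numpy imported as np.
    if circles is not None:
        circles = np.uint16(np.around(circles))
        for x, y, r in circles[0, :]:
            cv2.circle(frame, (x, y), r, 255, 2)  # circle outline on the grayscale frame
            cv2.circle(frame, (x, y), 2, 255, 3)  # circle centre
    cv2.imshow("Circle", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

camera.release()
cv2.destroyAllWindows()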
Example #4
def multi_style(path,
                width=320,
                device=device,
                cycle_length=np.inf,
                half_precision=False,
                rotate=0,
                camera=0,
                cutoff=0):
    if path.is_file():
        model_iter = itertools.cycle([os.path.basename(path)])
        path = os.path.dirname(path)
    else:
        model_iter = itertools.cycle(os.listdir(path))
    model_file = next(model_iter)
    print(f'Using {model_file} ')
    model_path = os.path.join(path, model_file)
    model = TransformerNet()
    model.load_state_dict(read_state_dict(model_path))
    model.to(device)
    if half_precision:
        model.half()

    # camera < 0 selects the Jetson Nano CSI camera (GStreamer pipeline below); otherwise a normal USB camera is used.
    if rotate != 0:
        width = int(width / .75)

    height = int(width * .75)
    if camera < 0:
        #from jetcam.csi_camera import CSICamera
        #vs = CSICamera(width=width, height=int(width/1.5), capture_width=1080, capture_height=720, capture_fps=15)
        #vs.read()
        print('Using CSI camera')
        vs = cv2.VideoCapture(
            gstreamer_pipeline(capture_width=width,
                               capture_height=height,
                               display_width=width,
                               display_height=height), cv2.CAP_GSTREAMER)
        time.sleep(2.0)
        img = vs.read()
        assert img[1] is not None

    else:
        print('Using USB camera')
        vs = VideoStream(src=camera, resolution=(width, height)).start()
        time.sleep(2.0)
    if rotate != 0:
        width = int(width * .75)

    timer = Timer()
    cycle_begin = time.time()
    while True:
        frame = vs.read()
        if frame is None:
            # No frame yet: fall back to random noise so the window keeps refreshing.
            frame = np.random.randint(0,
                                      255, (int(width / 1.5), width, 3),
                                      dtype=np.uint8)

        # cv2.VideoCapture.read() returns a (success, frame) tuple; VideoStream.read() returns just the frame.
        if isinstance(frame, tuple):
            frame = frame[1]

        frame = cv2.flip(frame, 1)
        frame = resize(frame, width=width)
        # Style the frame
        img = style_frame(frame, model, device, half_precision).numpy()
        img = np.clip(img, 0, 255)
        img = img.astype(np.uint8)

        img = img.transpose(1, 2, 0)
        img = img[:, :, ::-1]
        # rotate
        if rotate != 0:
            h, w, _ = img.shape
            margin = int(w - h * h / w) // 2
            img = img[:, margin:-margin, :]

        if cutoff > 0:
            margin = int(cutoff * img.shape[1]) // 2
            img = img[:, margin:-margin, :]
        elif cutoff < 0:
            margin = int(-cutoff * img.shape[0]) // 2
            img = img[margin:-margin, :, :]
        # print(img.shape)
        cv2.imshow("Output", img)
        timer()
        key = cv2.waitKey(1) & 0xFF
        if key == ord("n") or (time.time() - cycle_begin) > cycle_length:
            model_file = next(model_iter)
            print(f'Using {model_file} ')
            model_path = os.path.join(path, model_file)
            model.load_state_dict(read_state_dict(model_path))
            model.to(device)
            cycle_begin = time.time()
        elif key == ord("q"):
            break
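
Examples #4 and #5 rely on several helpers that the snippets do not show (gstreamer_pipeline, resize, style_frame, read_state_dict, TransformerNet, imutils' VideoStream). The two sketches below are assumptions inferred from how the surrounding code uses them: resize scales to a target width while keeping the aspect ratio, and style_frame returns a CHW float tensor on the CPU, since the caller clips, casts and transposes its .numpy() result.

import cv2
import torch


def resize(image, width=None):
    # Aspect-preserving resize to a target width (assumed, imutils.resize-style behaviour).
    if width is None:
        return image
    h, w = image.shape[:2]
    scale = width / float(w)
    return cv2.resize(image, (width, int(h * scale)), interpolation=cv2.INTER_AREA)


def style_frame(img, style_model, device, half_precision=False):
    # Hypothetical sketch of the style-transfer step: BGR uint8 HWC frame in,
    # styled CHW float tensor out.
    tensor = torch.from_numpy(img[:, :, ::-1].copy()).permute(2, 0, 1).float()
    tensor = tensor.unsqueeze(0).to(device)
    if half_precision:
        tensor = tensor.half()
    with torch.no_grad():
        output = style_model(tensor)
    return output[0].float().cpu()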
Example #5
def photo_booth(path,
                models,
                width=1080,
                device=torch.device('cpu'),
                prep_time=10,
                view_time=10,
                rotate=0,
                camera=0,
                output_path=None,
                cutoff=0):
    if rotate != 0:
        width = int(width / .75)
    # camera < 0 selects the Jetson Nano CSI camera (GStreamer pipeline below); otherwise a normal USB camera is used.
    height = int(width * .75)
    if camera < 0:
        print('Using CSI camera')
        vs = cv2.VideoCapture(
            gstreamer_pipeline(capture_width=width,
                               capture_height=height,
                               display_width=width,
                               display_height=height), cv2.CAP_GSTREAMER)
        time.sleep(2.0)
        img = vs.read()
        assert img[1] is not None

    else:
        print('Using USB camera')
        vs = VideoStream(src=camera, resolution=(width, height)).start()
    print('Warming up')
    time.sleep(2.0)
    if rotate != 0:
        width = int(width * .75)

    print('Program started')
    model = TransformerNet()
    cv2.namedWindow("Output", cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty("Output", cv2.WND_PROP_FULLSCREEN,
                          cv2.WINDOW_FULLSCREEN)
    while True:
        preparation(vs, prep_time, rotate, cutoff)
        img = vs.read()
        # cv2.VideoCapture.read() returns a (success, frame) tuple; VideoStream.read() returns just the frame.
        if isinstance(img, tuple):
            img = img[1]
        img = cv2.flip(img, 1)
        img = resize(img, width)

        # rotate
        if rotate != 0:
            h, w, _ = img.shape
            margin = int(w - h * h / w) // 2
            img = img[:, margin:-margin, :]
        if cutoff > 0:
            margin = int(cutoff * img.shape[1]) // 2
            img = img[:, margin:-margin, :]
        elif cutoff < 0:
            margin = int(-cutoff * img.shape[0]) // 2
            img = img[margin:-margin, :, :]

        # Choosing and loading the model
        model_name = np.random.choice(models)
        print('Using {}'.format(model_name))
        model_path = os.path.join(path, model_name)
        state_dict = read_state_dict(model_path)
        model.load_state_dict(state_dict)
        model.to(device)
        if output_path is not None:
            if not os.path.exists(output_path):
                os.mkdir(output_path)
            filename = time.strftime('%Y_%m_%d_%H_%M_%S_') + os.path.splitext(
                model_name)[0] + '.jpg'
            filepath = os.path.join(output_path, filename)
            cv2.imwrite(filepath, img)
        # Inference
        cv2.imshow('Output', img)
        key = cv2.waitKey(1) & 0xFF
        busy = BusyShow(img, style_frame, **{
            'img': img,
            'style_model': model,
            'device': device
        })
        output = busy.execute()
        output = output.numpy()
        # output = style_frame(img,model,device).numpy()

        # Postprocessing
        output = post_process(output)
        cv2.imshow('Output', output)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break

        view_result(img, view_time)
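
Example #5 moves the conversion back to a displayable image into a post_process helper. Judging from the inline steps in Example #4, it most likely clips to [0, 255], casts to uint8, reorders CHW to HWC and swaps RGB to BGR for cv2.imshow; the sketch below is an assumption along those lines.

import numpy as np


def post_process(output):
    # Assumed counterpart of the inline postprocessing in Example #4.
    img = np.clip(output, 0, 255).astype(np.uint8)  # clamp to the valid 8-bit pixel range
    img = img.transpose(1, 2, 0)                    # CHW -> HWC
    return img[:, :, ::-1]                          # RGB -> BGR for OpenCV display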