class Camera:
    def __init__(self, height=480, width=640):
        # Intrinsics obtained from the calibration module
        self._K = np.array([[676.74, 0, 317.4], [0, 863.29, 252.459],
                            [0, 0, 1]])
        # Extrinsics
        angles = np.array([90, 90, 0]) * np.pi / 180
        R = eulerAnglesToRotationMatrix(angles)  # Rotation matrix WCS->CCS
        T = np.array([[0.0], [-0.156],
                      [0.0]])  # WCS origin expressed in the camera coordinate system
        self._Rt = np.hstack((R, T))
        self._camera = CSICamera(width=width, height=height)
        print("Camera Initialized - Height = {}, Width = {}".format(
            height, width))

    def get_Rt(self):
        return self._Rt

    def get_K(self):
        return self._K

    def get_frame_cv2(self):
        """
        Returns an ndarray in BGR format (OpenCV format)
        """
        return self._camera.read()

    def get_frame_PIL(self):
        """
        Returns an Image object in RGB (PIL format)
        """
        image = self._camera.read()
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = Image.fromarray(image)
        return image
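
As a quick check of the intrinsics and extrinsics above, a world point can be projected into the image with the standard pinhole model (the point below is illustrative):

cam = Camera()
X_world = np.array([[0.5], [0.0], [2.0], [1.0]])  # homogeneous world point, in meters
p = cam.get_K() @ cam.get_Rt() @ X_world          # 3x1 homogeneous pixel vector
u, v = (p[:2] / p[2]).flatten()                   # pixel coordinates
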
Example #2
    def __init__(self, display_widget=None):
        self.display_widget = display_widget

        with open(self.HUMAN_POSE, 'r') as f:
            human_pose = json.load(f)

        topology = trt_pose.coco.coco_category_to_topology(human_pose)

        self.model_trt = TRTModule()
        self.model_trt.load_state_dict(torch.load(self.OPTIMIZED_MODEL))

        self.parse_objects = ParseObjects(topology)
        self.keypoint_coordinates = KeypointCoordinates(
            human_pose["keypoints"])

        self.camera = CSICamera(width=self.WIDTH,
                                height=self.HEIGHT,
                                capture_fps=30)
        self.camera.running = True

        if self.display_widget is None:
            self.display = plt.imshow(self.camera.value)
            plt.ion()
            plt.show()

        # ROS stuff
        s = rospy.Service('get_keypoint', GetKeypoint,
                          self.__handle_get_keypoint)
        self.image_pub = rospy.Publisher("image", Image, queue_size=10)
        self.bridge = CvBridge()
Example #3
 def __init__(self, camera_width, camera_height):
     self.camera = CSICamera(width=camera_width,
                             height=camera_height,
                             capture_width=camera_width,
                             capture_height=camera_height,
                             capture_fps=60)
     self.image = None
Example #4
class Observer:
    def __init__(self, camera_width, camera_height):
        self.camera = CSICamera(width=camera_width,
                                height=camera_height,
                                capture_width=camera_width,
                                capture_height=camera_height,
                                capture_fps=60)
        self.image = None

    def start(self):
        self.camera.observe(self._callback, names='value')
        self.camera.running = True

    def stop(self):
        self.camera.running = False

    def _callback(self, change):
        img = change['new']
        # Convert BGR to RGB by reversing the channel axis (HWC layout)
        self.image = img[:, :, ::-1]

    def observation(self):
        # Busy-wait until the first frame has arrived
        while self.image is None:
            pass
        return self.image
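
A minimal usage sketch for Observer (the dimensions are illustrative):

obs = Observer(224, 224)
obs.start()
frame = obs.observation()  # blocks until the first frame is delivered
print(frame.shape)         # e.g. (224, 224, 3)
obs.stop()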
Example #5
 def __init__(self, fps=10, img_size=256):
     self.fps = fps
     self.img_size = img_size
     self.cam = CSICamera(width=img_size,
                          height=img_size,
                          capture_width=1080,
                          capture_height=720,
                          capture_fps=fps)
Example #6
 def __init__(self, height=480, width=640):
     # Intrinsics obtained from the calibration module
     self._K = np.array([[676.74, 0, 317.4], [0, 863.29, 252.459],
                         [0, 0, 1]])
     # Extrinsics
     angles = np.array([90, 90, 0]) * np.pi / 180
     R = eulerAnglesToRotationMatrix(angles)  # Rotation matrix WCS->CCS
     T = np.array([[0.0], [-0.156],
                   [0.0]])  # WCS origin expressed in the camera coordinate system
     self._Rt = np.hstack((R, T))
     self._camera = CSICamera(width=width, height=height)
     print("Camera Initialized - Height = {}, Width = {}".format(
         height, width))
Example #7
def cam_loop(images, end):
    cap = CSICamera(width=224,
                    height=224,
                    capture_width=1080,
                    capture_height=720,
                    capture_fps=30)
    while True:
        img = cap.read()
        print("reading image")
        if img is not None:
            images.put(img)
        if end.value == 1:
            cap.release()
            return
Example #8
def take_some_pictures(path):

    camera = CSICamera(width=1920, height=1080, capture_device=0)

    # Warm up the camera by grabbing and discarding a few frames
    for i in range(30):
        camera.read()
        time.sleep(1 / 30)

    # Take pictures (this loop stops after 11)
    cnt = 0
    while True:
        time.sleep(1 / 30)
        read_camera(path, camera, cnt)
        cnt += 1
        if cnt > 10:
            cnt = 0
            break
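
read_camera is not shown in this excerpt; a minimal sketch of a helper with that signature, assuming it simply saves a numbered JPEG (the filename pattern is illustrative):

import os
import cv2

def read_camera(path, camera, cnt):
    # Hypothetical helper: grab one frame and save it as a numbered JPEG
    image = camera.read()
    cv2.imwrite(os.path.join(path, "img_{:03d}.jpg".format(cnt)), image)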
Example #9
 def __init__(self, camera_width=112, camera_height=112, fps=30):
     self.car = NvidiaRacecar()
     print("====== LOADING CAMERA ======")
     self.camera = CSICamera(width=camera_width,
                             height=camera_height,
                             capture_fps=fps)
     self.camera.running = True
     print("====== CAMERA LOADED SUCCESSFULLY ======")
     self.car.throttle = 0
     self.car.steering = 0
Example #10
class Camera:
    def __init__(self, fps=10, img_size=256):
        self.fps = fps
        self.img_size = img_size
        self.cam = CSICamera(width=img_size,
                             height=img_size,
                             capture_width=1080,
                             capture_height=720,
                             capture_fps=fps)

    def capture(self):
        self.frame = self.cam.read()
        return self.frame
Example #11
class Racer(Cmd):

    # session variables
    sessionInst = None

    camera = CSICamera(width=224,
                       height=224,
                       capture_width=1080,
                       capture_height=720,
                       capture_fps=30)

    imageLst = []
    imageArray = None

    def do_bye(self, inp):
        '''exit the application.'''

        print("Bye")
        print('')
        return True

    def do_tp(self, t):
        '''take picture and insert into list '''

        image = self.camera.read()

        img_rotate_180 = cv2.rotate(image, cv2.ROTATE_180)

        self.imageLst.append(img_rotate_180)

    def do_conv(self, t):
        '''convert list of images into numpy array of images '''

        train = np.array(self.imageLst, dtype='float32')

        print(train.shape)

    def do_prt(self, t):
        '''print list '''

        print(len(self.imageLst))

    def do_save(self, t):
        '''save numpy array '''

        print(len(self.imageLst))
Example #12
class VideoCSIReader(VideoReader):
    def __init__(self):
        self._camera = CSICamera(width=224,
                                 height=224,
                                 capture_width=400,
                                 capture_height=300,
                                 capture_fps=30)

    def read_frame(self, show_preview=False):
        img = self._camera.read()

        if img is None:
            return None

        if show_preview:
            cv2.imshow("preview", img)
            cv2.waitKey(1)

        return img
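
A minimal usage sketch for the reader above:

reader = VideoCSIReader()
while True:
    frame = reader.read_frame(show_preview=True)
    if frame is None:
        break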
Example #13
def load_model_and_run():
    with open('human_pose.json', 'r') as f:
        human_pose = json.load(f)

    topology = trt_pose.coco.coco_category_to_topology(human_pose)
    ut = Utils(topology)
    num_parts = len(human_pose['keypoints'])
    num_links = len(human_pose['skeleton'])
    model = trt_pose.models.resnet18_baseline_att(num_parts,
                                                  2 * num_links).cuda().eval()
    MODEL_WEIGHTS = 'resnet18_baseline_att_224x224_A_epoch_249.pth'
    model.load_state_dict(torch.load(MODEL_WEIGHTS))
    WIDTH = 224
    HEIGHT = 224

    data = torch.zeros((1, 3, HEIGHT, WIDTH)).cuda()
    model_trt = torch2trt.torch2trt(model, [data],
                                    fp16_mode=True,
                                    max_workspace_size=1 << 25)
    OPTIMIZED_MODEL = 'resnet18_baseline_att_224x224_A_epoch_249_trt.pth'
    torch.save(model_trt.state_dict(), OPTIMIZED_MODEL)
    model_trt = TRTModule()
    model_trt.load_state_dict(torch.load(OPTIMIZED_MODEL))

    t0 = time.time()
    torch.cuda.current_stream().synchronize()
    for i in range(50):
        y = model_trt(data)
    torch.cuda.current_stream().synchronize()
    t1 = time.time()

    print(50.0 / (t1 - t0))  # inference FPS over 50 runs

    mean = torch.Tensor([0.485, 0.456, 0.406]).cuda()
    std = torch.Tensor([0.229, 0.224, 0.225]).cuda()

    # camera = USBCamera(width=WIDTH, height=HEIGHT, capture_fps=15)
    camera = CSICamera(width=WIDTH, height=HEIGHT, capture_fps=15)

    return ut, camera, model_trt
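
The mean and std tensors above imply an input normalization step; a preprocess sketch in the style of Example #21, assuming cv2, PIL, and torchvision.transforms are imported and mean/std are in scope:

def preprocess(image):
    device = torch.device('cuda')
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = PIL.Image.fromarray(image)
    image = transforms.functional.to_tensor(image).to(device)
    image.sub_(mean[:, None, None]).div_(std[:, None, None])
    return image[None, ...]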
Example #14
from adafruit_servokit import ServoKit  # sets the pinout to 1000 (GPIO.TEGRA_SOC)
import RPi.GPIO as GPIO

# For the CSI camera
from jetcam.csi_camera import CSICamera
# For the servo motor
import board
import busio

print("Aneu desconectant")
for j in range(40):
    print(40-j)
    time.sleep(1)

torch.cuda.set_device(0)
camera = CSICamera(width=640, height=480)

#print("Camera initialized")
image = camera.read()
cv2.imwrite('/home/dlinano/Pictures/pic1.jpeg', image)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
img = Image.fromarray(image)
ori_width, ori_height = img.size

# image transform, to torch float tensor 3xHxW
img = img_transform(img)
img = torch.unsqueeze(img, 0)
#print("img size = " + str(img.size()))

with torch.no_grad():
    orig_mobilenet = mobilenet.__dict__['mobilenetv2'](pretrained=False)
Example #15
    model = AutopilotModel(pretrained=False)
    model.load_from_path(MODEL_PATH)
    model.eval()

    x = torch.ones((1, FRAME_CHANNELS, FRAME_SIZE, FRAME_SIZE)).cuda()
    model_trt = torch2trt(model, [x], fp16_mode=True)
    torch.save(model_trt.state_dict(), MODEL_PATH_TRT)

try:
    # Car
    car = NvidiaRacecar()
    car.throttle_gain = THROTTLE_GAIN
    car.steering_offset = STEERING_OFFSET

    # Camera
    camera = CSICamera(width=CAMERA_WIDTH, height=CAMERA_HEIGHT)

    # Control Loop
    while True:
        if SHOW_LOGS:
            start_time = time.time()

        camera_frame = camera.read()
        cropped_frame = center_crop_square(camera_frame)
        resized_frame = cv2.resize(cropped_frame, (FRAME_SIZE, FRAME_SIZE))
        preprocessed_frame = preprocess_image(resized_frame)
        output = model_trt(preprocessed_frame).detach().clamp(
            -1.0, 1.0).cpu().numpy().flatten()

        steering = float(output[0])
        car.steering = steering
Example #16
    image.sub_(mean[:, None, None]).div_(std[:, None, None])
    return image[None, ...]


from trt_pose.draw_objects import DrawObjects
from trt_pose.parse_objects import ParseObjects

parse_objects = ParseObjects(topology)
draw_objects = DrawObjects(topology)

#from jetcam.usb_camera import USBCamera
from jetcam.csi_camera import CSICamera
from jetcam.utils import bgr8_to_jpeg

#camera = USBCamera(width=WIDTH, height=HEIGHT, capture_fps=30)
camera = CSICamera(width=WIDTH, height=HEIGHT, capture_fps=30)

camera.running = True

import matplotlib.pyplot as plt
import numpy as np
import matplotlib.image as mpimg


def execute(change):
    image = change['new']
    data = preprocess(image)
    cmap, paf = model_trt(data)
    cmap, paf = cmap.detach().cpu(), paf.detach().cpu()
    counts, objects, peaks = parse_objects(
        cmap, paf)  #, cmap_threshold=0.15, link_threshold=0.15)
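    # (The excerpt ends here; the trt_pose demo continues by drawing the
    # detected skeleton and pushing the frame to a display widget, roughly:)
    draw_objects(image, counts, objects, peaks)
    # image_widget.value = bgr8_to_jpeg(image[:, ::-1, :])  # hypothetical widget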
Example #17
lFps_M = 0  # max fps

def hisEqulColor(img):
    # Equalize the luma channel in YCrCb space, then convert back to BGR in place
    ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
    channels = cv2.split(ycrcb)
    #print(len(channels))
    cv2.equalizeHist(channels[0], channels[0])
    #clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    #channels[0] = clahe.apply(channels[0])
    cv2.merge(channels, ycrcb)
    cv2.cvtColor(ycrcb, cv2.COLOR_YCR_CB2BGR, img)
    return img

# initialize the camera and grab a reference to the raw camera capture
#camera = cv2.VideoCapture(0)
camera = CSICamera(width=picW, height=picH)

#stdscr = curses.initscr()
#curses.cbreak()
#stdscr.keypad(1)

#stdscr.addstr(0,10,"Hit 'q' to quit")
#stdscr.refresh()

#key = ''

if len(sys.argv) == 1:
    hue_value1 = 1
    hue_value2 = 1
else:
    for a in sys.argv[1:]:
Example #18
"""
Simple test script to validate the motor controls and the steering and the camera and the ultrasonic sensors
"""

from jetcam.csi_camera import CSICamera
import ipywidgets
from IPython.display import display
from jetcam.utils import bgr8_to_jpeg

camera_in = CSICamera(width=224,
                      height=224,
                      capture_width=1080,
                      capture_height=720,
                      capture_fps=30)  # Default Nvidia Camera Setup

image_widget = ipywidgets.Image(format='jpeg')
display(image_widget)  # display once; updating .value refreshes the widget

while True:
    image_widget.value = bgr8_to_jpeg(camera_in.read())

# Test motors
import actuators
drive_train = actuators.ServoMotor()
Example #19
import vae
from vae.controller import VAEController
from stable_baselines import SAC
car = NvidiaRacecar()
throttle = 0.8
#path = "jetcar_weights.pkl"
path = "logs/sac/JetVae-v0_46/JetVae-v0.pkl"
input_array = np.zeros((1, 256))
try:
    i = 0

    v = VAEController()
    v.load("logs/vae-256.pkl")
    #model = keras.models.load_model(path)
    model = SAC.load(path)
    camera = CSICamera(width=112, height=112)
    camera.running = True

    print("Imported Camera! Ready to Start!")
    while True:
        print("Starting to read image")
        image = camera.value.copy()
        print("Image Read!")
        #image = cv2.resize(image, (224//2, 224//2))
        print(type(image))
        tmp = v.encode_from_raw_image(image)
        print("Got image")
        input_array[0, :] = tmp
        print("Predicting from model")
        action, _ = model.predict(input_array, deterministic=True)
        t = (action[1] + 1) / 2
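        # (The excerpt ends here; a hypothetical continuation that applies the
        # predicted action, assuming action = [steering, throttle]:)
        car.steering = float(action[0])
        car.throttle = t * throttle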
Example #20
import torch
from torch2trt import TRTModule

model_trt = TRTModule()
model_trt.load_state_dict(torch.load('road_following_model_trt.pth'))
# Create the racecar class
from jetracer.nvidia_racecar import NvidiaRacecar

car = NvidiaRacecar()
# Create the camera class
from jetcam.csi_camera import CSICamera

camera = CSICamera(width=224, height=224, capture_fps=65)
"""
Finally, execute the cell below to make the racecar move forward, steering the racecar based on the x value of the apex.

Here are some tips,

If the car wobbles left and right, lower the steering gain
If the car misses turns, raise the steering gain
If the car tends right, make the steering bias more negative (in small increments like -0.05)
If the car tends left, make the steering bias more postive (in small increments +0.05)
"""
from utils import preprocess
import numpy as np

STEERING_GAIN = 0.75
STEERING_BIAS = 0.00
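
The excerpt stops before the control loop those tips refer to; a minimal sketch of that loop, assuming the imported preprocess matches the model input and using an illustrative fixed throttle:

while True:
    image = camera.read()
    output = model_trt(preprocess(image)).detach().cpu().numpy().flatten()
    x = float(output[0])  # x value of the apex
    car.steering = x * STEERING_GAIN + STEERING_BIAS
    car.throttle = 0.2  # illustrative fixed throttle
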
Example #21
class PoseEstimation(object):

    OPTIMIZED_MODEL = '/home/athelas/athelas_ws/src/pose_estimation/data/resnet18_baseline_att_224x224_A_epoch_249_trt.pth'
    WIDTH = 224
    HEIGHT = 224
    HUMAN_POSE = '/home/athelas/athelas_ws/src/pose_estimation/data/human_pose.json'

    def __init__(self, display_widget=None):
        self.display_widget = display_widget

        with open(self.HUMAN_POSE, 'r') as f:
            human_pose = json.load(f)

        topology = trt_pose.coco.coco_category_to_topology(human_pose)

        self.model_trt = TRTModule()
        self.model_trt.load_state_dict(torch.load(self.OPTIMIZED_MODEL))

        self.parse_objects = ParseObjects(topology)
        self.keypoint_coordinates = KeypointCoordinates(
            human_pose["keypoints"])

        self.camera = CSICamera(width=self.WIDTH,
                                height=self.HEIGHT,
                                capture_fps=30)
        self.camera.running = True

        if self.display_widget is None:
            self.display = plt.imshow(self.camera.value)
            plt.ion()
            plt.show()

        # ROS stuff
        s = rospy.Service('get_keypoint', GetKeypoint,
                          self.__handle_get_keypoint)
        self.image_pub = rospy.Publisher("image", Image, queue_size=10)
        self.bridge = CvBridge()

    def __handle_get_keypoint(self, req):
        keypoints = self.capture()
        if req.location in keypoints:
            coord = keypoints[req.location]
        else:
            coord = [-999, -999]
        return GetKeypointResponse(coord[0], coord[1])

    def __preprocess(self, image):
        mean = torch.Tensor([0.485, 0.456, 0.406]).cuda()
        std = torch.Tensor([0.229, 0.224, 0.225]).cuda()
        device = torch.device('cuda')
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = PIL.Image.fromarray(image)
        image = transforms.functional.to_tensor(image).to(device)
        image.sub_(mean[:, None, None]).div_(std[:, None, None])
        return image[None, ...]

    def __execute(self, change):
        image = change['new']
        data = self.__preprocess(image)
        cmap, paf = self.model_trt(data)
        cmap, paf = cmap.detach().cpu(), paf.detach().cpu()
        counts, objects, peaks = self.parse_objects(cmap, paf)
        keypoints = self.keypoint_coordinates(image, counts, objects, peaks)
        self.image_pub.publish(self.bridge.cv2_to_imgmsg(image, "bgr8"))
        if self.display_widget:
            self.display_widget.value = bgr8_to_jpeg(image[:, ::-1, :])
        else:
            self.display.set_data(image[:, :, ::-1])
            plt.pause(0.000001)
        return keypoints

    def start_stream(self):
        self.camera.observe(self.__execute, names='value')

    def stop_stream(self):
        self.camera.unobserve_all()

    def capture(self):
        return self.__execute({'new': self.camera.value})

    def terminate(self):
        self.camera.close()
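
A usage sketch for PoseEstimation, assuming a ROS master is running (the node name is illustrative):

rospy.init_node('pose_estimation')
pose = PoseEstimation()
keypoints = pose.capture()  # process a single frame
pose.start_stream()         # or stream continuously via the camera observer
rospy.spin()                # serve get_keypoint requests until shutdown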
Example #22
# FPS computation
from datetime import datetime

lFps_sec = 0  # current second
lFPSbeg = 0  # the second we started to run
lFPSrun = 0  # running for X seconds
lFPSfnm = 0  # number of frames
#
lFps_c = 0  # current fps
lFps_k = 0  # current frames
lFps_M = 0  # max fps

use_display = True
# Create a VideoCapture object
#capture = cv2.VideoCapture (0)
capture = CSICamera(width=1280, height=720)

# Check if camera opened successfully
#if (capture.isOpened() == False):
#  print("Unable to read camera feed")

# Default frame resolutions are system dependent.
# We convert the resolutions from float to integer.
w = 1280  #int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH ))
h = 720  #int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT ))
fps = 60
vname = "/mnt/cv2video-{}p-{}.avi".format(
    h,
    datetime.now().strftime("%Y%m%d-%H%M%S-%f"))
# Define the codec and create VideoWriter object.The output is stored in 'outpy.avi' file.
#fourcc = cv2.VideoWriter_fourcc(*'XVID')  # VideoWriter_fourcc is unavailable in this OpenCV build
Example #23
from jetcam.csi_camera import CSICamera
import cv2

camera = CSICamera(width=224,
                   height=224,
                   capture_width=1080,
                   capture_height=720,
                   capture_fps=30)

camera.read()  # read() also refreshes camera.value

image = camera.value

#camera.running = True

cv2.imshow("image", image)

cv2.waitKey(5000)

#video = cv2.VideoCapture(0)

#check, frame = video.read()

#print(frame)
Example #24
import cv2
from jetcam.csi_camera import CSICamera

file_path = "/home/nano/Desktop/output/"
#480x320
camera_right = CSICamera(capture_device=0, width=1280, height=720)
camera_left = CSICamera(capture_device=1, width=1280, height=720)
counter = 1

while True:
    image_right = camera_right.read()
    image_left = camera_left.read()
    cv2.imshow("right", image_right)
    cv2.imshow("left", image_left)

    key = cv2.waitKey(1) & 0xFF  # poll the keyboard once per frame
    if key == ord('s'):  # press s to save the current pair
        print("save capture{}".format(counter))
        cv2.imwrite(file_path + "right{}.jpg".format(counter), image_right)
        cv2.imwrite(file_path + "left{}.jpg".format(counter), image_left)
        counter = counter + 1
    elif key == ord('q'):  # press q to stop, or just Ctrl+C
        break
cv2.destroyAllWindows()
Example #25
from trt_pose.draw_objects import DrawObjects
from trt_pose.parse_objects import ParseObjects

parse_objects = ParseObjects(topology)
draw_objects = DrawObjects(topology)

from jetcam.usb_camera import USBCamera
from jetcam.csi_camera import CSICamera
from jetcam.utils import bgr8_to_jpeg

camera = CSICamera(width=WIDTH, height=HEIGHT, capture_fps=30)

camera.running = True

def get_keypoints(image, human_pose, topology, object_counts, objects,
                  normalized_peaks):
    """Extract the keypoints from torch data into a dictionary whose keys are
    keypoint names and whose values are the (x, y) coordinates, interpreted on
    the given image.

    Args:
        image: cv2 image
        human_pose: json formatted file about the keypoints
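        ...
    """
    # (The excerpt is truncated above; a hypothetical body consistent with the
    # docstring, assuming trt_pose's normalized (y, x) peak layout:)
    height, width = image.shape[:2]
    keypoints = {}
    for i in range(int(object_counts[0])):
        obj = objects[0][i]
        for j, name in enumerate(human_pose['keypoints']):
            k = int(obj[j])
            if k >= 0:
                peak = normalized_peaks[0][j][k]
                keypoints[name] = (round(float(peak[1]) * width),
                                   round(float(peak[0]) * height))
        break  # only the first detected person is returned
    return keypoints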
Example #26
from jetcam.csi_camera import CSICamera

camera = CSICamera(width=224, height=224)

image = camera.read()

print(image.shape)

print(camera.value.shape)

import ipywidgets
from IPython.display import display
from jetcam.utils import bgr8_to_jpeg

image_widget = ipywidgets.Image(format='jpeg')

image_widget.value = bgr8_to_jpeg(image)

display(image_widget)
Example #27
from jetcam.csi_camera import CSICamera
from tensorflow.keras.preprocessing import image
from approxeng.input.selectbinder import ControllerResource
from FindLane import findlane, img_preprocess
import numpy as np
import cv2
import time
import serial

WIDTH = 244
HEIGHT = 244
camera = CSICamera(width=WIDTH,
                   height=HEIGHT,
                   capture_width=1080,
                   capture_height=720,
                   capture_fps=120)
#camera = cv2.VideoCapture(0)
camera.running = True

#output_names = ['dense_11/Softmax']
output_names = ['dense_12/BiasAdd']
input_names = ['input']

import tensorflow as tf


def get_frozen_graph(graph_file):
    """Read Frozen Graph file from disk."""
    with tf.gfile.FastGFile(graph_file, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
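    # (The excerpt is cut off here; the function presumably ends by returning
    # the parsed graph definition:)
    return graph_def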
Example #28
import os
import time

import keyboard
import numpy as np
from jetcam.csi_camera import CSICamera
from jetracer.nvidia_racecar import NvidiaRacecar


def collect_data(train, throttle, steering, s_gain):

    # Create the camera and car interfaces
    # Note: only one CSICamera object can be created at a time
    camera = CSICamera(width=112, height=112)
    camera.running = True
    car = NvidiaRacecar()

    # Initialize parameters
    car.throttle = throttle
    car.steering = steering
    car.throttle_gain = -0.5

    # Make sure the output directory exists, then ask for a file name
    if not os.path.exists("data/"):
        os.makedirs("data/")
    file_name = input("File_name: ")


    try:
        while True:
            # Display steering and throttle
            print("Steering: {}, Throttle: {}".format(car.steering, car.throttle))

            # Increase throttle while 'i' is held
            if keyboard.is_pressed('i'):
                if car.throttle <= 0.04:
                    car.throttle = 0.04
                if car.throttle < 0.062:
                    car.throttle += 0.001
            # Decrease throttle on release
            if not keyboard.is_pressed('i'):
                if car.throttle <= 0.04:
                    car.throttle = 0.04
                else:
                    car.throttle -= 0.01

            # Turn left
            if keyboard.is_pressed('j'):
                if car.steering < 0.6:
                    car.steering += 0.01

            # Turn right
            if keyboard.is_pressed('l'):
                if car.steering > -0.6:
                    car.steering -= 0.01

            # Ease the steering back toward 0 degrees when neither key is held
            if not keyboard.is_pressed('j') and not keyboard.is_pressed('l'):
                if car.steering > 0:
                    car.steering -= 0.015
                if car.steering < 0:
                    car.steering += 0.015

            # Emergency brake
            if keyboard.is_pressed('space'):
                car.throttle = -100
                time.sleep(0.1)
                car.throttle = 0

            # Save the recorded labelled data
            if keyboard.is_pressed('s'):
                car.throttle = 0
                car.steering = 0
                print("saving")
                np.savez("data/" + str(file_name) + ".npz", train=np.array(train))
                print("saved")
                camera.running = False
                del camera
                break

            # Abort
            if keyboard.is_pressed('a'):
                car.throttle = 0
                car.steering = 0
                camera.running = False
                del camera
                break

            # Append one labelled sample per iteration
            train.append([camera.value, car.throttle, car.steering])
    except:  # bare except so cleanup also runs on Ctrl+C
        camera.running = False
        del camera


if __name__ == '__main__':
    # input: train, throttle, steering, s_gain
    collect_data([], 0, 0, -0.5)
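
The saved archive can be read back for training; a sketch with an illustrative file name (allow_pickle is needed because each row mixes an image array with scalars):

data = np.load("data/run1.npz", allow_pickle=True)["train"]
for image, throttle, steering in data:
    print(image.shape, throttle, steering)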
    
Example #29
from jetcam.csi_camera import CSICamera
import cv2
import time
from jetbot import Robot
import logging
# from threading import Thread
import threading

# from jetcam.usb_camera import USBCamera

robot = Robot()
camera = CSICamera(width=224, height=224)
# camera = USBCamera(width=224, height=224)

camera.running = True

# def thread_function(name):
#     print("capturing")
#     cv2.imwrite("dataset/{0}-{1}.jpg".format("A", time.time()), camera.value)
#     print("done")
#     logging.info("Thread %s: starting", name)
#     time.sleep(.2)

# x = threading.Thread(target=thread_function, args=(100,))
# x.start()
#

i = 0
while True:
    print("capturing")
    cv2.imwrite("dataset/{0}-{1}.jpg".format("A", time.time()), camera.value)
Example #30
import numpy as np

import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2

from jetcam.csi_camera import CSICamera

import time

# create the camera object
camera = CSICamera(width=224,
                   height=224,
                   capture_width=1080,
                   capture_height=720,
                   capture_fps=30)

# variable to store the captured images and the current image
imageList = []
currentImg = None

# Read the camera once and set it running; afterwards, reading camera.value
# always returns the latest frame
img = camera.read()
camera.running = True

print('Start taking frames')

# capture 400 frames
for i in range(400):
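    # (The excerpt ends at the loop header; a hypothetical body that stores
    # the latest frame each iteration:)
    currentImg = camera.value
    imageList.append(currentImg)
    time.sleep(1 / 30)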