def __init__(self):
    self.camera = Camera.instance(width=224, height=224)
    self.robot = Robot()

    # Collision avoidance model.
    self.ca_model = torchvision.models.alexnet(pretrained=False)
    self.ca_model.classifier[6] = torch.nn.Linear(
        self.ca_model.classifier[6].in_features, 2)
    self.ca_model.load_state_dict(torch.load('best_model.pth'))
    self.device = torch.device('cuda')
    self.ca_model = self.ca_model.to(self.device)
    self.mean = torch.Tensor([0.485, 0.456, 0.406]).cuda().half()
    self.std = torch.Tensor([0.229, 0.224, 0.225]).cuda().half()
    self.normalize = torchvision.transforms.Normalize(self.mean, self.std)

    # Road following support variables.
    self.angle = 0.0
    self.angle_last = 0.0

    # Instantiating the road following network.
    self.rf_model = torchvision.models.resnet18(pretrained=False)
    self.rf_model.fc = torch.nn.Linear(512, 2)
    self.rf_model.load_state_dict(torch.load('best_steering_model_xy.pth'))
    self.rf_model = self.rf_model.to(self.device)
    self.rf_model = self.rf_model.eval().half()

    self.speed_gain = 0.12
    self.steering_gain = 0
    self.steering_dgain = 0.1
    self.steering_bias = 0.0
    self.t_unit_dist = 0.04
    self.starttime = 0
    self.cumulative_angle = -1
    self.pitstop = []
    self.startposition = []
    self.pathpoints = [[]]
    self.proportionality_const = -1  # TODO: Add the proper value
def main(vocab_path, settings_path):
    slam = orbslam2.System(vocab_path, settings_path, orbslam2.Sensor.MONOCULAR)
    slam.set_use_viewer(True)
    slam.initialize()
    camera = Camera.instance(width=960, height=540,
                             capture_width=1280, capture_height=720)
    print('-----')
    print('Start processing sequence ...')
    ts = 0
    #cv2.namedWindow("test")
    try:
        # Run until interrupted; Ctrl-C falls through to the save/shutdown below,
        # which was unreachable in the original endless loop.
        while True:
            img = camera.value.copy()
            t1 = time.time()
            slam.process_image_mono(img, float(ts))
            t2 = time.time()
            ttrack = t2 - t1
            print("frame id:" + str(ts), ttrack)
            time.sleep(0.01)
            ts += 1
    except KeyboardInterrupt:
        pass
    save_trajectory(slam.get_trajectory_points(), 'trajectory.txt')
    slam.shutdown()
    return 0
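# `save_trajectory` is called above but not defined in this snippet. A minimal
# sketch, assuming get_trajectory_points() yields flat tuples (a timestamp
# followed by pose values), as in common ORB-SLAM2 Python binding examples:
def save_trajectory(trajectory, filename):
    with open(filename, 'w') as traj_file:
        for point in trajectory:
            # One whitespace-separated line per trajectory point.
            traj_file.write(' '.join(repr(v) for v in point) + '\n')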
def start(self):
    self.device = torch.device('cuda')
    print('Loading model...')
    # Create the collision model.
    self.model = torchvision.models.alexnet(pretrained=False)
    self.model.classifier[6] = torch.nn.Linear(
        self.model.classifier[6].in_features, 2)
    self.model.load_state_dict(torch.load(self.collision_model))
    self.model = self.model.to(self.device)
    # Create the robot.
    self.robot = Robot()
    print('Initializing camera...')
    # Create the camera and attach the update callback.
    self.camera = Camera.instance(width=224, height=224)
    print('Running...')
    self.camera.observe(self._update, names='value')

    def kill(sig, frame):
        print('Shutting down...')
        self.camera.stop()

    signal.signal(signal.SIGINT, kill)
    self.camera.thread.join()
def collision_avoidance():
    global normalize, device, model, mean, camera, robot
    i_frame = -1
    model = torchvision.models.alexnet(pretrained=False)
    model.classifier[6] = torch.nn.Linear(model.classifier[6].in_features, 2)
    model.load_state_dict(torch.load('best_model.pth'))
    device = torch.device('cuda')
    model = model.to(device)
    mean = 255.0 * np.array([0.485, 0.456, 0.406])
    stdev = 255.0 * np.array([0.229, 0.224, 0.225])
    camera = Camera.instance(width=224, height=224)
    normalize = torchvision.transforms.Normalize(mean, stdev)
    robot = Robot()
    robot.stop()
    now = time.time()
    stop_time = now + 120
    while time.time() < stop_time:
        i_frame += 1
        if i_frame % 2 == 0:
            # Process every other frame by calling the update function directly.
            update({'new': camera.value})
            #cv2.imshow(camera.value)
            #camera.observe(update, names='value')
    robot.stop()
    camera.stop()
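# `update` is called above but not shown. A minimal sketch following the stock
# JetBot collision-avoidance notebook (the 0.5 threshold and 0.4 speeds are
# assumptions, not taken from this code):
def update(change):
    x = change['new']
    x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB).transpose((2, 0, 1))  # HWC BGR -> CHW RGB
    x = normalize(torch.from_numpy(x).float())
    x = x.to(device)[None, ...]
    y = torch.nn.functional.softmax(model(x), dim=1)
    prob_blocked = float(y.flatten()[0])
    if prob_blocked < 0.5:
        robot.forward(0.4)  # path looks clear
    else:
        robot.left(0.4)     # turn away from the obstacle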
def __init__(self):
    print('Setting up camera')
    self.camera = Camera.instance(width=224, height=224)
    print('Set up camera')
    self.robot = Robot()
    self.completed = False

    # Collision Avoidance.
    print('Loading CA model')
    self.ca_model = torchvision.models.alexnet(pretrained=False)
    self.ca_model.classifier[6] = torch.nn.Linear(
        self.ca_model.classifier[6].in_features, 2)
    #self.ca_model.load_state_dict(torch.load('best_model_nvidia.pth'))
    self.device = torch.device('cuda')
    self.ca_model = self.ca_model.to(self.device)
    print('Loaded CA model')
    self.mean = 255.0 * torch.Tensor(np.array([0.485, 0.456, 0.406])).cuda().half()
    # ImageNet channel std values.
    self.std = 255.0 * torch.Tensor(np.array([0.229, 0.224, 0.225])).cuda().half()
    self.normalize = torchvision.transforms.Normalize(self.mean, self.std)

    # Road following support variables.
    self.angle = 0.0
    self.angle_last = 0.0

    # Instantiating the road following network.
    print('Loading RF model')
    self.rf_model = torchvision.models.resnet18(pretrained=False)
    self.rf_model.fc = torch.nn.Linear(512, 2)
    self.rf_model.load_state_dict(torch.load('best_steering_model_xy.pth'))
    self.rf_model = self.rf_model.to(self.device)
    self.rf_model = self.rf_model.eval().half()
    print('Loaded RF model')

    # Initializing additional variables.
    self.speed_gain = 0.12
    self.steering_gain = 0
    self.steering_dgain = 0.1
    self.steering_bias = 0.0
    self.starttime = 0
    self.cumulative_angle = -1
    self.pitstop = []
    self.startposition = []
    self.start_num = -1
    self.baton_callback = None
    self.pathpoints_callback = None
    self.pathpoints = [[]]
    # Add proper value below.
    self.proportionality_const = -1
    self.image_widget = ipywidgets.Image()
    self.previous_position = -1
    traitlets.dlink((self.camera, 'value'), (self.image_widget, 'value'),
                    transform=bgr8_to_jpeg)
class JetbotCamera:
    bridge = CvBridge()

    def __init__(self, calib_file=''):
        self.publisher = rospy.Publisher(IMAGE_TOPIC, Image, queue_size=1)
        rospy.init_node(NODE_NAME)
        width = rospy.get_param('~width', 640)
        height = rospy.get_param('~height', 480)
        rospy.logwarn(width)
        self.calib_file = calib_file
        self.camera = Camera(width=width, height=height)

    def start(self):
        self.camera.observe(self.image_proc, names='value')
        self.camera.start()
        rospy.spin()

    def image_proc(self, change):
        image_value = change['new']
        cv2_image = self.bridge.cv2_to_imgmsg(image_value, 'bgr8')
        self.publisher.publish(cv2_image)

    def _load_camera_info(self):
        with open(self.calib_file, "r") as file_handle:
            calib_data = yaml.safe_load(file_handle)
        # Parse the calibration file into a CameraInfo message.
        camera_info_msg = CameraInfo()
        camera_info_msg.width = calib_data["image_width"]
        camera_info_msg.height = calib_data["image_height"]
        camera_info_msg.K = calib_data["camera_matrix"]["data"]
        camera_info_msg.D = calib_data["distortion_coefficients"]["data"]
        camera_info_msg.R = calib_data["rectification_matrix"]["data"]
        camera_info_msg.P = calib_data["projection_matrix"]["data"]
        camera_info_msg.distortion_model = calib_data["distortion_model"]
        return camera_info_msg
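# _load_camera_info above is never called in this snippet; a hedged sketch of an
# extra JetbotCamera method that could wire it in (the 'camera_info' topic name
# is an assumption):
def start_with_info(self):
    info_pub = rospy.Publisher('camera_info', CameraInfo, queue_size=1, latch=True)
    if self.calib_file:
        # Latching re-delivers the one-shot calibration message to late subscribers.
        info_pub.publish(self._load_camera_info())
    self.start()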
def test_init_start_stop(self):
    """Test camera's thread init/start/stop methods."""
    camera = Camera()
    camera.start()
    time.sleep(1)
    image = bgr8_to_jpeg(camera.value)
    camera.stop()
    # Verify the image no longer changes after stopping.
    assert image == bgr8_to_jpeg(camera.value)
def __init__(self, width=640, height=480, saveFramesToFile=True):
    super().__init__()
    # Set camera frame size.
    self.width = width
    self.height = height
    # Camera instance.
    self.camera = Camera.instance(width=self.width, height=self.height)
    # Region of interest.
    self.sceneWhole = None
    self.sceneROI = None
    # Captured frame.
    self.savedFrameWhole = None
    self.savedFrameROI = None
    # File numbering counter.
    self.fileNumberCount = 1000000000
    # Flag whether to save frames to file or not.
    self.saveFramesToFile = saveFramesToFile
def cam_capture(fn):
    global cam
    global capidx
    capidx += 1
    path = "/home/jetbot/camerasnaps"
    try:
        os.makedirs(path)
    except FileExistsError:
        pass
    except Exception as ex:
        print("Exception creating directory '%s', error: %s" % (path, str(ex)))
        return
    if cam is None:
        try:
            print("Initializing camera...")
            cam = Camera.instance(width=960, height=720)
            print("\r\nCamera initialized!")
        except Exception as ex:
            sys.stderr.write("Exception initializing camera: " + str(ex))
            cam = None
            return
    try:
        fp = os.path.join(path, fn + '.jpg')
        with open(fp, 'wb') as f:
            f.write(bytes(imencode('.jpg', cam.value)[1]))
        teensyadc.set_camera_led()
        try:
            uid = pwd.getpwnam("jetbot").pw_uid
            gid = grp.getgrnam("jetbot").gr_gid
            os.chown(fp, uid, gid)
        except Exception as ex:
            sys.stderr.write("Exception changing ownership of file '%s', error: %s"
                             % (fp, str(ex)))
    except Exception as ex:
        sys.stderr.write("Exception writing to file '%s', error: %s" % (fp, str(ex)))
def face_main():
    global face
    global smile
    global eye
    camera_0 = Camera.instance(width=224, height=224)
    face_seen = 1
    face_haar_model = './data/haarcascade_frontalface_default.xml'
    smile_haar_model = './data/haarcascade_smile.xml'
    eye_haar_model = './data/haarcascade_eye.xml'
    face = cv2.CascadeClassifier(face_haar_model)
    smile = cv2.CascadeClassifier(smile_haar_model)
    eye = cv2.CascadeClassifier(eye_haar_model)
    while True:
        frame_0 = camera_0.value
        gray_0 = cv2.cvtColor(frame_0, cv2.COLOR_BGR2GRAY)
        canvas_0 = detect(gray_0, frame_0, face_seen)
        #cv2.imshow('Video', canvas_0)
        if cv2.waitKey(1) & 0xff == ord('q'):
            break
    camera_0.stop()
    cv2.destroyAllWindows()
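# `detect` above is not shown in this snippet. A minimal sketch following the
# usual Haar-cascade pattern (the scale factors, min-neighbor counts, and the
# unused `face_seen` parameter are assumptions):
def detect(gray, frame, face_seen):
    faces = face.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = frame[y:y + h, x:x + w]
        # Search for eyes and smiles only inside the detected face region.
        for (ex, ey, ew, eh) in eye.detectMultiScale(roi_gray, 1.1, 22):
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 1)
        for (sx, sy, sw, sh) in smile.detectMultiScale(roi_gray, 1.7, 22):
            cv2.rectangle(roi_color, (sx, sy), (sx + sw, sy + sh), (0, 0, 255), 1)
    return frame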
class Observer:
    def __init__(self, camera_width, camera_height):
        self.camera = Camera(width=camera_width, height=camera_height)
        self.image = None

    def start(self):
        self.camera.observe(self._callback, names='value')
        self.camera.start()

    def stop(self):
        self.camera.stop()

    def _callback(self, change):
        img = change['new']
        # Convert BGR to RGB (HWC layout).
        self.image = img[:, :, ::-1]

    def observation(self):
        # Busy-wait until the first frame arrives.
        while self.image is None:
            pass
        return self.image
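# Example use of the Observer class above: grab a single RGB frame, then stop.
observer = Observer(camera_width=224, camera_height=224)
observer.start()
frame = observer.observation()  # busy-waits until the first frame arrives
print(frame.shape)              # expected (224, 224, 3)
observer.stop()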
try:
    model_roadfollow.fc = torch.nn.Linear(512, 2)
    model_roadfollow.load_state_dict(torch.load(steering_model_path))
    model_dinodet = torchvision.models.resnet18(pretrained=False)
    model_dinodet.fc = torch.nn.Linear(512, 6)
    model_dinodet.load_state_dict(torch.load(dino_detect_model_path))
    device = torch.device('cuda')
    model = model_roadfollow.to(device)
    model = model_roadfollow.eval().half()
    model_dinodet = model_dinodet.to(device)
    model_dinodet = model_dinodet.eval()
    camera = Camera.instance(width=int(rospy.get_param("image_width")),
                             height=int(rospy.get_param("image_height")))
    from jetbot import Robot
    robot = Robot()
    speed_gain_slider = float(rospy.get_param("speed"))
    steering_gain_slider = float(rospy.get_param("steering_gain"))
    steering_dgain_slider = float(rospy.get_param("steering_d_gain"))
    steering_bias_slider = float(rospy.get_param("steering_bias"))
    angle = 0.0
    angle_last = 0.0
    prev_class = -1
except Exception as e:
    rospy.loginfo("Error occurred: %s", str(e))

dino_names = rospy.get_param("dinosaurs")
def trigger_speech(trigger_type):
    '''
    Respond to the user command, or trigger:
    - "greeting": The user says hi to J-Bot and she responds with general weather information
    - "weather": The user asks about the weather, J-Bot responds with the weather information
    - "air pollution": The user asks about the air quality, J-Bot responds with the air quality information
    - "camera": The user asks for an opinion about their outfit, J-Bot responds according to the
      conditions outside, saying whether the outfit is suitable

    Args:
        trigger_type: (str) Command by the user

    Returns:
        (str) What J-Bot tells the user according to what the user asks
    '''
    # Get the information about the weather and air quality.
    d = get_outside_condition()
    temp = int(d['temperature'])
    highest_temp_forecast = 0
    highest_temp_time = 0
    forecasted_weather = ""
    forecasted_time = ""
    current_w = d['weather']['detail']
    is_all_day = True
    forecasted_data = d['forecast']
    air_quality = d['air_condition']['level']
    count_equal = 0
    for f in forecasted_data:
        # Track the highest forecasted temperature for the day.
        if highest_temp_forecast < f['temperature']:
            highest_temp_forecast = f['temperature']
            highest_temp_time = f['time']
        if current_w == f['weather']['detail']:
            count_equal += 1
        elif is_all_day and (current_w != f['weather']['detail']):
            forecasted_weather = f['weather']['detail']
            forecasted_time = f['time']
            is_all_day = False
    # If the highest forecasted temperature is lower than the current temperature,
    # use the current temperature as the forecasted high.
    if highest_temp_forecast <= temp:
        highest_temp_forecast = temp
    # Greeting: combine the greeting, weather (temperature included), and air pollution contexts.
    if trigger_type == "greeting":
        return greeting_context() + " " + weather_context(
            temp, highest_temp_forecast, highest_temp_time, forecasted_weather,
            forecasted_time, forecasted_data, current_w, is_all_day,
            count_equal) + " " + air_context(d['air_condition']['level'])
    # Weather context only.
    elif trigger_type == "weather":
        return weather_context(temp, highest_temp_forecast, highest_temp_time,
                               forecasted_weather, forecasted_time,
                               forecasted_data, current_w, is_all_day, count_equal)
    # Air pollution context only.
    elif trigger_type == 'air pollution':
        return air_context(air_quality)
    # Check the user's outfit and respond accordingly.
    elif trigger_type == 'camera':
        # Initialize the camera and link it to an image widget.
        camera = Camera.instance(width=224, height=224)
        camera.start()
        image = widgets.Image(format='jpeg', width=224, height=224)
        camera_link = traitlets.dlink((camera, 'value'), (image, 'value'),
                                      transform=bgr8_to_jpeg)
        image_path = 'temp.jpg'
        # Save the camera image locally.
        with open(image_path, 'wb') as f:
            f.write(image.value)
        camera.stop()
        # Classify the user's upper and lower outfit.
        top, bot = detectClothes(image_path, clothe_model)
        print(top, bot)
        return recommend_clothes(highest_temp_forecast, forecasted_weather,
                                 air_quality, top, bot)
import os
import time

from jetbot import Camera, bgr8_to_jpeg

interval_seconds = 5
directory = 'images'

camera = Camera()

# count = 0
# while True:
file_name = os.path.join(directory, 'image_1.jpeg')
with open(file_name, 'wb') as f:
    f.write(bgr8_to_jpeg(camera.value))
# count += 1
# time.sleep(interval_seconds)
logging.config.dictConfig(logging_config)
logger = logging.getLogger(__name__)

# Initialize the camera.
logger.info('initialize camera')
camera_width = 600
camera_height = 600
model_width = 300
model_height = 300
xscale = model_width * (camera_width / model_width)
yscale = model_height * (camera_height / model_height)
# Sensor capture modes: 3280 x 2464, 1920 x 1080, 1280 x 720.
#camera = Camera.instance(width=camera_width, height=camera_height, capture_width=3280, capture_height=2464)
camera = Camera.instance(width=camera_width, height=camera_height,
                         capture_width=1920, capture_height=1080)

cat_count = 0
seconds_between_pics = 1.0
v2_coco_labels_to_capture = [16, 17, 18, 91, 92]

# Create the save directory. The try/except is needed because makedirs raises
# if the directories already exist.
image_dir = 'dataset/cats'
try:
    os.makedirs(image_dir)
except FileExistsError:
    logger.info('image directories not created because they already exist')
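# The capture loop itself is not part of this excerpt. A hedged sketch of what
# it might look like with jetbot's ObjectDetector (the engine path, the
# detection dict layout, the file naming, and the availability of os/time
# imports are all assumptions):
from jetbot import ObjectDetector, bgr8_to_jpeg

model = ObjectDetector('ssd_mobilenet_v2_coco.engine')
while True:
    detections = model(camera.value)[0]
    # Save a frame whenever any detection matches a cat-like COCO label.
    if any(det['label'] in v2_coco_labels_to_capture for det in detections):
        fname = os.path.join(image_dir, 'cat_{:06d}.jpg'.format(cat_count))
        with open(fname, 'wb') as f:
            f.write(bgr8_to_jpeg(camera.value))
        cat_count += 1
        time.sleep(seconds_between_pics)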
from jetbot import Camera

width = 640
height = 480
camera = Camera.instance(width=width, height=height)

while True:
    # camera.value is an HWC array, so index rows (height) before columns (width).
    print(camera.value[height // 2][width // 2][0])
# otherwise steer towards target
else:
    # Move robot forward and steer proportionally to the target's x-distance from center.
    center = detection_center(det)
    robot.set_motors(
        float(speed_widget.value + turn_gain_widget.value * center[0]),
        float(speed_widget.value - turn_gain_widget.value * center[0]))

# update image widget
image_widget.value = bgr8_to_jpeg(image)

model = ObjectDetector(
    '/home/dewald/projects/frameworks/nvidia/deepstream_sdk_v4.0.2_jetson/samples/models/SSD-Mobilenet-v2/ssd_mobilenet_v2_coco.engine'
)
camera = Camera.instance(width=300, height=300)
detections = model(camera.value)
print(detections)

collision_model = torchvision.models.alexnet(pretrained=False)
collision_model.classifier[6] = torch.nn.Linear(
    collision_model.classifier[6].in_features, 2)
collision_model.load_state_dict(
    torch.load('/home/dewald/projects/jetbot/jetbot/best_model.pth'))
device = torch.device('cuda')
collision_model = collision_model.to(device)
mean = 255.0 * np.array([0.485, 0.456, 0.406])
stdev = 255.0 * np.array([0.229, 0.224, 0.225])
normalize = torchvision.transforms.Normalize(mean, stdev)

# Robot Movement
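# `detection_center` above is used but not shown; in the stock JetBot
# object-following notebook it returns a detection's center offset from the
# image center, with bounding boxes normalized to [0, 1]:
def detection_center(detection):
    bbox = detection['bbox']
    center_x = (bbox[0] + bbox[2]) / 2.0 - 0.5
    center_y = (bbox[1] + bbox[3]) / 2.0 - 0.5
    return (center_x, center_y)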
import os

import cv2
from jetbot import Camera

dataset_path = "jetbot_dataset_960test2/"
image_folder = "rgb/"
if not os.path.exists(dataset_path + image_folder):
    os.makedirs(dataset_path + image_folder)
file = open(dataset_path + "rgb.txt", "w")

camera = Camera.instance(width=960, height=540,
                         capture_width=1280, capture_height=720)
iid = 0
while True:
    img = camera.value
    cv2.imshow("test", img)
    fname = str(iid).zfill(4) + ".jpg"
    cv2.imwrite(dataset_path + image_folder + fname, img)
    # Write the index file entry.
    file.write(str(iid) + " " + image_folder + fname + "\n")
    iid += 1
    k = cv2.waitKey(100)
    if k == ord('q'):
        camera.stop()
        break
file.close()
#3 seconds
# Great! We've now defined our pre-processing function which can convert images
# from the camera format to the neural network input format.
#
# Now, let's start and display our camera. You should be pretty familiar with this
# by now. We'll also create a slider that will display the probability that the
# robot is blocked.

# In[5]:

print('16 second traitlets')
import traitlets
from IPython.display import display
import ipywidgets.widgets as widgets
from jetbot import Camera, bgr8_to_jpeg

camera = Camera.instance(width=224, height=224)
image = widgets.Image(format='jpeg', width=224, height=224)
blocked_slider = widgets.FloatSlider(description='blocked', min=0.0, max=1.0,
                                     orientation='vertical')
# DMF: added the GPU utilization slider below.
gpu_slider = widgets.FloatSlider(description="GPU%", min=0.0, max=1.0,
                                 orientation='vertical')
camera_link = traitlets.dlink((camera, 'value'), (image, 'value'),
                              transform=bgr8_to_jpeg)
display(widgets.HBox([image, blocked_slider, gpu_slider]))
import imutils
from PIL import Image
from jetbot import Camera, Robot
import time

face_cascade = cv2.CascadeClassifier('./data/haarcascade_frontalface_default.xml')

KNOWN_FACES_DIR = 'known_faces'
#UNKNOWN_FACES_DIR = 'unknown_faces'  # only needed for still images
TOLERANCE = 1  # can be changed
FRAME_THICKNESS = 3
FONT_THICKNESS = 2
MODEL = 'cnn'  # default: 'hog'; 'cnn' is a CUDA-accelerated (if available) deep-learning pretrained model

#video = cv2.VideoCapture(0)
video = Camera.instance()


# Returns (R, G, B) from name
def name_to_color(name):
    # Take the first 3 letters and lowercase them.
    # Lowercase ord() values range from 97 to 122; subtract 97, multiply by 8.
    color = [(ord(c.lower()) - 97) * 8 for c in name[:3]]
    return color


print('Loading known faces...')
known_faces = []
known_names = []
    x = torch.from_numpy(x).float()
    x = normalize(x)
    x = x.to(device)
    x = x[None, ...]
    return x


# Start the camera and display it (widgets can't be used from a plain .py
# script, so an alternative display is under consideration).
update_bar(1, 'start and display our camera')
import traitlets
#from IPython.display import display
#import ipywidgets.widgets as widgets
from jetbot import Camera, bgr8_to_jpeg

camera = Camera.instance(width=300, height=224, fps=15)
#image = widgets.Image(format='jpeg', width=224, height=224)
#blocked_slider = widgets.FloatSlider(description='blocked', min=0.0, max=1.0, orientation='vertical')
#camera_link = traitlets.dlink((camera, 'value'), (image, 'value'), transform=bgr8_to_jpeg)
#display(widgets.HBox([image, blocked_slider]))

# Create the robot instance.
update_bar(1, 'create our robot instance')
from jetbot import Robot

robot = Robot()

"""
# Create the tkinter UI.
import tkinter
"""
mean = torch.Tensor([0.485, 0.456, 0.406]).cuda().half()
std = torch.Tensor([0.229, 0.224, 0.225]).cuda().half()


def preprocess(image):
    image = PIL.Image.fromarray(image)
    image = transforms.functional.to_tensor(image).to(device).half()
    image.sub_(mean[:, None, None]).div_(std[:, None, None])
    return image[None, ...]


from IPython.display import display
import ipywidgets
import traitlets
from jetbot import Camera, bgr8_to_jpeg

camera = Camera()
image_widget = ipywidgets.Image()
traitlets.dlink((camera, 'value'), (image_widget, 'value'), transform=bgr8_to_jpeg)
display(image_widget)

from jetbot import Robot

robot = Robot()

speed_gain_slider = ipywidgets.FloatSlider(min=0.0, max=1.0, step=0.01,
                                           description='speed gain')
steering_gain_slider = ipywidgets.FloatSlider(min=0.0, max=1.0, step=0.01, value=0.2,
                                              description='steering gain')
steering_dgain_slider = ipywidgets.FloatSlider(min=0.0, max=0.5, step=0.001, value=0.0,
                                               description='steering kd')
steering_bias_slider = ipywidgets.FloatSlider(min=-0.3, max=0.3, step=0.01, value=0.0,
                                              description='steering bias')
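# These sliders typically feed an `execute` callback attached to the camera, as
# in the stock JetBot road-following notebook. A minimal sketch, assuming a
# trained steering `model` returning normalized (x, y) targets and numpy
# imported as np:
angle = 0.0
angle_last = 0.0

def execute(change):
    global angle, angle_last
    xy = model(preprocess(change['new'])).detach().float().cpu().numpy().flatten()
    x = xy[0]
    y = (0.5 - xy[1]) / 2.0
    angle = np.arctan2(x, y)
    pid = (angle * steering_gain_slider.value
           + (angle - angle_last) * steering_dgain_slider.value)
    angle_last = angle
    steering = pid + steering_bias_slider.value
    # Differential drive: add steering on one wheel, subtract on the other.
    robot.left_motor.value = max(min(speed_gain_slider.value + steering, 1.0), 0.0)
    robot.right_motor.value = max(min(speed_gain_slider.value - steering, 1.0), 0.0)

camera.observe(execute, names='value')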
from jetbot import Robot
from jetbot import Camera
import cv2
import time
import math
import os, struct, array
from fcntl import ioctl

robot = Robot()
camera = Camera()
camera.start()

# Iterate over the joystick devices.
print('Available devices:')
for fn in os.listdir('/dev/input'):
    if fn.startswith('js'):
        # Prints the joystick's device node, e.g. /dev/input/js0.
        print('  /dev/input/%s' % (fn))

# We'll store the states here.
axis_states = {}
button_states = {}

# These constants were borrowed from linux/input.h
axis_names = {
    0x00: 'x',
    0x01: 'y',
    0x02: 'rx',
    0x05: 'ry',
import numpy as np
import cv2
import glob
from jetbot import Camera

cam = Camera.instance()

# Termination criteria for corner refinement.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# Prepare object points, like (0,0,0), (1,0,0), (2,0,0), ..., (6,5,0).
objp = np.zeros((6 * 7, 3), np.float32)
objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2)

# Arrays to store object points and image points from all the images.
objpoints = []  # 3d points in real-world space
imgpoints = []  # 2d points in the image plane

#images = glob.glob('./chessboard_img/*.jpg')
#for fname in images:
while True:
    img = cam.value
    #img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners.
    ret, corners = cv2.findChessboardCorners(gray, (7, 6), None)
    print(ret)
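    # The snippet ends mid-loop; a sketch of how it could continue to finish the
    # calibration (the 15-view threshold is an arbitrary assumption):
    if ret:
        # Refine corner locations and accumulate point correspondences.
        corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        objpoints.append(objp)
        imgpoints.append(corners2)
    if len(objpoints) >= 15:
        rms, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
            objpoints, imgpoints, gray.shape[::-1], None, None)
        print('Camera matrix:\n', mtx)
        break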
from jetson import inference

# ## General Setups
# If a GPU is available, we will use it for efficiency.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

if __name__ == '__main__':
    print(device)

    # Instantiate the camera with the defined size.
    camwidth, camheight = 320, 240  #224,224
    cam = Camera.instance(width=camwidth, height=camheight)
    try:
        print("Camspecs are to specifications: ",
              ((cam.width == camwidth) and (cam.height == camheight)))
        print(cam.width, cam.height)
    except NameError:
        pass

    # Instantiate the robot.
    robot = Robot()
    # Sometimes the robot starts driving when instantiated, so stop it in case it does.
    robot.stop()


def geometric_average(l: list):