import time

from jetbot import Camera, bgr8_to_jpeg


def test_init_start_stop(self):
    """Test camera's thread init/start/stop methods."""
    camera = Camera()
    camera.start()
    time.sleep(1)
    image = bgr8_to_jpeg(camera.value)
    camera.stop()
    # the frame must no longer update once the capture thread is stopped
    assert image == bgr8_to_jpeg(camera.value)
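# A minimal sketch of a harness for running the method above on its own; the
# class name is hypothetical and not part of the original suite.
import unittest

CameraThreadTest = type('CameraThreadTest', (unittest.TestCase,),
                        {'test_init_start_stop': test_init_start_stop})

if __name__ == '__main__':
    unittest.main()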
def execute(change):
    image = change['new']

    # execute the collision model to determine if the robot is blocked
    collision_output = collision_model(preprocess(image)).detach().cpu()
    prob_blocked = float(F.softmax(collision_output.flatten(), dim=0)[0])
    blocked_widget.value = prob_blocked

    # turn left if blocked
    if prob_blocked > 0.5:
        robot.left(0.3)
        image_widget.value = bgr8_to_jpeg(image)
        return

    # compute all detected objects
    detections = model(image)

    # draw all detections on the image
    for det in detections[0]:
        bbox = det['bbox']
        cv2.rectangle(image,
                      (int(width * bbox[0]), int(height * bbox[1])),
                      (int(width * bbox[2]), int(height * bbox[3])),
                      (255, 0, 0), 2)

    # select detections that match the selected class label
    matching_detections = [
        d for d in detections[0] if d['label'] == int(label_widget.value)
    ]

    # get the detection closest to the center of the field of view and draw it
    det = closest_detection(matching_detections)
    if det is not None:
        bbox = det['bbox']
        cv2.rectangle(image,
                      (int(width * bbox[0]), int(height * bbox[1])),
                      (int(width * bbox[2]), int(height * bbox[3])),
                      (0, 255, 0), 5)

    # go forward if no target is detected
    if det is None:
        robot.forward(float(speed_widget.value))

    # otherwise steer toward the target, proportionally to its x-distance from the center
    else:
        center = detection_center(det)
        robot.set_motors(
            float(speed_widget.value + turn_gain_widget.value * center[0]),
            float(speed_widget.value - turn_gain_widget.value * center[0]))

    # update the image widget
    image_widget.value = bgr8_to_jpeg(image)
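# execute() relies on detection_center() and closest_detection(), which are not
# shown above. A minimal sketch consistent with how they are used here (bbox
# values normalized to [0, 1], centers measured as offsets from the image center):
import numpy as np

def detection_center(detection):
    """Center (x, y) of a detection, as an offset from the image center."""
    bbox = detection['bbox']
    center_x = (bbox[0] + bbox[2]) / 2.0 - 0.5
    center_y = (bbox[1] + bbox[3]) / 2.0 - 0.5
    return (center_x, center_y)

def closest_detection(detections):
    """Detection whose center is nearest the center of the field of view."""
    closest = None
    for det in detections:
        if closest is None or (np.linalg.norm(detection_center(det))
                               < np.linalg.norm(detection_center(closest))):
            closest = det
    return closest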
def display_xy(camera_image):
    image = np.copy(camera_image)
    x = x_slider.value
    y = y_slider.value
    # map slider values in [-1, 1] to pixel coordinates in a 224x224 frame
    x = int(x * 224 / 2 + 112)
    y = int(y * 224 / 2 + 112)
    # target point, bottom-center anchor, and the line between them
    image = cv2.circle(image, (x, y), 8, (0, 255, 0), 3)
    image = cv2.circle(image, (112, 224), 8, (0, 0, 255), 3)
    image = cv2.line(image, (x, y), (112, 224), (255, 0, 0), 3)
    jpeg_image = bgr8_to_jpeg(image)
    return jpeg_image
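# One way display_xy() is typically hooked up to the live feed: a traitlets
# link from the camera to an ipywidgets Image widget. This is a sketch; it
# assumes widgets named camera and image_widget already exist.
import traitlets

traitlets.dlink((camera, 'value'), (image_widget, 'value'),
                transform=display_xy)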
layout=widgets.Layout(align_self='center'))


# In[33]:

display(image_box)


# In[105]:

count = 0
seta = 0
while True:
    img = camera.value
    try:
        img_result, img_op, seta = search_road(img, seta)
        print(seta, end='\t')
        result1.value = bgr8_to_jpeg(img_result)
        result2.value = bgr8_to_jpeg(img_op)
        # proportional steering: seta is the road angle in degrees
        pw = 1.2
        w = seta / 90
        left_power = pw * (0.1 + w * 0.1)
        right_power = pw * (0.1 - w * 0.1)
        robot.set_motors(left_power, right_power)
        # time.sleep(0.5)
        # robot.stop()
    except Exception:
        print('not found', end='\r')
        robot.stop()
    input_image.value = bgr8_to_jpeg(img)
    if count == 120:
        break
    else:
        count += 1  # the cell breaks off here; the counter must advance for the loop to end
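# search_road() is not defined in this cell. A sketch of its expected shape,
# inferred only from the call site (every name and threshold below is an
# assumption): it takes the BGR frame and the previous road angle, and returns
# an annotated frame, an intermediate mask for debugging, and the new angle in
# degrees.
import cv2
import numpy as np

def search_road(img, prev_seta):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
    ys, xs = np.nonzero(mask)
    if len(xs) == 0:
        raise ValueError('no road pixels found')  # triggers the except branch
    # angle from the bottom-center of the image to the road centroid, in degrees
    h, w = mask.shape
    seta = np.degrees(np.arctan2(np.mean(xs) - w / 2, h - np.mean(ys)))
    annotated = cv2.circle(img.copy(),
                           (int(np.mean(xs)), int(np.mean(ys))),
                           8, (0, 255, 0), 3)
    return annotated, mask, seta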
def execute(change):
    global ping_counter
    ping_counter += 1
    if ping_counter > 60:
        logger.info("still observing")
        ping_counter = 0

    image = change['new']
    resized_image = image
    # resize whenever the model's input size differs from the camera's
    if model_width != camera_width or model_height != camera_height:
        resized_image = cv2.resize(image, (model_width, model_height),
                                   interpolation=cv2.INTER_AREA)

    # execute collision model to determine if blocked
    #collision_output = collision_model(preprocess(image,model_224,224)).detach().cpu()
    #prob_blocked = float(F.softmax(collision_output.flatten(), dim=0)[0])

    # turn left if blocked
    #if prob_blocked > 0.5:
    #    robot.left(0.4)
    #    return

    # compute all detected objects
    detections = model(resized_image)

    # draw all detections on image
    # for det in detections[0]:
    #     bbox = det['bbox']
    #     cv2.rectangle(image, (int(width * bbox[0]), int(height * bbox[1])),
    #                   (int(width * bbox[2]), int(height * bbox[3])), (255, 0, 0), 2)

    # select detections that match the classes of interest with enough confidence
    matching_detections = [
        d for d in detections[0]
        if d['label'] in v2_coco_labels_to_capture and d['confidence'] > 0.50
    ]

    tf_image_list = []
    if len(matching_detections) > 0:
        logger.info(matching_detections)
        filename = str(uuid1())
        for d in matching_detections:
            bbox = d['bbox']
            # scale normalized bbox coordinates to pixels and clamp to the frame
            xmin = max(int(xscale * bbox[0]), 0)
            ymin = max(int(yscale * bbox[1]), 0)
            xmax = min(int(xscale * bbox[2]), camera_width - 1)
            ymax = min(int(yscale * bbox[3]), camera_height - 1)
            tf_image_desc = [
                filename + '.jpg', camera_width, camera_height,
                int(d['label']), xmin, ymin, xmax, ymax
            ]
            tf_image_list.append(tf_image_desc)
        save_image(bgr8_to_jpeg(image), filename, tf_image_list)

    # get the detection closest to the center of the field of view and center the bot on it
    det = closest_detection(matching_detections)
    if det is not None:
        center = detection_center(det)
        logger.debug("center: %s", center)
        move_speed = 2.0 * center[0]
        if abs(move_speed) > 0.3:
            if move_speed > 0.0:
                robot.right(move_speed)
            else:
                robot.left(abs(move_speed))
        else:
            robot.stop()

    if logger.isEnabledFor(logging.DEBUG) and len(detections[0]) > 0:
        logger.debug(detections[0])
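# save_image() is not shown above. A minimal sketch, assuming it writes the
# JPEG bytes next to a CSV of bounding boxes in the common TensorFlow
# object-detection layout (filename, width, height, label, xmin, ymin, xmax,
# ymax); the 'dataset' directory name is an assumption.
import csv
import os

def save_image(jpeg_bytes, filename, annotations, out_dir='dataset'):
    os.makedirs(out_dir, exist_ok=True)
    with open(os.path.join(out_dir, filename + '.jpg'), 'wb') as f:
        f.write(jpeg_bytes)
    with open(os.path.join(out_dir, 'annotations.csv'), 'a', newline='') as f:
        csv.writer(f).writerows(annotations)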
import os
import time

from jetbot import Camera, bgr8_to_jpeg

interval_seconds = 5
directory = 'images'

camera = Camera()
os.makedirs(directory, exist_ok=True)  # make sure the target directory exists

# count = 0
# while True:
file_name = os.path.join(directory, 'image_1.jpeg')
with open(file_name, 'wb') as f:
    f.write(bgr8_to_jpeg(camera.value))
# count += 1
# time.sleep(interval_seconds)
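# A sketch of the periodic capture the commented-out lines point at, with a
# unique file name per frame so shots are not overwritten (the 100-frame stop
# condition here is an assumption):
count = 0
while count < 100:
    file_name = os.path.join(directory, 'image_%d.jpeg' % count)
    with open(file_name, 'wb') as f:
        f.write(bgr8_to_jpeg(camera.value))
    count += 1
    time.sleep(interval_seconds)
camera.stop()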
def button_proc(change):
    # snapshot the current camera frame into the image widget
    camera_image.value = bgr8_to_jpeg(camera.value)
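# How the callback above would typically be wired up, assuming camera_image is
# an ipywidgets Image widget (the button description is an assumption):
import ipywidgets as widgets
from IPython.display import display

button = widgets.Button(description='snap')
button.on_click(button_proc)
display(button, camera_image)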