from donkeycar.parts.rl_agent_sac import RL_Agent

agent = RL_Agent(alg_type=cfg.RL_ALG_TYPE,
                 sim=cfg.DONKEY_GYM,
                 car_name=cfg.DONKEY_UNIQUE_NAME,
                 encoder_update=cfg.ENCODER_UPDATE,
                 max_episode_steps=cfg.MAX_EPISODE_STEPS)
V.add(agent,
      inputs=["image", "speed"],
      outputs=["steering", "target_speed", "training"],
      threaded=False)

# REALSENSE
if cfg.REALSENSE:
    from donkeycar.parts.realsense2 import RS_T265
    from donkeycar.parts.pid import PID

    rs = RS_T265()
    V.add(rs,
          inputs=["training"],
          outputs=["pos", "vel", "acc", "img", "speed"],
          threaded=True)

    pid = PID()
    V.add(pid,
          inputs=["target_speed", "speed", "training"],
          outputs=["throttle"])

    logger = EpisodeLogger()
    V.add(logger,
          inputs=["training", "steering", "throttle", "target_speed", "pos", "vel"])

# STEERING
if not cfg.DONKEY_GYM:
    steering_controller = PCA9685(cfg.STEERING_CHANNEL,
                                  cfg.PCA9685_I2C_ADDR,
                                  busnum=cfg.PCA9685_I2C_BUSNUM)
    steering = PWMSteering(controller=steering_controller,
                           left_pulse=cfg.STEERING_LEFT_PWM,
                           right_pulse=cfg.STEERING_RIGHT_PWM)
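# EpisodeLogger is used in the fragment above but never imported or defined there. A
# minimal, hypothetical stand-in consistent with how it is wired (it only consumes the
# listed values) might look like the sketch below; the real part presumably persists
# episode data rather than printing it.
class EpisodeLogger:
    def run(self, training, steering, throttle, target_speed, pos, vel):
        # placeholder behavior: echo the episode signals while training is active
        if training:
            print(steering, throttle, target_speed, pos, vel)

    def shutdown(self):
        pass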
def drive(cfg):
    '''
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either
    its run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named
    outputs to parts requesting the same named input.
    '''

    # Initialize car
    V = dk.vehicle.Vehicle()

    if cfg.HAVE_SOMBRERO:
        from donkeycar.utils import Sombrero
        s = Sombrero()

    ctr = get_js_controller(cfg)

    V.add(ctr,
          inputs=['null'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    if cfg.HAVE_ODOM:
        pi = pigpio.pi()
        enc = PiPGIOEncoder(cfg.ODOM_PIN, pi)
        V.add(enc, outputs=['enc/ticks'])

        odom = OdomDist(mm_per_tick=cfg.MM_PER_TICK, debug=cfg.ODOM_DEBUG)
        V.add(odom,
              inputs=['enc/ticks', 'user/throttle'],
              outputs=['enc/dist_m', 'enc/vel_m_s', 'enc/delta_vel_m_s'])

        if not os.path.exists(cfg.WHEEL_ODOM_CALIB):
            print("You must supply a json file when using odom with T265. "
                  "There is a sample file in templates.")
            print("cp donkeycar/donkeycar/templates/calibration_odometry.json .")
            exit(1)
    else:
        # we give the T265 no calib to indicate we don't have odom
        cfg.WHEEL_ODOM_CALIB = None

        # This dummy part satisfies the input needs of the RS_T265 part.
        class NoOdom():
            def run(self):
                return 0.0

        V.add(NoOdom(), outputs=['enc/vel_m_s'])

    # This requires use of the Intel Realsense T265
    rs = RS_T265(image_output=False, calib_filename=cfg.WHEEL_ODOM_CALIB)
    V.add(rs,
          inputs=['enc/vel_m_s'],
          outputs=['rs/pos', 'rs/vel', 'rs/acc', 'rs/camera/left/img_array'],
          threaded=True)

    # Pull out the realsense T265 position stream, output 2d coordinates we can use to map.
    class PosStream:
        def run(self, pos):
            # y is up, x is right, z is backwards/forwards
            return pos.x, pos.z

    V.add(PosStream(), inputs=['rs/pos'], outputs=['pos/x', 'pos/y'])

    # This part will reset the car back to the origin. You must put the car at the known
    # origin and push the cfg.RESET_ORIGIN_BTN on your controller. This will allow you to
    # induce an offset in the mapping.
    origin_reset = OriginOffset()
    V.add(origin_reset, inputs=['pos/x', 'pos/y'], outputs=['pos/x', 'pos/y'])

    class UserCondition:
        def run(self, mode):
            if mode == 'user':
                return True
            else:
                return False

    V.add(UserCondition(), inputs=['user/mode'], outputs=['run_user'])

    # See if we should even run the pilot module.
    # This is only needed because the part run_condition only accepts boolean
    class PilotCondition:
        def run(self, mode):
            if mode == 'user':
                return False
            else:
                return True

    V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])

    # This is the path object. It will record a path when distance changes and it travels
    # at least cfg.PATH_MIN_DIST meters. Except when we are in follow mode, see below...
    path = Path(min_dist=cfg.PATH_MIN_DIST)
    V.add(path, inputs=['pos/x', 'pos/y'], outputs=['path'], run_condition='run_user')

    # When a path is loaded, we will be in follow mode. We will not record.
    path_loaded = False
    if os.path.exists(cfg.PATH_FILENAME):
        path.load(cfg.PATH_FILENAME)
        path_loaded = True

    def save_path():
        path.save(cfg.PATH_FILENAME)
        print("saved path:", cfg.PATH_FILENAME)

    def erase_path():
        global mode, path_loaded
        if os.path.exists(cfg.PATH_FILENAME):
            os.remove(cfg.PATH_FILENAME)
            mode = 'user'
            path_loaded = False
            print("erased path", cfg.PATH_FILENAME)
        else:
            print("no path found to erase")

    def reset_origin():
        print("Resetting origin")
        origin_reset.init_to_last()

    # Here's a trigger to save the path. Complete one circuit of your course; when you
    # have exactly looped, or are just shy of the loop, save the path and shut down
    # this process. Restart and the path will be loaded.
    ctr.set_button_down_trigger(cfg.SAVE_PATH_BTN, save_path)

    # Here's a trigger to erase a previously saved path.
    ctr.set_button_down_trigger(cfg.ERASE_PATH_BTN, erase_path)

    # Here's a trigger to reset the origin.
    ctr.set_button_down_trigger(cfg.RESET_ORIGIN_BTN, reset_origin)

    # Here's an image we can map to.
    img = PImage(clear_each_frame=True)
    V.add(img, outputs=['map/image'])

    # This PathPlot will draw the path on the image.
    plot = PathPlot(scale=cfg.PATH_SCALE, offset=cfg.PATH_OFFSET)
    V.add(plot, inputs=['map/image', 'path'], outputs=['map/image'])

    # This will use the path and current position to output cross track error.
    cte = CTE()
    V.add(cte, inputs=['path', 'pos/x', 'pos/y'], outputs=['cte/error'],
          run_condition='run_pilot')

    # This will use the cross track error and PID constants to try to steer back
    # towards the path.
    pid = PIDController(p=cfg.PID_P, i=cfg.PID_I, d=cfg.PID_D)
    pilot = PID_Pilot(pid, cfg.PID_THROTTLE)
    V.add(pilot, inputs=['cte/error'], outputs=['pilot/angle', 'pilot/throttle'],
          run_condition="run_pilot")

    def dec_pid_d():
        pid.Kd -= 0.5
        logging.info("pid: d- %f" % pid.Kd)

    def inc_pid_d():
        pid.Kd += 0.5
        logging.info("pid: d+ %f" % pid.Kd)

    # Buttons to tune PID constants
    ctr.set_button_down_trigger("L2", dec_pid_d)
    ctr.set_button_down_trigger("R2", inc_pid_d)

    # Plot a circle on the map where the car is located
    carcolor = 'green'
    loc_plot = PlotCircle(scale=cfg.PATH_SCALE, offset=cfg.PATH_OFFSET, color=carcolor)
    V.add(loc_plot, inputs=['map/image', 'pos/x', 'pos/y'], outputs=['map/image'])

    # This web controller will create a web server. We aren't using any controls;
    # it is just for visualization.
    web_ctr = WebFpv()
    V.add(web_ctr, inputs=['map/image'], threaded=True)

    # Choose what inputs should change the car.
    class DriveMode:
        def run(self, mode, user_angle, user_throttle, pilot_angle, pilot_throttle):
            if mode == 'user':
                # print(user_angle, user_throttle)
                return user_angle, user_throttle
            elif mode == 'local_angle':
                return pilot_angle, user_throttle
            else:
                return pilot_angle, pilot_throttle

    V.add(DriveMode(),
          inputs=['user/mode', 'user/angle', 'user/throttle',
                  'pilot/angle', 'pilot/throttle'],
          outputs=['angle', 'throttle'])

    if not cfg.DONKEY_GYM:
        steering_controller = PCA9685(cfg.STEERING_CHANNEL,
                                      cfg.PCA9685_I2C_ADDR,
                                      busnum=cfg.PCA9685_I2C_BUSNUM)
        steering = PWMSteering(controller=steering_controller,
                               left_pulse=cfg.STEERING_LEFT_PWM,
                               right_pulse=cfg.STEERING_RIGHT_PWM)

        throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL,
                                      cfg.PCA9685_I2C_ADDR,
                                      busnum=cfg.PCA9685_I2C_BUSNUM)
        throttle = PWMThrottle(controller=throttle_controller,
                               max_pulse=cfg.THROTTLE_FORWARD_PWM,
                               zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                               min_pulse=cfg.THROTTLE_REVERSE_PWM)

        V.add(steering, inputs=['angle'])
        V.add(throttle, inputs=['throttle'])

    # Print joystick controls
    ctr.print_controls()

    if path_loaded:
        print("###############################################################################")
        print("Loaded path:", cfg.PATH_FILENAME)
        print("Make sure your car is sitting at the origin of the path.")
        print("View web page and refresh. You should see your path.")
        print("Hit 'select' twice to change to ai drive mode.")
        print("You can press the X button (e-stop) to stop the car at any time.")
        print("Delete file", cfg.PATH_FILENAME, "and re-start")
        print("to record a new path.")
        print("###############################################################################")
        carcolor = "blue"
        loc_plot = PlotCircle(scale=cfg.PATH_SCALE, offset=cfg.PATH_OFFSET,
                              color=carcolor)
        V.add(loc_plot, inputs=['map/image', 'pos/x', 'pos/y'], outputs=['map/image'])
    else:
        print("###############################################################################")
        print("You are now in record mode. Open the web page to your car")
        print("and as you drive you should see a path.")
        print("Complete one circuit of your course.")
        print("When you have exactly looped, or are just shy of the")
        print("loop, then save the path (press %s)." % cfg.SAVE_PATH_BTN)
        print("You can also erase a path with the Triangle button.")
        print("When you're done, close this process with Ctrl+C.")
        print("Place the car exactly at the start.")
        print("Then restart the car with 'python manage.py drive'.")
        print("It will reload the path and you will be ready to")
        print("follow the path using 'select' to change to ai drive mode.")
        print("You can also press the Square button to reset the origin.")
        print("###############################################################################")

    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
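# The drive() functions in this file rely on the part contract described in their
# docstrings: non-threaded parts implement run(), threaded parts implement
# run_threaded() plus an update() loop that runs in a background thread. The class
# below is a minimal, hypothetical illustration of that contract; it is not wired
# into any vehicle here.
import time


class ExampleCounterPart:
    '''
    Emits an increasing counter. Added with threaded=False, the loop calls run();
    added with threaded=True, the loop calls run_threaded() while update() runs
    in its own thread.
    '''
    def __init__(self):
        self.count = 0
        self.running = True

    def run(self):
        self.count += 1
        return self.count

    def update(self):
        # background loop used when the part is added with threaded=True
        while self.running:
            self.count += 1
            time.sleep(0.05)

    def run_threaded(self):
        return self.count

    def shutdown(self):
        self.running = False

# Hypothetical usage inside a drive() function:
#   V.add(ExampleCounterPart(), outputs=['example/count'], threaded=True)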
def drive(cfg, model_path=None, meta=[]):
    model_type = cfg.DEFAULT_MODEL_TYPE

    # Initialize car
    V = dk.vehicle.Vehicle()

    if cfg.RS_PATH_PLANNER:
        from donkeycar.parts.realsense2 import RS_T265
        print("RS t265 with path tracking")
        odometry = RS_T265(path_output=True, stereo_output=False, image_output=False)
        V.add(odometry, outputs=['rs/trans', 'rs/yaw'], threaded=True)
        V.add(PosStream(), inputs=['rs/trans', 'rs/yaw'],
              outputs=['pos/x', 'pos/y', 'pos/yaw'])
    else:
        raise Exception("This script is for RS_PATH_PLANNER only")

    from donkeycar.parts.realsense2 import RS_D435i
    cam = RS_D435i(img_type=cfg.D435_IMG_TYPE,
                   image_w=cfg.D435_IMAGE_W,
                   image_h=cfg.D435_IMAGE_H,
                   frame_rate=cfg.D435_FRAME_RATE)
    V.add(cam, inputs=[], outputs=['cam/image_array'], threaded=True)

    # class Distance:
    #     def run(self, img):
    #         h, w = img.shape
    #         arr = img[50:w - 50, 0:h - 60].flatten()
    #         mean = np.mean(arr)
    #         print(mean)
    # V.add(Distance(), inputs=['cam/image_array'], outputs=['null'])

    ctr = get_js_controller(cfg)
    V.add(ctr,
          inputs=['null'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    # class debug:
    #     def run(self, angle):
    #         print(angle)
    # V.add(debug(), inputs=['user/angle'], outputs=['null'])

    # This web controller will create a web server
    web_ctr = LocalWebControllerPlanner()
    V.add(web_ctr,
          inputs=['map/image', 'cam/image_array', 'user/mode', 'recording',
                  'throttle', 'angle'],
          outputs=['web/angle', 'web/throttle', 'web/mode', 'web/recording'],  # no part consumes this
          threaded=True)

    class UserCondition:
        def run(self, mode):
            if mode == 'user':
                return True
            else:
                return False

    V.add(UserCondition(), inputs=['user/mode'], outputs=['run_user'])

    class PilotCondition:
        def run(self, mode):
            if mode == 'user':
                return False
            else:
                return True

    V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])

    myRoute = Route(min_dist=cfg.PATH_MIN_DIST)
    V.add(myRoute, inputs=['pos/x', 'pos/y'],
          outputs=['nav/path', 'nav/waypoints'], threaded=True)

    if os.path.exists(cfg.RS_ROUTE_FILE):
        myRoute.load(cfg.RS_ROUTE_FILE)
        print("loaded route:", cfg.RS_ROUTE_FILE)

    def save_waypt():
        myRoute.save_waypoint()

    ctr.set_button_down_trigger(cfg.SAVE_WPT_BTN, save_waypt)

    def save_route():
        myRoute.save_route(cfg.RS_ROUTE_FILE)

    ctr.set_button_down_trigger(cfg.SAVE_ROUTE_BTN, save_route)

    def clear_route():
        myRoute.clear()

    ctr.set_button_down_trigger(cfg.CLEAR_ROUTE_BTN, clear_route)

    img = PImage(resolution=(cfg.D435_IMAGE_W, cfg.D435_IMAGE_H),
                 clear_each_frame=True)
    V.add(img, outputs=['map/image'])

    plot = WaypointPlot(scale=cfg.PATH_SCALE,
                        offset=(cfg.D435_IMAGE_W // 4, cfg.D435_IMAGE_H // 2),
                        color=(0, 0, 255))
    V.add(plot, inputs=['map/image', 'nav/waypoints'], outputs=['map/image'],
          threaded=True)

    plot = PathPlot(scale=cfg.PATH_SCALE,
                    offset=(cfg.D435_IMAGE_W // 4, cfg.D435_IMAGE_H // 2),
                    color=(0, 0, 255))
    V.add(plot, inputs=['map/image', 'nav/path'], outputs=['map/image'],
          threaded=True)

    # this is the error function for PID control
    nav = Navigator(wpt_reach_tolerance=cfg.WPT_TOLERANCE)
    V.add(nav,
          inputs=['nav/waypoints', 'pos/x', 'pos/y', 'pos/yaw'],
          outputs=['nav/shouldRun', 'nav/error'],
          threaded=True,
          run_condition="run_pilot")

    ctr.set_button_down_trigger("left_shoulder", nav.decrease_target)
    ctr.set_button_down_trigger("right_shoulder", nav.increase_target)

    pid = PIDController(p=cfg.PID_P, i=cfg.PID_I, d=cfg.PID_D, debug=False)
    pilot = PID_Pilot(pid, cfg.PID_THROTTLE)
    V.add(pilot,
          inputs=['nav/shouldRun', 'nav/error'],
          outputs=['pilot/angle', 'pilot/throttle'],
          run_condition="run_pilot")
    pos_plot = PlotPose(scale=cfg.PATH_SCALE,
                        offset=(cfg.D435_IMAGE_W // 4, cfg.D435_IMAGE_H // 2))
    V.add(pos_plot, inputs=['map/image', 'pos/x', 'pos/y', 'pos/yaw'],
          outputs=['map/image'])

    if model_path:
        def load_model(kl, model_path):
            start = time.time()
            print('loading model', model_path)
            kl.load(model_path)
            print('finished loading in %s sec.' % (str(time.time() - start)))

        # When we have a model, first create an appropriate Keras part
        kl = dk.utils.get_model_by_type(model_type, cfg)
        load_model(kl, model_path)

        outputs = ['pilot/angle', 'pilot/throttle']
        V.add(kl, inputs=['cam/image_array'], outputs=outputs,
              run_condition='run_pilot')

    # Choose what inputs should change the car.
    class DriveMode:
        def run(self, mode, user_angle, user_throttle, pilot_angle, pilot_throttle):
            if mode == 'user':
                return user_angle, user_throttle
            elif mode == 'local_angle':
                return pilot_angle, user_throttle
            else:
                return pilot_angle, pilot_throttle * cfg.AI_THROTTLE_MULT

    V.add(DriveMode(),
          inputs=['user/mode', 'user/angle', 'user/throttle',
                  'pilot/angle', 'pilot/throttle'],
          outputs=['angle', 'throttle'])

    from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle

    steering_controller = PCA9685(cfg.STEERING_CHANNEL,
                                  cfg.PCA9685_I2C_ADDR,
                                  busnum=cfg.PCA9685_I2C_BUSNUM)
    steering = PWMSteering(controller=steering_controller,
                           left_pulse=cfg.STEERING_LEFT_PWM,
                           right_pulse=cfg.STEERING_RIGHT_PWM)

    throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL,
                                  cfg.PCA9685_I2C_ADDR,
                                  busnum=cfg.PCA9685_I2C_BUSNUM)
    throttle = PWMThrottle(controller=throttle_controller,
                           max_pulse=cfg.THROTTLE_FORWARD_PWM,
                           zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                           min_pulse=cfg.THROTTLE_REVERSE_PWM)

    V.add(steering, inputs=['angle'])
    V.add(throttle, inputs=['throttle'])

    # data collection
    inputs = ['cam/image_array', 'user/angle', 'user/throttle', 'user/mode']
    types = ['image_array', 'float', 'float', 'str']

    def get_record_alert_color(num_records):
        col = (0, 0, 0)
        for count, color in cfg.RECORD_ALERT_COLOR_ARR:
            if num_records >= count:
                col = color
        return col

    class RecordTracker:
        def __init__(self):
            self.last_num_rec_print = 0
            self.dur_alert = 0
            self.force_alert = 0

        def run(self, num_records):
            if num_records is None:
                return 0

            if self.last_num_rec_print != num_records or self.force_alert:
                self.last_num_rec_print = num_records

                if num_records % 10 == 0:
                    print("recorded", num_records, "records")

                if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert:
                    self.dur_alert = num_records // cfg.REC_COUNT_ALERT * cfg.REC_COUNT_ALERT_CYC
                    self.force_alert = 0

            if self.dur_alert > 0:
                self.dur_alert -= 1

            if self.dur_alert != 0:
                return get_record_alert_color(num_records)

            return 0

    V.add(RecordTracker(), inputs=["tub/num_records"], outputs=['records/alert'])

    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
    V.add(tub, inputs=inputs, outputs=["tub/num_records"], run_condition='recording')

    # tell the controller about the tub
    ctr.set_tub(tub)

    ctr.print_controls()

    # run the vehicle
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
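# PosStream is instantiated inside the planner drive() above but is not defined in this
# chunk. A minimal placeholder consistent with how it is wired (consumes 'rs/trans' and
# 'rs/yaw', emits 'pos/x', 'pos/y', 'pos/yaw') might look like the sketch below. The axis
# mapping mirrors the T265 convention noted earlier (y up, x right, z backwards/forwards)
# and is an assumption, not the original implementation.
class PosStream:
    def run(self, trans, yaw):
        # project the 3D translation onto the ground plane and pass yaw through
        return trans.x, trans.z, yaw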
def drive(cfg, model_path=None, use_joystick=False, model_type=None,
          camera_type='single', meta=[]):
    '''
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either
    its run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named
    outputs to parts requesting the same named input.
    '''

    if cfg.DONKEY_GYM:
        # the simulator will use cuda, and then we usually run out of resources
        # if we also try to use cuda, so disable it for donkey_gym.
        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

    if model_type is None:
        if cfg.TRAIN_LOCALIZER:
            model_type = "localizer"
        elif cfg.TRAIN_BEHAVIORS:
            model_type = "behavior"
        else:
            model_type = cfg.DEFAULT_MODEL_TYPE

    # Initialize car
    V = dk.vehicle.Vehicle()

    if camera_type == "stereo":
        if cfg.CAMERA_TYPE == "WEBCAM":
            from donkeycar.parts.camera import Webcam
            camA = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                          image_d=cfg.IMAGE_DEPTH, iCam=0)
            camB = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                          image_d=cfg.IMAGE_DEPTH, iCam=1)
        elif cfg.CAMERA_TYPE == "CVCAM":
            from donkeycar.parts.cv import CvCam
            camA = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH, iCam=0)
            camB = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH, iCam=1)
        else:
            raise Exception("Unsupported camera type: %s" % cfg.CAMERA_TYPE)

        V.add(camA, outputs=['cam/image_array_a'], threaded=True)
        V.add(camB, outputs=['cam/image_array_b'], threaded=True)

        from donkeycar.parts.image import StereoPair
        V.add(StereoPair(),
              inputs=['cam/image_array_a', 'cam/image_array_b'],
              outputs=['cam/image_array'])
    else:
        print("cfg.CAMERA_TYPE", cfg.CAMERA_TYPE)

        inputs = []
        threaded = True

        if cfg.DONKEY_GYM:
            from donkeycar.parts.dgym import DonkeyGymEnv
            cam = DonkeyGymEnv(cfg.DONKEY_SIM_PATH, env_name=cfg.DONKEY_GYM_ENV_NAME)
            threaded = True
            inputs = ['angle', 'throttle']
        elif cfg.CAMERA_TYPE == "PICAM":
            from donkeycar.parts.camera import PiCamera
            cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                           image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "WEBCAM":
            from donkeycar.parts.camera import Webcam
            cam = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "CVCAM":
            from donkeycar.parts.cv import CvCam
            cam = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                        image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "CSIC":
            from donkeycar.parts.camera import CSICamera
            cam = CSICamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                            image_d=cfg.IMAGE_DEPTH,
                            framerate=cfg.CAMERA_FRAMERATE,
                            gstreamer_flip=cfg.CSIC_CAM_GSTREAMER_FLIP_PARM)
        elif cfg.CAMERA_TYPE == "V4L":
            from donkeycar.parts.camera import V4LCamera
            cam = V4LCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                            image_d=cfg.IMAGE_DEPTH,
                            framerate=cfg.CAMERA_FRAMERATE)
        elif cfg.CAMERA_TYPE == "MOCK":
            from donkeycar.parts.camera import MockCamera
            cam = MockCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                             image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "T265":
            from donkeycar.parts.realsense2 import RS_T265
            cam = RS_T265(image_output=True)
        else:
            raise Exception("Unknown camera type: %s" % cfg.CAMERA_TYPE)

        V.add(cam, inputs=inputs, outputs=['cam/image_array'], threaded=threaded)

    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        # modify max_throttle closer to 1.0 to have more power
        # modify steering_scale lower than 1.0 to have less responsive steering
        from donkeycar.parts.controller import get_js_controller

        ctr = get_js_controller(cfg)

        if cfg.USE_NETWORKED_JS:
            from donkeycar.parts.controller import JoyStickSub
            netwkJs = JoyStickSub(cfg.NETWORK_JS_SERVER_IP)
            V.add(netwkJs, threaded=True)
            ctr.js = netwkJs
    else:
        # This web controller will create a web server that is capable
        # of managing steering, throttle, and modes, and more.
        ctr = LocalWebController()

    V.add(ctr,
          inputs=['cam/image_array'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    # this throttle filter will allow one tap back for esc reverse
    th_filter = ThrottleFilter()
    V.add(th_filter, inputs=['user/throttle'], outputs=['user/throttle'])

    # See if we should even run the pilot module.
    # This is only needed because the part run_condition only accepts boolean
    class PilotCondition:
        def run(self, mode):
            if mode == 'user':
                return False
            else:
                return True

    V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])

    class LedConditionLogic:
        def __init__(self, cfg):
            self.cfg = cfg

        def run(self, mode, recording, recording_alert, behavior_state,
                model_file_changed, track_loc):
            # returns a blink rate. 0 for off. -1 for on. positive for rate.

            if track_loc is not None:
                led.set_rgb(*self.cfg.LOC_COLORS[track_loc])
                return -1

            if model_file_changed:
                led.set_rgb(self.cfg.MODEL_RELOADED_LED_R,
                            self.cfg.MODEL_RELOADED_LED_G,
                            self.cfg.MODEL_RELOADED_LED_B)
                return 0.1
            else:
                led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)

            if recording_alert:
                led.set_rgb(*recording_alert)
                return self.cfg.REC_COUNT_ALERT_BLINK_RATE
            else:
                led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)

            if behavior_state is not None and model_type == 'behavior':
                r, g, b = self.cfg.BEHAVIOR_LED_COLORS[behavior_state]
                led.set_rgb(r, g, b)
                return -1  # solid on

            if recording:
                return -1  # solid on
            elif mode == 'user':
                return 1
            elif mode == 'local_angle':
                return 0.5
            elif mode == 'local':
                return 0.1
            return 0

    if cfg.HAVE_RGB_LED and not cfg.DONKEY_GYM:
        from donkeycar.parts.led_status import RGB_LED
        led = RGB_LED(cfg.LED_PIN_R, cfg.LED_PIN_G, cfg.LED_PIN_B, cfg.LED_INVERT)
        led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B)

        V.add(LedConditionLogic(cfg),
              inputs=['user/mode', 'recording', "records/alert", 'behavior/state',
                      'modelfile/modified', "pilot/loc"],
              outputs=['led/blink_rate'])
        V.add(led, inputs=['led/blink_rate'])

    def get_record_alert_color(num_records):
        col = (0, 0, 0)
        for count, color in cfg.RECORD_ALERT_COLOR_ARR:
            if num_records >= count:
                col = color
        return col

    class RecordTracker:
        def __init__(self):
            self.last_num_rec_print = 0
            self.dur_alert = 0
            self.force_alert = 0

        def run(self, num_records):
            if num_records is None:
                return 0

            if self.last_num_rec_print != num_records or self.force_alert:
                self.last_num_rec_print = num_records

                if num_records % 10 == 0:
                    print("recorded", num_records, "records")

                if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert:
                    self.dur_alert = num_records // cfg.REC_COUNT_ALERT * cfg.REC_COUNT_ALERT_CYC
                    self.force_alert = 0

            if self.dur_alert > 0:
                self.dur_alert -= 1

            if self.dur_alert != 0:
                return get_record_alert_color(num_records)

            return 0

    rec_tracker_part = RecordTracker()
    V.add(rec_tracker_part, inputs=["tub/num_records"], outputs=['records/alert'])

    if cfg.AUTO_RECORD_ON_THROTTLE and isinstance(ctr, JoystickController):
        # then we are not using the circle button;
        # hijack that to force a record count indication
        def show_record_count_status():
            rec_tracker_part.last_num_rec_print = 0
            rec_tracker_part.force_alert = 1

        ctr.set_button_down_trigger('circle', show_record_count_status)

    # Sombrero
    if cfg.HAVE_SOMBRERO:
        from donkeycar.parts.sombrero import Sombrero
        s = Sombrero()

    # IMU
    if cfg.HAVE_IMU:
        from donkeycar.parts.imu import Mpu6050
        imu = Mpu6050()
        V.add(imu,
              outputs=['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
                       'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'],
              threaded=True)

    class ImgPreProcess():
        '''
        preprocess camera image for inference.
        normalize and crop if needed.
        '''
        def __init__(self, cfg):
            self.cfg = cfg

        def run(self, img_arr):
            return normalize_and_crop(img_arr, self.cfg)

    if "coral" in model_type:
        inf_input = 'cam/image_array'
    else:
        inf_input = 'cam/normalized/cropped'
        V.add(ImgPreProcess(cfg),
              inputs=['cam/image_array'],
              outputs=[inf_input],
              run_condition='run_pilot')

    # Behavioral state
    if cfg.TRAIN_BEHAVIORS:
        bh = BehaviorPart(cfg.BEHAVIOR_LIST)
        V.add(bh, outputs=['behavior/state', 'behavior/label',
                           "behavior/one_hot_state_array"])
        try:
            ctr.set_button_down_trigger('L1', bh.increment_state)
        except:
            pass

        inputs = [inf_input, "behavior/one_hot_state_array"]
    # IMU
    elif model_type == "imu":
        assert (cfg.HAVE_IMU)
        # Run the pilot if the mode is not user.
        inputs = [inf_input,
                  'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
                  'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
    else:
        inputs = [inf_input]

    def load_model(kl, model_path):
        start = time.time()
        print('loading model', model_path)
        kl.load(model_path)
        print('finished loading in %s sec.' % (str(time.time() - start)))

    def load_weights(kl, weights_path):
        start = time.time()
        try:
            print('loading model weights', weights_path)
            kl.model.load_weights(weights_path)
            print('finished loading in %s sec.' % (str(time.time() - start)))
        except Exception as e:
            print(e)
            print('ERR>> problems loading weights', weights_path)

    def load_model_json(kl, json_fnm):
        start = time.time()
        print('loading model json', json_fnm)
        from tensorflow.python import keras
        try:
            with open(json_fnm, 'r') as handle:
                contents = handle.read()
                kl.model = keras.models.model_from_json(contents)
            print('finished loading json in %s sec.'
                  % (str(time.time() - start)))
        except Exception as e:
            print(e)
            print("ERR>> problems loading model json", json_fnm)

    if model_path:
        # When we have a model, first create an appropriate Keras part
        kl = dk.utils.get_model_by_type(model_type, cfg)

        model_reload_cb = None

        if '.h5' in model_path or '.uff' in model_path or 'tflite' in model_path \
                or '.pkl' in model_path:
            # when we have a .h5 extension
            # load everything from the model file
            load_model(kl, model_path)

            def reload_model(filename):
                load_model(kl, filename)

            model_reload_cb = reload_model

        elif '.json' in model_path:
            # when we have a .json extension
            # load the model from there and look for a matching
            # .wts file with just weights
            load_model_json(kl, model_path)
            weights_path = model_path.replace('.json', '.weights')
            load_weights(kl, weights_path)

            def reload_weights(filename):
                weights_path = filename.replace('.json', '.weights')
                load_weights(kl, weights_path)

            model_reload_cb = reload_weights

        else:
            print("ERR>> Unknown extension type on model file!!")
            return

        # this part will signal visual LED, if connected
        V.add(FileWatcher(model_path, verbose=True),
              outputs=['modelfile/modified'])

        # these parts will reload the model file, but only when ai is running
        # so we don't interrupt user driving
        V.add(FileWatcher(model_path), outputs=['modelfile/dirty'],
              run_condition="ai_running")
        V.add(DelayedTrigger(100), inputs=['modelfile/dirty'],
              outputs=['modelfile/reload'], run_condition="ai_running")
        V.add(TriggeredCallback(model_path, model_reload_cb),
              inputs=["modelfile/reload"], run_condition="ai_running")

        outputs = ['pilot/angle', 'pilot/throttle']

        if cfg.TRAIN_LOCALIZER:
            outputs.append("pilot/loc")

        V.add(kl, inputs=inputs, outputs=outputs, run_condition='run_pilot')

    # Choose what inputs should change the car.
    class DriveMode:
        def run(self, mode, user_angle, user_throttle, pilot_angle, pilot_throttle):
            if mode == 'user':
                return user_angle, user_throttle
            elif mode == 'local_angle':
                return pilot_angle, user_throttle
            else:
                return pilot_angle, pilot_throttle * cfg.AI_THROTTLE_MULT

    V.add(DriveMode(),
          inputs=['user/mode', 'user/angle', 'user/throttle',
                  'pilot/angle', 'pilot/throttle'],
          outputs=['angle', 'throttle'])

    # to give the car a boost when starting ai mode in a race.
    aiLauncher = AiLaunch(cfg.AI_LAUNCH_DURATION, cfg.AI_LAUNCH_THROTTLE,
                          cfg.AI_LAUNCH_KEEP_ENABLED)
    V.add(aiLauncher, inputs=['user/mode', 'throttle'], outputs=['throttle'])

    if isinstance(ctr, JoystickController):
        ctr.set_button_down_trigger(cfg.AI_LAUNCH_ENABLE_BUTTON,
                                    aiLauncher.enable_ai_launch)

    class AiRunCondition:
        '''
        A bool part to let us know when ai is running.
        '''
        def run(self, mode):
            if mode == "user":
                return False
            return True

    V.add(AiRunCondition(), inputs=['user/mode'], outputs=['ai_running'])

    # Ai Recording
    class AiRecordingCondition:
        '''
        return True when in ai mode, otherwise respect the user mode recording flag
        '''
        def run(self, mode, recording):
            if mode == 'user':
                return recording
            return True

    if cfg.RECORD_DURING_AI:
        V.add(AiRecordingCondition(),
              inputs=['user/mode', 'recording'],
              outputs=['recording'])

    # Drive train setup
    if cfg.DONKEY_GYM:
        pass

    elif cfg.DRIVE_TRAIN_TYPE == "SERVO_ESC":
        from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle

        steering_controller = PCA9685(cfg.STEERING_CHANNEL,
                                      cfg.PCA9685_I2C_ADDR,
                                      busnum=cfg.PCA9685_I2C_BUSNUM)
        steering = PWMSteering(controller=steering_controller,
                               left_pulse=cfg.STEERING_LEFT_PWM,
                               right_pulse=cfg.STEERING_RIGHT_PWM)

        throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL,
                                      cfg.PCA9685_I2C_ADDR,
                                      busnum=cfg.PCA9685_I2C_BUSNUM)
        throttle = PWMThrottle(controller=throttle_controller,
                               max_pulse=cfg.THROTTLE_FORWARD_PWM,
                               zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                               min_pulse=cfg.THROTTLE_REVERSE_PWM)

        V.add(steering, inputs=['angle'])
        V.add(throttle, inputs=['throttle'])

    elif cfg.DRIVE_TRAIN_TYPE == "DC_STEER_THROTTLE":
        from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM

        steering = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT,
                                             cfg.HBRIDGE_PIN_RIGHT)
        throttle = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD,
                                             cfg.HBRIDGE_PIN_BWD)

        V.add(steering, inputs=['angle'])
        V.add(throttle, inputs=['throttle'])

    elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL":
        from donkeycar.parts.actuator import TwoWheelSteeringThrottle, Mini_HBridge_DC_Motor_PWM

        left_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT_FWD,
                                               cfg.HBRIDGE_PIN_LEFT_BWD)
        right_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_RIGHT_FWD,
                                                cfg.HBRIDGE_PIN_RIGHT_BWD)
        two_wheel_control = TwoWheelSteeringThrottle()

        V.add(two_wheel_control,
              inputs=['throttle', 'angle'],
              outputs=['left_motor_speed', 'right_motor_speed'])

        V.add(left_motor, inputs=['left_motor_speed'])
        V.add(right_motor, inputs=['right_motor_speed'])

    elif cfg.DRIVE_TRAIN_TYPE == "SERVO_HBRIDGE_PWM":
        from donkeycar.parts.actuator import ServoBlaster, PWMSteering
        steering_controller = ServoBlaster(cfg.STEERING_CHANNEL)  # really a pin
        # PWM pulse values should be in the range of 100 to 200
        assert (cfg.STEERING_LEFT_PWM <= 200)
        assert (cfg.STEERING_RIGHT_PWM <= 200)
        steering = PWMSteering(controller=steering_controller,
                               left_pulse=cfg.STEERING_LEFT_PWM,
                               right_pulse=cfg.STEERING_RIGHT_PWM)

        from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
        motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)

        V.add(steering, inputs=['angle'])
        V.add(motor, inputs=["throttle"])

    # add tub to save data
    inputs = ['cam/image_array', 'user/angle', 'user/throttle', 'user/mode']
    types = ['image_array', 'float', 'float', 'str']

    if cfg.TRAIN_BEHAVIORS:
        inputs += ['behavior/state', 'behavior/label',
                   "behavior/one_hot_state_array"]
        types += ['int', 'str', 'vector']

    if cfg.HAVE_IMU:
        inputs += ['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
                   'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
        types += ['float', 'float', 'float', 'float', 'float', 'float']

    if cfg.RECORD_DURING_AI:
        inputs += ['pilot/angle', 'pilot/throttle']
        types += ['float', 'float']

    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
    V.add(tub, inputs=inputs, outputs=["tub/num_records"], run_condition='recording')

    if cfg.PUB_CAMERA_IMAGES:
        from donkeycar.parts.network import TCPServeValue
        from donkeycar.parts.image import ImgArrToJpg
        pub = TCPServeValue("camera")
        V.add(ImgArrToJpg(), inputs=['cam/image_array'], outputs=['jpg/bin'])
        V.add(pub, inputs=['jpg/bin'])

    if type(ctr) is LocalWebController:
        print("You can now go to <your pi ip address>:8887 to drive your car.")
    elif isinstance(ctr, JoystickController):
        print("You can now move your joystick to drive your car.")

    # tell the controller about the tub
    ctr.set_tub(tub)

    if cfg.BUTTON_PRESS_NEW_TUB:
        def new_tub_dir():
            V.parts.pop()
            tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
            V.add(tub, inputs=inputs, outputs=["tub/num_records"],
                  run_condition='recording')
            ctr.set_tub(tub)

        ctr.set_button_down_trigger('cross', new_tub_dir)

    ctr.print_controls()

    # run the vehicle
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
def test_rs(cfg):
    """
    Uses part classes from Donkeycar v3.1.1.
    """
    V = dk.vehicle.Vehicle()

    from donkeycar.parts.realsense2 import RS_T265
    rs = RS_T265(image_output=True)
    V.add(rs, outputs=['pos', 'vel', 'acc', 'image_array'], threaded=False)

    class Split(object):
        def run(self, pos, vel, acc):
            return pos.x, pos.y, pos.z, vel.x, vel.y, vel.z, acc.x, acc.y, acc.z

        def shutdown(self):
            pass

    V.add(Split(),
          inputs=['pos', 'vel', 'acc'],
          outputs=['pos_x', 'pos_y', 'pos_z',
                   'vel_x', 'vel_y', 'vel_z',
                   'acc_x', 'acc_y', 'acc_z'])

    # image shape (800, 848)
    class ImageNoneCheck(object):
        def __init__(self, cfg):
            self.cfg = cfg

        def run(self, image_array):
            if image_array is None:
                # print('[RS] image_array is None')
                return np.zeros(
                    (self.cfg.IMAGE_H, self.cfg.IMAGE_W, self.cfg.IMAGE_DEPTH),
                    dtype=np.uint8)
            else:
                # print('[RS] image.shape:{}'.format(str(image_array.shape)))
                return image_array

        def shutdown(self):
            pass

    V.add(ImageNoneCheck(cfg), inputs=['image_array'], outputs=['image_array'])

    class PrintRS(object):
        def run(self, image_array, pos_x, pos_y, pos_z,
                vel_x, vel_y, vel_z, acc_x, acc_y, acc_z):
            # print((image_array is None))
            # print(type(image_array))
            # print(image_array.shape)
            # print('[RS] pos:({:.5g}, {:.5g}, {:.5g}) vel:({:.5g}, {:.5g}, {:.5g}) acc:({:.5g}, {:.5g}, {:.5g})'.format(
            print('{:.5g},{:.5g},{:.5g},{:.5g},{:.5g},{:.5g},{:.5g},{:.5g},{:.5g}'
                  .format(pos_x, pos_y, pos_z, vel_x, vel_y, vel_z,
                          acc_x, acc_y, acc_z))

        def shutdown(self):
            pass

    V.add(PrintRS(),
          inputs=['image_array',
                  'pos_x', 'pos_y', 'pos_z',
                  'vel_x', 'vel_y', 'vel_z',
                  'acc_x', 'acc_y', 'acc_z'])

    inputs = ['image_array',
              'pos_x', 'pos_y', 'pos_z',
              'vel_x', 'vel_y', 'vel_z',
              'acc_x', 'acc_y', 'acc_z']
    types = ['image_array',
             'float', 'float', 'float',
             'float', 'float', 'float',
             'float', 'float', 'float']

    from donkeycar.parts.datastore import TubHandler
    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=[])
    V.add(tub, inputs=inputs, outputs=["tub/num_records"])

    try:
        V.start(rate_hz=cfg.DRIVE_LOOP_HZ,
                max_loop_count=cfg.DRIVE_LOOP_HZ * 60)  # 1 min
                # max_loop_count=cfg.MAX_LOOPS)  # run indefinitely
    except KeyboardInterrupt:
        print('exit')
    finally:
        print('done')
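# A hypothetical entry point, sketched after the usual donkeycar manage.py pattern.
# Real templates use docopt; the argparse flags, the config location, and which drive()
# variant ends up bound at this point in the file are assumptions for illustration only.
if __name__ == '__main__':
    import argparse
    import donkeycar as dk

    parser = argparse.ArgumentParser(description='run a drive loop or the T265 test (sketch)')
    parser.add_argument('command', choices=['drive', 'test_rs'])
    parser.add_argument('--model', default=None, help='path to a trained model file')
    args = parser.parse_args()

    cfg = dk.load_config(config_path='./config.py')  # assumed project layout
    if args.command == 'drive':
        drive(cfg, model_path=args.model)
    else:
        test_rs(cfg)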