def test_new_tub_writer(tubs):
    """A freshly created writer becomes the sixth tub and is numbered 5."""
    handler = TubHandler(tubs[0])
    record_keys = ['cam/image_array', 'angle', 'throttle']
    record_types = ['image_array', 'float', 'float']
    writer = handler.new_tub_writer(record_keys, record_types)
    # The fixture provides five existing tubs; the writer adds one more.
    assert len(handler.get_tub_list()) == 6
    print(writer.path)
    # Tub directories are named ..._<number>_..., so the new one is index 5.
    assert int(writer.path.split('_')[-2]) == 5
class Telemetry:
    """Records vehicle telemetry into a Tub via a shared Memory.

    Wraps a TubHandler-managed tub writer plus a Memory channel store so a
    driving session can be recorded, annotated with the vehicle part list,
    and finalized with an end timestamp.
    """

    def __init__(self, path):
        """path: directory under which the TubHandler creates new tubs."""
        print(path)
        self.th = TubHandler(path=path)
        self.mem = Memory()
        # Channel names recorded on every record() call; `types` is kept
        # index-aligned with `inputs`.
        self.inputs = [
            'cam/image_array',
            'user/angle',
            'user/throttle',
            'angle',
            'throttle',
            'user/mode',
            'beacons/beacon1',
            'beacons/beacon2',
            'beacons/beacon3',
        ]
        self.types = [
            'image_array', 'float', 'float', 'float', 'float', 'str',
            'int', 'int', 'int'
        ]
        self.tub = None

    def create_tub(self, path=None):
        """Open a new tub writer; `path` may be None (handler picks one).

        BUG FIX: the original did `"Path found:" + path`, which raised
        TypeError whenever create_tub() was called without a path.
        """
        print("Path found:" + str(path))
        self.tub = self.th.new_tub_writer(inputs=self.inputs,
                                          types=self.types,
                                          path=path)

    def save_vehicle_configuration(self, parts):
        """Store the vehicle part list on the tub, dropping the 'thread'
        and 'part' entries of each descriptor (presumably runtime-only
        objects that should not be persisted — confirm against Tub)."""
        newparts = []
        for p in parts:
            newparts.append(
                {k: v for k, v in p.items() if k != 'thread' and k != 'part'})
        self.tub.parts = newparts

    def save_end_time(self):
        """Stamp the tub with the session end time (epoch seconds)."""
        self.tub.end_time = time.time()

    def record(self):
        """Pull current values of all channels and write one tub record."""
        inputs = self.mem.get(self.inputs)
        self.tub.run(*inputs)

    def get(self, keys):
        """Read channel values straight from the memory store."""
        return self.mem.get(keys)

    def put(self, keys, inputs):
        """Write channel values straight into the memory store."""
        self.mem.put(keys, inputs)

    def cleanup_postsession(self):
        """Finalize the tub: stamp end time and flush metadata to disk."""
        self.save_end_time()
        self.tub.write_meta()
def test_tub_like_driver(self):
    """
    The manage.py/donkey2.py drive command creates a tub using TubHandler,
    so test that way.
    """
    os.makedirs(self.tempfolder)
    user_meta = ["location:Here2", "task:sometask2"]
    handler = TubHandler(self.tempfolder)
    written = handler.new_tub_writer(inputs=self.inputs,
                                     types=self.types,
                                     user_meta=user_meta)
    # Re-open the tub from disk and check the metadata round-trips.
    reopened = Tub(written.path)
    assert written.meta == reopened.meta
    assert written.meta['location'] == "Here2"
    assert reopened.meta['inputs'] == self.inputs
    assert reopened.meta['location'] == "Here2"
def rgb2gray_tub(cfg, fr, to):
    """Copy a tub's records into a new tub whose camera images are
    grayscale and resized to 100x20.

    cfg: donkeycar config (provides DATA_PATH and image settings).
    fr:  source tub path(s), handed to gather_records.
    to:  never referenced in this function — presumably the intended
         destination; the new tub is actually created under cfg.DATA_PATH
         by TubHandler. TODO confirm with callers.
    """
    opts = {}
    gen_records = {}
    opts['categorical'] = True #type(kl) in [KerasCategorical, KerasBehavioral]
    # Collect and index all source records.
    records = gather_records(cfg, fr, opts, verbose=True)
    print('collating %d records ...' % (len(records)))
    collate_records(records, gen_records, opts)
    tub_data = {}
    # NOTE(review): these four lists are never used below — leftovers?
    inputs_img = []
    gray_img = []
    angles = []
    throttles = []
    # Schema of the converted tub.
    inputs=['cam/image_array', 'user/angle', 'user/throttle', 'user/mode']
    types=['image_array', 'float', 'float', 'str']
    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types)
    for key, record in gen_records.items():
        #print(record)
        if record['img_data'] is None:
            # Image not cached on the record yet; load it from disk at
            # training scale.
            filename = record['image_path']
            img_arr = load_scaled_image_arr(filename, cfg)
            if img_arr is None:
                # NOTE(review): `break` aborts the whole conversion at the
                # first unloadable image — `continue` (skip that record)
                # may have been intended; confirm before changing.
                break
            record['img_data'] = img_arr
        else:
            img_arr = record['img_data']
        # Grayscale + shrink to 100x20 before writing the new record.
        tub_data = {
            'cam/image_array': cv2.resize(rgb2gray(img_arr), (100, 20)),
            'user/angle': record['angle'],
            'user/throttle': record['throttle'],
            'user/mode': record['json_data']['user/mode']
        }
        #print(tub_data)
        tub.put_record(tub_data)
def drive(cfg, args):
    """Assemble and run a recording vehicle (camera + joystick + lidar),
    then archive the recorded tub as a .tar.gz once the loop ends."""
    v = dk.vehicle.Vehicle()

    # Camera feed.
    print("Loading pi camera...")
    add_pi_camera(v, cfg, 'cam/image_array')

    # Manual control via joystick, wired to the PWM actuators.
    print("Loading joystick...")
    stick = Joystick(throttle_scale=cfg.JOYSTICK_MAX_THROTTLE,
                     steering_scale=cfg.JOYSTICK_STEERING_SCALE)
    v.add(stick,
          outputs=['user/angle', 'user/throttle', 'js/actions'],
          threaded=True)
    add_steering(v, cfg, 'user/angle')
    add_throttle(v, cfg, 'user/throttle')

    # Lidar scan, reduced to a fixed-size distance vector.
    print("Loading Lidar scan...")
    scanner = LidarScan()
    distances = LidarDistancesVector()
    v.add(scanner, outputs=['lidar/scan'], threaded=True)
    v.add(distances, inputs=['lidar/scan'], outputs=['lidar/distances'])

    # Data recording into a fresh tub under "tubes/".
    print("Loading TubWriter")
    recorded = ['cam/image_array', 'user/angle', 'user/throttle',
                'lidar/distances']
    writer = TubHandler("tubes/").new_tub_writer(
        inputs=recorded,
        types=['image_array', 'float', 'float', 'float'])
    v.add(writer, inputs=recorded)

    # Stop the car automatically after a fixed number of loop steps.
    v.add(ExitAfterSteps(int(args["--steps"])))

    print("Starting vehicle...")
    v.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)

    # Archive the finished run as <tubname>.tar.gz in the working directory.
    save_path = os.path.basename(writer.path)
    print("Create archive for run {} in {}.tar.gz".format(writer.path, save_path))
    with tarfile.open(name="{}.tar.gz".format(save_path), mode='w:gz') as tar:
        tar.add(writer.path, arcname=os.path.basename(writer.path))
def add_tub_save_data(V, cfg):
    """Attach a tub-writer part to vehicle V that records the drive data.

    Always records camera frames, user controls and mode; optionally adds
    pilot outputs and noise-injected controls depending on cfg flags.
    Returns (V, tub).
    """
    inputs = ['cam/image_array', 'user/angle', 'user/throttle', 'user/mode']
    types = ['image_array', 'float', 'float', 'str']

    # Optional channels, gated by config flags.
    if cfg.RECORD_DURING_AI:
        inputs.extend(['pilot/angle', 'pilot/throttle'])
        types.extend(['float', 'float'])
    if cfg.CONTROL_NOISE:
        inputs.extend(['user/angle_noise', 'user/throttle_noise'])
        types.extend(['float', 'float'])

    # Final (post-mixing) control values are always recorded last.
    inputs.extend(['angle', 'throttle'])
    types.extend(['float', 'float'])

    tub = TubHandler(path=cfg.DATA_PATH).new_tub_writer(inputs=inputs,
                                                        types=types)
    # Only record while the 'recording' channel is truthy.
    V.add(tub,
          inputs=inputs,
          outputs=["tub/num_records"],
          run_condition='recording')
    return V, tub
def drive(cfg, model_path=None, use_joystick=False):
    '''
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either
    it's run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named
    outputs to parts requesting the same named input.
    '''

    #Initialize car
    V = dk.vehicle.Vehicle()

    # Camera -> 'cam/image_array'
    cam = PiCamera(resolution=cfg.CAMERA_RESOLUTION)
    V.add(cam, outputs=['cam/image_array'], threaded=True)

    # IMU -> 'imu/imu_vec'
    imu = Mpu6050()
    V.add(imu, outputs=['imu/imu_vec'], threaded=True)

    # Lidar
    # NOTE(review): `imu` is reused for the lidar part, shadowing the
    # Mpu6050 reference above. Harmless here since the first part is
    # already registered with V, but a distinct name would be clearer.
    imu = B0602Lidar()
    V.add(imu, outputs=['lidar/lidar_measurements'], threaded=True)

    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        #modify max_throttle closer to 1.0 to have more power
        #modify steering_scale lower than 1.0 to have less responsive steering
        ctr = JoystickController(
            max_throttle=cfg.JOYSTICK_MAX_THROTTLE,
            steering_scale=cfg.JOYSTICK_STEERING_SCALE,
            auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
    else:
        #This web controller will create a web server that is capable
        #of managing steering, throttle, and modes, and more.
        ctr = LocalWebController()

    V.add(ctr,
          inputs=['cam/image_array'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    #See if we should even run the pilot module.
    #This is only needed because the part run_condition only accepts boolean
    def pilot_condition(mode):
        # True whenever the car is not in pure manual ('user') mode.
        if mode == 'user':
            return False
        else:
            return True

    pilot_condition_part = Lambda(pilot_condition)
    V.add(pilot_condition_part, inputs=['user/mode'], outputs=['run_pilot'])

    # Add CV Here
    #img_threshold = ImgThreshold()
    #V.add(img_threshold, inputs=['cam/image_array'], outputs=['cam/image_array'])
    #img_grayscale = ImgGreyscale()
    #V.add(img_grayscale, inputs=['cam/image_array'], outputs=['cam/image_array'])
    #img_canny = ImgCanny()
    #V.add(img_canny, inputs=['cam/image_array'], outputs=['cam/image_array'])
    # Make sure image size is correct
    #xsize = 160
    #ysize = 120
    #vertices = np.array([[(0,48),(xsize,48),(xsize,ysize),(0,ysize)]], dtype=np.int32)
    #img_mask = ImgMask(vertices)
    #V.add(img_mask, inputs=['cam/image_array'], outputs=['cam/image_array'])
    #img_stack = ImgStack()
    #V.add(img_stack, inputs=['cam/image_array'], outputs=['cam/image_array'])

    #Run the pilot if the mode is not user.
    #kl = KerasCategorical()
    # IMU-aware model: consumes both the camera frame and the IMU vector.
    kl = KerasIMU()
    if model_path:
        kl.load(model_path)

    V.add(kl,
          inputs=['cam/image_array', 'imu/imu_vec'],
          outputs=['pilot/angle', 'pilot/throttle'],
          run_condition='run_pilot')

    #Choose what inputs should change the car.
    def drive_mode(mode, user_angle, user_throttle, pilot_angle,
                   pilot_throttle):
        # Mix manual and pilot controls according to the drive mode.
        if mode == 'user':
            return user_angle, user_throttle
        elif mode == 'local_angle':
            # Autopilot steers, human controls speed.
            return pilot_angle, user_throttle
        else:
            return pilot_angle, pilot_throttle

    drive_mode_part = Lambda(drive_mode)
    V.add(drive_mode_part,
          inputs=[
              'user/mode', 'user/angle', 'user/throttle', 'pilot/angle',
              'pilot/throttle'
          ],
          outputs=['angle', 'throttle'])

    # PWM actuators driven from the mixed 'angle'/'throttle' channels.
    steering_controller = PCA9685(cfg.STEERING_CHANNEL)
    steering = PWMSteering(controller=steering_controller,
                           left_pulse=cfg.STEERING_LEFT_PWM,
                           right_pulse=cfg.STEERING_RIGHT_PWM)

    throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL)
    throttle = PWMThrottle(controller=throttle_controller,
                           max_pulse=cfg.THROTTLE_FORWARD_PWM,
                           zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                           min_pulse=cfg.THROTTLE_REVERSE_PWM)

    V.add(steering, inputs=['angle'])
    V.add(throttle, inputs=['throttle'])

    #add tub to save data
    inputs = [
        'cam/image_array', 'user/angle', 'user/throttle', 'user/mode',
        'imu/imu_vec'
    ]
    types = ['image_array', 'float', 'float', 'str', 'list']
    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types)
    # Only record while the 'recording' channel is truthy.
    V.add(tub, inputs=inputs, run_condition='recording')

    #run the vehicle
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)

    print("You can now go to <your pi ip address>:8887 to drive your car.")
def drive(cfg, model_path=None, use_chaos=False):
    """
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either
    it's run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named
    outputs to parts requesting the same named input.
    """
    V = dk.vehicle.Vehicle()

    # Per-loop timestamp channel, recorded alongside each tub record.
    clock = Timestamp()
    V.add(clock, outputs=['timestamp'])

    # Camera -> 'cam/image_array'
    cam = PiCamera(resolution=cfg.CAMERA_RESOLUTION)
    V.add(cam, outputs=['cam/image_array'], threaded=True)

    # Web controller (optionally with chaos-testing input noise).
    ctr = LocalWebController(use_chaos=use_chaos)
    V.add(ctr,
          inputs=['cam/image_array'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    # See if we should even run the pilot module.
    # This is only needed because the part run_condition only accepts boolean
    def pilot_condition(mode):
        # True whenever the car is not in pure manual ('user') mode.
        if mode == 'user':
            return False
        else:
            return True

    pilot_condition_part = Lambda(pilot_condition)
    V.add(pilot_condition_part, inputs=['user/mode'], outputs=['run_pilot'])

    # Run the pilot if the mode is not user.
    kl = KerasLinear()
    if model_path:
        kl.load(model_path)

    V.add(kl,
          inputs=['cam/image_array'],
          outputs=['pilot/angle', 'pilot/throttle'],
          run_condition='run_pilot')

    # Choose what inputs should change the car.
    def drive_mode(mode, user_angle, user_throttle, pilot_angle,
                   pilot_throttle):
        # Mix manual and pilot controls according to the drive mode.
        if mode == 'user':
            return user_angle, user_throttle
        elif mode == 'local_angle':
            # Autopilot steers, human controls speed.
            return pilot_angle, user_throttle
        else:
            return pilot_angle, pilot_throttle

    drive_mode_part = Lambda(drive_mode)
    V.add(drive_mode_part,
          inputs=[
              'user/mode', 'user/angle', 'user/throttle', 'pilot/angle',
              'pilot/throttle'
          ],
          outputs=['angle', 'throttle'])

    # PWM actuators driven from the mixed 'angle'/'throttle' channels.
    steering_controller = PCA9685(cfg.STEERING_CHANNEL)
    steering = PWMSteering(controller=steering_controller,
                           left_pulse=cfg.STEERING_LEFT_PWM,
                           right_pulse=cfg.STEERING_RIGHT_PWM)

    throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL)
    throttle = PWMThrottle(controller=throttle_controller,
                           max_pulse=cfg.THROTTLE_FORWARD_PWM,
                           zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                           min_pulse=cfg.THROTTLE_REVERSE_PWM)

    V.add(steering, inputs=['angle'])
    V.add(throttle, inputs=['throttle'])

    # add tub to save data
    inputs = [
        'cam/image_array', 'user/angle', 'user/throttle', 'user/mode',
        'timestamp'
    ]
    types = ['image_array', 'float', 'float', 'str', 'str']

    # multiple tubs
    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types)

    # single tub
    #tub = TubWriter(path=cfg.TUB_PATH, inputs=inputs, types=types)
    # Only record while the 'recording' channel is truthy.
    V.add(tub, inputs=inputs, run_condition='recording')

    # run the vehicle
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
def drive(cfg, model_path=None, use_joystick=False, model_type=None,
          camera_type='single', meta=None):
    '''
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either
    it's run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named
    outputs to parts requesting the same named input.

    cfg:         donkeycar config object.
    model_path:  optional .h5 pilot model to load.
    use_joystick: unused here (web controller is always created).
    model_type:  pilot model type; resolved from cfg when None.
    camera_type: unused here; kept for caller compatibility.
    meta:        optional list of "key:value" strings stored as tub metadata.
    '''
    # BUG FIX: `meta=[]` was a shared mutable default argument; normalize
    # a None default here instead.
    if meta is None:
        meta = []

    # Resolve the model type from config when the caller did not choose one.
    if model_type is None:
        if cfg.TRAIN_LOCALIZER:
            model_type = "localizer"
        elif cfg.TRAIN_BEHAVIORS:
            model_type = "behavior"
        else:
            model_type = cfg.DEFAULT_MODEL_TYPE

    #Initialize car
    V = dk.vehicle.Vehicle()

    #1 -------------------------------- camera --------------------------------
    inputs = []
    threaded = True
    print("cfg.CAMERA_TYPE", cfg.CAMERA_TYPE)
    if cfg.CAMERA_TYPE == "PICAM":
        from donkeycar.parts.camera import PiCamera
        cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                       image_d=cfg.IMAGE_DEPTH)
    else:
        raise Exception("Unkown camera type: %s" % cfg.CAMERA_TYPE)
    V.add(cam, inputs=inputs, outputs=['cam/image_array'], threaded=threaded)

    #2 ------------------------------ controller ------------------------------
    # Web server managing steering, throttle, modes and more.
    ctr = LocalWebController()
    if cfg.AUTO_RECORD_ON_THROTTLE and isinstance(ctr, JoystickController):
        # Not using the circle button; hijack it to force a record-count
        # indication from the RecordTracker part defined below.
        def show_record_acount_status():
            rec_tracker_part.last_num_rec_print = 0
            rec_tracker_part.force_alert = 1
        ctr.set_button_down_trigger('circle', show_record_acount_status)

    V.add(ctr,
          inputs=['cam/image_array'],
          outputs=[
              'user/angle', 'user/throttle', 'user/mode', 'recording',
              'change_model', 'user/arrival'
          ],
          threaded=True)

    # ----------------------------- communication ------------------------------
    class communication_class:
        """NB-IoT (BC95 modem) UDP link to the control server.

        A threaded part: update() polls the server for state ('ask') and
        caches the parsed JSON reply; run_threaded() reports arrival and
        returns the last known (lock_state, car_state, pos).
        """

        def _at_transact(self, cmd, pause=0.5):
            # Send one AT command and return the raw modem reply bytes.
            print(cmd)
            self.serial.write((cmd + '\r\n').encode())
            reply = self.serial.read(1)
            sleep(pause)
            reply = reply + self.serial.read(self.serial.inWaiting())
            print(reply)
            return reply

        def _at_until(self, cmd, *expected):
            # Repeat cmd until the reply equals one of the expected values.
            reply = b''
            while reply not in expected:
                reply = self._at_transact(cmd)
            return reply

        def _at_while_equal(self, cmd, bad):
            # Repeat cmd while the reply still equals the unwanted value.
            reply = bad
            while reply == bad:
                reply = self._at_transact(cmd)
            return reply

        def __init__(self):
            self.recv = ''
            self.arrived = 0
            self.serial = serial.Serial('/dev/ttyUSB0', 9600, timeout=3600)
            if self.serial.isOpen():
                print("open success")
            else:
                print("open failed")
            # Modem bring-up handshake. Expected replies are hard-coded for
            # this specific unit (IMEI/IMSI/operator) — confirm per device.
            self._at_until('AT', b'\r\nOK\r\n')
            self._at_until('AT+CMEE=1', b'\r\nOK\r\n')
            self._at_until('AT+CGMI', b'\r\nQuectel\r\n\r\nOK\r\n')
            self._at_until('AT+CGMM', b'\r\nBC95HB-02-STD_900\r\n\r\nOK\r\n')
            self._at_until('AT+CGMR', b'\r\nSECURITY,V100R100C10B657SP3\r\n\r\nPROTOCOL,V100R100C10B657SP3\r\n\r\nAPPLICATION,V100R100C10B657SP3\r\n\r\nSEC_UPDATER,V100R100C10B657SP3\r\n\r\nAPP_UPDATER,V100R100C10B657SP3\r\n\r\nRADIO,BC95HB-02-STD_900\r\n\r\nOK\r\n')
            self._at_until('AT+NBAND?', b'\r\n+NBAND:8\r\n\r\nOK\r\n')
            self._at_until('AT+NCONFIG?', b'\r\n+NCONFIG:AUTOCONNECT,TRUE\r\n+NCONFIG:CR_0354_0338_SCRAMBLING,TRUE\r\n+NCONFIG:CR_0859_SI_AVOID,TRUE\r\n+NCONFIG:COMBINE_ATTACH,FALSE\r\n+NCONFIG:CELL_RESELECTION,FALSE\r\n+NCONFIG:ENABLE_BIP,FALSE\r\n\r\nOK\r\n')
            self._at_until('AT+CGSN=1', b'\r\n+CGSN:869405035846048\r\n\r\nOK\r\n')
            self._at_until('AT+CFUN=1', b'\r\nOK\r\n')
            self._at_until('AT+CIMI', b'\r\n460043193006443\r\n\r\nOK\r\n')
            self._at_until('AT+CGATT=0', b'\r\nOK\r\n')
            self._at_until('AT+CGATT?', b'\r\n+CGATT:0\r\n\r\nOK\r\n')
            self._at_until('AT+CGATT=1', b'\r\nOK\r\n')
            self._at_until('AT+CGATT?', b'\r\n+CGATT:1\r\n\r\nOK\r\n')
            # Poll signal quality until it is no longer the unknown 99,99.
            self._at_while_equal('AT+CSQ', b'\r\n+CSQ:99,99\r\n\r\nOK\r\n')
            self._at_until('AT+COPS?', b'\r\n+COPS:0,2,"46000"\r\n\r\nOK\r\n')
            self._at_until('AT+CEREG?', b'\r\n+CEREG:0,1\r\n\r\nOK\r\n')
            print('nb-iot initial ok!')

        def update(self):
            # Threaded loop: ask the server for state over UDP and cache the
            # parsed JSON answer in self.recv.
            while True:
                try:
                    # Open UDP socket 0 (CME ERROR 4 means already open).
                    self._at_until('AT+NSOCR=DGRAM,17,25535,1',
                                   b'\r\n0\r\n\r\nOK\r\n',
                                   b'\r\n+CME ERROR: 4\r\n')
                    legal = 'OK'
                    data = {'order': 'ask'}
                    data = json.dumps(data)
                    l = len(data)
                    judge = '0'
                    while judge != legal:
                        # Hex-encode the JSON payload into an NSOST datagram.
                        send_data = "AT+NSOST=0,139.199.105.136,25535," + str(
                            len(data)) + "," + (binascii.b2a_hex(
                                str(data).encode('utf-8'))).decode()
                        print(send_data)
                        send_data = send_data + '\r\n'
                        self.serial.write(send_data.encode())
                        data = self.serial.read(1)
                        sleep(2)
                        data = data + self.serial.read(self.serial.inWaiting())
                        # str(bytes) renders "b'..\r\n..'", hence the escaped
                        # separator in the split below.
                        data = str(data).split("\\r\\n")
                        print(data)
                        judge = data[3]
                    if len(data) > 5:
                        print("进入判断")
                        # Extract the pending-datagram length reported by the
                        # unsolicited +NSONMI line, then fetch it with NSORF.
                        data = data[5]
                        l = data[10:len(data)]
                        print(l)
                        data_judge = '0'
                        while data_judge != b'\r\n\r\nOK\r\n':
                            print("发送请求")
                            send_data = 'AT+NSORF=0,' + l + "\r\n"
                            print(send_data)
                            self.serial.write(send_data.encode())
                            data = self.serial.read(1)
                            sleep(1)
                            data = data + self.serial.read(
                                self.serial.inWaiting())
                            data_judge = data[len(data) - 8:len(data)]
                        # Field 4 of the NSORF reply is the hex payload.
                        data = str(data).split(",")
                        data = (bytearray.fromhex(data[4])).decode('utf-8')
                        data = json.loads(data)
                        self.recv = data
                        print("receive:")
                        print(data)
                    # Close socket 0 before the next polling cycle.
                    self._at_until('AT+NSOCL=0', b'\r\nOK\r\n')
                except Exception:
                    # Best-effort recovery: always close the socket and retry.
                    # (Original used a bare `except:` and could skip the close
                    # loop depending on the stale data_judge value.)
                    self._at_until('AT+NSOCL=0', b'\r\nOK\r\n')

        def run_threaded(self, has_arrived):
            # Notify the server exactly once when the car arrives, then hand
            # back the last cached server state.
            if has_arrived == 'arrive':
                if self.arrived == 0:
                    self.arrived += 1
                    print("!!!!!!car_state change to c(waiting to get)")
                    try:
                        self._at_until('AT+NSOCR=DGRAM,17,25535,1',
                                       b'\r\n0\r\n\r\nOK\r\n',
                                       b'\r\n+CME ERROR: 4\r\n')
                        judge = '0'
                        data = {'order': 'car_state'}
                        data = json.dumps(data)
                        l = len(data)
                        legal = "\r\n0," + str(len(data)) + "\r\n\r\nOK\r\n"
                        while judge != legal.encode('utf-8'):
                            send_data = "AT+NSOST=0,139.199.105.136,25535," + str(
                                len(data)) + "," + (binascii.b2a_hex(
                                    str(data).encode('utf-8'))).decode()
                            print(send_data)
                            send_data = send_data + '\r\n'
                            self.serial.write(send_data.encode())
                            data = self.serial.read(1)
                            sleep(1)
                            data = data + self.serial.read(
                                self.serial.inWaiting())
                            judge = data[0:14]
                    except Exception:
                        print("send arrive error!")
            else:
                self.arrived = 0
            tmp = self.recv
            if tmp != '':
                return tmp['box_state'], tmp['car_state'], tmp['pos']
            else:
                # No server state yet: all boxes open, idle car state.
                return ['open', 'open', 'open'], 'a', 'a'

        def shutdown(self):
            # BUG FIX: was misspelled `shuwtdonw`, so the Vehicle framework
            # never invoked this cleanup hook on shutdown.
            GPIO.cleanup()
            print("clean udp socket")
            self._at_until('AT+NSOCL=0', b'\r\nOK\r\n')

    communication = communication_class()
    V.add(communication,
          inputs=['arrive_signal'],
          outputs=['lock_state', 'car_state', 'pos'],
          threaded=True)

    # -------------------------------- lock_ctr --------------------------------
    class lock_ctr_class:
        """Drives the three box-lock GPIO pins from the server lock state."""

        # Board pins for locks 1..3.
        PINS = (36, 38, 40)

        def __init__(self):
            GPIO.setmode(GPIO.BOARD)
            for pin in self.PINS:
                GPIO.setup(pin, GPIO.OUT)

        def run(self, state):
            # 'close' -> pin high, anything else -> pin low.
            for pin, lock in zip(self.PINS, state):
                GPIO.output(pin, lock == 'close')

        def shutdown(self):
            GPIO.cleanup()

    lock_ctr = lock_ctr_class()
    V.add(lock_ctr, inputs=['lock_state'], outputs=[])

    #3 ---------------------------- PilotCondition ----------------------------
    class PilotCondition:
        """Derives (run_pilot, user/mode) from the server car state."""

        def run(self, car_state):
            # 'a' (or no state yet) means idle: stay in manual mode.
            if car_state == 'a' or car_state == '':
                return False, 'user'
            else:
                return True, 'local_pilot'

    V.add(PilotCondition(),
          inputs=['car_state'],
          outputs=['run_pilot', 'user/mode'])

    #4 ----------------------------- RecordTracker ----------------------------
    def get_record_alert_color(num_records):
        # Map a record count onto the configured alert color bands.
        col = (0, 0, 0)
        for count, color in cfg.RECORD_ALERT_COLOR_ARR:
            if num_records >= count:
                col = color
        return col

    class RecordTracker:
        """Prints recording progress and emits a transient alert color."""

        def __init__(self):
            self.last_num_rec_print = 0
            self.dur_alert = 0
            self.force_alert = 0

        def run(self, num_records):
            if num_records is None:
                return 0
            if self.last_num_rec_print != num_records or self.force_alert:
                self.last_num_rec_print = num_records
                if num_records % 10 == 0:
                    print("recorded", num_records, "records")
                if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert:
                    self.dur_alert = (num_records // cfg.REC_COUNT_ALERT *
                                      cfg.REC_COUNT_ALERT_CYC)
                    self.force_alert = 0
            if self.dur_alert > 0:
                self.dur_alert -= 1
            if self.dur_alert != 0:
                return get_record_alert_color(num_records)
            return 0

    rec_tracker_part = RecordTracker()
    V.add(rec_tracker_part,
          inputs=["tub/num_records"],
          outputs=['records/alert'])

    #5 --------------------------------- model --------------------------------
    def load_model(kl, model_path):
        # Load a full Keras model file into the part; never raises.
        start = time.time()
        try:
            print('loading model', model_path)
            kl.load(model_path)
            print('finished loading in %s sec.' % (str(time.time() - start)))
        except Exception as e:
            print(e)
            print('ERR>> problems loading model', model_path)

    def load_weights(kl, weights_path):
        # Load only the weights into an already-built model; never raises.
        start = time.time()
        try:
            print('loading model weights', weights_path)
            kl.model.load_weights(weights_path)
            print('finished loading in %s sec.' % (str(time.time() - start)))
        except Exception as e:
            print(e)
            print('ERR>> problems loading weights', weights_path)

    def load_model_json(kl, json_fnm):
        # Rebuild the model architecture from a JSON file; never raises.
        start = time.time()
        print('loading model json', json_fnm)
        from tensorflow.python import keras
        try:
            with open(json_fnm, 'r') as handle:
                contents = handle.read()
                kl.model = keras.models.model_from_json(contents)
            print('finished loading json in %s sec.' %
                  (str(time.time() - start)))
        except Exception as e:
            print(e)
            print("ERR>> problems loading model json", json_fnm)

    if model_path:
        # When we have a model, first create the matching Keras part
        # (model_type defaults to linear).
        kl = dk.utils.get_model_by_type(model_type, cfg)
        model_reload_cb = None
        if '.h5' in model_path:
            # .h5 files carry everything: architecture and weights.
            load_model(kl, model_path)

            def reload_model(filename):
                # Stop the car, hot-swap the pilot model, and resume.
                print("reloading start")
                V.sudden_control(0)
                load_model(kl, filename)
                print(
                    '***************\r\nreloading is done\r\n**************\r\n'
                )

            # NOTE(review): the reload callback is kept but no trigger part
            # is wired to it in this build.
            model_reload_cb = reload_model
        else:
            print("ERR>> Unknown extension type on model file!!")
            return

        inputs = ['cam/image_array']
        outputs = ['pilot/angle', 'pilot/throttle', 'pilot/arrival']
        V.add(kl, inputs=inputs, outputs=outputs, run_condition='run_pilot')

    # ---------------------------- turn_delay_trigger --------------------------
    class turn_delay_class:
        """Sequences timed turns from a scripted direction list.

        While `trigger` is set, the vehicle holds the current scripted
        direction for `pause_time` seconds (timed in the update() thread),
        then advances to the next direction.
        """

        def __init__(self, pause_time):
            self.pause = False
            self.trigger = 0
            self.pause_time = pause_time
            self.now_direc = 0
            # 'L'eft / 'R'ight / 'S'traight / 'P'ause script, chosen by pos.
            self.direc_list = ['L']

        def update(self):
            # Threaded timer: expire pause and turn windows.
            while 1:
                if self.pause == True:
                    sleep(2)
                    self.pause = False
                if self.trigger == 1:
                    time.sleep(self.pause_time)
                    self.trigger = 0
                    self.now_direc += 1

        def run_threaded(self, arrival_signal, mode, has_arrived, pos,
                         car_state):
            # Returns (turn_active, direction, arrive_flag).
            # arrival_signal > 0 marks a detected waypoint.
            if self.pause == True:
                return True, 'P', ''
            if self.trigger == 1:
                return True, self.direc_list[self.now_direc], ''
            if mode == 'user':
                # Manual mode resets the scripted sequence; the script is
                # chosen from the server-reported position.
                self.now_direc = 0
                self.trigger = 0
                self.pause = False
                if pos == 'a':
                    self.direc_list = ['L']
                elif pos == 'b':
                    self.direc_list = ['S']
                print('~~~~~~~~~~~~~~~~~~~direction restart')
                return False, '', ''
            if has_arrived == 'arrive':
                return False, '', 'arrive'
            if arrival_signal > 0:
                # Script exhausted: the car has reached its destination.
                if self.now_direc >= len(self.direc_list):
                    print("arrive--------------------------")
                    return False, '', 'arrive'
                self.trigger = 1
                self.pause = True
                print("**************************start turn!!!, now is {}".
                      format(self.direc_list[self.now_direc]))
                return True, self.direc_list[self.now_direc], ''
            else:
                return False, '', ''

        def shutdown(self):
            return

    turn_delay = turn_delay_class(1.65)
    V.add(turn_delay,
          inputs=[
              'pilot/arrival', 'user/mode', 'arrive_signal', 'pos',
              'car_state'
          ],
          outputs=['turn/mode', 'turn_direction', 'arrive_signal'],
          threaded=True)

    # ------------------------------- turn_module ------------------------------
    class turn_module_class:
        """Maps a scripted direction letter to (angle, throttle)."""

        def __init__(self):
            pass

        def run(self, direc):
            if direc == 'P':  # pause
                return 0, 0
            elif direc == 'L':  # left
                return -1, 1
            elif direc == 'R':  # right
                return 1, -1
            elif direc == 'S':  # straight
                return 0.4, 0.4
            else:  # pause
                return 0, 0

        def shutdown(self):
            return

    turn_module = turn_module_class()
    V.add(turn_module,
          inputs=['turn_direction'],
          outputs=['turn/angle', 'turn/throttle'],
          run_condition='run_pilot')

    #6 -------------------------------- DriveMode -----------------------------
    # Choose what inputs should change the car.
    class DriveMode:
        def run(self, mode, turn_mode, user_angle, user_throttle, pilot_angle,
                pilot_throttle, turn_angle, turn_throttle, arrive_signal):
            # Scripted turns take priority over everything else.
            if turn_mode == True:
                return turn_angle, turn_throttle
            if mode == 'user':
                return user_angle, user_throttle
            elif mode == 'local_angle':
                return pilot_angle, user_throttle
            else:  # run_pilot
                if arrive_signal == 'arrive':
                    return 0, 0
                # Fixed cruise command while the pilot is engaged.
                # (Pilot outputs themselves are intentionally not used:
                #  return pilot_angle, pilot_throttle * cfg.AI_THROTTLE_MULT)
                return 0.3, 0.4

    V.add(DriveMode(),
          inputs=[
              'user/mode', 'turn/mode', 'user/angle', 'user/throttle',
              'pilot/angle', 'pilot/throttle', 'turn/angle', 'turn/throttle',
              'arrive_signal'
          ],
          outputs=['angle', 'throttle'])

    #7 ----------------------------- AiRunCondition ---------------------------
    class AiRunCondition:
        '''
        A bool part to let us know when ai is running.
        '''

        def run(self, mode):
            if mode == "user":
                return False
            return True

    V.add(AiRunCondition(), inputs=['user/mode'], outputs=['ai_running'])

    #8 ---------------------------------- Motor -------------------------------
    #Drive train setup
    if cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL":
        print('DC start!')
        from donkeycar.parts.actuator import TwoWheelSteeringThrottle, Mini_HBridge_DC_Motor_PWM

        # H-bridge pin assignment (board numbering).
        LEFT_PID_DIR1 = 11
        LEFT_PID_DIR2 = 13
        LEFT_PWM = 15
        RIGHT_PID_DIR1 = 12
        RIGHT_PID_DIR2 = 16
        RIGHT_PWM = 18

        left_motor = Mini_HBridge_DC_Motor_PWM(LEFT_PID_DIR1, LEFT_PID_DIR2,
                                               LEFT_PWM)
        right_motor = Mini_HBridge_DC_Motor_PWM(RIGHT_PID_DIR1,
                                                RIGHT_PID_DIR2, RIGHT_PWM)
        two_wheel_control = TwoWheelSteeringThrottle()

        V.add(two_wheel_control,
              inputs=['throttle', 'angle'],
              outputs=['left_motor_speed', 'right_motor_speed'])
        V.add(left_motor, inputs=['left_motor_speed'])
        V.add(right_motor, inputs=['right_motor_speed'])

    #9 ----------------------------------- tub --------------------------------
    #add tub to save data
    inputs = [
        'cam/image_array', 'user/angle', 'user/throttle', 'user/arrival',
        'user/mode'
    ]
    types = ['image_array', 'float', 'float', 'float', 'str']

    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
    V.add(tub,
          inputs=inputs,
          outputs=["tub/num_records"],
          run_condition='recording')

    if type(ctr) is LocalWebController:
        print("You can now go to <your pi ip address>:8887 to drive your car.")

    #run the vehicle
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
def drive(cfg):
    """Assemble and run a simple CV-piloted vehicle.

    Builds the part loop: webcam -> OpenCV controller -> PWM
    steering/throttle, with a tub writer recording camera frames and
    actuator values while 'recording' is set. Runs at cfg.DRIVE_LOOP_HZ
    for at most cfg.MAX_LOOPS iterations.
    """
    vehicle = dk.vehicle.Vehicle()

    # Camera feeds every downstream part via the 'cam/image_array' channel.
    camera = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                    image_d=cfg.IMAGE_DEPTH)
    vehicle.add(camera, outputs=['cam/image_array'], threaded=True)

    # The CV controller produces steering/throttle and the recording flag.
    vehicle.add(MyCVController(),
                inputs=['cam/image_array'],
                outputs=['steering', 'throttle', 'recording'])

    # Sombrero hat: constructing it configures the hardware as a side effect,
    # so the instance is kept bound for the life of this function.
    if cfg.HAVE_SOMBRERO:
        from donkeycar.parts.sombrero import Sombrero
        sombrero = Sombrero()

    # Drive train: PCA9685 PWM channels for steering and throttle.
    from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle

    steering_pwm = PCA9685(cfg.STEERING_CHANNEL, cfg.PCA9685_I2C_ADDR,
                           busnum=cfg.PCA9685_I2C_BUSNUM)
    steering = PWMSteering(controller=steering_pwm,
                           left_pulse=cfg.STEERING_LEFT_PWM,
                           right_pulse=cfg.STEERING_RIGHT_PWM)

    throttle_pwm = PCA9685(cfg.THROTTLE_CHANNEL, cfg.PCA9685_I2C_ADDR,
                           busnum=cfg.PCA9685_I2C_BUSNUM)
    throttle = PWMThrottle(controller=throttle_pwm,
                           max_pulse=cfg.THROTTLE_FORWARD_PWM,
                           zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                           min_pulse=cfg.THROTTLE_REVERSE_PWM)

    vehicle.add(steering, inputs=['steering'])
    vehicle.add(throttle, inputs=['throttle'])

    # Data recording: one tub row per loop while 'recording' is truthy.
    record_inputs = ['cam/image_array', 'steering', 'throttle']
    record_types = ['image_array', 'float', 'float']
    tub_handler = TubHandler(path=cfg.DATA_PATH)
    tub = tub_handler.new_tub_writer(inputs=record_inputs, types=record_types)
    vehicle.add(tub, inputs=record_inputs, outputs=["tub/num_records"],
                run_condition='recording')

    # Spin the main loop.
    vehicle.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
def drive(cfg, model_path=None, use_joystick=False):
    """Assemble and run the NCS/tiny-yolo vehicle loop.

    Wires: status LED, loop-time probe, PiCamera, a tinyyolo detector on the
    NCS stick, a web (or joystick+web) controller, a Keras cropped-categorical
    pilot, PWM actuators for steering/throttle/pan/tilt, and a tub writer.
    Runs at cfg.DRIVE_LOOP_HZ for at most cfg.MAX_LOOPS iterations.
    """
    V = dk.vehicle.Vehicle()

    # Status LED (center RGB on channels 9/10/11) driven later from
    # mode/recording state via the status_indicator part.
    center_led = rgb_led(red_channel=9, green_channel=10, blue_channel=11)
    status_led = status_indicator(status_led=center_led)
    V.add(center_led, outputs=['none'])

    # Loop-timing probe; also emits text for optional display parts.
    loop_timer = driveLoopTime()
    V.add(loop_timer, inputs=['timein'], outputs=['timein', 'displaytext'])

    # Camera source.
    cam = PiCamera(resolution=cfg.CAMERA_RESOLUTION)
    V.add(cam, outputs=['cam/image_array'], threaded=True)

    # Tiny-YOLO object detector; draws detections onto its output image.
    detector = tinyyolo(basedir=cfg.MODELS_PATH, draw_on_img=True,
                        probability_threshold=0.07, debug=False)
    V.add(detector, inputs=['cam/image_array'],
          outputs=['ncs/image_array', 'ncs/found_objs'], threaded=True)

    # Constructed but not added to the loop, matching the original wiring.
    loop_time_display = ImgPutText()

    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        # NOTE(review): the joystick controller is constructed but never added
        # to the vehicle here -- only the web view publishes the user/*
        # channels in this branch. Confirm whether that is intended.
        ctr = JoystickController(max_throttle=cfg.JOYSTICK_MAX_THROTTLE,
                                 steering_scale=cfg.JOYSTICK_STEERING_SCALE,
                                 auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE,
                                 throttle_axis='y',
                                 steering_axis='x',
                                 panning_axis='z',
                                 tilting_axis='rz')
        ctr_webview = LocalWebController()
        V.add(ctr_webview,
              inputs=['ncs/image_array'],
              outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
              threaded=True)
    else:
        # Web-only control; this branch also publishes pan/tilt channels.
        ctr = LocalWebController()
        V.add(ctr,
              inputs=['ncs/image_array'],
              outputs=['user/angle', 'user/throttle', 'user/mode',
                       'recording', 'user/pan', 'user/tilt'],
              threaded=True)

    # run_condition helper: the pilot only runs outside 'user' mode.
    def pilot_condition(mode):
        return mode != 'user'

    V.add(Lambda(pilot_condition), inputs=['user/mode'], outputs=['run_pilot'])

    # Keras pilot; weights loaded from disk when a path is supplied.
    kl = KerasCroppedCategorical()
    if model_path:
        kl.load(model_path)

    resize_part = ImgResize()
    V.add(resize_part, inputs=['cam/image_array'], outputs=['cam/image_array'])
    V.add(kl, inputs=['cam/image_array'],
          outputs=['pilot/angle', 'pilot/throttle'],
          run_condition='run_pilot')

    # Select user vs pilot control according to the drive mode.
    def drive_mode(mode, user_angle, user_throttle, pilot_angle,
                   pilot_throttle):
        if mode == 'user':
            return user_angle, user_throttle
        if mode == 'local_angle':
            return pilot_angle, user_throttle
        return pilot_angle, pilot_throttle

    V.add(Lambda(drive_mode),
          inputs=['user/mode', 'user/angle', 'user/throttle',
                  'pilot/angle', 'pilot/throttle'],
          outputs=['angle', 'throttle'])

    # PWM actuators on the PCA9685.
    steering = PWMSteering(controller=PCA9685(cfg.STEERING_CHANNEL),
                           left_pulse=cfg.STEERING_LEFT_PWM,
                           right_pulse=cfg.STEERING_RIGHT_PWM)
    throttle = PWMThrottle(controller=PCA9685(cfg.THROTTLE_CHANNEL),
                           max_pulse=cfg.THROTTLE_FORWARD_PWM,
                           zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                           min_pulse=cfg.THROTTLE_REVERSE_PWM)
    panning = PWMPanning(controller=PCA9685(cfg.PAN_CHANNEL),
                         left_pulse=cfg.PAN_LEFT_PWM,
                         zero_pulse=cfg.PAN_CENTER_PWM,
                         right_pulse=cfg.PAN_RIGHT_PWM)
    tilting = PWMThrottle(controller=PCA9685(cfg.TILT_CHANNEL),
                          max_pulse=cfg.TILT_UP_PWM,
                          zero_pulse=cfg.TILT_DRIVING_PWM,
                          min_pulse=cfg.TILT_DOWN_PWM)
    # Tail servo: constructed (hardware init side effects) but never added.
    wagging = PWMThrottle(controller=PCA9685(cfg.TAIL_CHANNEL),
                          max_pulse=cfg.TAIL_UP_PWM,
                          zero_pulse=cfg.TAIL_CENTER_PWM,
                          min_pulse=cfg.TAIL_DOWN_PWM)

    V.add(status_led, inputs=['user/mode', 'recording'])
    V.add(steering, inputs=['angle'])
    V.add(throttle, inputs=['throttle'])
    V.add(panning, inputs=['user/pan'])
    V.add(tilting, inputs=['user/tilt'])

    # Record the post-mode-selection angle/throttle plus the camera frame.
    rec_inputs = ['cam/image_array', 'angle', 'throttle', 'user/mode']
    rec_types = ['image_array', 'float', 'float', 'str']
    tub = TubHandler(path=cfg.DATA_PATH).new_tub_writer(inputs=rec_inputs,
                                                        types=rec_types)
    V.add(tub, inputs=rec_inputs, run_condition='recording')

    # Run the main loop.
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
    print("You can now go to <your pi ip address>:8887 to drive your car.")
def drive(cfg, model_path=None, meta=None):
    """Assemble and run a face-following vehicle.

    Pipeline: PiCamera -> (pilot) 'jaws' face detector -> face-box overlay ->
    web controller -> DriveMode -> JawsController, with a tub recording the
    detector outputs alongside each frame.

    Args:
        cfg: donkeycar config object.
        model_path: optional path to the 'jaws' Keras model weights.
        meta: optional list of user metadata entries for the tub.
            Fix: the original signature used the mutable default ``meta=[]``,
            which is shared between calls; normalized to ``None`` here with
            identical effective behavior for callers.
    """
    if meta is None:
        meta = []

    # Initialize car
    V = dk.vehicle.Vehicle()

    from donkeycar.parts.camera import PiCamera
    V.add(PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                   image_d=cfg.IMAGE_DEPTH),
          outputs=['cam/image_array'],
          threaded=True)

    # The web UI displays the face-box overlay image, not the raw frame.
    V.add(LocalWebController(),
          inputs=['cam/image_array_face_box'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    print("You can now go to <your pi ip address>:8887 to drive your car.")

    class PilotCondition:
        """Boolean gate: pilot parts run only outside 'user' mode."""

        def run(self, mode):
            if mode == 'user':
                return False
            else:
                return True

    V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])

    class ImgPreProcess():
        '''
        preprocess camera image for inference.
        normalize and crop if needed.
        '''

        def __init__(self, cfg):
            self.cfg = cfg

        def run(self, img_arr):
            return normalize_and_crop(img_arr, self.cfg)

    V.add(ImgPreProcess(cfg),
          inputs=['cam/image_array'],
          outputs=['cam/normalized/cropped'],
          run_condition='run_pilot')

    def load_model(kl, model_path):
        # Load Keras weights, timing the (slow) load for visibility.
        start = time.time()
        print('loading model', model_path)
        kl.load(model_path)
        print('finished loading in %s sec.' % (str(time.time() - start)))

    kl = dk.utils.get_model_by_type('jaws', cfg)
    load_model(kl, model_path)

    V.add(kl,
          inputs=['cam/normalized/cropped'],
          outputs=['face_x', 'face_y', 'face_w', 'face_h', 'confidence'],
          run_condition='run_pilot')

    class FaceFollowingPilot:
        """Placeholder pilot: always outputs zero angle/throttle."""

        def run(self, x, y, w, h, confidence):
            return 0, 0

    V.add(FaceFollowingPilot(),
          inputs=['face_x', 'face_y', 'face_w', 'face_h', 'confidence'],
          outputs=['pilot/angle', 'pilot/throttle'],
          run_condition='run_pilot')

    class DrawBoxForFaceDetection:
        """Draw the detected face rectangle onto a copy of the camera frame."""

        def __init__(self, cfg):
            self.cfg = cfg

        def run(self, img, x, y, w, h, confidence):
            if img is not None:
                color = np.array([0, 255, 0], dtype=np.uint8)
                color2 = np.array([255, 255, 0], dtype=np.uint8)
                img_out = np.copy(img)
                img_w, img_h = self.cfg.IMAGE_W, self.cfg.IMAGE_H
                # Detector outputs appear to be normalized fractions of the
                # frame; convert to pixel coordinates.
                x_px = int(img_w * x)
                y_px = int(img_h * y)
                w_px = int(img_w * w)
                h_px = int(img_h * h)
                print('x:{}, y:{}, w:{}, h:{}'.format(x_px, y_px, w_px, h_px))
                u = y_px
                d = y_px + h_px
                l = x_px
                r = x_px + w_px
                # Clamp all four edges into image bounds.
                u = min(img_h - 1, max(0, u))
                d = min(img_h - 1, max(0, d))
                l = min(img_w - 1, max(0, l))
                r = min(img_w - 1, max(0, r))
                img_out[u:d, l] = color
                img_out[u:d, r] = color
                img_out[u, l:r] = color2  # top edge in a distinct color
                img_out[d, l:r] = color
                return img_out
            return img

    V.add(DrawBoxForFaceDetection(cfg),
          inputs=['cam/image_array', 'face_x', 'face_y', 'face_w', 'face_h',
                  'confidence'],
          outputs=['cam/image_array_face_box'],
          run_condition='run_pilot')

    # Choose what inputs should change the car.
    class DriveMode:
        """Select user vs pilot control depending on the drive mode."""

        def run(self, mode, user_angle, user_throttle, pilot_angle,
                pilot_throttle):
            if mode == 'user':
                return user_angle, user_throttle
            elif mode == 'local_angle':
                return pilot_angle, user_throttle
            else:
                return pilot_angle, pilot_throttle * cfg.AI_THROTTLE_MULT

    V.add(DriveMode(),
          inputs=['user/mode', 'user/angle', 'user/throttle',
                  'pilot/angle', 'pilot/throttle'],
          outputs=['angle', 'throttle'])

    class AiRunCondition:
        '''
        A bool part to let us know when ai is running.
        '''

        def run(self, mode):
            if mode == "user":
                return False
            return True

    V.add(AiRunCondition(), inputs=['user/mode'], outputs=['ai_running'])

    class Dummy:
        """Constant source for the 'climbing' channel."""

        def run(self):
            return 0

    V.add(Dummy(), outputs=['climbing'])
    V.add(JawsController(),
          inputs=['angle', 'climbing', 'throttle'],
          threaded=True)

    # add tub to save data: raw detector outputs plus each frame.
    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(
        inputs=['cam/image_array', 'face_x', 'face_y', 'face_w', 'face_h',
                'confidence'],
        types=['image_array', 'float', 'float', 'float', 'float', 'float'],
        user_meta=meta)
    V.add(tub,
          inputs=['cam/image_array', 'face_x', 'face_y', 'face_w', 'face_h',
                  'confidence'],
          outputs=["tub/num_records"],
          run_condition='recording')

    # run the vehicle loop
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
def drive(cfg, model_path=None, use_joystick=False, model_type=None,
          camera_type='single'):
    '''
    Construct a working robotic vehicle from many parts. Each part runs as a
    job in the Vehicle loop, calling either it's run or run_threaded method
    depending on the constructor flag `threaded`. All parts are updated one
    after another at the framerate given in cfg.DRIVE_LOOP_HZ assuming each
    part finishes processing in a timely manner. Parts may have named outputs
    and inputs. The framework handles passing named outputs to parts
    requesting the same named input.

    Fix applied in this revision: the three bare ``except:`` clauses (model
    loading, weight loading, optional button trigger) are narrowed to
    ``except Exception`` so KeyboardInterrupt/SystemExit are no longer
    swallowed. No other behavior changed.
    '''
    # Resolve the model type from config when not given explicitly.
    if model_type is None:
        if cfg.TRAIN_BEHAVIORS:
            model_type = "behavior"
        else:
            model_type = "categorical"

    if model_type == "streamline":
        camera_type = "streamline"

    # Initialize car
    V = dk.vehicle.Vehicle()

    if camera_type == "stereo":
        # Two cameras combined into one pseudo-color stereo frame.
        if cfg.CAMERA_TYPE == "WEBCAM":
            from donkeycar.parts.camera import Webcam
            camA = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                          image_d=cfg.IMAGE_DEPTH, iCam=0)
            camB = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                          image_d=cfg.IMAGE_DEPTH, iCam=1)
        elif cfg.CAMERA_TYPE == "CVCAM":
            from donkeycar.parts.cv import CvCam
            camA = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH, iCam=0)
            camB = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH, iCam=1)
        else:
            raise (Exception("Unsupported camera type: %s" % cfg.CAMERA_TYPE))

        V.add(camA, outputs=['cam/image_array_a'], threaded=True)
        V.add(camB, outputs=['cam/image_array_b'], threaded=True)

        def stereo_pair(image_a, image_b):
            '''
            This will take the two images and combine them into a single
            image. One in red, the other in green, and diff in blue channel.
            '''
            if image_a is not None and image_b is not None:
                width, height, _ = image_a.shape
                grey_a = dk.utils.rgb2gray(image_a)
                grey_b = dk.utils.rgb2gray(image_b)
                grey_c = grey_a - grey_b
                stereo_image = np.zeros([width, height, 3],
                                        dtype=np.dtype('B'))
                stereo_image[..., 0] = np.reshape(grey_a, (width, height))
                stereo_image[..., 1] = np.reshape(grey_b, (width, height))
                stereo_image[..., 2] = np.reshape(grey_c, (width, height))
            else:
                stereo_image = []
            return np.array(stereo_image)

        image_sterero_pair_part = Lambda(stereo_pair)
        V.add(image_sterero_pair_part,
              inputs=['cam/image_array_a', 'cam/image_array_b'],
              outputs=['cam/image_array'])
    else:
        print("cfg.CAMERA_TYPE", cfg.CAMERA_TYPE)
        if cfg.CAMERA_TYPE == "PICAM":
            from donkeycar.parts.camera import PiCamera
            cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                           image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "WEBCAM":
            from donkeycar.parts.camera import Webcam
            cam = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "CVCAM":
            from donkeycar.parts.cv import CvCam
            cam = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                        image_d=cfg.IMAGE_DEPTH)
        else:
            raise (Exception("Unkown camera type: %s" % cfg.CAMERA_TYPE))

        V.add(cam, outputs=['cam/image_array'], threaded=True)

    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        # modify max_throttle closer to 1.0 to have more power
        # modify steering_scale lower than 1.0 to have less responsive steering
        from donkeycar.parts.controller import PS3JoystickController, PS4JoystickController
        cont_class = MyJoystickController
        if cfg.CONTROLLER_TYPE == "ps4":
            cont_class = PS4JoystickController
        ctr = cont_class(throttle_scale=cfg.JOYSTICK_MAX_THROTTLE,
                         steering_scale=cfg.JOYSTICK_STEERING_SCALE,
                         auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)

        if cfg.USE_NETWORKED_JS:
            from donkeycar.parts.controller import JoyStickSub
            netwkJs = JoyStickSub(cfg.NETWORK_JS_SERVER_IP)
            V.add(netwkJs, threaded=True)
            ctr.js = netwkJs
    else:
        # This web controller will create a web server that is capable
        # of managing steering, throttle, and modes, and more.
        ctr = LocalWebController()

    V.add(ctr,
          inputs=['cam/image_array'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    # this throttle filter will allow one tap back for esc reverse
    th_filter = ThrottleFilter()
    V.add(th_filter, inputs=['user/throttle'], outputs=['user/throttle'])

    # See if we should even run the pilot module.
    # This is only needed because the part run_condition only accepts boolean
    def pilot_condition(mode):
        if mode == 'user':
            return False
        else:
            return True

    pilot_condition_part = Lambda(pilot_condition)
    V.add(pilot_condition_part, inputs=['user/mode'], outputs=['run_pilot'])

    def led_cond(mode, recording, recording_alert, behavior_state,
                 reloaded_model):
        # returns a blink rate. 0 for off. -1 for on. positive for rate.
        # Uses `led`, bound below under cfg.HAVE_RGB_LED (the only case in
        # which this closure is added to the vehicle).
        if reloaded_model:
            led.set_rgb(cfg.MODEL_RELOADED_LED_R, cfg.MODEL_RELOADED_LED_G,
                        cfg.MODEL_RELOADED_LED_B)
            return 0.1
        else:
            led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B)

        if recording_alert:
            led.set_rgb(*recording_alert)
            return cfg.REC_COUNT_ALERT_BLINK_RATE
        else:
            led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B)

        if behavior_state is not None and model_type == 'behavior':
            r, g, b = cfg.BEHAVIOR_LED_COLORS[behavior_state]
            led.set_rgb(r, g, b)
            return -1  # solid on

        if recording:
            return -1  # solid on
        elif mode == 'user':
            return 1
        elif mode == 'local_angle':
            return 0.5
        elif mode == 'local':
            return 0.1
        return 0

    if cfg.HAVE_RGB_LED:
        from donkeycar.parts.led_status import RGB_LED
        led = RGB_LED(cfg.LED_PIN_R, cfg.LED_PIN_G, cfg.LED_PIN_B,
                      cfg.LED_INVERT)
        led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B)

        led_cond_part = Lambda(led_cond)
        V.add(led_cond_part,
              inputs=['user/mode', 'recording', "records/alert",
                      'behavior/state', 'reloaded/model'],
              outputs=['led/blink_rate'])
        V.add(led, inputs=['led/blink_rate'])

    def get_record_alert_color(num_records):
        # Map a record count onto the configured alert-color thresholds.
        col = (0, 0, 0)
        for count, color in cfg.RECORD_ALERT_COLOR_ARR:
            if num_records >= count:
                col = color
        return col

    def record_tracker(num_records):
        # Emits an alert color while recording milestones are hit; state is
        # kept in function attributes (initialized right below).
        if num_records is None:
            return 0

        if record_tracker.last_num_rec_print != num_records or \
                record_tracker.force_alert:
            record_tracker.last_num_rec_print = num_records

            if num_records % 10 == 0:
                print("recorded", num_records, "records")

            if num_records % cfg.REC_COUNT_ALERT == 0 or \
                    record_tracker.force_alert:
                record_tracker.dur_alert = num_records // cfg.REC_COUNT_ALERT \
                    * cfg.REC_COUNT_ALERT_CYC
                record_tracker.force_alert = 0

        if record_tracker.dur_alert > 0:
            record_tracker.dur_alert -= 1

        if record_tracker.dur_alert != 0:
            return get_record_alert_color(num_records)

        return 0

    record_tracker.last_num_rec_print = 0
    record_tracker.dur_alert = 0
    record_tracker.force_alert = 0
    rec_tracker_part = Lambda(record_tracker)
    V.add(rec_tracker_part, inputs=["tub/num_records"],
          outputs=['records/alert'])

    if cfg.AUTO_RECORD_ON_THROTTLE and isinstance(ctr, JoystickController):
        # then we are not using the circle button. hijack that to force a
        # record count indication
        def show_record_acount_status():
            record_tracker.last_num_rec_print = 0
            record_tracker.force_alert = 1

        ctr.set_button_down_trigger('circle', show_record_acount_status)

    # IMU
    if cfg.HAVE_IMU:
        imu = Mpu6050()
        V.add(imu,
              outputs=['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
                       'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'],
              threaded=True)

    # Behavioral state
    if cfg.TRAIN_BEHAVIORS:
        bh = BehaviorPart(cfg.BEHAVIOR_LIST)
        V.add(bh, outputs=['behavior/state', 'behavior/label',
                           "behavior/one_hot_state_array"])
        try:
            ctr.set_button_down_trigger('L1', bh.increment_state)
        except Exception:
            # Fix: was a bare `except:`; controllers without button triggers
            # (e.g. the web controller) are tolerated, but Ctrl-C is not
            # swallowed anymore.
            pass

        inputs = ['cam/image_array', "behavior/one_hot_state_array"]
    # IMU
    elif model_type == "imu":
        assert (cfg.HAVE_IMU)
        # Run the pilot if the mode is not user.
        inputs = ['cam/image_array',
                  'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
                  'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
    else:
        inputs = ['cam/image_array']

    def load_model(kl, model_path):
        # Load a full Keras model file; errors are reported, not fatal,
        # so a bad reload does not kill the drive loop.
        start = time.time()
        try:
            print('loading model', model_path)
            kl.load(model_path)
            print('finished loading in %s sec.' % (str(time.time() - start)))
        except Exception:
            # Fix: narrowed from bare `except:`.
            print('problems loading model', model_path)

    def load_weights(kl, weights_path):
        # Load only the weights into an already-built model.
        start = time.time()
        try:
            print('loading model weights', weights_path)
            kl.model.load_weights(weights_path)
            print('finished loading in %s sec.' % (str(time.time() - start)))
        except Exception:
            # Fix: narrowed from bare `except:`.
            print('problems loading weights', weights_path)

    def load_model_json(kl, json_fnm):
        # Build the model architecture from a JSON description.
        start = time.time()
        print('loading model json', json_fnm)
        import keras
        with open(json_fnm, 'r') as handle:
            contents = handle.read()
            kl.model = keras.models.model_from_json(contents)
        print('finished loading json in %s sec.' %
              (str(time.time() - start)))

    if model_path:
        # When we have a model, first create an appropriate Keras part
        kl = dk.utils.get_model_by_type(model_type, cfg)

        if '.h5' in model_path:
            # when we have a .h5 extension
            # load everything from the model file
            load_model(kl, model_path)

            def reload_model(filename):
                print(filename, "was changed!")
                load_model(kl, filename)

            fw_part = FileWatcher(model_path, reload_model,
                                  wait_for_write_stop=10.0)
            V.add(fw_part, outputs=['reloaded/model'])

        elif '.json' in model_path:
            # when we have a .json extension
            # load the model from there and look for a matching
            # .wts file with just weights
            load_model_json(kl, model_path)
            weights_path = model_path.replace('.json', '.weights')
            load_weights(kl, weights_path)

            def reload_weights(filename):
                print(filename, "was changed!")
                weights_path = filename.replace('.json', '.weights')
                load_weights(kl, weights_path)

            fw_part = FileWatcher(model_path, reload_weights,
                                  wait_for_write_stop=1.0)
            V.add(fw_part, outputs=['reloaded/model'])

        else:
            # previous default behavior
            load_model(kl, model_path)

        V.add(kl, inputs=inputs,
              outputs=['pilot/angle', 'pilot/throttle'],
              run_condition='run_pilot')

    # Choose what inputs should change the car.
    def drive_mode(mode, user_angle, user_throttle, pilot_angle,
                   pilot_throttle):
        if mode == 'user':
            return user_angle, user_throttle
        elif mode == 'local_angle':
            return pilot_angle, user_throttle
        else:
            return pilot_angle, pilot_throttle

    drive_mode_part = Lambda(drive_mode)
    V.add(drive_mode_part,
          inputs=['user/mode', 'user/angle', 'user/throttle',
                  'pilot/angle', 'pilot/throttle'],
          outputs=['angle', 'throttle'])

    # Drive train setup
    if cfg.DRIVE_TRAIN_TYPE == "SERVO_ESC":
        from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle

        steering_controller = PCA9685(cfg.STEERING_CHANNEL,
                                      cfg.PCA9685_I2C_ADDR,
                                      busnum=cfg.PCA9685_I2C_BUSNUM)
        steering = PWMSteering(controller=steering_controller,
                               left_pulse=cfg.STEERING_LEFT_PWM,
                               right_pulse=cfg.STEERING_RIGHT_PWM)

        throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL,
                                      cfg.PCA9685_I2C_ADDR,
                                      busnum=cfg.PCA9685_I2C_BUSNUM)
        throttle = PWMThrottle(controller=throttle_controller,
                               max_pulse=cfg.THROTTLE_FORWARD_PWM,
                               zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                               min_pulse=cfg.THROTTLE_REVERSE_PWM)

        V.add(steering, inputs=['angle'])
        V.add(throttle, inputs=['throttle'])

    elif cfg.DRIVE_TRAIN_TYPE == "DC_STEER_THROTTLE":
        from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM

        steering = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT,
                                             cfg.HBRIDGE_PIN_RIGHT)
        throttle = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD,
                                             cfg.HBRIDGE_PIN_BWD)

        V.add(steering, inputs=['angle'])
        V.add(throttle, inputs=['throttle'])

    elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL":
        from donkeycar.parts.actuator import TwoWheelSteeringThrottle, Mini_HBridge_DC_Motor_PWM

        left_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT_FWD,
                                               cfg.HBRIDGE_PIN_LEFT_BWD)
        right_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_RIGHT_FWD,
                                                cfg.HBRIDGE_PIN_RIGHT_BWD)
        two_wheel_control = TwoWheelSteeringThrottle()

        V.add(two_wheel_control,
              inputs=['throttle', 'angle'],
              outputs=['left_motor_speed', 'right_motor_speed'])
        V.add(left_motor, inputs=['left_motor_speed'])
        V.add(right_motor, inputs=['right_motor_speed'])

    elif cfg.DRIVE_TRAIN_TYPE == "SERVO_HBRIDGE_PWM":
        from donkeycar.parts.actuator import ServoBlaster, PWMSteering
        steering_controller = ServoBlaster(cfg.STEERING_CHANNEL)  # really pin
        # PWM pulse values should be in the range of 100 to 200
        assert (cfg.STEERING_LEFT_PWM <= 200)
        assert (cfg.STEERING_RIGHT_PWM <= 200)
        steering = PWMSteering(controller=steering_controller,
                               left_pulse=cfg.STEERING_LEFT_PWM,
                               right_pulse=cfg.STEERING_RIGHT_PWM)

        from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
        motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD,
                                          cfg.HBRIDGE_PIN_BWD)

        V.add(steering, inputs=['angle'])
        V.add(motor, inputs=["throttle"])

    # add tub to save data
    inputs = ['cam/image_array', 'user/angle', 'user/throttle', 'user/mode']
    types = ['image_array', 'float', 'float', 'str']

    if cfg.TRAIN_BEHAVIORS:
        inputs += ['behavior/state', 'behavior/label',
                   "behavior/one_hot_state_array"]
        types += ['int', 'str', 'vector']

    if cfg.HAVE_IMU:
        inputs += ['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
                   'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
        types += ['float', 'float', 'float', 'float', 'float', 'float']

    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types)
    V.add(tub, inputs=inputs, outputs=["tub/num_records"],
          run_condition='recording')

    if type(ctr) is LocalWebController:
        print("You can now go to <your pi ip address>:8887 to drive your car.")
    elif isinstance(ctr, JoystickController):
        print("You can now move your joystick to drive your car.")
        # tell the controller about the tub
        ctr.set_tub(tub)

    # run the vehicle for 20 seconds
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
def test_lidar(cfg):
    """Exercise the RPLidar -> BreezySLAM pipeline for roughly one minute.

    Wires lidar scans into a SLAM pose estimate, plots the scan, renders the
    plot to an image, prints the pose in cm/degrees, and records everything
    to a tub. Stops cleanly on Ctrl-C.
    """
    vehicle = dk.vehicle.Vehicle()

    from donkeycar.parts.lidar import BreezySLAM, LidarPlot, MapToImage
    from donkeycar.parts.lidar import RPLidar

    # Raw scan source: per-loop lists of distances and angles.
    scanner = RPLidar()
    vehicle.add(scanner, outputs=['distances', 'angles'], threaded=True)

    # SLAM occupancy-map buffer consumed by BreezySLAM below.
    from donkeycar.parts.lidar import BreezyMap
    occupancy_map = BreezyMap()
    vehicle.add(occupancy_map, outputs=['map_bytes'])

    # 2-D plot of the current scan.
    from donkeycar.parts.lidar import LidarPlot
    scan_plot = LidarPlot()
    vehicle.add(scan_plot, inputs=['distances', 'angles'], outputs=['frame'])

    # Render the plot frame into a (500, 500, 3) image for the tub.
    from donkeycar.parts.lidar import MapToImage
    frame_to_image = MapToImage(resolution=(500, 500, 3))  # 750000 -> (500,500)
    vehicle.add(frame_to_image, inputs=['frame'], outputs=['image_array'])

    # SLAM pose estimate: x/y plus heading in radians.
    from donkeycar.parts.lidar import BreezySLAM
    slam = BreezySLAM()
    vehicle.add(slam,
                inputs=['distances', 'angles', 'map_bytes'],
                outputs=['x', 'y', 'rad'])

    class PrintB:
        """Print the SLAM pose as 'x_cm,y_cm,heading_deg'."""

        def run(self, x, y, rad):
            # Fold the heading into the (-180, 180] degree range.
            degree = math.degrees(rad)
            if degree >= 180.0:
                degree = (360.0 - degree) * (-1.0)
            xcm = float(x) / 10.0
            ycm = float(y) / 10.0
            print('{:.5g},{:.5g},{:.5g}'.format(xcm, ycm, degree))

        def shutdown(self):
            pass

    vehicle.add(PrintB(), inputs=['x', 'y', 'rad'])

    # Record image + pose every loop (no run_condition: always recording).
    record_inputs = ['image_array', 'x', 'y', 'rad']
    record_types = ['image_array', 'float', 'float', 'float']
    from donkeycar.parts.datastore import TubHandler
    tub = TubHandler(path=cfg.DATA_PATH).new_tub_writer(
        inputs=record_inputs, types=record_types, user_meta=[])
    vehicle.add(tub, inputs=record_inputs, outputs=["tub/num_records"])

    try:
        # About one minute of driving: DRIVE_LOOP_HZ loops/sec * 60 sec.
        vehicle.start(rate_hz=cfg.DRIVE_LOOP_HZ,
                      max_loop_count=cfg.DRIVE_LOOP_HZ * 60)
    except KeyboardInterrupt:
        print('exit')
    finally:
        pass
def drive(cfg, args=None, model_type=None, camera_type='single', meta=None):
    """Build and run the deepicar data-collection vehicle.

    Wires a fixed-throttle EnvSetting part, a PiCamera in 'deepicar' mode
    (which also emits a computed steering angle), a record tracker, the
    DriveMode selector, a DC steer/throttle drive train and a tub writer,
    then runs the vehicle loop for ``args.maxloop`` iterations.

    :param cfg: donkeycar config object.
    :param args: parsed CLI args; must provide ``.speed`` and ``.maxloop``.
    :param model_type: ignored — immediately overridden below by
        ``cfg.DEFAULT_MODEL_TYPE`` (kept for signature compatibility).
    :param camera_type: unused; kept for signature compatibility.
    :param meta: optional list of "key:value" strings stored as tub metadata.
    """
    # Fix: the original declared ``meta=[]``, a mutable default shared
    # across calls; default to None and build a fresh list per call.
    if meta is None:
        meta = []

    # NOTE(review): the model_type argument is unconditionally overridden
    # here — confirm this is intentional.
    model_type = cfg.DEFAULT_MODEL_TYPE
    speed = args.speed
    maxloop = args.maxloop
    print("maxloop=", maxloop, ",speed=", speed)

    ###########################################################
    # 1) Initialize the donkey car (vehicle loop container)
    ###########################################################
    V = dk.vehicle.Vehicle()

    ###########################################################
    # 2) Driving environment: constant mode + constant throttle
    ###########################################################
    class EnvSetting(object):
        """Emits a fixed drive mode and a constant throttle each loop."""

        def __init__(self, mode='user', throttle=12):  # throttle in percent
            self.mode = mode
            self.throttle = throttle * 0.01  # percent -> [0, 1] fraction

        def run(self):
            return self.mode, self.throttle

    V.add(EnvSetting(throttle=speed),
          inputs=[],
          outputs=['user/mode', 'user/throttle'])

    ###########################################################
    # 3) Camera setup (PICAM only)
    ###########################################################
    print("cfg.CAMERA_TYPE", cfg.CAMERA_TYPE)
    inputs = []
    threaded = True
    # Per-stage debug overlays for the deepicar lane-detection pipeline.
    dpcar_opt = {'org': True, 'hsv': True, 'edges': True, 'lanes': True}
    if cfg.CAMERA_TYPE == "PICAM":
        cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                       image_d=cfg.IMAGE_DEPTH,
                       framerate=cfg.CAMERA_FRAMERATE,
                       vflip=cfg.CAMERA_VFLIP, hflip=cfg.CAMERA_HFLIP,
                       mode='deepicar', dpcar_opt=dpcar_opt)
        print(cfg.IMAGE_H, "x", cfg.IMAGE_W)
    else:
        # Fix: corrected the "Unkown" typo and dropped the redundant
        # parentheses around the raised exception.
        raise Exception("Unknown camera type: %s" % cfg.CAMERA_TYPE)
    # In 'deepicar' mode the camera part also outputs the steering angle.
    V.add(cam, inputs=inputs,
          outputs=['cam/image_array', 'user/angle'],
          threaded=threaded)

    ###########################################################
    # 4) Record tracking: progress prints + alert-color channel
    ###########################################################
    class RecordTracker:
        """Tracks the record count and times the recording alert color."""

        def __init__(self):
            self.last_num_rec_print = 0
            self.dur_alert = 0
            self.force_alert = 0

        def run(self, num_records):
            print("num_records=>", num_records)
            if num_records is None:
                return 0
            if self.last_num_rec_print != num_records or self.force_alert:
                self.last_num_rec_print = num_records
                if num_records % 10 == 0:
                    print("recorded", num_records, "records")
                if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert:
                    # Show the alert for REC_COUNT_ALERT_CYC loop cycles.
                    self.dur_alert = (num_records // cfg.REC_COUNT_ALERT
                                      * cfg.REC_COUNT_ALERT_CYC)
                    self.force_alert = 0
            if self.dur_alert > 0:
                self.dur_alert -= 1
            if self.dur_alert != 0:
                return get_record_alert_color(num_records)
            return 0

    rec_tracker_part = RecordTracker()
    V.add(rec_tracker_part,
          inputs=["tub/num_records"],
          outputs=['records/alert'])

    ###########################################################
    # 5) DriveMode: choose which inputs actually move the car
    ###########################################################
    class DriveMode:
        """Selects user or pilot commands; 'recording' is always True."""

        def run(self, mode, user_angle, user_throttle,
                pilot_angle, pilot_throttle):
            if mode == 'user':
                print("user_angle->", user_angle,
                      ", user_throttle->", user_throttle)
                return user_angle, user_throttle, True
            elif mode == 'local_angle':
                return pilot_angle if pilot_angle else 0.0, user_throttle, True
            else:
                return (pilot_angle if pilot_angle else 0.0,
                        pilot_throttle * cfg.AI_THROTTLE_MULT
                        if pilot_throttle else 0.0,
                        True)

    V.add(DriveMode(),
          inputs=[
              'user/mode', 'user/angle', 'user/throttle', 'pilot/angle',
              'pilot/throttle'
          ],
          outputs=['angle', 'throttle', 'recording'])

    ###########################################################
    # 6) Drive train — the EMPV1 uses the DC_STEER_THROTTLE layout
    ###########################################################
    if cfg.DRIVE_TRAIN_TYPE == "DC_STEER_THROTTLE":
        from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
        from donkeycar.parts.actuator import RPi_GPIO_Servo
        # Steering is a GPIO servo; throttle is an H-bridge DC motor.
        steering = RPi_GPIO_Servo(cfg.HBRIDGE_PIN_RIGHT, verbose=False)
        throttle = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD,
                                             cfg.HBRIDGE_PIN_BWD)
        print(">>>>DC_STEER_THROTTLE")
        V.add(steering, inputs=['angle'])
        V.add(throttle, inputs=['throttle'])

    ###########################################################
    # 7) Tub writer: persist camera frames and control values
    ###########################################################
    inputs = ['cam/image_array', 'user/angle', 'user/throttle', 'user/mode']
    types = ['image_array', 'float', 'float', 'str']
    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
    V.add(tub, inputs=inputs, outputs=["tub/num_records"],
          run_condition='recording')

    ###########################################################
    # 8) Main loop, bounded by the CLI-provided loop count
    ###########################################################
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=maxloop)
def drive(cfg, model_path=None, use_joystick=False, use_chaos=False):
    """
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either
    it's run or run_threaded method depending on the constructor flag
    `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely
    manner.
    Parts may have named outputs and inputs. The framework handles passing
    named outputs to parts requesting the same named input.

    :param cfg: donkeycar config object.
    :param model_path: optional Keras model weights for the autopilot.
    :param use_joystick: force joystick control instead of the web UI.
    :param use_chaos: passed through to LocalWebController.
    """
    V = dk.vehicle.Vehicle()

    clock = Timestamp()
    # Fix: outputs must be a list of channel names. The original passed
    # the bare string 'timestamp', which list-oriented Memory/Vehicle
    # bookkeeping would treat as a sequence of single characters; the
    # tub below reads the 'timestamp' channel by name.
    V.add(clock, outputs=['timestamp'])

    cam = PiCamera(resolution=cfg.CAMERA_RESOLUTION)
    V.add(cam, outputs=['cam/image_array'], threaded=True)

    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        ctr = JoystickController(
            max_throttle=cfg.JOYSTICK_MAX_THROTTLE,
            steering_scale=cfg.JOYSTICK_STEERING_SCALE,
            auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
        V.add(ctr,
              inputs=['cam/image_array'],
              outputs=['user/angle', 'user/throttle', 'user/mode',
                       'recording', 'pan', 'tilt'],
              threaded=True)

        # Optional pan/tilt camera gimbal driven from the joystick.
        if cfg.USE_PAN:
            pan_controller = PCA9685(cfg.PAN_CHANNEL)
            pan = PWMPanTilt(controller=pan_controller,
                             left_pulse=cfg.PAN_LEFT_PWM,
                             right_pulse=cfg.PAN_RIGHT_PWM,
                             dir=cfg.PAN_DIR)
            V.add(pan, inputs=['pan'])
        if cfg.USE_TILT:
            tilt_controller = PCA9685(cfg.TILT_CHANNEL)
            tilt = PWMPanTilt(controller=tilt_controller,
                              left_pulse=cfg.TILT_DOWN_PWM,
                              right_pulse=cfg.TILT_UP_PWM,
                              dir=cfg.TILT_DIR)
            V.add(tilt, inputs=['tilt'])
    else:
        # This web controller will create a web server that is capable
        # of managing steering, throttle, and modes, and more.
        ctr = LocalWebController(use_chaos=use_chaos)
        V.add(ctr,
              inputs=['cam/image_array'],
              outputs=['user/angle', 'user/throttle', 'user/mode',
                       'recording'],
              threaded=True)

    # See if we should even run the pilot module.
    # This is only needed because the part run_condition only accepts a
    # boolean channel.
    def pilot_condition(mode):
        if mode == 'user':
            return False
        else:
            return True

    pilot_condition_part = Lambda(pilot_condition)
    V.add(pilot_condition_part, inputs=['user/mode'], outputs=['run_pilot'])

    # Run the pilot if the mode is not user.
    kl = KerasCategorical()
    if model_path:
        kl.load(model_path)
    V.add(kl,
          inputs=['cam/image_array'],
          outputs=['pilot/angle', 'pilot/throttle'],
          run_condition='run_pilot')

    # Choose what inputs should change the car.
    def drive_mode(mode, user_angle, user_throttle,
                   pilot_angle, pilot_throttle):
        if mode == 'user':
            return user_angle, user_throttle
        elif mode == 'local_angle':
            return pilot_angle, user_throttle
        else:
            return pilot_angle, pilot_throttle

    drive_mode_part = Lambda(drive_mode)
    V.add(drive_mode_part,
          inputs=['user/mode', 'user/angle', 'user/throttle',
                  'pilot/angle', 'pilot/throttle'],
          outputs=['angle', 'throttle'])

    steering = PimouseSteering()
    # NOTE(review): PimouseThrottle is constructed with controller=None —
    # presumably it drives its motors directly; confirm against the part.
    throttle_controller = None
    throttle = PimouseThrottle(controller=throttle_controller,
                               max_pulse=cfg.THROTTLE_FORWARD_PWM,
                               zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                               min_pulse=cfg.THROTTLE_REVERSE_PWM)

    V.add(steering, inputs=['angle'])
    V.add(throttle, inputs=['throttle'])

    # add tub to save data
    inputs = ['cam/image_array', 'user/angle', 'user/throttle', 'user/mode',
              'timestamp']
    types = ['image_array', 'float', 'float', 'str', 'str']
    if not cfg.USE_SINGLE_TUB:
        # multiple tubs: a fresh tub directory per session
        th = TubHandler(path=cfg.DATA_PATH)
        tub = th.new_tub_writer(inputs=inputs, types=types)
    else:
        # single tub: always append to the configured path
        tub = TubWriter(path=cfg.TUB_PATH, inputs=inputs, types=types)
    V.add(tub, inputs=inputs, run_condition='recording')

    # run the vehicle
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
def drive(cfg, model_path=None, use_joystick=False, use_tx=False):
    # This function rebinds four module-level globals: the throttle part,
    # the active controller, the vehicle itself, and (read-only here) the
    # myConfig dict that drives model/actuator selection.
    global myConfig
    global throttle
    global ctr
    global V
    '''
    Start the drive loop
    Each part runs as a job in the Vehicle loop, calling either
    it's run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named
    outputs to parts requesting the same named input.
    '''

    # Initialize car
    V = dk.vehicle.Vehicle()

    # Live config watcher + logger configured from the myConfig debug section.
    configCtrl = ConfigController(cfg.CONFIG_PATH)
    logger = logging.getLogger(myConfig['DEBUG']['PARTS']['MAIN']['NAME'])
    logger.setLevel(CONFIG2LEVEL[myConfig['DEBUG']['PARTS']['MAIN']['LEVEL']])
    V.add(configCtrl, threaded=True)

    def get_tsc():
        # Millisecond wall-clock timestamp recorded with every tub row.
        return int(round(time.time() * 1000))

    logger.info("Init timestamper")
    get_tsc_part = Lambda(get_tsc)
    V.add(get_tsc_part, outputs=['ms'])

    logger.info("Init Cam part")
    if cfg.USE_WEB_CAMERA:
        cam = Webcam(resolution=cfg.CAMERA_RESOLUTION,
                     fps=cfg.CAMERA_FPS,
                     framerate=cfg.CAMERA_FRAMERATE)
    else:
        cam = PiCamera(resolution=cfg.CAMERA_RESOLUTION)
    V.add(cam, outputs=['cam/image_array'], threaded=True)

    logger.info("Init Controller part")
    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        # modify max_throttle closer to 1.0 to have more power
        # modify steering_scale lower than 1.0 to have less responsive steering
        ctr = JoystickController(
            max_throttle=cfg.JOYSTICK_MAX_THROTTLE,
            steering_scale=cfg.JOYSTICK_STEERING_SCALE,
            auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE,
            throttle_axis=cfg.JOYSTICK_THROTTLE_AXIS,
            steering_axis=cfg.JOYSTICK_STEERING_AXIS,
            btn_mode=cfg.JOYSTICK_DRIVING_MODE_BUTTON,
            btn_record_toggle=cfg.JOYSTICK_RECORD_TOGGLE_BUTTON,
            btn_inc_max_throttle=cfg.JOYSTICK_INCREASE_MAX_THROTTLE_BUTTON,
            btn_dec_max_throttle=cfg.JOYSTICK_DECREASE_MAX_THROTTLE_BUTTON,
            btn_inc_throttle_scale=cfg.JOYSTICK_INCREASE_THROTTLE_SCALE_BUTTON,
            btn_dec_throttle_scale=cfg.JOYSTICK_DECREASE_THROTTLE_SCALE_BUTTON,
            btn_inc_steer_scale=cfg.JOYSTICK_INCREASE_STEERING_SCALE_BUTTON,
            btn_dec_steer_scale=cfg.JOYSTICK_DECREASE_STEERING_SCALE_BUTTON,
            btn_toggle_const_throttle=cfg.JOYSTICK_TOGGLE_CONSTANT_THROTTLE_BUTTON,
            verbose=cfg.JOYSTICK_VERBOSE)
        V.add(
            ctr,
            inputs=['cam/image_array', 'pilot/annoted_img'],
            outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
            threaded=True)
    elif use_tx or cfg.USE_TX_AS_DEFAULT:
        #This is Tx controller (pilot Donkey from a RC Tx transmiter/receiver)
        ctr = TxController(verbose=cfg.TX_VERBOSE)
        V.add(ctr,
              inputs=[
                  'user/mode', 'vehicle_armed', 'cam/image_array',
                  'pilot/annoted_img'
              ],
              outputs=[
                  'user/angle', 'user/throttle', 'recording', 'lane', 'ch5',
                  'ch6', 'speedometer', 'sensor_left', 'sensor_right'
              ],
              threaded=True)
        # Auxiliary Tx channels (ch5/ch6) toggle mode, arming and recording.
        actionctr = TxAuxCh()
        V.add(actionctr,
              inputs=['user/mode', 'vehicle_armed', 'ch5', 'ch6', 'recording'],
              outputs=['user/mode', 'vehicle_armed', 'flag', 'recording'],
              threaded=False)
    else:
        #This web controller will create a web server that is capable
        #of managing steering, throttle, and modes, and more.
        ctr = LocalWebController()
        V.add(
            ctr,
            inputs=['cam/image_array', 'pilot/annoted_img'],
            outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
            threaded=True)

    if cfg.USE_THROTTLEINLINE:
        # Vision part that flags straight-line sections so the pilot
        # throttle can be boosted in drive_mode below.
        logger.info("Init throttleInLine part")
        throttleinline = ThrottleInLine(cfg.THROTTLEINLINE_ANGLE_MIN,
                                        cfg.THROTTLEINLINE_ANGLE_MAX)
        V.add(throttleinline,
              inputs=['cam/image_array'],
              outputs=['pilot/throttle_boost', 'pilot/annoted_img'],
              threaded=True)

    logger.info("Init emergency part")
    # May override user/mode (e.g. force a stop).
    emergencyCtrl = EmergencyController()
    V.add(emergencyCtrl,
          inputs=['user/mode'],
          outputs=['user/mode'],
          threaded=True)

    # Performance statistics dumper.
    perfMngt = dumpPerf()
    V.add(perfMngt, inputs=['user/mode'], threaded=False)

    # See if we should even run the pilot module.
    # This is only needed because the part run_condition only accepts boolean
    def pilot_condition(mode):
        if mode == 'user':
            return False
        else:
            return True

    logger.info("Init pilot part")
    pilot_condition_part = Lambda(pilot_condition)
    V.add(pilot_condition_part, inputs=['user/mode'], outputs=['run_pilot'])

    logger.info("Init Model part")
    # Run the pilot if the mode is not user and not Tx.
    # Model variant is chosen by the myConfig MODEL_IN_USE switch.
    # NOTE(review): if MODEL_IN_USE is neither 0 nor 1, kl is never bound
    # and kl.load(...) below raises NameError — confirm the valid range.
    if (myConfig['MODEL']['MODEL_IN_USE'] == 0):
        kl = KerasCategorical()
    if (myConfig['MODEL']['MODEL_IN_USE'] == 1):
        kl = KerasCategorical1()
    #kl = KerasLinear()
    if model_path:
        if (os.path.exists(model_path)):
            logger.info("IA : Load integrated model")
            kl.load(model_path)
        else:
            # Model reconstruction from JSON file
            logger.info("IA : Load Weights + Model Architecture model")
            kl.load2(model_path)
    if (myConfig['MODEL']['MODEL_IN_USE'] == 0):
        V.add(kl,
              inputs=['cam/image_array'],
              outputs=[
                  'pilot/angle', 'pilot/throttle', 'pilot/fullspeed',
                  'pilot/lane', 'pilot/angle_bind'
              ],
              run_condition='run_pilot')
    if (myConfig['MODEL']['MODEL_IN_USE'] == 1):
        # Variant 1 additionally consumes the speedometer channel and
        # outputs a brake signal instead of a lane estimate.
        V.add(kl,
              inputs=['cam/image_array', 'speedometer'],
              outputs=[
                  'pilot/angle', 'pilot/throttle', 'pilot/fullspeed',
                  'pilot/brake', 'pilot/angle_bind'
              ],
              run_condition='run_pilot')

    # Choose what inputs should change the car.
    def drive_mode(mode, user_angle, user_throttle, pilot_angle,
                   pilot_throttle, throttle_boost):
        if mode == 'user':
            return user_angle, user_throttle
        else:
            # Straight-line boost from the ThrottleInLine part, if enabled.
            if cfg.USE_THROTTLEINLINE:
                if throttle_boost:
                    pilot_throttle = pilot_throttle * cfg.THROTTLEINLINE_BOOST_FACTOR
                    logger.debug("Apply Boost")
            if mode == 'local_angle':
                return pilot_angle, user_throttle
            else:
                logger.debug(
                    'drive_mode: Pilot return angle={:01.2f} throttle={:01.2f}'
                    .format(pilot_angle, pilot_throttle))
                # Post-pilot steering trim, applied per side beyond the
                # configured thresholds.
                if (pilot_angle > myConfig['POST_PILOT']['STEERING_TRIM_RIGHT_THRES']):
                    pilot_angle = pilot_angle * myConfig['POST_PILOT'][
                        'STEERING_TRIM_RIGHT_FACTOR']
                if (pilot_angle < -myConfig['POST_PILOT']['STEERING_TRIM_LEFT_THRES']):
                    pilot_angle = pilot_angle * myConfig['POST_PILOT'][
                        'STEERING_TRIM_LEFT_FACTOR']
                return pilot_angle, pilot_throttle

    drive_mode_part = Lambda(drive_mode)
    V.add(drive_mode_part,
          inputs=[
              'user/mode', 'user/angle', 'user/throttle', 'pilot/angle',
              'pilot/throttle', 'pilot/throttle_boost'
          ],
          outputs=['angle', 'throttle'])

    if cfg.USE_PWM_ACTUATOR:
        logger.info("Init Actuator part")
        # Controllers may be the serial controller itself (ctr) or PCA9685
        # PWM boards, per the myConfig ACTUATOR_CTRL_SERIAL switch.
        if myConfig['ACTUATOR']['ACTUATOR_CTRL_SERIAL'] == 1:
            steering_controller = ctr
        else:
            steering_controller = PCA9685(channel=cfg.STEERING_CHANNEL,
                                          busnum=cfg.STEERING_I2C_BUS)
        steering = PWMSteering(controller=steering_controller)
        if myConfig['ACTUATOR']['ACTUATOR_CTRL_SERIAL'] == 1:
            throttle_controller = ctr
        else:
            throttle_controller = PCA9685(channel=cfg.THROTTLE_CHANNEL,
                                          busnum=cfg.THROTTLE_I2C_BUS)
        throttle = PWMThrottle(controller=throttle_controller)
        V.add(steering, inputs=['angle'])
        # NOTE(review): the literal None below reserves an input slot for
        # this PWMThrottle variant (its run() receives None there) —
        # confirm against the part's signature.
        V.add(throttle,
              inputs=[
                  'throttle', 'user/mode', 'vehicle_armed', 'pilot/fullspeed',
                  None, 'pilot/lane', 'sensor_left', 'sensor_right'
              ])

    if cfg.BATTERY_USE_MONITOR:
        logger.info("Init Battery Monitor part")
        battery_controller = BatteryController(nbCells=cfg.BATTERY_NCELLS)
        V.add(battery_controller, outputs=['battery'], threaded=True)

    # add tub to save data
    inputs = [
        'cam/image_array', 'ms', 'user/angle', 'user/throttle', 'user/mode',
        'pilot/angle', 'pilot/throttle', 'flag', 'speedometer', 'lane'
    ]
    types = [
        'image_array', 'int', 'float', 'float', 'str', 'numpy.float32',
        'numpy.float32', 'str', 'float', 'int'
    ]
    logger.info("Init Tub Handler part")
    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types)
    V.add(tub, inputs=inputs, run_condition='recording')

    if use_tx or cfg.USE_TX_AS_DEFAULT:
        # First-person-view web stream of all pilot channels in Tx mode.
        fpv = FPVWebController()
        V.add(fpv,
              inputs=[
                  'cam/image_array', 'pilot/annoted_img', 'user/angle',
                  'user/throttle', 'user/mode', 'pilot/angle',
                  'pilot/throttle', 'pilot/throttle_boost', 'pilot/fullspeed',
                  'pilot/angle_bind'
              ],
              threaded=True)

    logger.info("Start main loop")
    # run the vehicle for 20 seconds
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
    print("You can now go to <your pi ip address>:8887 to drive your car.")
def drive(cfg, model_path=None, use_joystick=False, use_fuzzy=False):
    from donkeycar.parts.camera import PiCamera
    from donkeycar.parts.ultrasonic import Ultrasonic, MockUltrasonic, CacheUltrasonicClient
    from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle
    from donkeycar.parts.obstacle import Obstacle
    '''
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either its run
    or run_threaded method depending on the `threaded` constructor flag.
    All parts are updated one after another at cfg.DRIVE_LOOP_HZ, and the
    framework routes named outputs to parts requesting the same named input.
    '''
    # Vehicle loop container.
    V = dk.vehicle.Vehicle()

    # Front camera feeding the image channel.
    front_camera = PiCamera(resolution=cfg.CAMERA_RESOLUTION, name='front')
    V.add(front_camera, outputs=['cam/image_array'], threaded=True)

    # Three forward-facing ultrasonic range sensors, each polled on its
    # own thread every 0.05 s.
    sonar_specs = [
        ('front', cfg.ULTRASONIC_FRONT_TRIGGER,
         cfg.ULTRASONIC_FRONT_ECHO, 'ultrasonic_front/distance'),
        ('front_left', cfg.ULTRASONIC_FRONT_LEFT_TRIGGER,
         cfg.ULTRASONIC_FRONT_LEFT_ECHO, 'ultrasonic_front_left/distance'),
        ('front_right', cfg.ULTRASONIC_FRONT_RIGHT_TRIGGER,
         cfg.ULTRASONIC_FRONT_RIGHT_ECHO, 'ultrasonic_front_right/distance'),
    ]
    for sensor_name, trig_pin, echo_pin, channel in sonar_specs:
        sonar = CacheUltrasonicClient(gpio_trigger=trig_pin,
                                      gpio_echo=echo_pin,
                                      poll_delay=0.05,
                                      name=sensor_name)
        V.add(sonar, outputs=[channel], threaded=True)

    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        # modify max_throttle closer to 1.0 to have more power
        # modify steering_scale lower than 1.0 to have less responsive steering
        controller = JoystickController(
            max_throttle=cfg.JOYSTICK_MAX_THROTTLE,
            steering_scale=cfg.JOYSTICK_STEERING_SCALE,
            auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
    else:
        # Web server capable of managing steering, throttle and modes.
        controller = LocalWebController()

    V.add(controller,
          inputs=['cam/image_array',
                  'ultrasonic_front/distance',
                  'ultrasonic_front_left/distance',
                  'ultrasonic_front_right/distance',
                  'pilot/action',
                  'pilot/angle',
                  'pilot/throttle'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    # run_condition only accepts a boolean channel, hence this adapter.
    def not_user_mode(mode):
        return mode != 'user'

    V.add(Lambda(not_user_mode), inputs=['user/mode'], outputs=['run_pilot'])

    # Obstacle detection combining the camera and all three sonars.
    obstacle_detector = Obstacle()
    V.add(obstacle_detector,
          inputs=['cam/image_array',
                  'ultrasonic_front/distance',
                  'ultrasonic_front_left/distance',
                  'ultrasonic_front_right/distance'],
          outputs=['pilot/action'])

    # Autopilot: fuzzy or plain ultrasonic-aware Keras model, run only
    # when the vehicle is not in user mode.
    if use_fuzzy:
        pilot = KerasFuzzyAndUltrasonicSensors()
    else:
        pilot = KerasUltrasonicSensors()
    if model_path:
        pilot.load(model_path)
    V.add(pilot,
          inputs=['cam/image_array',
                  'ultrasonic_front/distance',
                  'ultrasonic_front_left/distance',
                  'ultrasonic_front_right/distance',
                  'pilot/action'],
          outputs=['pilot/angle', 'pilot/throttle'],
          run_condition='run_pilot',
          threaded=True)

    # Route either the user's or the pilot's commands to the actuators.
    def select_commands(mode, user_angle, user_throttle, pilot_angle,
                        pilot_throttle):
        if mode == 'user':
            return user_angle, user_throttle
        if mode == 'local_angle':
            return pilot_angle, user_throttle
        return pilot_angle, pilot_throttle

    V.add(Lambda(select_commands),
          inputs=['user/mode', 'user/angle', 'user/throttle',
                  'pilot/angle', 'pilot/throttle'],
          outputs=['angle', 'throttle'])

    # PWM drive train.
    steering_pwm = PCA9685(cfg.STEERING_CHANNEL)
    steering = PWMSteering(controller=steering_pwm,
                           left_pulse=cfg.STEERING_LEFT_PWM,
                           right_pulse=cfg.STEERING_RIGHT_PWM)
    throttle_pwm = PCA9685(cfg.THROTTLE_CHANNEL)
    throttle = PWMThrottle(controller=throttle_pwm,
                           max_pulse=cfg.THROTTLE_FORWARD_PWM,
                           zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                           min_pulse=cfg.THROTTLE_REVERSE_PWM)
    V.add(steering, inputs=['angle'])
    V.add(throttle, inputs=['throttle'])

    # Tub recording of camera, sonar and user channels while recording.
    record_keys = ['cam/image_array',
                   'ultrasonic_front/distance',
                   'ultrasonic_front_left/distance',
                   'ultrasonic_front_right/distance',
                   'user/angle', 'user/throttle', 'user/mode']
    record_types = ['image_array', 'float', 'float', 'float',
                    'float', 'float', 'str']
    tub = TubHandler(path=cfg.DATA_PATH).new_tub_writer(inputs=record_keys,
                                                        types=record_types)
    V.add(tub, inputs=record_keys, run_condition='recording')

    # run the vehicle
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
    print("You can now go to <your pi ip address>:8887 to drive your car.")
def drive(cfg, model_path=None, model_type=None):
    """Assemble and run a joystick-driven vehicle with an optional pilot.

    Uses a NickCamera (a tiny 100x20 grayscale frame for the 'streamline'
    model type), a custom joystick controller, an optional Keras pilot
    loaded from an .h5 file, a SERVO_ESC drive train and a tub writer.
    """
    V = dk.vehicle.Vehicle()

    assert (cfg.CAMERA_TYPE == 'PICAM')
    from donkeycar.parts.camera import PiCamera

    # 'streamline' pilots consume a small grayscale frame.
    if model_type == 'streamline':
        cam = NickCamera(100, 20, True)
    else:
        cam = NickCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                         grayscale=False)
    V.add(cam, outputs=['cam/image_array'], threaded=True)

    joystick_cls = MyJoystickController
    ctr = joystick_cls(throttle_scale=cfg.JOYSTICK_MAX_THROTTLE,
                       steering_scale=cfg.JOYSTICK_STEERING_SCALE,
                       auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
    V.add(ctr,
          inputs=['cam/image_array'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    # run_condition only accepts a boolean channel.
    def should_run_pilot(mode):
        return mode != 'user'

    V.add(Lambda(should_run_pilot),
          inputs=['user/mode'],
          outputs=['run_pilot'])

    if model_path:
        if model_type == 'streamline':
            kl = KerasStreamline()
        else:
            kl = dk.utils.get_model_by_type(model_type, cfg)
        assert ('.h5' in model_path)
        t0 = time.time()
        print('loading model', model_path)
        kl.load(model_path)
        print('finished loading in %s sec.' % (str(time.time() - t0)))
        V.add(kl,
              inputs=['cam/image_array'],
              outputs=['pilot/angle', 'pilot/throttle'],
              run_condition='run_pilot')

    # Pick which command pair actually moves the car.
    def choose_commands(mode, user_angle, user_throttle, pilot_angle,
                        pilot_throttle):
        if mode == 'user':
            return user_angle, user_throttle
        if mode == 'local_angle':
            return pilot_angle, user_throttle
        return pilot_angle, pilot_throttle

    V.add(Lambda(choose_commands),
          inputs=['user/mode', 'user/angle', 'user/throttle',
                  'pilot/angle', 'pilot/throttle'],
          outputs=['angle', 'throttle'])

    assert (cfg.DRIVE_TRAIN_TYPE == 'SERVO_ESC')
    from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle

    # Servo steering + ESC throttle on the same PCA9685 board.
    steer_bus = PCA9685(cfg.STEERING_CHANNEL, cfg.PCA9685_I2C_ADDR,
                        busnum=cfg.PCA9685_I2C_BUSNUM)
    steering = PWMSteering(controller=steer_bus,
                           left_pulse=cfg.STEERING_LEFT_PWM,
                           right_pulse=cfg.STEERING_RIGHT_PWM)
    esc_bus = PCA9685(cfg.THROTTLE_CHANNEL, cfg.PCA9685_I2C_ADDR,
                      busnum=cfg.PCA9685_I2C_BUSNUM)
    throttle = PWMThrottle(controller=esc_bus,
                           max_pulse=cfg.THROTTLE_FORWARD_PWM,
                           zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                           min_pulse=cfg.THROTTLE_REVERSE_PWM)
    V.add(steering, inputs=['angle'])
    V.add(throttle, inputs=['throttle'])

    # Record camera frames and user commands while recording is active.
    record_keys = ['cam/image_array', 'user/angle', 'user/throttle',
                   'user/mode']
    record_types = ['image_array', 'float', 'float', 'str']
    tub = TubHandler(path=cfg.DATA_PATH).new_tub_writer(inputs=record_keys,
                                                        types=record_types)
    V.add(tub, inputs=record_keys, outputs=['tub/num_records'],
          run_condition='recording')

    print('you can now move your joystick to drive your car')
    # Let the joystick controller erase/manage records in this tub.
    ctr.set_tub(tub)

    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
def drive(cfg, model_path=None, use_joystick=False, use_tx=False, use_pirf=False):
    '''
    Start the drive loop
    Each part runs as a job in the Vehicle loop, calling either its run
    or run_threaded method depending on the `threaded` constructor flag.
    All parts are updated one after another at cfg.DRIVE_LOOP_HZ, and the
    framework routes named outputs to parts requesting the same named input.
    '''
    # Initialize car
    V = dk.vehicle.Vehicle()

    # Camera source: USB webcam or PiCamera.
    if cfg.USE_WEB_CAMERA:
        camera = Webcam(resolution=cfg.CAMERA_RESOLUTION)
    else:
        camera = PiCamera(resolution=cfg.CAMERA_RESOLUTION)
    V.add(camera, outputs=['cam/image_array'], threaded=True)

    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        # Joystick control; tune max_throttle / steering_scale to taste.
        ctr = JoystickController(
            max_throttle=cfg.JOYSTICK_MAX_THROTTLE,
            steering_scale=cfg.JOYSTICK_STEERING_SCALE,
            auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE,
            throttle_axis=cfg.JOYSTICK_THROTTLE_AXIS,
            steering_axis=cfg.JOYSTICK_STEERING_AXIS,
            btn_mode=cfg.JOYSTICK_DRIVING_MODE_BUTTON,
            btn_record_toggle=cfg.JOYSTICK_RECORD_TOGGLE_BUTTON,
            btn_inc_max_throttle=cfg.JOYSTICK_INCREASE_MAX_THROTTLE_BUTTON,
            btn_dec_max_throttle=cfg.JOYSTICK_DECREASE_MAX_THROTTLE_BUTTON,
            btn_inc_throttle_scale=cfg.JOYSTICK_INCREASE_THROTTLE_SCALE_BUTTON,
            btn_dec_throttle_scale=cfg.JOYSTICK_DECREASE_THROTTLE_SCALE_BUTTON,
            btn_inc_steer_scale=cfg.JOYSTICK_INCREASE_STEERING_SCALE_BUTTON,
            btn_dec_steer_scale=cfg.JOYSTICK_DECREASE_STEERING_SCALE_BUTTON,
            btn_toggle_const_throttle=cfg.JOYSTICK_TOGGLE_CONSTANT_THROTTLE_BUTTON,
            verbose=cfg.JOYSTICK_VERBOSE)
    elif use_tx or cfg.USE_TX_AS_DEFAULT:
        # RC transmitter/receiver control, with an FPV web stream.
        ctr = TxController(throttle_tx_min=cfg.TX_THROTTLE_MIN,
                           throttle_tx_max=cfg.TX_THROTTLE_MAX,
                           steering_tx_min=cfg.TX_STEERING_MIN,
                           steering_tx_max=cfg.TX_STEERING_MAX,
                           throttle_tx_thresh=cfg.TX_THROTTLE_TRESH,
                           verbose=cfg.TX_VERBOSE)
        fpv = FPVWebController()
        V.add(fpv, inputs=['cam/image_array'], threaded=True)
    elif use_pirf or cfg.USE_PI_RF_AS_DEFAULT:
        # RC receiver wired straight to Pi GPIO pins, with an FPV stream.
        ctr = PiRfController(throttle_tx_min=cfg.PI_RF_THROTTLE_MIN,
                             throttle_tx_max=cfg.PI_RF_THROTTLE_MAX,
                             steering_tx_min=cfg.PI_RF_STEERING_MIN,
                             steering_tx_max=cfg.PI_RF_STEERING_MAX,
                             throttle_tx_thresh=cfg.PI_RF_THROTTLE_TRESH,
                             steering_pin=cfg.PI_RF_STEERING_PIN,
                             throttle_pin=cfg.PI_RF_THROTTLE_PIN,
                             verbose=cfg.PI_RF_VERBOSE)
        fpv = FPVWebController()
        V.add(fpv, inputs=['cam/image_array'], threaded=True)
    else:
        # Web server that manages steering, throttle and modes.
        ctr = LocalWebController()

    V.add(ctr,
          inputs=['cam/image_array'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    # The emergency part can override the drive mode (e.g. force a stop).
    emergencyCtrl = EmergencyController()
    V.add(emergencyCtrl,
          inputs=['user/mode'],
          outputs=['user/mode'],
          threaded=True)

    # run_condition only accepts a boolean channel, hence this adapter.
    def not_user(mode):
        return mode != 'user'

    V.add(Lambda(not_user), inputs=['user/mode'], outputs=['run_pilot'])

    if not use_tx and not use_pirf:
        # Run the Keras pilot whenever the mode is not user (and not Tx).
        kl = KerasCategorical()
        #kl = KerasLinear()
        if model_path:
            kl.load(model_path)
        V.add(kl,
              inputs=['cam/image_array'],
              outputs=['pilot/angle', 'pilot/throttle'],
              run_condition='run_pilot')

    # Select which command pair reaches the actuators.
    def pick_commands(mode, user_angle, user_throttle, pilot_angle,
                      pilot_throttle):
        if mode == 'user':
            return user_angle, user_throttle
        if mode == 'local_angle':
            return pilot_angle, user_throttle
        return pilot_angle, pilot_throttle

    V.add(Lambda(pick_commands),
          inputs=['user/mode', 'user/angle', 'user/throttle',
                  'pilot/angle', 'pilot/throttle'],
          outputs=['angle', 'throttle'])

    if cfg.USE_PWM_ACTUATOR:
        steering = PWMSteering(controller=PCA9685(cfg.STEERING_CHANNEL),
                               left_pulse=cfg.STEERING_LEFT_PWM,
                               right_pulse=cfg.STEERING_RIGHT_PWM)
        throttle = PWMThrottle(controller=PCA9685(cfg.THROTTLE_CHANNEL),
                               max_pulse=cfg.THROTTLE_FORWARD_PWM,
                               zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                               min_pulse=cfg.THROTTLE_REVERSE_PWM)
        V.add(steering, inputs=['angle'])
        # This throttle part also watches the mode channel.
        V.add(throttle, inputs=['throttle', 'user/mode'])

    # add tub to save data
    record_keys = ['cam/image_array', 'user/angle', 'user/throttle',
                   'user/mode', 'pilot/angle', 'pilot/throttle']
    record_types = ['image_array', 'float', 'float', 'str',
                    'numpy.float32', 'numpy.float32']
    tub = TubHandler(path=cfg.DATA_PATH).new_tub_writer(inputs=record_keys,
                                                        types=record_types)
    V.add(tub, inputs=record_keys, run_condition='recording')

    # run the vehicle for 20 seconds
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
    print("You can now go to <your pi ip address>:8887 to drive your car.")
def drive(cfg, args):
    '''
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either its run
    or run_threaded method depending on the `threaded` constructor flag.
    All parts are updated one after another at cfg.DRIVE_LOOP_HZ, and the
    framework routes named outputs to parts requesting the same named input.
    '''
    #Initialize car
    V = dk.vehicle.Vehicle()

    # Image source: the gym simulator (which also consumes the computed
    # steering/throttle commands) or a physical PiCamera.
    if cfg.DONKEY_GYM:
        from donkeycar.parts.dgym import DonkeyGymEnv
        cfg.GYM_CONF['racer_name'] = args['--name']
        cfg.GYM_CONF['car_name'] = args['--name']
        cam = DonkeyGymEnv(cfg.DONKEY_SIM_PATH, host=cfg.SIM_HOST,
                           env_name=cfg.DONKEY_GYM_ENV_NAME,
                           conf=cfg.GYM_CONF,
                           delay=cfg.SIM_ARTIFICIAL_LATENCY)
        cam_inputs = ['steering', 'throttle']
    else:
        from donkeycar.parts.camera import PiCamera
        cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                       image_d=cfg.IMAGE_DEPTH)
        cam_inputs = []
    V.add(cam, inputs=cam_inputs, outputs=['cam/image_array'], threaded=True)

    # Controller: the line follower derives steering/throttle/recording
    # directly from the camera image.
    V.add(LineFollower(),
          inputs=['cam/image_array'],
          outputs=['steering', 'throttle', 'recording'])

    # Physical drive train (skipped entirely in the simulator).
    if not cfg.DONKEY_GYM:
        from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle
        steer_ctrl = PCA9685(cfg.STEERING_CHANNEL, cfg.PCA9685_I2C_ADDR,
                             busnum=cfg.PCA9685_I2C_BUSNUM)
        steering = PWMSteering(controller=steer_ctrl,
                               left_pulse=cfg.STEERING_LEFT_PWM,
                               steering_zero_pulse=cfg.STEERING_STOPPED_PWM,
                               right_pulse=cfg.STEERING_RIGHT_PWM)
        throttle_ctrl = PCA9685(cfg.THROTTLE_CHANNEL, cfg.PCA9685_I2C_ADDR,
                                busnum=cfg.PCA9685_I2C_BUSNUM)
        throttle = PWMThrottle(controller=throttle_ctrl,
                               max_pulse=cfg.THROTTLE_FORWARD_PWM,
                               throttle_zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                               min_pulse=cfg.THROTTLE_REVERSE_PWM)
        V.add(steering, inputs=['steering'])
        V.add(throttle, inputs=['throttle'])

    # Tub that records frames and commands while 'recording' is set.
    record_keys = ['cam/image_array', 'steering', 'throttle']
    record_types = ['image_array', 'float', 'float']
    tub = TubHandler(path=cfg.DATA_PATH).new_tub_writer(inputs=record_keys,
                                                        types=record_types)
    V.add(tub, inputs=record_keys, outputs=["tub/num_records"],
          run_condition="recording")

    #run the vehicle
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
def drive(cfg, model_path=None, use_joystick=False):
    '''
    Construct a working robotic vehicle from many parts. Each part runs as a
    job in the Vehicle loop, calling either it's run or run_threaded method
    depending on the constructor flag `threaded`. All parts are updated one
    after another at the framerate given in cfg.DRIVE_LOOP_HZ assuming each
    part finishes processing in a timely manner. Parts may have named outputs
    and inputs. The framework handles passing named outputs to parts
    requesting the same named input.
    '''

    #Initialisation the CAR
    V = dk.vehicle.Vehicle()

    # Initialisation of the CAMERA
    # Camera implementation is selected per host OS and configured camera id.
    # NOTE(review): if cfg.OS is neither LINUX_OS nor MAC_OS, `cam` is never
    # bound and the V.add below raises NameError — confirm cfg.OS is always
    # one of the two.
    if cfg.OS == cfg.LINUX_OS:
        if cfg.CAMERA_ID == cfg.CAMERA_MACBOOK:
            # A MacBook webcam cannot be opened from a Linux host; bail out.
            print("Camera {} can't be used.".format(cfg.CAMERA_MACBOOK))
            sys.exit()
        elif cfg.CAMERA_ID == cfg.CAMERA_MOCK:
            cam = MockCamera()
        else:
            cam = PiCamera(camera=cfg.CAMERA_ID,
                           resolution=cfg.CAMERA_RESOLUTION)
    elif cfg.OS == cfg.MAC_OS:
        if cfg.CAMERA_ID == cfg.CAMERA_MACBOOK:
            cam = MacWebcam()
        else:
            cam = VideoStream(camera=cfg.CAMERA_ID,
                              framerate=cfg.DRIVE_LOOP_HZ)

    V.add(cam, outputs=['cam/image_array'], threaded=True)

    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        #modify max_throttle closer to 1.0 to have more power
        #modify steering_scale lower than 1.0 to have less responsive steering
        ctr = JoystickController(
            max_throttle=cfg.JOYSTICK_MAX_THROTTLE,
            steering_scale=cfg.JOYSTICK_STEERING_SCALE,
            auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
    else:
        #This web controller will create a web server that is capable
        #of managing steering, throttle, modes, and more.
        ctr = LocalWebController()

    if (cfg.LANE_DETECTOR):
        # When the lane detector is enabled the controller is fed the
        # lane-overlaid image instead of the raw camera frame.
        ctr_inputs = ['cam/image_overlaid_lanes', 'ld/runtime']

        #Lane Detector
        #Detects the lane the car is in and outputs a best estimate for the 2 lines
        lane_detector = LanesDetector(cfg.CAMERA_ID, cfg.LD_PARAMETERS)
        V.add(lane_detector,
              inputs=['cam/image_array', 'algorithm/reset'],
              outputs=['cam/image_overlaid_lanes', 'ld/runtime'])
    else:
        ctr_inputs = ['cam/image_array']

    V.add(
        ctr,
        ctr_inputs,
        outputs=[
            'user/angle',
            'user/throttle',
            'user/mode',
            'recording'
            #'algorithm/reset'
        ],
        threaded=True)

    #See if we should even run the pilot module.
    #This is only needed because the part run_condition only accepts boolean
    def pilot_condition(mode):
        if mode == 'user':
            return False
        else:
            return True

    pilot_condition_part = Lambda(pilot_condition)
    V.add(pilot_condition_part, inputs=['user/mode'], outputs=['run_pilot'])

    #Run the pilot if the mode is not user.
    kl = KerasLinear()
    if model_path:
        kl.load(model_path)

    V.add(kl,
          inputs=['cam/image_array'],
          outputs=['pilot/angle', 'pilot/throttle'],
          run_condition='run_pilot')

    #Choose what inputs should change the car.
    # Multiplexes user vs. pilot commands based on the drive mode.
    def drive_mode(mode, user_angle, user_throttle, pilot_angle,
                   pilot_throttle):
        if mode == 'user':
            return user_angle, user_throttle
        elif mode == 'local_angle':
            # Pilot steers, human controls speed.
            return pilot_angle, user_throttle
        else:
            return pilot_angle, pilot_throttle

    drive_mode_part = Lambda(drive_mode)
    V.add(drive_mode_part,
          inputs=[
              'user/mode', 'user/angle', 'user/throttle', 'pilot/angle',
              'pilot/throttle'
          ],
          outputs=['angle', 'throttle'])

    # Real PCA9685 hardware on Linux, mock controllers on macOS for dev.
    if cfg.OS == cfg.LINUX_OS:
        steering_controller = PCA9685(cfg.STEERING_CHANNEL)
        throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL)
    elif cfg.OS == cfg.MAC_OS:
        steering_controller = MockController()
        throttle_controller = MockController()

    steering = PWMSteering(controller=steering_controller,
                           left_pulse=cfg.STEERING_LEFT_PWM,
                           center_pulse=cfg.STEERING_CENTER_PWM,
                           right_pulse=cfg.STEERING_RIGHT_PWM)
    throttle = PWMThrottle(
        controller=throttle_controller,
        cal_max_pulse=cfg.THROTTLE_FORWARD_CAL_PWM,
        max_pulse=cfg.THROTTLE_FORWARD_PWM,
        cal_min_pulse=cfg.THROTTLE_REVERSE_CAL_PWM,
        min_pulse=cfg.THROTTLE_REVERSE_PWM,
        zero_pulse=cfg.THROTTLE_STOPPED_PWM,
    )

    V.add(steering, inputs=['angle'])
    V.add(throttle, inputs=['throttle'])

    #add tub to save data
    inputs = ['cam/image_array', 'user/angle', 'user/throttle', 'user/mode']
    types = ['image_array', 'float', 'float', 'str']

    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types)
    V.add(tub, inputs=inputs, run_condition='recording')

    print("You can now go to <your pi ip address>:8887 to drive your car.")

    #run the vehicle for 20 seconds
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
def drive(cfg,
          model_path=None,
          model_path_1=None,
          use_joystick=False,
          model_type=None,
          camera_type='single',
          meta=None):
    '''
    Construct a working robotic vehicle from many parts. Each part runs as a
    job in the Vehicle loop, calling either it's run or run_threaded method
    depending on the constructor flag `threaded`. All parts are updated one
    after another at the framerate given in cfg.DRIVE_LOOP_HZ assuming each
    part finishes processing in a timely manner. Parts may have named outputs
    and inputs. The framework handles passing named outputs to parts
    requesting the same named input.

    This variant can run TWO pilot models (model_path / model_path_1); the
    `Detect` part selects between them at runtime via the 'match' channel.
    '''
    # BUGFIX: `meta=[]` was a shared mutable default argument; use a None
    # sentinel instead (backward compatible: callers passing a list are
    # unaffected).
    if meta is None:
        meta = []

    if cfg.DONKEY_GYM:
        #the simulator will use cuda and then we usually run out of resources
        #if we also try to use cuda. so disable for donkey_gym.
        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

    if model_type is None:
        if cfg.TRAIN_LOCALIZER:
            model_type = "localizer"
        elif cfg.TRAIN_BEHAVIORS:
            model_type = "behavior"
        else:
            model_type = cfg.DEFAULT_MODEL_TYPE

    #Initialize car
    V = dk.vehicle.Vehicle()

    print("cfg.CAMERA_TYPE", cfg.CAMERA_TYPE)
    if camera_type == "stereo":
        if cfg.CAMERA_TYPE == "WEBCAM":
            from donkeycar.parts.camera import Webcam
            camA = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                          image_d=cfg.IMAGE_DEPTH, iCam=0)
            camB = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                          image_d=cfg.IMAGE_DEPTH, iCam=1)
        elif cfg.CAMERA_TYPE == "CVCAM":
            from donkeycar.parts.cv import CvCam
            camA = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH, iCam=0)
            camB = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH, iCam=1)
        else:
            raise (Exception("Unsupported camera type: %s" % cfg.CAMERA_TYPE))

        V.add(camA, outputs=['cam/image_array_a'], threaded=True)
        V.add(camB, outputs=['cam/image_array_b'], threaded=True)

        from donkeycar.parts.image import StereoPair
        V.add(StereoPair(),
              inputs=['cam/image_array_a', 'cam/image_array_b'],
              outputs=['cam/image_array'])
    elif cfg.CAMERA_TYPE == "D435":
        from donkeycar.parts.realsense435i import RealSense435i
        cam = RealSense435i(enable_rgb=cfg.REALSENSE_D435_RGB,
                            enable_depth=cfg.REALSENSE_D435_DEPTH,
                            enable_imu=cfg.REALSENSE_D435_IMU,
                            device_id=cfg.REALSENSE_D435_ID)
        V.add(cam,
              inputs=[],
              outputs=[
                  'cam/image_array', 'cam/depth_array', 'imu/acl_x',
                  'imu/acl_y', 'imu/acl_z', 'imu/gyr_x', 'imu/gyr_y',
                  'imu/gyr_z'
              ],
              threaded=True)
    # capture the camera image as an array (translated comment)
    else:
        inputs = []
        threaded = True
        # (removed a redundant duplicate DonkeyGymEnv import that preceded
        # this branch in the original)
        if cfg.DONKEY_GYM:
            from donkeycar.parts.dgym import DonkeyGymEnv
            cam = DonkeyGymEnv(cfg.DONKEY_SIM_PATH,
                               host=cfg.SIM_HOST,
                               env_name=cfg.DONKEY_GYM_ENV_NAME,
                               conf=cfg.GYM_CONF,
                               delay=cfg.SIM_ARTIFICIAL_LATENCY)
            threaded = True
            inputs = ['angle', 'throttle']
        elif cfg.CAMERA_TYPE == "PICAM":
            from donkeycar.parts.camera import PiCamera
            cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                           image_d=cfg.IMAGE_DEPTH,
                           framerate=cfg.CAMERA_FRAMERATE,
                           vflip=cfg.CAMERA_VFLIP, hflip=cfg.CAMERA_HFLIP)
        elif cfg.CAMERA_TYPE == "WEBCAM":
            from donkeycar.parts.camera import Webcam
            cam = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "CVCAM":
            from donkeycar.parts.cv import CvCam
            cam = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                        image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "CSIC":
            from donkeycar.parts.camera import CSICamera
            cam = CSICamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                            image_d=cfg.IMAGE_DEPTH,
                            framerate=cfg.CAMERA_FRAMERATE,
                            gstreamer_flip=cfg.CSIC_CAM_GSTREAMER_FLIP_PARM)
        elif cfg.CAMERA_TYPE == "V4L":
            from donkeycar.parts.camera import V4LCamera
            cam = V4LCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                            image_d=cfg.IMAGE_DEPTH,
                            framerate=cfg.CAMERA_FRAMERATE)
        elif cfg.CAMERA_TYPE == "MOCK":
            from donkeycar.parts.camera import MockCamera
            cam = MockCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                             image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "IMAGE_LIST":
            from donkeycar.parts.camera import ImageListCamera
            cam = ImageListCamera(path_mask=cfg.PATH_MASK)
        else:
            raise (Exception("Unkown camera type: %s" % cfg.CAMERA_TYPE))

        V.add(cam, inputs=inputs, outputs=['cam/image_array'],
              threaded=threaded)

    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        #modify max_throttle closer to 1.0 to have more power
        #modify steering_scale lower than 1.0 to have less responsive steering
        if cfg.CONTROLLER_TYPE == "MM1":
            from donkeycar.parts.robohat import RoboHATController
            ctr = RoboHATController(cfg)
        elif "custom" == cfg.CONTROLLER_TYPE:
            #
            # custom controller created with `donkey createjs` command
            #
            from my_joystick import MyJoystickController
            ctr = MyJoystickController(
                throttle_dir=cfg.JOYSTICK_THROTTLE_DIR,
                throttle_scale=cfg.JOYSTICK_MAX_THROTTLE,
                steering_scale=cfg.JOYSTICK_STEERING_SCALE,
                auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
            ctr.set_deadzone(cfg.JOYSTICK_DEADZONE)
        else:
            from donkeycar.parts.controller import get_js_controller
            ctr = get_js_controller(cfg)
            if cfg.USE_NETWORKED_JS:
                from donkeycar.parts.controller import JoyStickSub
                netwkJs = JoyStickSub(cfg.NETWORK_JS_SERVER_IP)
                V.add(netwkJs, threaded=True)
                ctr.js = netwkJs

        V.add(ctr,
              inputs=['cam/image_array'],
              outputs=['user/angle', 'user/throttle', 'user/mode',
                       'recording'],
              threaded=True)
    else:
        #This web controller will create a web server that is capable
        #of managing steering, throttle, and modes, and more.
        ctr = LocalWebController(port=cfg.WEB_CONTROL_PORT,
                                 mode=cfg.WEB_INIT_MODE)
        V.add(ctr,
              inputs=['cam/image_array', 'tub/num_records'],
              outputs=['user/angle', 'user/throttle', 'user/mode',
                       'recording'],
              threaded=True)

    #this throttle filter will allow one tap back for esc reverse
    th_filter = ThrottleFilter()
    V.add(th_filter, inputs=['user/throttle'], outputs=['user/throttle'])

    #See if we should even run the pilot module.
    #This is only needed because the part run_condition only accepts boolean
    class PilotCondition:
        def run(self, mode):
            return mode != 'user'

    V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])

    class LedConditionLogic:
        '''Maps vehicle state to an LED blink rate
        (0 = off, -1 = solid on, positive = blink rate).'''

        def __init__(self, cfg):
            self.cfg = cfg

        def run(self, mode, recording, recording_alert, behavior_state,
                model_file_changed, track_loc):
            #returns a blink rate. 0 for off. -1 for on. positive for rate.

            if track_loc is not None:
                led.set_rgb(*self.cfg.LOC_COLORS[track_loc])
                return -1

            if model_file_changed:
                led.set_rgb(self.cfg.MODEL_RELOADED_LED_R,
                            self.cfg.MODEL_RELOADED_LED_G,
                            self.cfg.MODEL_RELOADED_LED_B)
                return 0.1
            else:
                led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)

            if recording_alert:
                led.set_rgb(*recording_alert)
                return self.cfg.REC_COUNT_ALERT_BLINK_RATE
            else:
                led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)

            if behavior_state is not None and model_type == 'behavior':
                r, g, b = self.cfg.BEHAVIOR_LED_COLORS[behavior_state]
                led.set_rgb(r, g, b)
                return -1  #solid on

            if recording:
                return -1  #solid on
            elif mode == 'user':
                return 1
            elif mode == 'local_angle':
                return 0.5
            elif mode == 'local':
                return 0.1
            return 0

    if cfg.HAVE_RGB_LED and not cfg.DONKEY_GYM:
        from donkeycar.parts.led_status import RGB_LED
        led = RGB_LED(cfg.LED_PIN_R, cfg.LED_PIN_G, cfg.LED_PIN_B,
                      cfg.LED_INVERT)
        led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B)

        V.add(LedConditionLogic(cfg),
              inputs=[
                  'user/mode', 'recording', "records/alert",
                  'behavior/state', 'modelfile/modified', "pilot/loc"
              ],
              outputs=['led/blink_rate'])
        V.add(led, inputs=['led/blink_rate'])

    def get_record_alert_color(num_records):
        '''Return the highest-threshold alert color reached by num_records.'''
        col = (0, 0, 0)
        for count, color in cfg.RECORD_ALERT_COLOR_ARR:
            if num_records >= count:
                col = color
        return col

    class RecordTracker:
        '''Prints record-count progress and emits a temporary alert color.'''

        def __init__(self):
            self.last_num_rec_print = 0
            self.dur_alert = 0
            self.force_alert = 0

        def run(self, num_records):
            if num_records is None:
                return 0

            if self.last_num_rec_print != num_records or self.force_alert:
                self.last_num_rec_print = num_records

                if num_records % 10 == 0:
                    print("recorded", num_records, "records")

                if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert:
                    self.dur_alert = num_records // cfg.REC_COUNT_ALERT * cfg.REC_COUNT_ALERT_CYC
                    self.force_alert = 0

            if self.dur_alert > 0:
                self.dur_alert -= 1

            if self.dur_alert != 0:
                return get_record_alert_color(num_records)

            return 0

    rec_tracker_part = RecordTracker()
    V.add(rec_tracker_part,
          inputs=["tub/num_records"],
          outputs=['records/alert'])

    if cfg.AUTO_RECORD_ON_THROTTLE and isinstance(ctr, JoystickController):
        #then we are not using the circle button. hijack that to force a record count indication
        def show_record_acount_status():
            rec_tracker_part.last_num_rec_print = 0
            rec_tracker_part.force_alert = 1

        ctr.set_button_down_trigger('circle', show_record_acount_status)

    #Sombrero
    if cfg.HAVE_SOMBRERO:
        from donkeycar.parts.sombrero import Sombrero
        s = Sombrero()

    #IMU
    if cfg.HAVE_IMU:
        from donkeycar.parts.imu import IMU
        imu = IMU(sensor=cfg.IMU_SENSOR, dlp_setting=cfg.IMU_DLP_CONFIG)
        V.add(imu,
              outputs=[
                  'imu/acl_x', 'imu/acl_y', 'imu/acl_z', 'imu/gyr_x',
                  'imu/gyr_y', 'imu/gyr_z'
              ],
              threaded=True)

    class ImgPreProcess():
        '''
        preprocess camera image for inference.
        normalize and crop if needed.
        '''

        def __init__(self, cfg):
            self.cfg = cfg

        def run(self, img_arr):
            return normalize_and_crop(img_arr, self.cfg)

    # Object-detection part: template-matches "gate" pictures in the camera
    # frame and latches a 0/1 switch used to select between the two pilots.
    class Detect():
        def __init__(self, cfg):
            self.switch = 0
            self.cfg = cfg

        def run(self, img):
            hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            lower_green = np.array([35, 100, 46])
            upper_green = np.array([77, 255, 255])
            mask = cv2.inRange(hsv, lower_green, upper_green)
            masked = cv2.bitwise_and(img, img, mask=mask)
            # NOTE(review): templates are re-read from disk on every frame;
            # consider loading once in __init__ (left unchanged here).
            template1 = cv2.imread('/home/pi/mycar/picture/GandB.jpg', 1)
            template2 = cv2.imread('/home/pi/mycar/picture/green2.jpg', 1)
            temp_b = cv2.cvtColor(template1, cv2.COLOR_BGR2GRAY)
            res1 = cv2.matchTemplate(img, template1, cv2.TM_CCOEFF_NORMED)
            min_val1, max_val1, min_loc1, max_loc1 = cv2.minMaxLoc(res1)
            res2 = cv2.matchTemplate(img, template2, cv2.TM_CCOEFF_NORMED)
            min_val2, max_val2, min_loc2, max_loc2 = cv2.minMaxLoc(res2)
            # entering gate -> 1, leaving gate -> 0; otherwise keep last state
            if max_val1 > 0.65 and max_val2 < 0.65:
                self.switch = 1
                print("\tmatch-1\t", max_val1, max_val2)
                return self.switch
            elif max_val1 < 0.65 and max_val2 > 0.65:
                self.switch = 0
                print("\tmatch-2\t", max_val1, max_val2)
                return self.switch
            else:
                print(max_val1, '---', max_val2)
                return self.switch

    # Coral models consume the raw frame; everything else gets the
    # normalized/cropped image produced by ImgPreProcess.
    if "coral" in model_type:
        inf_input = 'cam/image_array'
    else:
        inf_input = 'cam/normalized/cropped'
        V.add(ImgPreProcess(cfg),
              inputs=['cam/image_array'],
              outputs=[inf_input],
              run_condition='run_pilot')

    # Run Detect only while the pilot is active; its 'match' output selects
    # which model MyPilot forwards.
    V.add(Detect(cfg),
          inputs=['cam/image_array'],
          outputs=['match'],
          run_condition='run_pilot')

    # Use the FPV preview, which will show the cropped image output, or the full frame.
    if cfg.USE_FPV:
        V.add(WebFpv(), inputs=['cam/image_array'], threaded=True)

    #Behavioral state
    if cfg.TRAIN_BEHAVIORS:
        bh = BehaviorPart(cfg.BEHAVIOR_LIST)
        V.add(bh,
              outputs=[
                  'behavior/state', 'behavior/label',
                  "behavior/one_hot_state_array"
              ])
        try:
            ctr.set_button_down_trigger('L1', bh.increment_state)
        except:
            pass

        inputs = [inf_input, "behavior/one_hot_state_array"]
    #IMU
    elif model_type == "imu":
        assert (cfg.HAVE_IMU)
        #Run the pilot if the mode is not user.
        inputs = [
            inf_input, 'imu/acl_x', 'imu/acl_y', 'imu/acl_z', 'imu/gyr_x',
            'imu/gyr_y', 'imu/gyr_z'
        ]
    else:
        inputs = [inf_input]

    def load_model(kl, model_path):
        '''Load a complete model file into keras part `kl`.'''
        start = time.time()
        print('loading model', model_path)
        kl.load(model_path)
        print('finished loading in %s sec.' % (str(time.time() - start)))

    def load_weights(kl, weights_path):
        '''Load a weights-only file into `kl`; failures are logged, not raised.'''
        start = time.time()
        try:
            print('loading model weights', weights_path)
            kl.model.load_weights(weights_path)
            print('finished loading in %s sec.' % (str(time.time() - start)))
        except Exception as e:
            print(e)
            print('ERR>> problems loading weights', weights_path)

    def load_model_json(kl, json_fnm):
        '''Rebuild the keras model architecture for `kl` from a .json file.'''
        start = time.time()
        print('loading model json', json_fnm)
        from tensorflow.python import keras
        try:
            with open(json_fnm, 'r') as handle:
                contents = handle.read()
                kl.model = keras.models.model_from_json(contents)
            print('finished loading json in %s sec.' %
                  (str(time.time() - start)))
        except Exception as e:
            print(e)
            print("ERR>> problems loading model json", json_fnm)

    model_reload_cb = None

    if model_path:
        #When we have a model, first create an appropriate Keras part
        kl = dk.utils.get_model_by_type(model_type, cfg)

        if '.h5' in model_path or '.uff' in model_path or 'tflite' in model_path or '.pkl' in model_path:
            #when we have a .h5 extension
            #load everything from the model file
            load_model(kl, model_path)

            def reload_model(filename):
                load_model(kl, filename)

            model_reload_cb = reload_model

        elif '.json' in model_path:
            #when we have a .json extension
            #load the model from there and look for a matching
            #.wts file with just weights
            load_model_json(kl, model_path)
            weights_path = model_path.replace('.json', '.weights')
            load_weights(kl, weights_path)

            def reload_weights(filename):
                weights_path = filename.replace('.json', '.weights')
                load_weights(kl, weights_path)

            model_reload_cb = reload_weights

        else:
            print("ERR>> Unknown extension type on model file!!")
            return

    if model_path_1:
        #Second pilot model, selected by Detect/MyPilot when match == 1.
        kl_1 = dk.utils.get_model_by_type(model_type, cfg)

        if '.h5' in model_path_1 or '.uff' in model_path_1 or 'tflite' in model_path_1 or '.pkl' in model_path_1:
            load_model(kl_1, model_path_1)
        elif '.json' in model_path_1:
            # BUGFIX: the original loaded this model's json and weights into
            # `kl`, silently overwriting the first model; load into `kl_1`.
            load_model_json(kl_1, model_path_1)
            weights_path = model_path_1.replace('.json', '.weights')
            load_weights(kl_1, weights_path)
        else:
            print("ERR>> Unknown extension type on model file!!")
            return
        # NOTE: only model_path is file-watched below; we deliberately do NOT
        # overwrite model_reload_cb here (the original did, pointing the
        # reload callback at the wrong model).

    if model_path:
        #this part will signal visual LED, if connected
        V.add(FileWatcher(model_path, verbose=True),
              outputs=['modelfile/modified'])

        #these parts will reload the model file, but only when ai is running
        #so we don't interrupt user driving
        V.add(FileWatcher(model_path),
              outputs=['modelfile/dirty'],
              run_condition="ai_running")
        V.add(DelayedTrigger(100),
              inputs=['modelfile/dirty'],
              outputs=['modelfile/reload'],
              run_condition="ai_running")
        V.add(TriggeredCallback(model_path, model_reload_cb),
              inputs=["modelfile/reload"],
              run_condition="ai_running")

        outputs_0 = ['pilot/angle_0', 'pilot/throttle_0']
        if cfg.TRAIN_LOCALIZER:
            # BUGFIX: the original appended to an undefined name `outputs`
            # (NameError); the localizer head belongs to the primary model,
            # whose 'pilot/loc' output feeds LedConditionLogic.
            outputs_0.append("pilot/loc")

        # get steering/throttle from the first model
        V.add(kl, inputs=inputs, outputs=outputs_0, run_condition='run_pilot')

    if model_path_1:
        # BUGFIX: guarded — the original added kl_1 unconditionally and raised
        # NameError whenever model_path_1 was not supplied.
        outputs_1 = ['pilot/angle_1', 'pilot/throttle_1']
        V.add(kl_1, inputs=inputs, outputs=outputs_1,
              run_condition='run_pilot')

    if cfg.STOP_SIGN_DETECTOR:
        from donkeycar.parts.object_detector.stop_sign_detector import StopSignDetector
        # NOTE(review): 'pilot/throttle' is not produced by either model in
        # this dual-model template (they emit pilot/throttle_0/_1) — confirm
        # the intended wiring before enabling STOP_SIGN_DETECTOR.
        V.add(StopSignDetector(cfg.STOP_SIGN_MIN_SCORE,
                               cfg.STOP_SIGN_SHOW_BOUNDING_BOX),
              inputs=['cam/image_array', 'pilot/throttle'],
              outputs=['pilot/throttle', 'cam/image_array'])

    # Model selector: forwards the output of model 0 or model 1 depending on
    # the Detect part's 'match' flag.
    class MyPilot:
        def __init__(self):
            self.angle = 0
            self.throttle = 0
            self.angle_0 = 0
            self.angle_1 = 0
            self.throttle_0 = 0
            self.throttle_1 = 0
            self.mode = 0
            self.state = [False, False, False, False]
            self.match = 0

        def run_threaded(self, match, a0, t0, a1, t1):
            self.angle_0 = a0
            self.angle_1 = a1
            self.throttle_0 = t0
            self.throttle_1 = t1
            #self.mode = mode
            self.match = match
            return self.angle, self.throttle

        def update(self):
            while True:
                # BUGFIX: the original passed (angle_0, angle_1, throttle_0,
                # throttle_1), swapping throttle_0/angle_1 relative to run()'s
                # (match, angle_0, throttle_0, angle_1, throttle_1) signature.
                # Only relevant if this part is ever added threaded.
                self.angle, self.throttle = self.run(self.match,
                                                     self.angle_0,
                                                     self.throttle_0,
                                                     self.angle_1,
                                                     self.throttle_1)

        def run(self, match, angle_0, throttle_0, angle_1, throttle_1):
            if match == 0:
                return angle_0, throttle_0
            elif match == 1:
                return angle_1, throttle_1
            else:
                return 0, 0

    V.add(MyPilot(),
          inputs=[
              'match', 'pilot/angle_0', 'pilot/throttle_0', 'pilot/angle_1',
              'pilot/throttle_1'
          ],
          outputs=['mypilot/angle', 'mypilot/throttle'],
          run_condition='run_pilot')

    #Choose what inputs should change the car.
    class DriveMode:
        def run(self, mode, user_angle, user_throttle, pilot_angle,
                pilot_throttle):
            if mode == 'user':
                return user_angle, user_throttle
            elif mode == 'local_angle':
                return pilot_angle if pilot_angle else 0.0, user_throttle
            else:
                return pilot_angle if pilot_angle else 0.0, \
                       pilot_throttle * cfg.AI_THROTTLE_MULT if pilot_throttle else 0.0

    V.add(DriveMode(),
          inputs=[
              'user/mode', 'user/angle', 'user/throttle', 'mypilot/angle',
              'mypilot/throttle'
          ],
          outputs=['angle', 'throttle'])

    #to give the car a boost when starting ai mode in a race.
    aiLauncher = AiLaunch(cfg.AI_LAUNCH_DURATION, cfg.AI_LAUNCH_THROTTLE,
                          cfg.AI_LAUNCH_KEEP_ENABLED)
    V.add(aiLauncher, inputs=['user/mode', 'throttle'], outputs=['throttle'])

    if isinstance(ctr, JoystickController):
        ctr.set_button_down_trigger(cfg.AI_LAUNCH_ENABLE_BUTTON,
                                    aiLauncher.enable_ai_launch)

    class AiRunCondition:
        '''
        A bool part to let us know when ai is running.
        '''

        def run(self, mode):
            if mode == "user":
                return False
            return True

    V.add(AiRunCondition(), inputs=['user/mode'], outputs=['ai_running'])

    #Ai Recording
    class AiRecordingCondition:
        '''
        return True when ai mode, otherwize respect user mode recording flag
        '''

        def run(self, mode, recording):
            if mode == 'user':
                return recording
            return True

    if cfg.RECORD_DURING_AI:
        V.add(AiRecordingCondition(),
              inputs=['user/mode', 'recording'],
              outputs=['recording'])

    #Drive train setup
    if cfg.DONKEY_GYM or cfg.DRIVE_TRAIN_TYPE == "MOCK":
        pass
    elif cfg.DRIVE_TRAIN_TYPE == "SERVO_ESC":
        from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle

        steering_controller = PCA9685(cfg.STEERING_CHANNEL,
                                      cfg.PCA9685_I2C_ADDR,
                                      busnum=cfg.PCA9685_I2C_BUSNUM)
        steering = PWMSteering(controller=steering_controller,
                               left_pulse=cfg.STEERING_LEFT_PWM,
                               right_pulse=cfg.STEERING_RIGHT_PWM)

        throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL,
                                      cfg.PCA9685_I2C_ADDR,
                                      busnum=cfg.PCA9685_I2C_BUSNUM)
        throttle = PWMThrottle(controller=throttle_controller,
                               max_pulse=cfg.THROTTLE_FORWARD_PWM,
                               zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                               min_pulse=cfg.THROTTLE_REVERSE_PWM)

        V.add(steering, inputs=['angle'], threaded=True)
        V.add(throttle, inputs=['throttle'], threaded=True)

    elif cfg.DRIVE_TRAIN_TYPE == "DC_STEER_THROTTLE":
        from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM

        steering = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT,
                                             cfg.HBRIDGE_PIN_RIGHT)
        throttle = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD,
                                             cfg.HBRIDGE_PIN_BWD)

        V.add(steering, inputs=['angle'])
        V.add(throttle, inputs=['throttle'])

    elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL":
        from donkeycar.parts.actuator import TwoWheelSteeringThrottle, Mini_HBridge_DC_Motor_PWM

        left_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT_FWD,
                                               cfg.HBRIDGE_PIN_LEFT_BWD)
        right_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_RIGHT_FWD,
                                                cfg.HBRIDGE_PIN_RIGHT_BWD)
        two_wheel_control = TwoWheelSteeringThrottle()

        V.add(two_wheel_control,
              inputs=['throttle', 'angle'],
              outputs=['left_motor_speed', 'right_motor_speed'])

        V.add(left_motor, inputs=['left_motor_speed'])
        V.add(right_motor, inputs=['right_motor_speed'])

    elif cfg.DRIVE_TRAIN_TYPE == "SERVO_HBRIDGE_PWM":
        from donkeycar.parts.actuator import ServoBlaster, PWMSteering
        steering_controller = ServoBlaster(cfg.STEERING_CHANNEL)  #really pin
        #PWM pulse values should be in the range of 100 to 200
        assert (cfg.STEERING_LEFT_PWM <= 200)
        assert (cfg.STEERING_RIGHT_PWM <= 200)
        steering = PWMSteering(controller=steering_controller,
                               left_pulse=cfg.STEERING_LEFT_PWM,
                               right_pulse=cfg.STEERING_RIGHT_PWM)

        from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
        motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD,
                                          cfg.HBRIDGE_PIN_BWD)

        V.add(steering, inputs=['angle'], threaded=True)
        V.add(motor, inputs=["throttle"])

    elif cfg.DRIVE_TRAIN_TYPE == "MM1":
        from donkeycar.parts.robohat import RoboHATDriver
        V.add(RoboHATDriver(cfg), inputs=['angle', 'throttle'])

    elif cfg.DRIVE_TRAIN_TYPE == "PIGPIO_PWM":
        from donkeycar.parts.actuator import PWMSteering, PWMThrottle, PiGPIO_PWM
        steering_controller = PiGPIO_PWM(cfg.STEERING_PWM_PIN,
                                         freq=cfg.STEERING_PWM_FREQ,
                                         inverted=cfg.STEERING_PWM_INVERTED)
        steering = PWMSteering(controller=steering_controller,
                               left_pulse=cfg.STEERING_LEFT_PWM,
                               right_pulse=cfg.STEERING_RIGHT_PWM)

        throttle_controller = PiGPIO_PWM(cfg.THROTTLE_PWM_PIN,
                                         freq=cfg.THROTTLE_PWM_FREQ,
                                         inverted=cfg.THROTTLE_PWM_INVERTED)
        throttle = PWMThrottle(controller=throttle_controller,
                               max_pulse=cfg.THROTTLE_FORWARD_PWM,
                               zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                               min_pulse=cfg.THROTTLE_REVERSE_PWM)
        V.add(steering, inputs=['angle'], threaded=True)
        V.add(throttle, inputs=['throttle'], threaded=True)

    # OLED setup
    if cfg.USE_SSD1306_128_32:
        from donkeycar.parts.oled import OLEDPart
        auto_record_on_throttle = cfg.USE_JOYSTICK_AS_DEFAULT and cfg.AUTO_RECORD_ON_THROTTLE
        oled_part = OLEDPart(cfg.SSD1306_128_32_I2C_BUSNUM,
                             auto_record_on_throttle=auto_record_on_throttle)
        V.add(oled_part,
              inputs=['recording', 'tub/num_records', 'user/mode'],
              outputs=[],
              threaded=True)

    #add tub to save data
    inputs = ['cam/image_array', 'user/angle', 'user/throttle', 'user/mode']
    types = ['image_array', 'float', 'float', 'str']

    if cfg.TRAIN_BEHAVIORS:
        inputs += [
            'behavior/state', 'behavior/label', "behavior/one_hot_state_array"
        ]
        types += ['int', 'str', 'vector']

    if cfg.CAMERA_TYPE == "D435" and cfg.REALSENSE_D435_DEPTH:
        inputs += ['cam/depth_array']
        types += ['gray16_array']

    if cfg.HAVE_IMU or (cfg.CAMERA_TYPE == "D435" and cfg.REALSENSE_D435_IMU):
        inputs += [
            'imu/acl_x', 'imu/acl_y', 'imu/acl_z', 'imu/gyr_x', 'imu/gyr_y',
            'imu/gyr_z'
        ]
        types += ['float', 'float', 'float', 'float', 'float', 'float']

    if cfg.RECORD_DURING_AI:
        inputs += ['pilot/angle', 'pilot/throttle']
        types += ['float', 'float']

    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
    V.add(tub,
          inputs=inputs,
          outputs=["tub/num_records"],
          run_condition='recording')

    if cfg.PUB_CAMERA_IMAGES:
        from donkeycar.parts.network import TCPServeValue
        from donkeycar.parts.image import ImgArrToJpg
        pub = TCPServeValue("camera")
        V.add(ImgArrToJpg(), inputs=['cam/image_array'], outputs=['jpg/bin'])
        V.add(pub, inputs=['jpg/bin'])

    if type(ctr) is LocalWebController:
        if cfg.DONKEY_GYM:
            print("You can now go to http://localhost:%d to drive your car." %
                  cfg.WEB_CONTROL_PORT)
        else:
            print(
                "You can now go to <your hostname.local>:%d to drive your car."
                % cfg.WEB_CONTROL_PORT)
    elif isinstance(ctr, JoystickController):
        print("You can now move your joystick to drive your car.")
        #tell the controller about the tub
        ctr.set_tub(tub)

        if cfg.BUTTON_PRESS_NEW_TUB:

            def new_tub_dir():
                V.parts.pop()
                tub = th.new_tub_writer(inputs=inputs,
                                        types=types,
                                        user_meta=meta)
                V.add(tub,
                      inputs=inputs,
                      outputs=["tub/num_records"],
                      run_condition='recording')
                ctr.set_tub(tub)

            ctr.set_button_down_trigger('cross', new_tub_dir)
        ctr.print_controls()

    #run the vehicle for 20 seconds
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
def drive(cfg, model_path=None, model_type=None):
    '''
    Construct a working robotic vehicle from many parts. Each part runs as a
    job in the Vehicle loop, calling either it's run or run_threaded method
    depending on the constructor flag `threaded`. All parts are updated one
    after another at the framerate given in cfg.DRIVE_LOOP_HZ assuming each
    part finishes processing in a timely manner. Parts may have named outputs
    and inputs. The framework handles passing named outputs to parts
    requesting the same named input.
    '''
    if model_type is None:
        model_type = cfg.DEFAULT_MODEL_TYPE

    #Initialize car
    V = dk.vehicle.Vehicle()

    cam = PiCamera(image_w=cfg.IMAGE_W,
                   image_h=cfg.IMAGE_H,
                   image_d=cfg.IMAGE_DEPTH)
    V.add(cam, outputs=['cam/image_array'], threaded=True)

    # Web UI is the only controller in this minimal template.
    V.add(LocalWebController(),
          inputs=['cam/image_array'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    #See if we should even run the pilot module.
    #This is only needed because the part run_condition only accepts boolean
    class PilotCondition:
        def run(self, mode):
            if mode == 'user':
                return False
            else:
                return True

    V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])

    #Sombrero
    if cfg.HAVE_SOMBRERO:
        from donkeycar.parts.sombrero import Sombrero
        s = Sombrero()

    # Inference input for the keras pilot.
    inputs = ['cam/image_array']

    def load_model(kl, model_path):
        # Load a complete model file into keras part `kl`; errors are
        # reported but not raised.
        start = time.time()
        try:
            print('loading model', model_path)
            kl.load(model_path)
            print('finished loading in %s sec.' % (str(time.time() - start)))
        except Exception as e:
            print(e)
            print('ERR>> problems loading model', model_path)

    def load_weights(kl, weights_path):
        # Load a weights-only file into `kl`'s underlying keras model.
        start = time.time()
        try:
            print('loading model weights', weights_path)
            kl.model.load_weights(weights_path)
            print('finished loading in %s sec.' % (str(time.time() - start)))
        except Exception as e:
            print(e)
            print('ERR>> problems loading weights', weights_path)

    def load_model_json(kl, json_fnm):
        # Rebuild the keras model architecture from a .json description.
        start = time.time()
        print('loading model json', json_fnm)
        import keras
        try:
            with open(json_fnm, 'r') as handle:
                contents = handle.read()
                kl.model = keras.models.model_from_json(contents)
            print('finished loading json in %s sec.' %
                  (str(time.time() - start)))
        except Exception as e:
            print(e)
            print("ERR>> problems loading model json", json_fnm)

    if model_path:
        #When we have a model, first create an appropriate Keras part
        kl = dk.utils.get_model_by_type(model_type, cfg)

        if '.h5' in model_path:
            #when we have a .h5 extension
            #load everything from the model file
            load_model(kl, model_path)

        elif '.json' in model_path:
            #when we have a .json extension
            #load the model from their and look for a matching
            #.wts file with just weights
            load_model_json(kl, model_path)
            weights_path = model_path.replace('.json', '.weights')
            load_weights(kl, weights_path)

        outputs = ['pilot/angle', 'pilot/throttle']
        V.add(kl, inputs=inputs, outputs=outputs, run_condition='run_pilot')

    #Choose what inputs should change the car.
    class DriveMode:
        def run(self, mode, user_angle, user_throttle, pilot_angle,
                pilot_throttle):
            if mode == 'user':
                return user_angle, user_throttle
            elif mode == 'local_angle':
                # Pilot steers, human controls speed.
                return pilot_angle, user_throttle
            else:
                # Full autopilot; throttle is scaled by AI_THROTTLE_MULT.
                return pilot_angle, pilot_throttle * cfg.AI_THROTTLE_MULT

    V.add(DriveMode(),
          inputs=[
              'user/mode', 'user/angle', 'user/throttle', 'pilot/angle',
              'pilot/throttle'
          ],
          outputs=['angle', 'throttle'])

    #Drive train setup
    from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle

    steering_controller = PCA9685(cfg.STEERING_CHANNEL,
                                  cfg.PCA9685_I2C_ADDR,
                                  busnum=cfg.PCA9685_I2C_BUSNUM)
    steering = PWMSteering(controller=steering_controller,
                           left_pulse=cfg.STEERING_LEFT_PWM,
                           right_pulse=cfg.STEERING_RIGHT_PWM)

    throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL,
                                  cfg.PCA9685_I2C_ADDR,
                                  busnum=cfg.PCA9685_I2C_BUSNUM)
    throttle = PWMThrottle(controller=throttle_controller,
                           max_pulse=cfg.THROTTLE_FORWARD_PWM,
                           zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                           min_pulse=cfg.THROTTLE_REVERSE_PWM)

    V.add(steering, inputs=['angle'])
    V.add(throttle, inputs=['throttle'])

    #add tub to save data
    inputs = ['cam/image_array', 'user/angle', 'user/throttle', 'user/mode']
    types = ['image_array', 'float', 'float', 'str']

    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types)
    V.add(tub,
          inputs=inputs,
          outputs=["tub/num_records"],
          run_condition='recording')

    print("You can now go to <your pi ip address>:8887 to drive your car.")

    #run the vehicle for 20 seconds
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
def test_rs2(cfg):
    """
    Build and run a test vehicle that streams the full Intel RealSense T265
    state through the custom ``FullDataReader`` part and records it to a new
    tub.  (Original docstring: "独自パーツを使用。" — "uses the custom part".)

    :param cfg: donkeycar config object; DATA_PATH, DRIVE_LOOP_HZ and
                IMAGE_H/IMAGE_W/IMAGE_DEPTH are read here.
    """
    print('Use original T265 part class')
    V = dk.vehicle.Vehicle()
    # Pre-seed encoder velocity in vehicle memory before any part runs.
    V.mem['enc_vel_ms'] = 0.0

    # Import inside the function; re-raise so a missing optional package
    # fails loudly at this call site.
    try:
        from donkeypart_t265 import FullDataReader
    except:
        raise

    # Single threaded part publishing the full T265 state: pose, velocity,
    # raw gyro/accel, filtered gyro, rotation quaternion, confidence values,
    # Euler angles, and both fisheye camera images.
    V.add(FullDataReader(image_output=True, debug=False),
          outputs=[
              'pos_x', 'pos_y', 'pos_z', 'vel_x', 'vel_y', 'vel_z', 'gyr_x',
              'gyr_y', 'gyr_z', 'acc_x', 'acc_y', 'acc_z', 'gyr_ax', 'gyr_ay',
              'gyr_az', 'rot_i', 'rot_j', 'rot_k', 'rot_l', 'posemap_conf',
              'pose_conf', 'roll', 'pitch', 'yaw', 'left_image_array',
              'right_image_array'
          ],
          threaded=True)

    # image shape (800, 848)
    class ImageNoneCheck(object):
        # Replaces a missing camera frame with a black image so downstream
        # parts (e.g. the tub writer) never see None.
        def __init__(self, cfg):
            self.cfg = cfg

        def run(self, image_array):
            if image_array is None:
                #print('[RS] image_array is None')
                return np.zeros(
                    (self.cfg.IMAGE_H, self.cfg.IMAGE_W, self.cfg.IMAGE_DEPTH),
                    dtype=np.uint8)
            else:
                #print('[RS] image.shape:{}'.format(str(image_array.shape)))
                return image_array

        def shutdown(self):
            pass

    # The same part instance filters both image channels in place.
    img_chk = ImageNoneCheck(cfg)
    V.add(img_chk, inputs=['left_image_array'], outputs=['left_image_array'])
    V.add(img_chk, inputs=['right_image_array'], outputs=['right_image_array'])

    class PrintT265:
        # Dumps every T265 value as one CSV line per loop; the header row is
        # printed once at construction time.
        def __init__(self):
            print(
                'pos_x,pos_y,pos_z,vel_x,vel_y,vel_z,gyr_x,gyr_y,gyr_z,acc_x,acc_y,acc_z,gyr_ax,gyr_ay,gyr_az,rot_i,rot_j,rot_k,rot_l,posemap_conf,pose_conf,roll,pitch,yaw'
            )

        def run(self, pos_x, pos_y, pos_z, vel_x, vel_y, vel_z, gyr_x, gyr_y,
                gyr_z, acc_x, acc_y, acc_z, gyr_ax, gyr_ay, gyr_az, rot_i,
                rot_j, rot_k, rot_l, posemap_conf, pose_conf, roll, pitch,
                yaw):
            print(
                '%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%d,%d,%f,%f,%f'
                % (pos_x, pos_y, pos_z, vel_x, vel_y, vel_z, gyr_x, gyr_y,
                   gyr_z, acc_x, acc_y, acc_z, gyr_ax, gyr_ay, gyr_az, rot_i,
                   rot_j, rot_k, rot_l, posemap_conf, pose_conf, roll, pitch,
                   yaw))

        def shutdown(self):
            pass

    V.add(PrintT265(),
          inputs=[
              'pos_x', 'pos_y', 'pos_z', 'vel_x', 'vel_y', 'vel_z', 'gyr_x',
              'gyr_y', 'gyr_z', 'acc_x', 'acc_y', 'acc_z', 'gyr_ax', 'gyr_ay',
              'gyr_az', 'rot_i', 'rot_j', 'rot_k', 'rot_l',
              'posemap_conf', 'pose_conf', 'roll', 'pitch', 'yaw'
          ])

    # Record the left camera image plus pose / velocity / acceleration /
    # gyro / Euler-angle channels into a new tub under cfg.DATA_PATH.
    inputs = [
        'left_image_array',
        'pos_x', 'pos_y', 'pos_z',
        'vel_x', 'vel_y', 'vel_z',
        'acc_x', 'acc_y', 'acc_z',
        'gyr_x', 'gyr_y', 'gyr_z',
        'roll', 'pitch', 'yaw',
    ]
    types = [
        'image_array',
        'float', 'float', 'float',
        'float', 'float', 'float',
        'float', 'float', 'float',
        'float', 'float', 'float',
        'float', 'float', 'float',
    ]
    from donkeycar.parts.datastore import TubHandler
    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=[])
    V.add(tub, inputs=inputs, outputs=["tub/num_records"])

    try:
        # Run for roughly one minute (DRIVE_LOOP_HZ iterations per second).
        V.start(rate_hz=cfg.DRIVE_LOOP_HZ,
                max_loop_count=cfg.DRIVE_LOOP_HZ * 60)  # 1min
        ## max_loop_count=cfg.MAX_LOOPS)  # infinity
    except KeyboardInterrupt:
        print('exit')
    finally:
        print('done')
def drive(cfg, model_path=None, use_joystick=False):
    '''
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either
    it's run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named
    outputs to parts requesting the same named input.

    :param cfg:          donkeycar config object.
    :param model_path:   optional Keras model file loaded into the pilot.
    :param use_joystick: force a joystick controller instead of the web UI.
    '''
    #Initialize car
    V = dk.vehicle.Vehicle()
    cam = PiCamera(resolution=cfg.CAMERA_RESOLUTION)
    V.add(cam, outputs=['cam/image_array'], threaded=True)

    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        #modify max_throttle closer to 1.0 to have more power
        #modify steering_scale lower than 1.0 to have less responsive steering
        ctr = JoystickController(
            max_throttle=cfg.JOYSTICK_MAX_THROTTLE,
            steering_scale=cfg.JOYSTICK_STEERING_SCALE,
            auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
    else:
        #This web controller will create a web server that is capable
        #of managing steering, throttle, and modes, and more.
        ctr = LocalWebController()

    V.add(ctr,
          inputs=['cam/image_array'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    #See if we should even run the pilot module.
    #This is only needed because the part run_condition only accepts boolean
    def pilot_condition(mode):
        if mode == 'user':
            return False
        else:
            return True

    pilot_condition_part = Lambda(pilot_condition)
    V.add(pilot_condition_part, inputs=['user/mode'], outputs=['run_pilot'])

    #Run the pilot if the mode is not user.
    kl = KerasCategorical()
    if model_path:
        kl.load(model_path)

    V.add(kl,
          inputs=['cam/image_array'],
          outputs=['pilot/angle', 'pilot/throttle'],
          run_condition='run_pilot')

    #Choose what inputs should change the car.
    def drive_mode(mode, user_angle, user_throttle, pilot_angle,
                   pilot_throttle):
        # 'user': full manual; 'local_angle': AI steers, user throttles;
        # any other mode: full AI control.
        if mode == 'user':
            return user_angle, user_throttle
        elif mode == 'local_angle':
            return pilot_angle, user_throttle
        else:
            return pilot_angle, pilot_throttle

    drive_mode_part = Lambda(drive_mode)
    V.add(drive_mode_part,
          inputs=[
              'user/mode', 'user/angle', 'user/throttle', 'pilot/angle',
              'pilot/throttle'
          ],
          outputs=['angle', 'throttle'])

    # Actuators: PWM steering servo and ESC throttle on a PCA9685 board.
    steering_controller = PCA9685(cfg.STEERING_CHANNEL)
    steering = PWMSteering(controller=steering_controller,
                           left_pulse=cfg.STEERING_LEFT_PWM,
                           right_pulse=cfg.STEERING_RIGHT_PWM)
    throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL)
    throttle = PWMThrottle(controller=throttle_controller,
                           max_pulse=cfg.THROTTLE_FORWARD_PWM,
                           zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                           min_pulse=cfg.THROTTLE_REVERSE_PWM)
    V.add(steering, inputs=['angle'])
    V.add(throttle, inputs=['throttle'])

    #add tub to save data
    inputs = ['cam/image_array', 'user/angle', 'user/throttle', 'user/mode']
    types = ['image_array', 'float', 'float', 'str']
    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types)
    V.add(tub, inputs=inputs, run_condition='recording')

    #setup AWS IoT
    if cfg.IOT_ENABLED:
        aws = AWSHandler(vehicle_id=cfg.VEHICLE_ID,
                         endpoint=cfg.AWS_ENDPOINT,
                         ca=cfg.CA_PATH,
                         private_key=cfg.PRIVATE_KEY_PATH,
                         certificate=cfg.CERTIFICATE_PATH)
        # NOTE(review): 'intputs' looks like a typo for 'inputs' — confirm it
        # matches the actual parameter name of AWSHandler.new_iot_publisher
        # before changing it.
        iot = aws.new_iot_publisher(intputs=inputs, types=types)
        V.add(iot, inputs=inputs, run_condition='recording')

    #run the vehicle
    # NOTE(review): this print only executes after V.start() returns, i.e.
    # once the drive loop has finished — presumably intended to run before
    # the loop starts; confirm before moving it.
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
    print("You can now go to <your pi ip address>:8887 to drive your car.")
def test_rs(cfg):
    """
    Build and run a test vehicle using the stock Donkeycar v3.1.1 RS_T265
    part, printing pose/velocity/acceleration each loop and recording them
    with the camera image to a new tub.  (Original docstring:
    "Donekycar v3.1.1 上のパーツクラスを使用。" — "uses the part class from
    Donkeycar v3.1.1".)

    :param cfg: donkeycar config object; DATA_PATH, DRIVE_LOOP_HZ and
                IMAGE_H/IMAGE_W/IMAGE_DEPTH are read here.
    """
    V = dk.vehicle.Vehicle()

    from donkeycar.parts.realsense2 import RS_T265
    rs = RS_T265(image_output=True)
    # Not threaded: the T265 is polled synchronously once per loop.
    V.add(rs, outputs=['pos', 'vel', 'acc', 'image_array'], threaded=False)

    class Split(object):
        # Flattens the pos/vel/acc vector objects into nine scalar channels.
        def run(self, pos, vel, acc):
            return pos.x, pos.y, pos.z, vel.x, vel.y, vel.z, acc.x, acc.y, acc.z

        def shutdown(self):
            pass

    V.add(Split(),
          inputs=['pos', 'vel', 'acc'],
          outputs=[
              'pos_x', 'pos_y', 'pos_z', 'vel_x', 'vel_y', 'vel_z', 'acc_x',
              'acc_y', 'acc_z'
          ])

    # image shape (800, 848)
    class ImageNoneCheck(object):
        # Replaces a missing camera frame with a black image so downstream
        # parts (e.g. the tub writer) never see None.
        def __init__(self, cfg):
            self.cfg = cfg

        def run(self, image_array):
            if image_array is None:
                #print('[RS] image_array is None')
                return np.zeros(
                    (self.cfg.IMAGE_H, self.cfg.IMAGE_W, self.cfg.IMAGE_DEPTH),
                    dtype=np.uint8)
            else:
                #print('[RS] image.shape:{}'.format(str(image_array.shape)))
                return image_array

        def shutdown(self):
            pass

    V.add(ImageNoneCheck(cfg), inputs=['image_array'], outputs=['image_array'])

    class PrintRS(object):
        # Prints one CSV line of pose/velocity/acceleration per loop.
        def run(self, image_array, pos_x, pos_y, pos_z, vel_x, vel_y, vel_z,
                acc_x, acc_y, acc_z):
            #print((image_array is None))
            #print(type(image_array))
            #print(image_array.shape)
            #print('[RS] pos:({:.5g}, {:.5g}, {:.5g}) vel:({:.5g}, {:.5g}, {:.5g}) acc:({:.5g}, {:.5g}, {:.5g})'.format(
            print(
                '{:.5g},{:.5g},{:.5g},{:.5g},{:.5g},{:.5g},{:.5g},{:.5g}, {:.5g}'
                .format(pos_x, pos_y, pos_z, vel_x, vel_y, vel_z, acc_x,
                        acc_y, acc_z))

        def shutdown(self):
            pass

    V.add(PrintRS(),
          inputs=[
              'image_array', 'pos_x', 'pos_y', 'pos_z', 'vel_x', 'vel_y',
              'vel_z', 'acc_x', 'acc_y', 'acc_z'
          ])

    # Record the camera image plus the nine motion channels to a new tub.
    inputs = [
        'image_array',
        'pos_x', 'pos_y', 'pos_z',
        'vel_x', 'vel_y', 'vel_z',
        'acc_x', 'acc_y', 'acc_z',
    ]
    types = [
        'image_array',
        'float', 'float', 'float',
        'float', 'float', 'float',
        'float', 'float', 'float',
    ]
    from donkeycar.parts.datastore import TubHandler
    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=[])
    V.add(tub, inputs=inputs, outputs=["tub/num_records"])

    try:
        # Run for roughly one minute (DRIVE_LOOP_HZ iterations per second).
        V.start(rate_hz=cfg.DRIVE_LOOP_HZ,
                max_loop_count=cfg.DRIVE_LOOP_HZ * 60)  # 1min
        ## max_loop_count=cfg.MAX_LOOPS)  # infinity
    except KeyboardInterrupt:
        print('exit')
    finally:
        print('done')
def train_drive_reinforcement(cfg, args, script_mode):
    '''
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either
    it's run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named
    outputs to parts requesting the same named input.

    Depending on ``script_mode`` ('train' / 'drive' / 'optimize' /
    'train_vae') a background thread is launched before the vehicle loop
    starts; ``args`` is a docopt-style dict of command line options.
    '''
    # Unpack docopt-style command line arguments.
    model_path = args['--model']
    use_joystick = args['--js']
    meta = args['--meta']
    tub_in = args['--tub']
    model_type = args['--type']
    camera_type = args['--camera']
    vae_path = args['--vae']
    auto_mode = 0
    if args['--auto']:
        auto_mode = 1
    env_type = 'simulator'
    if args['--env']:
        env_type = args['--env']
    print('VAE_PATH: %s' % vae_path)

    # NOTE(review): module-level globals — presumably shared with the
    # background threads started near the bottom (observe_and_learn,
    # drive_model, ...); confirm against those functions.
    global r
    global ctr
    global cam

    if cfg.DONKEY_GYM:
        #the simulator will use cuda and then we usually run out of resources
        #if we also try to use cuda. so disable for donkey_gym.
        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

    # Resolve the model type from config when not given on the command line.
    if model_type is None:
        if cfg.TRAIN_LOCALIZER:
            model_type = "localizer"
        elif cfg.TRAIN_BEHAVIORS:
            model_type = "behavior"
        else:
            model_type = cfg.DEFAULT_MODEL_TYPE

    #Initialize car
    V = dk.vehicle.Vehicle()

    print("cfg.CAMERA_TYPE", cfg.CAMERA_TYPE)
    if camera_type == "stereo":
        # Two cameras whose frames are merged into one stereo image.
        if cfg.CAMERA_TYPE == "WEBCAM":
            from donkeycar.parts.camera import Webcam
            camA = Webcam(image_w=cfg.IMAGE_W,
                          image_h=cfg.IMAGE_H,
                          image_d=cfg.IMAGE_DEPTH,
                          iCam=0)
            camB = Webcam(image_w=cfg.IMAGE_W,
                          image_h=cfg.IMAGE_H,
                          image_d=cfg.IMAGE_DEPTH,
                          iCam=1)
        elif cfg.CAMERA_TYPE == "CVCAM":
            from donkeycar.parts.cv import CvCam
            camA = CvCam(image_w=cfg.IMAGE_W,
                         image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH,
                         iCam=0)
            camB = CvCam(image_w=cfg.IMAGE_W,
                         image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH,
                         iCam=1)
        else:
            raise (Exception("Unsupported camera type: %s" % cfg.CAMERA_TYPE))

        V.add(camA, outputs=['cam/image_array_a'], threaded=True)
        V.add(camB, outputs=['cam/image_array_b'], threaded=True)

        from donkeycar.parts.image import StereoPair
        V.add(StereoPair(),
              inputs=['cam/image_array_a', 'cam/image_array_b'],
              outputs=['cam/image_array'])
    elif cfg.CAMERA_TYPE == "D435":
        from donkeycar.parts.realsense435i import RealSense435i
        cam = RealSense435i(enable_rgb=cfg.REALSENSE_D435_RGB,
                            enable_depth=cfg.REALSENSE_D435_DEPTH,
                            enable_imu=cfg.REALSENSE_D435_IMU,
                            device_id=cfg.REALSENSE_D435_ID)
        V.add(cam,
              inputs=[],
              outputs=[
                  'cam/image_array', 'cam/depth_array', 'imu/acl_x',
                  'imu/acl_y', 'imu/acl_z', 'imu/gyr_x', 'imu/gyr_y',
                  'imu/gyr_z'
              ],
              threaded=True)
    else:
        # NOTE(review): dead branch — `if False and ...` can never run.
        if False and cfg.DONKEY_GYM:
            from donkeycar.parts.dgym import DonkeyGymEnv

        inputs = []
        threaded = True
        if cfg.DONKEY_GYM:
            from donkeycar.parts.dgym import DonkeyGymEnv
            # The simulator acts as both camera and drive train: it consumes
            # angle/throttle and produces the camera image.
            cam = DonkeyGymEnv(cfg.DONKEY_SIM_PATH,
                               host=cfg.SIM_HOST,
                               env_name=cfg.DONKEY_GYM_ENV_NAME,
                               conf=cfg.GYM_CONF,
                               delay=cfg.SIM_ARTIFICIAL_LATENCY)
            threaded = True
            inputs = ['angle', 'throttle']
        elif cfg.CAMERA_TYPE == "PICAM":
            from donkeycar.parts.camera import PiCamera
            cam = PiCamera(image_w=cfg.IMAGE_W,
                           image_h=cfg.IMAGE_H,
                           image_d=cfg.IMAGE_DEPTH,
                           framerate=cfg.CAMERA_FRAMERATE,
                           vflip=cfg.CAMERA_VFLIP,
                           hflip=cfg.CAMERA_HFLIP)
        elif cfg.CAMERA_TYPE == "WEBCAM":
            from donkeycar.parts.camera import Webcam
            cam = Webcam(image_w=cfg.IMAGE_W,
                         image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "CVCAM":
            from donkeycar.parts.cv import CvCam
            cam = CvCam(image_w=cfg.IMAGE_W,
                        image_h=cfg.IMAGE_H,
                        image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "CSIC":
            from donkeycar.parts.camera import CSICamera
            cam = CSICamera(image_w=cfg.IMAGE_W,
                            image_h=cfg.IMAGE_H,
                            image_d=cfg.IMAGE_DEPTH,
                            framerate=cfg.CAMERA_FRAMERATE,
                            gstreamer_flip=cfg.CSIC_CAM_GSTREAMER_FLIP_PARM)
        elif cfg.CAMERA_TYPE == "V4L":
            from donkeycar.parts.camera import V4LCamera
            cam = V4LCamera(image_w=cfg.IMAGE_W,
                            image_h=cfg.IMAGE_H,
                            image_d=cfg.IMAGE_DEPTH,
                            framerate=cfg.CAMERA_FRAMERATE)
        elif cfg.CAMERA_TYPE == "MOCK":
            from donkeycar.parts.camera import MockCamera
            cam = MockCamera(image_w=cfg.IMAGE_W,
                             image_h=cfg.IMAGE_H,
                             image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "IMAGE_LIST":
            from donkeycar.parts.camera import ImageListCamera
            cam = ImageListCamera(path_mask=cfg.PATH_MASK)
        else:
            raise (Exception("Unkown camera type: %s" % cfg.CAMERA_TYPE))

        V.add(cam,
              inputs=inputs,
              outputs=['cam/image_array'],
              threaded=threaded)

    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        #modify max_throttle closer to 1.0 to have more power
        #modify steering_scale lower than 1.0 to have less responsive steering
        if cfg.CONTROLLER_TYPE == "MM1":
            from donkeycar.parts.robohat import RoboHATController
            ctr = RoboHATController(cfg)
        elif "custom" == cfg.CONTROLLER_TYPE:
            #
            # custom controller created with `donkey createjs` command
            #
            from my_joystick import MyJoystickController
            ctr = MyJoystickController(
                throttle_dir=cfg.JOYSTICK_THROTTLE_DIR,
                throttle_scale=cfg.JOYSTICK_MAX_THROTTLE,
                steering_scale=cfg.JOYSTICK_STEERING_SCALE,
                auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
            ctr.set_deadzone(cfg.JOYSTICK_DEADZONE)
        else:
            from donkeycar.parts.controller import get_js_controller
            ctr = get_js_controller(cfg)

        if cfg.USE_NETWORKED_JS:
            # Receive joystick events over the network instead of locally.
            from donkeycar.parts.controller import JoyStickSub
            netwkJs = JoyStickSub(cfg.NETWORK_JS_SERVER_IP)
            V.add(netwkJs, threaded=True)
            ctr.js = netwkJs

        V.add(
            ctr,
            inputs=['cam/image_array'],
            outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
            threaded=True)
    else:
        #This web controller will create a web server that is capable
        #of managing steering, throttle, and modes, and more.
        ctr = LocalWebController(port=cfg.WEB_CONTROL_PORT,
                                 mode=cfg.WEB_INIT_MODE)
        V.add(
            ctr,
            inputs=['cam/image_array', 'tub/num_records'],
            outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
            threaded=True)

    #this throttle filter will allow one tap back for esc reverse
    th_filter = ThrottleFilter()
    V.add(th_filter, inputs=['user/throttle'], outputs=['user/throttle'])

    #See if we should even run the pilot module.
    #This is only needed because the part run_condition only accepts boolean
    class PilotCondition:
        def run(self, mode):
            if mode == 'user':
                return False
            else:
                return True

    V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])

    class LedConditionLogic:
        # Maps vehicle state to an LED blink rate and sets the LED color as
        # a side effect (uses the module-visible `led` created below).
        def __init__(self, cfg):
            self.cfg = cfg

        def run(self, mode, recording, recording_alert, behavior_state,
                model_file_changed, track_loc):
            #returns a blink rate. 0 for off. -1 for on. positive for rate.
            if track_loc is not None:
                led.set_rgb(*self.cfg.LOC_COLORS[track_loc])
                return -1

            if model_file_changed:
                led.set_rgb(self.cfg.MODEL_RELOADED_LED_R,
                            self.cfg.MODEL_RELOADED_LED_G,
                            self.cfg.MODEL_RELOADED_LED_B)
                return 0.1
            else:
                led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)

            if recording_alert:
                led.set_rgb(*recording_alert)
                return self.cfg.REC_COUNT_ALERT_BLINK_RATE
            else:
                led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)

            if behavior_state is not None and model_type == 'behavior':
                r, g, b = self.cfg.BEHAVIOR_LED_COLORS[behavior_state]
                led.set_rgb(r, g, b)
                return -1  #solid on

            if recording:
                return -1  #solid on
            elif mode == 'user':
                return 1
            elif mode == 'local_angle':
                return 0.5
            elif mode == 'local':
                return 0.1
            return 0

    if cfg.HAVE_RGB_LED and not cfg.DONKEY_GYM:
        from donkeycar.parts.led_status import RGB_LED
        led = RGB_LED(cfg.LED_PIN_R, cfg.LED_PIN_G, cfg.LED_PIN_B,
                      cfg.LED_INVERT)
        led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B)

        V.add(LedConditionLogic(cfg),
              inputs=[
                  'user/mode', 'recording', "records/alert", 'behavior/state',
                  'modelfile/modified', "pilot/loc"
              ],
              outputs=['led/blink_rate'])
        V.add(led, inputs=['led/blink_rate'])

    def get_record_alert_color(num_records):
        # Returns the color of the highest threshold in
        # cfg.RECORD_ALERT_COLOR_ARR that num_records has reached.
        col = (0, 0, 0)
        for count, color in cfg.RECORD_ALERT_COLOR_ARR:
            if num_records >= count:
                col = color
        return col

    class RecordTracker:
        # Prints record-count progress and emits an alert color for a few
        # loops each time a REC_COUNT_ALERT multiple is crossed.
        def __init__(self):
            self.last_num_rec_print = 0
            self.dur_alert = 0
            self.force_alert = 0

        def run(self, num_records):
            if num_records is None:
                return 0

            if self.last_num_rec_print != num_records or self.force_alert:
                self.last_num_rec_print = num_records

                if num_records % 10 == 0:
                    print("recorded", num_records, "records")

                if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert:
                    self.dur_alert = num_records // cfg.REC_COUNT_ALERT * cfg.REC_COUNT_ALERT_CYC
                    self.force_alert = 0

            if self.dur_alert > 0:
                self.dur_alert -= 1

            if self.dur_alert != 0:
                return get_record_alert_color(num_records)

            return 0

    rec_tracker_part = RecordTracker()
    V.add(rec_tracker_part,
          inputs=["tub/num_records"],
          outputs=['records/alert'])

    if cfg.AUTO_RECORD_ON_THROTTLE and isinstance(ctr, JoystickController):
        #then we are not using the circle button. hijack that to force a record count indication
        def show_record_acount_status():
            rec_tracker_part.last_num_rec_print = 0
            rec_tracker_part.force_alert = 1

        ctr.set_button_down_trigger('circle', show_record_acount_status)

    #Sombrero
    if cfg.HAVE_SOMBRERO:
        from donkeycar.parts.sombrero import Sombrero
        # Constructing the part is the whole effect here; `s` is not used
        # again (presumably the constructor configures the hat's GPIO pins).
        s = Sombrero()

    #IMU
    if cfg.HAVE_IMU:
        from donkeycar.parts.imu import IMU
        imu = IMU(sensor=cfg.IMU_SENSOR, dlp_setting=cfg.IMU_DLP_CONFIG)
        V.add(imu,
              outputs=[
                  'imu/acl_x', 'imu/acl_y', 'imu/acl_z', 'imu/gyr_x',
                  'imu/gyr_y', 'imu/gyr_z'
              ],
              threaded=True)

    class ImgPreProcess():
        '''
        preprocess camera image for inference.
        normalize and crop if needed.
        '''
        def __init__(self, cfg):
            self.cfg = cfg

        def run(self, img_arr):
            return normalize_and_crop(img_arr, self.cfg)

    # Coral models consume the raw image; everything else gets the
    # normalized/cropped version.
    if "coral" in model_type:
        inf_input = 'cam/image_array'
    else:
        inf_input = 'cam/normalized/cropped'
        V.add(ImgPreProcess(cfg),
              inputs=['cam/image_array'],
              outputs=[inf_input],
              run_condition='run_pilot')

    # Use the FPV preview, which will show the cropped image output, or the full frame.
    # NOTE(review): dead branch — `if False and ...` can never run.
    if False and cfg.USE_FPV:
        V.add(WebFpv(), inputs=['cam/image_array'], threaded=True)

    #Behavioral state
    if cfg.TRAIN_BEHAVIORS:
        bh = BehaviorPart(cfg.BEHAVIOR_LIST)
        V.add(bh,
              outputs=[
                  'behavior/state', 'behavior/label',
                  "behavior/one_hot_state_array"
              ])
        try:
            ctr.set_button_down_trigger('L1', bh.increment_state)
        except:
            pass

        inputs = [inf_input, "behavior/one_hot_state_array"]
    #IMU
    elif model_type == "imu":
        assert (cfg.HAVE_IMU)
        #Run the pilot if the mode is not user.
        inputs = [
            inf_input, 'imu/acl_x', 'imu/acl_y', 'imu/acl_z', 'imu/gyr_x',
            'imu/gyr_y', 'imu/gyr_z'
        ]
    else:
        inputs = [inf_input]

    # NOTE(review): load_model is defined here but no Keras part is added to
    # the vehicle in this function; pilot/angle & pilot/throttle presumably
    # come from the background RL threads below — confirm.
    def load_model(kl, model_path):
        start = time.time()
        print('loading model', model_path)
        kl.load(model_path)
        print('finished loading in %s sec.' % (str(time.time() - start)))

    #Choose what inputs should change the car.
    class DriveMode:
        def run(self, mode, user_angle, user_throttle, pilot_angle,
                pilot_throttle):
            if mode == 'user':
                return user_angle, user_throttle
            elif mode == 'local_angle':
                return pilot_angle, user_throttle
            else:
                #return pilot_angle, pilot_throttle * cfg.AI_THROTTLE_MULT
                return user_angle, user_throttle

    V.add(DriveMode(),
          inputs=[
              'user/mode', 'user/angle', 'user/throttle', 'pilot/angle',
              'pilot/throttle'
          ],
          outputs=['angle', 'throttle'])

    #to give the car a boost when starting ai mode in a race.
    aiLauncher = AiLaunch(cfg.AI_LAUNCH_DURATION, cfg.AI_LAUNCH_THROTTLE,
                          cfg.AI_LAUNCH_KEEP_ENABLED)
    V.add(aiLauncher, inputs=['user/mode', 'throttle'], outputs=['throttle'])

    if isinstance(ctr, JoystickController):
        ctr.set_button_down_trigger(cfg.AI_LAUNCH_ENABLE_BUTTON,
                                    aiLauncher.enable_ai_launch)

    class AiRunCondition:
        '''
        A bool part to let us know when ai is running.
        '''
        def run(self, mode):
            if mode == "user":
                return False
            return True

    V.add(AiRunCondition(), inputs=['user/mode'], outputs=['ai_running'])

    #Ai Recording
    class AiRecordingCondition:
        '''
        return True when ai mode, otherwize respect user mode recording flag
        '''
        def run(self, mode, recording):
            if mode == 'user':
                return recording
            return True

    if cfg.RECORD_DURING_AI:
        V.add(AiRecordingCondition(),
              inputs=['user/mode', 'recording'],
              outputs=['recording'])

    #Drive train setup
    if cfg.DONKEY_GYM or cfg.DRIVE_TRAIN_TYPE == "MOCK":
        # Simulator/mock: no physical actuators to wire up.
        pass
    elif cfg.DRIVE_TRAIN_TYPE == "SERVO_ESC":
        from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle

        steering_controller = PCA9685(cfg.STEERING_CHANNEL,
                                      cfg.PCA9685_I2C_ADDR,
                                      busnum=cfg.PCA9685_I2C_BUSNUM)
        steering = PWMSteering(controller=steering_controller,
                               left_pulse=cfg.STEERING_LEFT_PWM,
                               right_pulse=cfg.STEERING_RIGHT_PWM)

        throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL,
                                      cfg.PCA9685_I2C_ADDR,
                                      busnum=cfg.PCA9685_I2C_BUSNUM)
        throttle = PWMThrottle(controller=throttle_controller,
                               max_pulse=cfg.THROTTLE_FORWARD_PWM,
                               zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                               min_pulse=cfg.THROTTLE_REVERSE_PWM)

        V.add(steering, inputs=['angle'], threaded=True)
        V.add(throttle, inputs=['throttle'], threaded=True)
    elif cfg.DRIVE_TRAIN_TYPE == "DC_STEER_THROTTLE":
        from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM

        steering = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT,
                                             cfg.HBRIDGE_PIN_RIGHT)
        throttle = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD,
                                             cfg.HBRIDGE_PIN_BWD)

        V.add(steering, inputs=['angle'])
        V.add(throttle, inputs=['throttle'])
    elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL":
        from donkeycar.parts.actuator import TwoWheelSteeringThrottle, Mini_HBridge_DC_Motor_PWM

        left_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT_FWD,
                                               cfg.HBRIDGE_PIN_LEFT_BWD)
        right_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_RIGHT_FWD,
                                                cfg.HBRIDGE_PIN_RIGHT_BWD)
        two_wheel_control = TwoWheelSteeringThrottle()

        V.add(two_wheel_control,
              inputs=['throttle', 'angle'],
              outputs=['left_motor_speed', 'right_motor_speed'])

        V.add(left_motor, inputs=['left_motor_speed'])
        V.add(right_motor, inputs=['right_motor_speed'])
    elif cfg.DRIVE_TRAIN_TYPE == "SERVO_HBRIDGE_PWM":
        from donkeycar.parts.actuator import ServoBlaster, PWMSteering
        steering_controller = ServoBlaster(cfg.STEERING_CHANNEL)  #really pin
        #PWM pulse values should be in the range of 100 to 200
        assert (cfg.STEERING_LEFT_PWM <= 200)
        assert (cfg.STEERING_RIGHT_PWM <= 200)
        steering = PWMSteering(controller=steering_controller,
                               left_pulse=cfg.STEERING_LEFT_PWM,
                               right_pulse=cfg.STEERING_RIGHT_PWM)

        from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
        motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD,
                                          cfg.HBRIDGE_PIN_BWD)

        V.add(steering, inputs=['angle'], threaded=True)
        V.add(motor, inputs=["throttle"])
    elif cfg.DRIVE_TRAIN_TYPE == "MM1":
        from donkeycar.parts.robohat import RoboHATDriver
        V.add(RoboHATDriver(cfg), inputs=['angle', 'throttle'])
    elif cfg.DRIVE_TRAIN_TYPE == "PIGPIO_PWM":
        from donkeycar.parts.actuator import PWMSteering, PWMThrottle, PiGPIO_PWM
        steering_controller = PiGPIO_PWM(cfg.STEERING_PWM_PIN,
                                         freq=cfg.STEERING_PWM_FREQ,
                                         inverted=cfg.STEERING_PWM_INVERTED)
        steering = PWMSteering(controller=steering_controller,
                               left_pulse=cfg.STEERING_LEFT_PWM,
                               right_pulse=cfg.STEERING_RIGHT_PWM)

        throttle_controller = PiGPIO_PWM(cfg.THROTTLE_PWM_PIN,
                                         freq=cfg.THROTTLE_PWM_FREQ,
                                         inverted=cfg.THROTTLE_PWM_INVERTED)
        throttle = PWMThrottle(controller=throttle_controller,
                               max_pulse=cfg.THROTTLE_FORWARD_PWM,
                               zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                               min_pulse=cfg.THROTTLE_REVERSE_PWM)
        V.add(steering, inputs=['angle'], threaded=True)
        V.add(throttle, inputs=['throttle'], threaded=True)

    # OLED setup
    if cfg.USE_SSD1306_128_32:
        from donkeycar.parts.oled import OLEDPart
        auto_record_on_throttle = cfg.USE_JOYSTICK_AS_DEFAULT and cfg.AUTO_RECORD_ON_THROTTLE
        oled_part = OLEDPart(cfg.SSD1306_128_32_I2C_BUSNUM,
                             auto_record_on_throttle=auto_record_on_throttle)
        V.add(oled_part,
              inputs=['recording', 'tub/num_records', 'user/mode'],
              outputs=[],
              threaded=True)

    #add tub to save data
    # Note: this rebinds `inputs` (previously the model input list).
    inputs = ['cam/image_array', 'user/angle', 'user/throttle', 'user/mode']
    types = ['image_array', 'float', 'float', 'str']

    if cfg.TRAIN_BEHAVIORS:
        inputs += [
            'behavior/state', 'behavior/label', "behavior/one_hot_state_array"
        ]
        types += ['int', 'str', 'vector']

    if cfg.CAMERA_TYPE == "D435" and cfg.REALSENSE_D435_DEPTH:
        inputs += ['cam/depth_array']
        types += ['gray16_array']

    if cfg.HAVE_IMU or (cfg.CAMERA_TYPE == "D435" and cfg.REALSENSE_D435_IMU):
        inputs += [
            'imu/acl_x', 'imu/acl_y', 'imu/acl_z', 'imu/gyr_x', 'imu/gyr_y',
            'imu/gyr_z'
        ]
        types += ['float', 'float', 'float', 'float', 'float', 'float']

    if cfg.RECORD_DURING_AI:
        inputs += ['pilot/angle', 'pilot/throttle']
        types += ['float', 'float']

    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
    V.add(tub,
          inputs=inputs,
          outputs=["tub/num_records"],
          run_condition='recording')

    if cfg.PUB_CAMERA_IMAGES:
        # Serve JPEG-compressed camera frames over TCP.
        from donkeycar.parts.network import TCPServeValue
        from donkeycar.parts.image import ImgArrToJpg
        pub = TCPServeValue("camera")
        V.add(ImgArrToJpg(), inputs=['cam/image_array'], outputs=['jpg/bin'])
        V.add(pub, inputs=['jpg/bin'])

    if type(ctr) is LocalWebController:
        if cfg.DONKEY_GYM:
            print("You can now go to http://localhost:%d to drive your car." %
                  cfg.WEB_CONTROL_PORT)
        else:
            print(
                "You can now go to <your hostname.local>:%d to drive your car."
                % cfg.WEB_CONTROL_PORT)
    elif isinstance(ctr, JoystickController):
        print("You can now move your joystick to drive your car.")
        #tell the controller about the tub
        ctr.set_tub(tub)

        if cfg.BUTTON_PRESS_NEW_TUB:

            def new_tub_dir():
                V.parts.pop()
                # NOTE(review): this `tub` is local to new_tub_dir (no
                # `nonlocal`), so the enclosing `tub` name is not rebound —
                # only the part list and the controller see the new writer.
                tub = th.new_tub_writer(inputs=inputs,
                                        types=types,
                                        user_meta=meta)
                V.add(tub,
                      inputs=inputs,
                      outputs=["tub/num_records"],
                      run_condition='recording')
                ctr.set_tub(tub)

            ctr.set_button_down_trigger('cross', new_tub_dir)
        ctr.print_controls()

    # Launch the requested background worker before entering the drive loop.
    if script_mode == 'train':
        _thread.start_new_thread(
            observe_and_learn,
            (cfg, model_path, vae_path, auto_mode, env_type))
        _thread.start_new_thread(save_reconstruction, (cfg, ))
    elif script_mode == 'drive':
        _thread.start_new_thread(drive_model,
                                 (cfg, model_path, vae_path, env_type))
    elif script_mode == 'optimize':
        _thread.start_new_thread(
            optimize_model, (cfg, model_path, vae_path, auto_mode, env_type))
    elif script_mode == 'train_vae':
        print('collecting data for vae training...')

    #run the vehicle for 20 seconds
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
def drive(cfg, model_path=None, meta=None):
    '''
    Construct and run a path-following vehicle: a RealSense T265 provides
    odometry for waypoint navigation with a PID pilot, a D435i provides the
    camera image, and a joystick plus a planner web UI provide manual control
    and visualization.

    :param cfg:        donkeycar config object; cfg.RS_PATH_PLANNER must be
                       truthy or this function raises.
    :param model_path: optional Keras model file; when given, the model also
                       writes 'pilot/angle'/'pilot/throttle'.
    :param meta:       optional list of tub metadata entries (defaults to an
                       empty list).
    :raises Exception: when cfg.RS_PATH_PLANNER is false.
    '''
    # Fix: the original signature used the mutable default `meta=[]`, which
    # is shared across calls; normalize a None default to a fresh list here.
    if meta is None:
        meta = []

    model_type = cfg.DEFAULT_MODEL_TYPE

    #Initialize car
    V = dk.vehicle.Vehicle()

    if cfg.RS_PATH_PLANNER:
        from donkeycar.parts.realsense2 import RS_T265
        print("RS t265 with path tracking")
        odometry = RS_T265(path_output=True,
                           stereo_output=False,
                           image_output=False)
        V.add(odometry, outputs=['rs/trans', 'rs/yaw'], threaded=True)
        # Convert raw translation/yaw into planar pose coordinates.
        V.add(PosStream(),
              inputs=['rs/trans', 'rs/yaw'],
              outputs=['pos/x', 'pos/y', 'pos/yaw'])
    else:
        raise Exception("This script is for RS_PATH_PLANNER only")

    from donkeycar.parts.realsense2 import RS_D435i
    cam = RS_D435i(img_type=cfg.D435_IMG_TYPE,
                   image_w=cfg.D435_IMAGE_W,
                   image_h=cfg.D435_IMAGE_H,
                   frame_rate=cfg.D435_FRAME_RATE)
    V.add(cam, inputs=[], outputs=['cam/image_array'], threaded=True)

    ctr = get_js_controller(cfg)
    V.add(ctr,
          inputs=['null'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    #This web controller will create a web server
    web_ctr = LocalWebControllerPlanner()
    V.add(web_ctr,
          inputs=[
              'map/image', 'cam/image_array', 'user/mode', 'recording',
              'throttle', 'angle'
          ],
          outputs=['web/angle', 'web/throttle', 'web/mode', 'web/recording'],  # no part consumes this
          threaded=True)

    class UserCondition:
        # Boolean gate: True only in manual ('user') mode.
        def run(self, mode):
            if mode == 'user':
                return True
            else:
                return False

    V.add(UserCondition(), inputs=['user/mode'], outputs=['run_user'])

    class PilotCondition:
        # Boolean gate: True whenever the pilot should run (non-user mode).
        def run(self, mode):
            if mode == 'user':
                return False
            else:
                return True

    V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])

    myRoute = Route(min_dist=cfg.PATH_MIN_DIST)
    V.add(myRoute,
          inputs=['pos/x', 'pos/y'],
          outputs=['nav/path', 'nav/waypoints'],
          threaded=True)

    if os.path.exists(cfg.RS_ROUTE_FILE):
        myRoute.load(cfg.RS_ROUTE_FILE)
        print("loaded route:", cfg.RS_ROUTE_FILE)

    # Joystick buttons for managing the recorded route.
    def save_waypt():
        myRoute.save_waypoint()

    ctr.set_button_down_trigger(cfg.SAVE_WPT_BTN, save_waypt)

    def save_route():
        myRoute.save_route(cfg.RS_ROUTE_FILE)

    ctr.set_button_down_trigger(cfg.SAVE_ROUTE_BTN, save_route)

    def clear_route():
        myRoute.clear()

    ctr.set_button_down_trigger(cfg.CLEAR_ROUTE_BTN, clear_route)

    # Map rendering: fresh image each frame with waypoint, path, and pose
    # overlays drawn onto 'map/image'.
    img = PImage(resolution=(cfg.D435_IMAGE_W, cfg.D435_IMAGE_H),
                 clear_each_frame=True)
    V.add(img, outputs=['map/image'])

    plot = WaypointPlot(scale=cfg.PATH_SCALE,
                        offset=(cfg.D435_IMAGE_W // 4, cfg.D435_IMAGE_H // 2),
                        color=(0, 0, 255))
    V.add(plot,
          inputs=['map/image', 'nav/waypoints'],
          outputs=['map/image'],
          threaded=True)

    plot = PathPlot(scale=cfg.PATH_SCALE,
                    offset=(cfg.D435_IMAGE_W // 4, cfg.D435_IMAGE_H // 2),
                    color=(0, 0, 255))
    V.add(plot,
          inputs=['map/image', 'nav/path'],
          outputs=['map/image'],
          threaded=True)

    # this is error function for PID control
    nav = Navigator(wpt_reach_tolerance=cfg.WPT_TOLERANCE)
    V.add(nav,
          inputs=['nav/waypoints', 'pos/x', 'pos/y', 'pos/yaw'],
          outputs=['nav/shouldRun', 'nav/error'],
          threaded=True,
          run_condition="run_pilot")
    ctr.set_button_down_trigger("left_shoulder", nav.decrease_target)
    ctr.set_button_down_trigger("right_shoulder", nav.increase_target)

    pid = PIDController(p=cfg.PID_P, i=cfg.PID_I, d=cfg.PID_D, debug=False)
    pilot = PID_Pilot(pid, cfg.PID_THROTTLE)
    V.add(pilot,
          inputs=['nav/shouldRun', 'nav/error'],
          outputs=['pilot/angle', 'pilot/throttle'],
          run_condition="run_pilot")

    pos_plot = PlotPose(scale=cfg.PATH_SCALE,
                        offset=(cfg.D435_IMAGE_W // 4, cfg.D435_IMAGE_H // 2))
    V.add(pos_plot,
          inputs=['map/image', 'pos/x', 'pos/y', 'pos/yaw'],
          outputs=['map/image'])

    if model_path:

        def load_model(kl, model_path):
            # Time the (potentially slow) Keras model load.
            start = time.time()
            print('loading model', model_path)
            kl.load(model_path)
            print('finished loading in %s sec.' % (str(time.time() - start)))

        #When we have a model, first create an appropriate Keras part
        kl = dk.utils.get_model_by_type(model_type, cfg)
        load_model(kl, model_path)
        outputs = ['pilot/angle', 'pilot/throttle']
        # NOTE(review): this part is added after the PID pilot with the same
        # run_condition and outputs, so its values overwrite the PID outputs.
        V.add(kl,
              inputs=['cam/image_array'],
              outputs=outputs,
              run_condition='run_pilot')

    #Choose what inputs should change the car.
    class DriveMode:
        def run(self, mode, user_angle, user_throttle, pilot_angle,
                pilot_throttle):
            if mode == 'user':
                return user_angle, user_throttle
            elif mode == 'local_angle':
                return pilot_angle, user_throttle
            else:
                return pilot_angle, pilot_throttle * cfg.AI_THROTTLE_MULT

    V.add(DriveMode(),
          inputs=[
              'user/mode', 'user/angle', 'user/throttle', 'pilot/angle',
              'pilot/throttle'
          ],
          outputs=['angle', 'throttle'])

    # Drive train: PWM steering servo and ESC throttle on a PCA9685 board.
    from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle

    steering_controller = PCA9685(cfg.STEERING_CHANNEL,
                                  cfg.PCA9685_I2C_ADDR,
                                  busnum=cfg.PCA9685_I2C_BUSNUM)
    steering = PWMSteering(controller=steering_controller,
                           left_pulse=cfg.STEERING_LEFT_PWM,
                           right_pulse=cfg.STEERING_RIGHT_PWM)

    throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL,
                                  cfg.PCA9685_I2C_ADDR,
                                  busnum=cfg.PCA9685_I2C_BUSNUM)
    throttle = PWMThrottle(controller=throttle_controller,
                           max_pulse=cfg.THROTTLE_FORWARD_PWM,
                           zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                           min_pulse=cfg.THROTTLE_REVERSE_PWM)

    V.add(steering, inputs=['angle'])
    V.add(throttle, inputs=['throttle'])

    # data collection
    inputs = ['cam/image_array', 'user/angle', 'user/throttle', 'user/mode']
    types = ['image_array', 'float', 'float', 'str']

    def get_record_alert_color(num_records):
        # Returns the color of the highest threshold in
        # cfg.RECORD_ALERT_COLOR_ARR that num_records has reached.
        col = (0, 0, 0)
        for count, color in cfg.RECORD_ALERT_COLOR_ARR:
            if num_records >= count:
                col = color
        return col

    class RecordTracker:
        # Prints record-count progress and emits an alert color for a few
        # loops each time a REC_COUNT_ALERT multiple is crossed.
        def __init__(self):
            self.last_num_rec_print = 0
            self.dur_alert = 0
            self.force_alert = 0

        def run(self, num_records):
            if num_records is None:
                return 0

            if self.last_num_rec_print != num_records or self.force_alert:
                self.last_num_rec_print = num_records

                if num_records % 10 == 0:
                    print("recorded", num_records, "records")

                if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert:
                    self.dur_alert = num_records // cfg.REC_COUNT_ALERT * cfg.REC_COUNT_ALERT_CYC
                    self.force_alert = 0

            if self.dur_alert > 0:
                self.dur_alert -= 1

            if self.dur_alert != 0:
                return get_record_alert_color(num_records)

            return 0

    V.add(RecordTracker(),
          inputs=["tub/num_records"],
          outputs=['records/alert'])

    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
    V.add(tub,
          inputs=inputs,
          outputs=["tub/num_records"],
          run_condition='recording')

    #tell the controller about the tub
    ctr.set_tub(tub)
    ctr.print_controls()

    #run the vehicle
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
def drive(cfg, model_path=None, use_joystick=False, model_type=None, camera_type='single', meta=[]):
    '''
    Assemble and run the robotic vehicle.

    Each part runs as a job in the Vehicle loop, calling either its run
    or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely
    manner. Parts may have named outputs and inputs. The framework
    handles passing named outputs to parts requesting the same named
    input.

    Args:
        cfg: loaded config object; every cfg.* attribute read below must exist.
        model_path: optional path to a trained pilot model (.h5/.uff/.tflite/.pkl or .json).
        use_joystick: force joystick control instead of the web controller.
        model_type: pilot model type; defaults from cfg when None.
        camera_type: 'single' or 'stereo'.
        meta: extra user metadata recorded into the tub.
            NOTE(review): mutable default argument ([]) — shared across calls.
            It is only read here (passed to new_tub_writer), but confirm
            new_tub_writer does not mutate it; a None default would be safer.

    Blocks until the vehicle loop exits (V.start at the end).
    '''
    if cfg.DONKEY_GYM:
        #the simulator will use cuda and then we usually run out of resources
        #if we also try to use cuda. so disable for donkey_gym.
        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

    # Pick a model type from config when the caller did not specify one.
    if model_type is None:
        if cfg.TRAIN_LOCALIZER:
            model_type = "localizer"
        elif cfg.TRAIN_BEHAVIORS:
            model_type = "behavior"
        else:
            model_type = cfg.DEFAULT_MODEL_TYPE

    #Initialize car
    V = dk.vehicle.Vehicle()

    # --- camera setup -----------------------------------------------------
    if camera_type == "stereo":
        # Two physical cameras combined into one image by StereoPair.
        if cfg.CAMERA_TYPE == "WEBCAM":
            from donkeycar.parts.camera import Webcam
            camA = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam=0)
            camB = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam=1)
        elif cfg.CAMERA_TYPE == "CVCAM":
            from donkeycar.parts.cv import CvCam
            camA = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam=0)
            camB = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam=1)
        else:
            raise (Exception("Unsupported camera type: %s" % cfg.CAMERA_TYPE))

        V.add(camA, outputs=['cam/image_array_a'], threaded=True)
        V.add(camB, outputs=['cam/image_array_b'], threaded=True)

        from donkeycar.parts.image import StereoPair
        V.add(StereoPair(), inputs=['cam/image_array_a', 'cam/image_array_b'], outputs=['cam/image_array'])
    else:
        print("cfg.CAMERA_TYPE", cfg.CAMERA_TYPE)
        # NOTE(review): this import and the print above are duplicated just
        # below; harmless (imports are idempotent) but looks like merge residue.
        if cfg.DONKEY_GYM:
            from donkeycar.parts.dgym import DonkeyGymEnv

        inputs = []
        threaded = True
        print("cfg.CAMERA_TYPE", cfg.CAMERA_TYPE)
        if cfg.DONKEY_GYM:
            from donkeycar.parts.dgym import DonkeyGymEnv
            # The gym part both renders the camera image and consumes the
            # control outputs, so it takes angle/throttle as inputs.
            cam = DonkeyGymEnv(cfg.DONKEY_SIM_PATH, env_name=cfg.DONKEY_GYM_ENV_NAME)
            threaded = True
            inputs = ['angle', 'throttle']
        elif cfg.CAMERA_TYPE == "PICAM":
            from donkeycar.parts.camera import PiCamera
            cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "WEBCAM":
            from donkeycar.parts.camera import Webcam
            cam = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "CVCAM":
            from donkeycar.parts.cv import CvCam
            cam = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "CSIC":
            from donkeycar.parts.camera import CSICamera
            cam = CSICamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE, gstreamer_flip=cfg.CSIC_CAM_GSTREAMER_FLIP_PARM)
        elif cfg.CAMERA_TYPE == "V4L":
            from donkeycar.parts.camera import V4LCamera
            cam = V4LCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE)
        elif cfg.CAMERA_TYPE == "MOCK":
            from donkeycar.parts.camera import MockCamera
            cam = MockCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
        else:
            raise (Exception("Unkown camera type: %s" % cfg.CAMERA_TYPE))

        V.add(cam, inputs=inputs, outputs=['cam/image_array'], threaded=threaded)

    # --- user controller (joystick or web) --------------------------------
    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        #modify max_throttle closer to 1.0 to have more power
        #modify steering_scale lower than 1.0 to have less responsive steering
        from donkeycar.parts.controller import get_js_controller
        ctr = get_js_controller(cfg)
        if cfg.USE_NETWORKED_JS:
            # Joystick events arrive over the network instead of a local device.
            from donkeycar.parts.controller import JoyStickSub
            netwkJs = JoyStickSub(cfg.NETWORK_JS_SERVER_IP)
            V.add(netwkJs, threaded=True)
            ctr.js = netwkJs
    else:
        #This web controller will create a web server that is capable
        #of managing steering, throttle, and modes, and more.
        ctr = LocalWebController()

    V.add(ctr, inputs=['cam/image_array'], outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'], threaded=True)

    #this throttle filter will allow one tap back for esc reverse
    th_filter = ThrottleFilter()
    V.add(th_filter, inputs=['user/throttle'], outputs=['user/throttle'])

    #See if we should even run the pilot module.
    #This is only needed because the part run_condition only accepts boolean
    class PilotCondition:
        # True in any autopilot mode, False when the human is driving.
        def run(self, mode):
            if mode == 'user':
                return False
            else:
                return True

    V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])

    class LedConditionLogic:
        '''Map vehicle state to an LED blink rate (uses the `led` part below).'''
        def __init__(self, cfg):
            self.cfg = cfg

        def run(self, mode, recording, recording_alert, behavior_state, model_file_changed, track_loc):
            #returns a blink rate. 0 for off. -1 for on. positive for rate.
            if track_loc is not None:
                led.set_rgb(*self.cfg.LOC_COLORS[track_loc])
                return -1

            if model_file_changed:
                led.set_rgb(self.cfg.MODEL_RELOADED_LED_R, self.cfg.MODEL_RELOADED_LED_G, self.cfg.MODEL_RELOADED_LED_B)
                return 0.1
            else:
                led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)

            if recording_alert:
                led.set_rgb(*recording_alert)
                return self.cfg.REC_COUNT_ALERT_BLINK_RATE
            else:
                led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)

            if behavior_state is not None and model_type == 'behavior':
                r, g, b = self.cfg.BEHAVIOR_LED_COLORS[behavior_state]
                led.set_rgb(r, g, b)
                return -1 #solid on

            if recording:
                return -1 #solid on
            elif mode == 'user':
                return 1
            elif mode == 'local_angle':
                return 0.5
            elif mode == 'local':
                return 0.1
            return 0

    if cfg.HAVE_RGB_LED and not cfg.DONKEY_GYM:
        from donkeycar.parts.led_status import RGB_LED
        # `led` is closed over by LedConditionLogic.run above.
        led = RGB_LED(cfg.LED_PIN_R, cfg.LED_PIN_G, cfg.LED_PIN_B, cfg.LED_INVERT)
        led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B)

        V.add(LedConditionLogic(cfg), inputs=['user/mode', 'recording', "records/alert", 'behavior/state', 'modelfile/modified', "pilot/loc"], outputs=['led/blink_rate'])
        V.add(led, inputs=['led/blink_rate'])

    def get_record_alert_color(num_records):
        # Last color whose threshold the record count has reached.
        col = (0, 0, 0)
        for count, color in cfg.RECORD_ALERT_COLOR_ARR:
            if num_records >= count:
                col = color
        return col

    class RecordTracker:
        '''Print record-count milestones and emit an alert color for the LED.'''
        def __init__(self):
            self.last_num_rec_print = 0
            self.dur_alert = 0
            self.force_alert = 0

        def run(self, num_records):
            if num_records is None:
                return 0

            if self.last_num_rec_print != num_records or self.force_alert:
                self.last_num_rec_print = num_records

                if num_records % 10 == 0:
                    print("recorded", num_records, "records")

                if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert:
                    # Alert duration grows with how many alert thresholds passed.
                    self.dur_alert = num_records // cfg.REC_COUNT_ALERT * cfg.REC_COUNT_ALERT_CYC
                    self.force_alert = 0

            if self.dur_alert > 0:
                self.dur_alert -= 1

            if self.dur_alert != 0:
                return get_record_alert_color(num_records)

            return 0

    rec_tracker_part = RecordTracker()
    V.add(rec_tracker_part, inputs=["tub/num_records"], outputs=['records/alert'])

    if cfg.AUTO_RECORD_ON_THROTTLE and isinstance(ctr, JoystickController):
        #then we are not using the circle button. hijack that to force a record count indication
        def show_record_acount_status():
            rec_tracker_part.last_num_rec_print = 0
            rec_tracker_part.force_alert = 1
        ctr.set_button_down_trigger('circle', show_record_acount_status)

    #Sombrero
    if cfg.HAVE_SOMBRERO:
        # Constructing the part is the side effect; `s` is intentionally unused.
        from donkeycar.parts.sombrero import Sombrero
        s = Sombrero()

    #IMU
    if cfg.HAVE_IMU:
        from donkeycar.parts.imu import Mpu6050
        imu = Mpu6050()
        V.add(imu, outputs=['imu/acl_x', 'imu/acl_y', 'imu/acl_z', 'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'], threaded=True)

    class ImgPreProcess():
        '''
        preprocess camera image for inference.
        normalize and crop if needed.
        '''
        def __init__(self, cfg):
            self.cfg = cfg

        def run(self, img_arr):
            return normalize_and_crop(img_arr, self.cfg)

    # Coral models take the raw camera image; everything else gets the
    # normalized/cropped version produced by ImgPreProcess.
    if "coral" in model_type:
        inf_input = 'cam/image_array'
    else:
        inf_input = 'cam/normalized/cropped'
        V.add(ImgPreProcess(cfg), inputs=['cam/image_array'], outputs=[inf_input], run_condition='run_pilot')

    #Behavioral state
    if cfg.TRAIN_BEHAVIORS:
        bh = BehaviorPart(cfg.BEHAVIOR_LIST)
        V.add(bh, outputs=['behavior/state', 'behavior/label', "behavior/one_hot_state_array"])
        try:
            ctr.set_button_down_trigger('L1', bh.increment_state)
        except:
            # Best-effort: web controller has no button triggers.
            pass

        inputs = [inf_input, "behavior/one_hot_state_array"]
    #IMU
    elif model_type == "imu":
        assert (cfg.HAVE_IMU)
        #Run the pilot if the mode is not user.
        inputs = [inf_input, 'imu/acl_x', 'imu/acl_y', 'imu/acl_z', 'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
    else:
        inputs = [inf_input]

    def load_model(kl, model_path):
        # Load a full model file into the Keras part, timing the load.
        start = time.time()
        print('loading model', model_path)
        kl.load(model_path)
        print('finished loading in %s sec.' % (str(time.time() - start)))

    def load_weights(kl, weights_path):
        # Load only the weights; errors are reported but not raised.
        start = time.time()
        try:
            print('loading model weights', weights_path)
            kl.model.load_weights(weights_path)
            print('finished loading in %s sec.' % (str(time.time() - start)))
        except Exception as e:
            print(e)
            print('ERR>> problems loading weights', weights_path)

    def load_model_json(kl, json_fnm):
        # Rebuild the model architecture from a Keras JSON description.
        start = time.time()
        print('loading model json', json_fnm)
        from tensorflow.python import keras
        try:
            with open(json_fnm, 'r') as handle:
                contents = handle.read()
                kl.model = keras.models.model_from_json(contents)
            print('finished loading json in %s sec.' % (str(time.time() - start)))
        except Exception as e:
            print(e)
            print("ERR>> problems loading model json", json_fnm)

    # --- pilot model ------------------------------------------------------
    if model_path:
        #When we have a model, first create an appropriate Keras part
        kl = dk.utils.get_model_by_type(model_type, cfg)

        model_reload_cb = None

        if '.h5' in model_path or '.uff' in model_path or 'tflite' in model_path or '.pkl' in model_path:
            #when we have a .h5 extension
            #load everything from the model file
            load_model(kl, model_path)

            def reload_model(filename):
                load_model(kl, filename)

            model_reload_cb = reload_model

        elif '.json' in model_path:
            #when we have a .json extension
            #load the model from there and look for a matching
            #.wts file with just weights
            load_model_json(kl, model_path)
            weights_path = model_path.replace('.json', '.weights')
            load_weights(kl, weights_path)

            def reload_weights(filename):
                weights_path = filename.replace('.json', '.weights')
                load_weights(kl, weights_path)

            model_reload_cb = reload_weights

        else:
            print("ERR>> Unknown extension type on model file!!")
            return

        #this part will signal visual LED, if connected
        V.add(FileWatcher(model_path, verbose=True), outputs=['modelfile/modified'])

        #these parts will reload the model file, but only when ai is running so we don't interrupt user driving
        V.add(FileWatcher(model_path), outputs=['modelfile/dirty'], run_condition="ai_running")
        V.add(DelayedTrigger(100), inputs=['modelfile/dirty'], outputs=['modelfile/reload'], run_condition="ai_running")
        V.add(TriggeredCallback(model_path, model_reload_cb), inputs=["modelfile/reload"], run_condition="ai_running")

        outputs = ['pilot/angle', 'pilot/throttle']

        if cfg.TRAIN_LOCALIZER:
            outputs.append("pilot/loc")

        V.add(kl, inputs=inputs, outputs=outputs, run_condition='run_pilot')

    #Choose what inputs should change the car.
    class DriveMode:
        # Route user vs. pilot controls into the final angle/throttle.
        def run(self, mode, user_angle, user_throttle, pilot_angle, pilot_throttle):
            if mode == 'user':
                return user_angle, user_throttle
            elif mode == 'local_angle':
                # Pilot steers, human controls throttle.
                return pilot_angle, user_throttle
            else:
                return pilot_angle, pilot_throttle * cfg.AI_THROTTLE_MULT

    V.add(DriveMode(), inputs=['user/mode', 'user/angle', 'user/throttle', 'pilot/angle', 'pilot/throttle'], outputs=['angle', 'throttle'])

    #to give the car a boost when starting ai mode in a race.
    aiLauncher = AiLaunch(cfg.AI_LAUNCH_DURATION, cfg.AI_LAUNCH_THROTTLE, cfg.AI_LAUNCH_KEEP_ENABLED)
    V.add(aiLauncher, inputs=['user/mode', 'throttle'], outputs=['throttle'])

    if isinstance(ctr, JoystickController):
        ctr.set_button_down_trigger(cfg.AI_LAUNCH_ENABLE_BUTTON, aiLauncher.enable_ai_launch)

    class AiRunCondition:
        '''
        A bool part to let us know when ai is running.
        '''
        def run(self, mode):
            if mode == "user":
                return False
            return True

    V.add(AiRunCondition(), inputs=['user/mode'], outputs=['ai_running'])

    #Ai Recording
    class AiRecordingCondition:
        '''
        return True when ai mode, otherwise respect user mode recording flag
        '''
        def run(self, mode, recording):
            if mode == 'user':
                return recording
            return True

    if cfg.RECORD_DURING_AI:
        V.add(AiRecordingCondition(), inputs=['user/mode', 'recording'], outputs=['recording'])

    #Drive train setup
    if cfg.DONKEY_GYM:
        # Simulator consumes angle/throttle directly; no physical actuators.
        pass
    elif cfg.DRIVE_TRAIN_TYPE == "SERVO_ESC":
        from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle

        steering_controller = PCA9685(cfg.STEERING_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
        steering = PWMSteering(controller=steering_controller, left_pulse=cfg.STEERING_LEFT_PWM, right_pulse=cfg.STEERING_RIGHT_PWM)

        throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
        throttle = PWMThrottle(controller=throttle_controller, max_pulse=cfg.THROTTLE_FORWARD_PWM, zero_pulse=cfg.THROTTLE_STOPPED_PWM, min_pulse=cfg.THROTTLE_REVERSE_PWM)

        V.add(steering, inputs=['angle'])
        V.add(throttle, inputs=['throttle'])

    elif cfg.DRIVE_TRAIN_TYPE == "DC_STEER_THROTTLE":
        from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM

        steering = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT, cfg.HBRIDGE_PIN_RIGHT)
        throttle = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)

        V.add(steering, inputs=['angle'])
        V.add(throttle, inputs=['throttle'])

    elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL":
        from donkeycar.parts.actuator import TwoWheelSteeringThrottle, Mini_HBridge_DC_Motor_PWM

        left_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT_FWD, cfg.HBRIDGE_PIN_LEFT_BWD)
        right_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_RIGHT_FWD, cfg.HBRIDGE_PIN_RIGHT_BWD)
        two_wheel_control = TwoWheelSteeringThrottle()

        V.add(two_wheel_control, inputs=['throttle', 'angle'], outputs=['left_motor_speed', 'right_motor_speed'])

        V.add(left_motor, inputs=['left_motor_speed'])
        V.add(right_motor, inputs=['right_motor_speed'])

    elif cfg.DRIVE_TRAIN_TYPE == "SKID_STEER":
        from donkeycar.parts.actuator import TwoWheelSteeringThrottle, SBC_PiMotor
        right_motor = SBC_PiMotor(1)
        left_motor = SBC_PiMotor(2)
        two_wheel_control = TwoWheelSteeringThrottle()

        V.add(two_wheel_control, inputs=['throttle', 'angle'], outputs=['left_motor_speed', 'right_motor_speed'])

        V.add(left_motor, inputs=['left_motor_speed'])
        V.add(right_motor, inputs=['right_motor_speed'])

    elif cfg.DRIVE_TRAIN_TYPE == "SERVO_HBRIDGE_PWM":
        from donkeycar.parts.actuator import ServoBlaster, PWMSteering
        steering_controller = ServoBlaster(cfg.STEERING_CHANNEL) #really pin
        #PWM pulse values should be in the range of 100 to 200
        assert (cfg.STEERING_LEFT_PWM <= 200)
        assert (cfg.STEERING_RIGHT_PWM <= 200)
        steering = PWMSteering(controller=steering_controller, left_pulse=cfg.STEERING_LEFT_PWM, right_pulse=cfg.STEERING_RIGHT_PWM)

        from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
        motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)

        V.add(steering, inputs=['angle'])
        V.add(motor, inputs=["throttle"])

    #add tub to save data
    inputs = ['cam/image_array', 'user/angle', 'user/throttle', 'user/mode']
    types = ['image_array', 'float', 'float', 'str']

    if cfg.TRAIN_BEHAVIORS:
        inputs += ['behavior/state', 'behavior/label', "behavior/one_hot_state_array"]
        types += ['int', 'str', 'vector']

    if cfg.HAVE_IMU:
        inputs += ['imu/acl_x', 'imu/acl_y', 'imu/acl_z', 'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
        types += ['float', 'float', 'float', 'float', 'float', 'float']

    if cfg.RECORD_DURING_AI:
        inputs += ['pilot/angle', 'pilot/throttle']
        types += ['float', 'float']

    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
    V.add(tub, inputs=inputs, outputs=["tub/num_records"], run_condition='recording')

    if cfg.PUB_CAMERA_IMAGES:
        # Publish JPEG-encoded camera frames over TCP.
        from donkeycar.parts.network import TCPServeValue
        from donkeycar.parts.image import ImgArrToJpg
        pub = TCPServeValue("camera")
        V.add(ImgArrToJpg(), inputs=['cam/image_array'], outputs=['jpg/bin'])
        V.add(pub, inputs=['jpg/bin'])

    if type(ctr) is LocalWebController:
        print("You can now go to <your pis hostname.local>:8887 to drive your car.")
    elif isinstance(ctr, JoystickController):
        print("You can now move your joystick to drive your car.")

    #tell the controller about the tub
    ctr.set_tub(tub)

    if cfg.BUTTON_PRESS_NEW_TUB:
        # Button handler: drop the current tub part and start a fresh tub dir.
        # NOTE(review): V.parts.pop() assumes the tub is the last part added
        # when the button fires — verify no part is added after this point.
        def new_tub_dir():
            V.parts.pop()
            tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
            V.add(tub, inputs=inputs, outputs=["tub/num_records"], run_condition='recording')
            ctr.set_tub(tub)
        ctr.set_button_down_trigger('cross', new_tub_dir)

    ctr.print_controls()

    #run the vehicle (blocks until cfg.MAX_LOOPS iterations or shutdown)
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
def drive(cfg, model_path=None, use_joystick=False):
    '''
    Assemble and run the robotic vehicle (L298N H-bridge variant).

    Each part runs as a job in the Vehicle loop, calling either its run
    or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely
    manner. Parts may have named outputs and inputs. The framework
    handles passing named outputs to parts requesting the same named
    input.

    Args:
        cfg: loaded config object.
        model_path: optional path to a trained KerasCategorical pilot model.
        use_joystick: force joystick control instead of the web controller.

    Blocks until the vehicle loop exits (V.start below).
    '''
    #Initialize car
    V = dk.vehicle.Vehicle()
    cam = PiCamera(resolution=cfg.CAMERA_RESOLUTION)
    cam.camera.rotation = 180 #flip camera
    V.add(cam, outputs=['cam/image_array'], threaded=True)

    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        #modify max_throttle closer to 1.0 to have more power
        #modify steering_scale lower than 1.0 to have less responsive steering
        ctr = JoystickController(max_throttle=cfg.JOYSTICK_MAX_THROTTLE,
                                 steering_scale=cfg.JOYSTICK_STEERING_SCALE,
                                 auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
    else:
        #This web controller will create a web server that is capable
        #of managing steering, throttle, and modes, and more.
        ctr = LocalWebController()

    V.add(ctr, inputs=['cam/image_array'], outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'], threaded=True)

    #See if we should even run the pilot module.
    #This is only needed because the part run_condition only accepts boolean
    def pilot_condition(mode):
        # True in autopilot modes, False when the human is driving.
        if mode == 'user':
            return False
        else:
            return True

    pilot_condition_part = Lambda(pilot_condition)
    V.add(pilot_condition_part, inputs=['user/mode'], outputs=['run_pilot'])

    #Run the pilot if the mode is not user.
    kl = KerasCategorical()
    if model_path:
        kl.load(model_path)

    V.add(kl, inputs=['cam/image_array'], outputs=['pilot/angle', 'pilot/throttle'], run_condition='run_pilot')

    #Choose what inputs should change the car.
    def drive_mode(mode, user_angle, user_throttle, pilot_angle, pilot_throttle):
        # Route user vs. pilot controls into the final angle/throttle.
        if mode == 'user':
            return user_angle, user_throttle
        elif mode == 'local_angle':
            # Pilot steers, human controls throttle.
            return pilot_angle, user_throttle
        else:
            return pilot_angle, pilot_throttle

    drive_mode_part = Lambda(drive_mode)
    V.add(drive_mode_part,
          inputs=['user/mode', 'user/angle', 'user/throttle', 'pilot/angle', 'pilot/throttle'],
          outputs=['angle', 'throttle'])

    # --- hardware setup: RPi GPIO + PCA9685 PWM driving L298N H-bridges ---
    # NOTE(review): GPIO.cleanup() is commented out and never called on exit;
    # pins stay configured after the loop ends — confirm this is intended.
    #GPIO.clean_up()
    GPIO.setmode(GPIO.BOARD)
    PCA9685_pwm = Adafruit_PCA9685.PCA9685()
    PCA9685_pwm.set_pwm_freq(cfg.MOTFREQ)
    # give the PWM chip a moment to settle before driving outputs
    time.sleep(1)

    throttle_controller = PCA9685L298N(cfg.THROTTLE_CHANNEL, PCA9685_pwm)
    throttle = PWMThrottleL298N(controller=throttle_controller,
                                max_pulse=cfg.THROTTLE_FORWARD_PWM,
                                zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                                min_pulse=cfg.THROTTLE_REVERSE_PWM,
                                pin1=cfg.THROTIN1, pin2=cfg.THROTIN2, io=GPIO)

    steering_controller = PCA9685L298N(cfg.STEERING_CHANNEL, PCA9685_pwm)
    steering = PWMSteeringL298N(controller=steering_controller,
                                left_pulse=cfg.STEERING_LEFT_PWM,
                                right_pulse=cfg.STEERING_RIGHT_PWM,
                                pin1=cfg.STEERIN1, pin2=cfg.STEERIN2, io=GPIO)

    V.add(steering, inputs=['angle'])
    V.add(throttle, inputs=['throttle'])

    #add tub to save data
    inputs = ['cam/image_array', 'user/angle', 'user/throttle', 'user/mode']
    types = ['image_array', 'float', 'float', 'str']

    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types)
    V.add(tub, inputs=inputs, run_condition='recording')

    #run the vehicle (blocks until cfg.MAX_LOOPS iterations or shutdown)
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)

    # Only reached after the vehicle loop exits.
    print("You can now go to <your pi ip address>:8887 to drive your car.")