def _fire_task(self):
    """Advance the current task to its next section and publish it.

    When no section remains, mark the task FINISHED, archive it, command
    a full stop on the ECU, and report completion.
    """
    section_task = self._current_task.move_next()
    rospy.loginfo("fire_task {} ".format(section_task))
    if not section_task:
        # No sections left: finish the task, archive it, and halt the vehicle.
        self._current_task.set_state(TaskState.FINISHED)
        self._history_task.append(self._current_task)
        halt = ECU()
        halt.motor = 0
        halt.servo = 0
        halt.shift = 0
        self._ecu_pub.publish(halt)
        self.report_finished()
        self._current_task = None
        return
    # Dispatch the section to the publisher matching its concrete type.
    if isinstance(section_task, ReplaySectionTask):
        self._replay_pub.publish(section_task.to_replay_task())
    elif isinstance(section_task, LaneSectionTask):
        self._lane_pub.publish(section_task.to_lane_task())
    elif isinstance(section_task, TrjSectionTask):
        self._trajectory_pub.publish(section_task.to_trj_task())
        rospy.loginfo("publish trj section")
    elif isinstance(section_task, SiteSectionTask):
        self._site_pub.publish(section_task.to_site_task())
        rospy.loginfo("publish site section")
    elif isinstance(section_task, LidarSectionTask):
        self._global_path_pub.publish(section_task.make_global_path())
        rospy.loginfo("[LidarSectionTask]: publish global path")
    # Record when this section started executing.
    section_task.start_stamp = time.time()
def instruction_to_ecu(instruction):
    """Build an ECU message from an instruction dict.

    :type instruction dict
    :return: ECU message with shift/servo/motor taken from *instruction*
    """
    ecu = ECU()
    # Copy each field across with its expected numeric type.
    for field, cast in (('shift', int), ('servo', float), ('motor', float)):
        setattr(ecu, field, cast(instruction[field]))
    return ecu
def stop_lane_cb(self, stop_msg):
    """Kill the running lane task when a stop message for its section arrives.

    Clears the task flag, reports the killed state, and commands a full
    stop on the ECU. Messages for other sections (or with no task running)
    are ignored.
    """
    # Ignore the stop request unless it targets the task currently running.
    if not (self.task and stop_msg.sectionId == self.task.sectionId):
        return
    rospy.loginfo("stop_lane_cb")
    self.is_in_task.clear()
    self.task = None
    self.task_state.header.stamp = rospy.Time.now()
    self.task_state.state = 0x31  # killed
    self.task_reporter.publish(self.task_state)
    self.task_state = None
    # Command a full stop.
    stop_ecu = ECU()
    stop_ecu.shift = 0
    stop_ecu.servo = 0
    stop_ecu.motor = 0
    self.ecu_pub.publish(stop_ecu)
def shutdown(self):
    """Shut the module down.

    If a task is still running, drop it and command a full stop on the
    ECU; in all cases publish a MODULE_ERROR health report.

    :return: None
    """
    if self.task:
        self.task = None
        # Bring the vehicle to a stop before going down.
        stop = ECU()
        stop.servo = 0
        stop.motor = 0
        stop.shift = 0
        self.ecu_pub.publish(stop)
    # Report this module as errored/offline.
    report = ModuleHealth()
    report.requestId = -1
    report.state = ModuleHealth.MODULE_ERROR
    report.moduleType = self.moduleType
    report.moduleName = rospy.get_name()
    self.module_reporter.publish(report)
def picture_cb(self, message):
    """Process one camera frame and steer the vehicle along the detected lane.

    Reads the JPEG bytes out of the shared-memory block described by
    *message*, runs the lane-segmentation network, fits a quadratic to the
    per-row lane centres, converts the resulting errors to a cross-track
    error via the fuzzy converter, runs it through the PID, and publishes
    the steering command on the ECU topic.

    :param message: shared-memory frame descriptor
    :type message Picture
    :return: None
    """
    # Exponentially filter the lane bounds only once both bound lists have
    # been seeded by a previous frame; otherwise append raw detections.
    filter_flag = 0
    if len(self.last_left_bound) > 0 and len(self.last_right_bound) > 0:
        filter_flag = 1
    if self.is_in_task.is_set():
        # NOTE(review): this compares self.shm_filename against
        # message.dev_name — two differently named fields (device
        # /dev/video1 vs shm_filename /dev/shm/ss_file_name). Confirm this
        # comparison is intentional.
        if self.shm_filename != message.dev_name:
            return
        mat = None
        try:
            # Map the shared-memory file read-only and copy out one frame's
            # worth of JPEG bytes at the offset given in the message.
            with open(message.shm_filename, "r+") as f:
                with contextlib.closing(mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)) as m:
                    m.seek(message.offset)
                    mat = m.read(message.block_size)
                    # print self.is_valid_jpg(m)
        except Exception, e:
            # Best-effort: silently drop the frame if shared memory cannot
            # be read.
            return
        if mat:
            # Decode the JPEG bytes into a BGR image.
            # NOTE(review): np.fromstring is deprecated in modern NumPy in
            # favour of np.frombuffer.
            nparr = np.fromstring(mat, np.uint8)
            frame = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
            # Round-trip through a temp file to validate the JPEG before use;
            # flag a warning and drop the frame if it is corrupt.
            cv2.imwrite('/tmp/temp.jpg', frame)
            if not self.is_valid_jpg('/tmp/temp.jpg'):
                self.state = ModuleHealth.MODULE_WARNING
                return
            # Undistort with the calibrated camera model, then resize to the
            # network's input resolution.
            frame = cv2.undistort(frame, self.lane_detection_client.camera_matrix,
                                  self.lane_detection_client.distortion_coefficients, None,
                                  self.lane_detection_client.camera_matrix)
            frame = cv2.resize(frame, (self.lane_detection_client.img_w, self.lane_detection_client.img_h))
            # Normalise to [0, 1] float32 and run the segmentation network on
            # a batch of one image.
            images = []
            image = frame / 255.0
            image = image.astype(np.float32)
            images.append(image)
            mask_prob = self.lane_detection_client.session.run(
                self.lane_detection_client.output_tensor,
                feed_dict={self.lane_detection_client.input_tensor: images})
            # Scan only the bottom `area_blocks` fraction of the rows. For
            # each row, find the first lane pixel from the left and from the
            # right (pixel is "lane" when class-1 probability exceeds
            # class-0), low-pass filtering the bounds across frames with
            # `filter_ratio` once seeded.
            points_x = []
            points_y = []
            for i in range(int(self.lane_detection_client.img_h * (1 - self.lane_detection_client.area_blocks)),
                           self.lane_detection_client.img_h):
                points_x.append(i * 1.0)
                temp_y = 0
                # Left bound: scan columns left-to-right.
                for j in range(0, self.lane_detection_client.img_w):
                    if mask_prob[0][i][j][0] < mask_prob[0][i][j][1]:
                        ratio = self.lane_detection_client.filter_ratio
                        index = i - int(
                            self.lane_detection_client.img_h * (1 - self.lane_detection_client.area_blocks))
                        if filter_flag == 0:
                            self.last_left_bound.append(j)
                        else:
                            self.last_left_bound[index] = self.last_left_bound[index] * ratio + j * (1 - ratio)
                        temp_y = temp_y + self.last_left_bound[index]
                        cv2.circle(frame,
                                   (int(self.last_left_bound[index]), i), 3, (0, 0, 255), 1)
                        break
                # Right bound: scan columns right-to-left.
                for j in range(self.lane_detection_client.img_w - 1, -1, -1):
                    if mask_prob[0][i][j][0] < mask_prob[0][i][j][1]:
                        ratio = self.lane_detection_client.filter_ratio
                        index = i - int(
                            self.lane_detection_client.img_h * (1 - self.lane_detection_client.area_blocks))
                        if filter_flag == 0:
                            self.last_right_bound.append(j)
                        else:
                            self.last_right_bound[index] = self.last_right_bound[index] * ratio + j * (1 - ratio)
                        temp_y = temp_y + self.last_right_bound[index]
                        cv2.circle(frame, (int(self.last_right_bound[index]), i), 3, (0, 0, 255), 1)
                        break
                # Lane centre for this row = midpoint of the two bounds
                # (0 if neither bound was found on this row).
                points_y.append(temp_y / 2.0)
            # Fit a quadratic row -> centre-column through the per-row centres.
            fit_vals = dict()
            x = np.array(points_x)
            y = np.array(points_y)
            fit_cr = np.polyfit(x, y, 2)
            fit_vals['a'] = fit_cr[0]
            fit_vals['b'] = fit_cr[1]
            fit_vals['x0'] = fit_cr[2]
            # Draw the fitted centre line onto the debug view.
            for i in range(int(self.lane_detection_client.img_h * (1 - self.lane_detection_client.area_blocks)),
                           self.lane_detection_client.img_h):
                cv2.circle(frame, (int(fit_vals['a'] * i * i + fit_vals['b'] * i + fit_vals['x0']), i), 1,
                           (0, 255, 0), 1)
            cv2.imshow("result", frame)
            cv2.waitKey(1)
            # 1, vision info: lateral offset from image centre and heading
            # angle, both evaluated at the mid-row of the scanned band.
            y_pix = (self.lane_detection_client.img_h * (
                    1 - self.lane_detection_client.area_blocks) + self.lane_detection_client.img_h) / 2.0
            error_dis = self.lane_detection_client.img_w // 2 - (
                    fit_vals['a'] * y_pix * y_pix + fit_vals['b'] * y_pix + fit_vals['x0'])
            tan_value = 2.0 * fit_vals['a'] * y_pix + fit_vals['b']
            error_angle = np.arctan2(tan_value, 1) / np.pi * 180
            # 2, fuzzy convert process: fold angle and distance errors into a
            # single cross-track-error level.
            cte = self.lane_detection_client.fuzzyconvert.cal_cte_level(error_angle, error_dis)
            # 3, control process: PID on the cross-track error gives steering.
            self.lane_detection_client.pid.UpdateError(cte)
            steer_value = self.lane_detection_client.pid.TotalError()
            # 4, send ecu — the task flag is checked again here, since it may
            # have been cleared while this frame was being processed.
            if self.is_in_task.is_set():
                ecu = ECU()
                ecu.motor = self.speed
                ecu.servo = steer_value
                ecu.shift = self.direction
                self.ecu_pub.publish(ecu)