def next_optimized_waypoint(self, cur_pos, cur_rot, pos_path, cur_pos_index):
    """
    Returns the index of the furthest position on the path that can be aimed
    at without an obstacle in the way. Stops at the first position where the
    laser with the nearest angle (laser angle ~= aimed position angle)
    detects an obstacle (laser distance < aimed position distance).

    :param cur_pos: current position of the robot
    :type cur_pos: Vector
    :param cur_rot: current orientation of the robot
    :type cur_rot: Quaternion
    :param pos_path: loaded path
    :type pos_path: list
    :param cur_pos_index: index of the current position in the path (-1
    :type cur_pos_index: int
    """
    lasers_angles = self.get_laser_scan_angles()
    lasers = self.get_laser_scan()['Echoes']
    max_lookahead_index = min(cur_pos_index + self.__max_lookahead - 1,
                              len(pos_path) - 1)
    # Go through every position on the path, starting at the position next to
    # the current one and stopping at max_lookahead_index (inclusive, so the
    # returned index is always obstacle-checked).
    for i in range(cur_pos_index + 1, max_lookahead_index + 1):
        tar_pos = pos_path[i]
        # convert the candidate aimed position to RCS
        rcs_tar_pos = pure_pursuit.convert_to_rcs(tar_pos, cur_pos, cur_rot)
        # current robot position in RCS
        rcs_origin = Vector(0, 0, 0)
        # compute the angle between the current robot position and the aimed position
        tar_angle = rcs_origin.get_angle(rcs_tar_pos)
        min_ind = 0
        min_dist = abs(tar_angle - lasers_angles[0])
        # search for the nearest angle in lasers_angles; the absolute
        # difference is what matters here, otherwise the most negative
        # difference would always win. This linear scan could be replaced
        # by a direct index computation.
        for j in range(1, len(lasers_angles)):
            dist = abs(tar_angle - lasers_angles[j])
            if dist < min_dist:
                min_dist = dist
                min_ind = j
        # if the laser hits an obstacle before the aimed position, return the
        # index of the previous position on the path
        if lasers[min_ind] < cur_pos.distance_to(tar_pos):
            return i - 1
    return max_lookahead_index
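# --- Usage sketch (illustrative, not from the original source) ---
# A minimal pure-pursuit loop around next_optimized_waypoint(); `robot`,
# `steer_towards` and the waypoint-reached threshold are assumptions made
# for illustration only.
cur_index = 0
while cur_index < len(pos_path) - 1:
    cur_pos, cur_rot = robot.get_pos_and_orientation()
    # Aim at the furthest obstacle-free waypoint within the lookahead window.
    target_index = robot.next_optimized_waypoint(cur_pos, cur_rot,
                                                 pos_path, cur_index)
    robot.steer_towards(pos_path[target_index])  # hypothetical helper
    # Advance along the path once the next waypoint is close enough;
    # the 0.5 m threshold is an arbitrary example value.
    if cur_pos.distance_to(pos_path[cur_index + 1]) < 0.5:
        cur_index += 1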
import math

import numpy as np


class WrappedVector(CopyOnWriteVector):

    def __init__(self, vector):
        assert isinstance(vector, Vector)
        self._impl = vector

    @classmethod
    def from_instance(cls, instance):
        return cls(instance._impl)

    def to_numpy(self, ensure_copy=False):
        # np.float has been removed from NumPy; use the concrete float64 dtype
        result = np.frombuffer(self._impl, dtype=np.float64)
        if ensure_copy:
            result = result.copy()
        return result

    def _copy_data(self):
        self._impl = Vector(self._impl)

    def _scal(self, alpha):
        self._impl.scal(alpha)

    def _axpy(self, alpha, x):
        self._impl.axpy(alpha, x._impl)

    def dot(self, other):
        return self._impl.dot(other._impl)

    def l1_norm(self):
        raise NotImplementedError

    def l2_norm(self):
        return math.sqrt(self.dot(self))

    def l2_norm2(self):
        return self.dot(self)

    def sup_norm(self):
        raise NotImplementedError

    def dofs(self, dof_indices):
        raise NotImplementedError

    def amax(self):
        raise NotImplementedError
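# --- Usage sketch (illustrative, not from the original source) ---
# Assumes the backend Vector(dim, value) constructor seen in zero_vector()
# below; the norm identities follow directly from the definitions above.
v = WrappedVector(Vector(4, 2.0))    # 4-dimensional vector, all entries 2.0
w = WrappedVector.from_instance(v)   # shares the data until one side writes
print(v.dot(w))      # 4 * 2.0 * 2.0 = 16.0
print(v.l2_norm())   # sqrt(v.dot(v)) = 4.0
print(v.l2_norm2())  # l2_norm() squared, computed without the sqrt = 16.0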
def test_vectors(self):
    v1 = Vector((1, 2, 1, 2), 1, 1)
    self._event.add_vector(v1, ('test', 0.55))
    self.assertEqual(self._event.vectors,
                     [{
                         'bbox': (2, 4, 1, 2),
                         'probability': 0.55,
                         'label': 'test'
                     }],
                     'Vectors are invalid')
def get_pos(self):
    """
    Reads the current position from the MRDS and returns it as a Vector
    instance.
    """
    self.__mrds.request('GET', '/lokarria/localization')
    response = self.__mrds.getresponse()
    if response.status == 200:
        pos_data = json.loads(response.read())
        response.close()
        return Vector.from_dict(pos_data['Pose']['Position'])
    else:
        raise self.UnexpectedResponse(response)
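# --- Usage sketch (illustrative, not from the original source) ---
# Hypothetical polling loop; `Controller` as the class owning get_pos() and
# the 0.1 s polling interval are assumptions. UnexpectedResponse is the
# exception class raised by get_pos() itself.
import time

controller = Controller()
try:
    while True:
        pos = controller.get_pos()
        print('current position:', pos)
        time.sleep(0.1)
except controller.UnexpectedResponse as err:
    print('MRDS returned an unexpected response:', err)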
def positionPath(self, dict=False):
    """
    Parses the positions in the loaded file into a list of either Vector
    instances or dictionaries, depending on the value of the dict parameter.

    :param dict: if set to True, will parse into dictionaries instead of
        into Vector instances
    :type dict: bool
    """
    if dict:
        return [{
            'X': p['Pose']['Position']['X'],
            'Y': p['Pose']['Position']['Y'],
            'Z': p['Pose']['Position']['Z']
        } for p in self.path]
    else:
        return [Vector.from_dict(p['Pose']['Position']) for p in self.path]
def get_pos_and_orientation(self):
    """
    Reads the current position and orientation from the MRDS server and
    returns it as a tuple (Vector, Quaternion).
    """
    self.__mrds.request('GET', '/lokarria/localization')
    response = self.__mrds.getresponse()
    if response.status == 200:
        pos_data = json.loads(response.read())
        response.close()
        return (Vector.from_dict(pos_data['Pose']['Position']),
                Quaternion.from_dict(pos_data['Pose']['Orientation']))
    else:
        raise self.UnexpectedResponse(response)
def orientationPath(self, dict=False):
    """
    Parses the orientations in the loaded file into a list of either
    Quaternion instances or dictionaries, depending on the value of the dict
    parameter.

    :param dict: if set to True, will parse into dictionaries instead of
        into Quaternion instances
    :type dict: bool
    """
    if dict:
        return [{
            'W': p['Pose']['Orientation']['W'],
            'X': p['Pose']['Orientation']['X'],
            'Y': p['Pose']['Orientation']['Y'],
            'Z': p['Pose']['Orientation']['Z']
        } for p in self.path]
    else:
        return [
            Quaternion(p['Pose']['Orientation']['W'],
                       Vector(p['Pose']['Orientation']['X'],
                              p['Pose']['Orientation']['Y'],
                              p['Pose']['Orientation']['Z']))
            for p in self.path
        ]
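# --- Usage sketch (illustrative, not from the original source) ---
# Assumes a hypothetical `PathLoader` class that populates self.path from a
# recorded path file before positionPath()/orientationPath() are called;
# the class name and file name are assumptions.
loader = PathLoader('recorded_path.json')
positions = loader.positionPath()        # list of Vector
orientations = loader.orientationPath()  # list of Quaternion
for pos, rot in zip(positions, orientations):
    print(pos, rot)

# The dict variants keep the raw MRDS key layout, e.g. for re-serialization:
raw = loader.positionPath(dict=True)     # [{'X': ..., 'Y': ..., 'Z': ...}, ...]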
def apply_one_vector(u):
    # Helper closing over self: applies the wrapped operator to a single
    # wrapped vector and returns the result as a backend Vector.
    v = Vector(self.range.dim, 0)
    self.op.apply(u._impl, v)
    return v
def zero_vector(self):
    return WrappedVector(Vector(self.dim, 0))
def setUp(self):
    self._vector = Vector((1, 2, 1, 2), 1, 1)
def greengrass_infinite_infer_run():
    # jpeg is read by the streaming thread, so declare it global up front
    global jpeg
    try:
        # TODO: Parameterize model name
        model_name = 'deploy_ssd_resnet50_300'
        model_path = '/opt/awscam/artifacts/mxnet_{}_FP16_FUSED.xml'.format(
            model_name)
        model_type = 'ssd'
        # Shape the model was trained on
        input_width = 300
        input_height = 300
        with open('docs/synset.txt', 'r') as _f:
            # Map 1-based class ids to label names, stripping trailing newlines
            labels = {i: line.rstrip() for i, line in enumerate(_f, 1)}
        client.publish(topic=iotTopic, payload='Labels loaded')
        # Start thread that writes annotated frames to disk
        results_thread = FIFO_Thread()
        results_thread.start()
        # Load the optimized MXNet model onto the GPU
        model = awscam.Model(model_path, {'GPU': 1})
        client.publish(topic=iotTopic, payload='Model loaded')
        while True:
            ret, frame = awscam.getLastFrame()
            if not ret:
                raise Exception('Failed to get frame from the stream')
            inferred_obj = InferEvent(frame, model_name, model_type)
            frameResize = cv2.resize(frame, (input_width, input_height))
            # Run inference on the resized frame
            output = model.doInference(frameResize)
            parsed_results = model.parseResult(model_type, output)['ssd']
            # Document every detection above the confidence threshold
            for obj in parsed_results:
                if obj['prob'] > 0.5:
                    vector = Vector(
                        (obj['xmin'], obj['xmax'], obj['ymin'], obj['ymax']),
                        inferred_obj.xscale, inferred_obj.yscale)
                    inferred_obj.add_vector(vector,
                                            (labels[obj['label']],
                                             obj['prob']))
                    cv2.rectangle(frame, (vector.xmin, vector.ymin),
                                  (vector.xmax, vector.ymax),
                                  (255, 165, 20), 4)
                    cv2.putText(
                        frame,
                        '{}: {:.2f}'.format(labels[obj['label']],
                                            obj['prob']),
                        (vector.xmin, vector.ymin - 15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 165, 20), 4)
            # Trigger the event pipeline
            client.publish(topic=iotTopic, payload=str(inferred_obj))
            # Update the output frame served to clients
            ret, jpeg = cv2.imencode('.jpg', frame)
    except Exception as e:
        client.publish(topic=iotTopic, payload=str(e))
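# --- Module boilerplate sketch (assumption, mirroring the common AWS
# DeepLens samples; not confirmed by the original source) ---
# The published DeepLens sample lambdas typically start the inference loop
# at module load and expose a no-op Greengrass handler, since all the work
# happens inside greengrass_infinite_infer_run().
greengrass_infinite_infer_run()


def function_handler(event, context):
    return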